path: root/common/recipes-kernel/linux/files/0511-drm-amd-dal-Adding-amdgpu_dm-for-dal-v2.patch
Diffstat (limited to 'common/recipes-kernel/linux/files/0511-drm-amd-dal-Adding-amdgpu_dm-for-dal-v2.patch')
-rw-r--r--	common/recipes-kernel/linux/files/0511-drm-amd-dal-Adding-amdgpu_dm-for-dal-v2.patch	6016
1 file changed, 0 insertions(+), 6016 deletions(-)
diff --git a/common/recipes-kernel/linux/files/0511-drm-amd-dal-Adding-amdgpu_dm-for-dal-v2.patch b/common/recipes-kernel/linux/files/0511-drm-amd-dal-Adding-amdgpu_dm-for-dal-v2.patch
deleted file mode 100644
index c709f6e8..00000000
--- a/common/recipes-kernel/linux/files/0511-drm-amd-dal-Adding-amdgpu_dm-for-dal-v2.patch
+++ /dev/null
@@ -1,6016 +0,0 @@
-From a04ef2511da8e6d563253dd70f193979ca0ebb81 Mon Sep 17 00:00:00 2001
-From: Harry Wentland <harry.wentland@amd.com>
-Date: Wed, 25 Nov 2015 14:48:16 -0500
-Subject: [PATCH 0511/1110] drm/amd/dal: Adding amdgpu_dm for dal (v2)
-
-v2: agd: fix for API changes in kernel 4.6
-
-Signed-off-by: Harry Wentland <harry.wentland@amd.com>
-Acked-by: Alex Deucher <alexander.deucher@amd.com>
----
- drivers/gpu/drm/amd/dal/amdgpu_dm/Makefile | 17 +
- .../drm/amd/dal/amdgpu_dm/amdgpu_dal_services.c | 251 ++
- .../gpu/drm/amd/dal/amdgpu_dm/amdgpu_dc_helpers.c | 350 +++
- drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.c | 1318 +++++++++++
- drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.h | 166 ++
- drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.c | 814 +++++++
- drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.h | 122 +
- .../drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.c | 353 +++
- .../drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.h | 36 +
- .../gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.c | 2390 ++++++++++++++++++++
- .../gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.h | 96 +
- 11 files changed, 5913 insertions(+)
- create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/Makefile
- create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dal_services.c
- create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dc_helpers.c
- create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.c
- create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.h
- create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.c
- create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.h
- create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.c
- create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.h
- create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.c
- create mode 100644 drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.h
-
-diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/dal/amdgpu_dm/Makefile
-new file mode 100644
-index 0000000..65ad370
---- /dev/null
-+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/Makefile
-@@ -0,0 +1,17 @@
-+#
-+# Makefile for the 'dm' sub-component of DAL.
-+# It provides the control and status of dm blocks.
-+
-+AMDGPUDM = amdgpu_dm_types.o amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o
-+
-+ifneq ($(CONFIG_DRM_AMD_DAL),)
-+AMDGPUDM += amdgpu_dal_services.o amdgpu_dc_helpers.o
-+endif
-+
-+subdir-ccflags-y += -I$(FULL_AMD_DAL_PATH)/dc
-+
-+AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
-+
-+AMD_DAL_FILES += $(AMDGPU_DM)
-diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dal_services.c b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dal_services.c
-new file mode 100644
-index 0000000..a497093
---- /dev/null
-+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dal_services.c
-@@ -0,0 +1,251 @@
-+/*
-+ * Copyright 2015 Advanced Micro Devices, Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: AMD
-+ *
-+ */
-+
-+#include <linux/string.h>
-+#include <linux/acpi.h>
-+
-+#include <drm/drmP.h>
-+#include <drm/drm_crtc_helper.h>
-+#include <drm/amdgpu_drm.h>
-+
-+#include "amdgpu.h"
-+#include "dal_services.h"
-+#include "amdgpu_dm.h"
-+#include "amdgpu_dm_irq.h"
-+#include "amdgpu_dm_types.h"
-+#include "amdgpu_pm.h"
-+
-+/*
-+#include "logger_interface.h"
-+#include "acpimethod_atif.h"
-+#include "amdgpu_powerplay.h"
-+#include "amdgpu_notifications.h"
-+*/
-+
-+/* Allocate zero-initialized memory. Returns NULL on failure. */
-+void *dc_service_alloc(struct dc_context *ctx, uint32_t size)
-+{
-+ return kzalloc(size, GFP_KERNEL);
-+}
-+
-+/* Reallocate memory. The contents will remain unchanged. */
-+void *dc_service_realloc(struct dc_context *ctx, const void *ptr, uint32_t size)
-+{
-+ return krealloc(ptr, size, GFP_KERNEL);
-+}
-+
-+void dc_service_memmove(void *dst, const void *src, uint32_t size)
-+{
-+ memmove(dst, src, size);
-+}
-+
-+void dc_service_free(struct dc_context *ctx, void *p)
-+{
-+ kfree(p);
-+}
-+
-+void dc_service_memset(void *p, int32_t c, uint32_t count)
-+{
-+ memset(p, c, count);
-+}
-+
-+int32_t dal_memcmp(const void *p1, const void *p2, uint32_t count)
-+{
-+ return memcmp(p1, p2, count);
-+}
-+
-+int32_t dal_strncmp(const int8_t *p1, const int8_t *p2, uint32_t count)
-+{
-+ return strncmp(p1, p2, count);
-+}
-+
-+void dc_service_sleep_in_milliseconds(struct dc_context *ctx, uint32_t milliseconds)
-+{
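-+ /* msleep() rounds up to jiffy granularity, so for short delays
-+ * usleep_range() is more accurate (see the kernel's
-+ * Documentation/timers/timers-howto). */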
-+ if (milliseconds >= 20)
-+ msleep(milliseconds);
-+ else
-+ usleep_range(milliseconds * 1000, milliseconds * 1000 + 1);
-+}
-+
-+void dal_delay_in_nanoseconds(uint32_t nanoseconds)
-+{
-+ ndelay(nanoseconds);
-+}
-+
-+void dc_service_delay_in_microseconds(struct dc_context *ctx, uint32_t microseconds)
-+{
-+ udelay(microseconds);
-+}
-+
-+/******************************************************************************
-+ * IRQ Interfaces.
-+ *****************************************************************************/
-+
-+void dal_register_timer_interrupt(
-+ struct dc_context *ctx,
-+ struct dc_timer_interrupt_params *int_params,
-+ interrupt_handler ih,
-+ void *args)
-+{
-+ struct amdgpu_device *adev = ctx->driver_context;
-+
-+ if (!adev || !int_params) {
-+ DRM_ERROR("DM_IRQ: invalid input!\n");
-+ return;
-+ }
-+
-+ if (int_params->int_context != INTERRUPT_LOW_IRQ_CONTEXT) {
-+ /* only low irq ctx is supported. */
-+ DRM_ERROR("DM_IRQ: invalid context: %d!\n",
-+ int_params->int_context);
-+ return;
-+ }
-+
-+ amdgpu_dm_irq_register_timer(adev, int_params, ih, args);
-+}
-+
-+void dal_isr_acquire_lock(struct dc_context *ctx)
-+{
-+ /*TODO*/
-+}
-+
-+void dal_isr_release_lock(struct dc_context *ctx)
-+{
-+ /*TODO*/
-+}
-+
-+/******************************************************************************
-+ * End-of-IRQ Interfaces.
-+ *****************************************************************************/
-+
-+bool dal_get_platform_info(struct dc_context *ctx,
-+ struct platform_info_params *params)
-+{
-+ /*TODO*/
-+ return false;
-+}
-+
-+/* Next calls are to power component */
-+bool dc_service_pp_pre_dce_clock_change(struct dc_context *ctx,
-+ struct dal_to_power_info *input,
-+ struct power_to_dal_info *output)
-+{
-+ /*TODO*/
-+ return false;
-+}
-+
-+bool dc_service_pp_post_dce_clock_change(struct dc_context *ctx,
-+ const struct dc_pp_display_configuration *pp_display_cfg)
-+{
-+#ifdef CONFIG_DRM_AMD_POWERPLAY
-+ struct amdgpu_device *adev = ctx->driver_context;
-+
-+ if (adev->pm.dpm_enabled) {
-+
-+ memset(&adev->pm.pm_display_cfg, 0,
-+ sizeof(adev->pm.pm_display_cfg));
-+
-+ adev->pm.pm_display_cfg.cpu_cc6_disable =
-+ pp_display_cfg->cpu_cc6_disable;
-+
-+ adev->pm.pm_display_cfg.cpu_pstate_disable =
-+ pp_display_cfg->cpu_pstate_disable;
-+
-+ adev->pm.pm_display_cfg.cpu_pstate_separation_time =
-+ pp_display_cfg->cpu_pstate_separation_time;
-+
-+ adev->pm.pm_display_cfg.nb_pstate_switch_disable =
-+ pp_display_cfg->nb_pstate_switch_disable;
-+
-+ amd_powerplay_display_configuration_change(
-+ adev->powerplay.pp_handle,
-+ &adev->pm.pm_display_cfg);
-+
-+ /* TODO: replace by a separate call to 'apply display cfg'? */
-+ amdgpu_pm_compute_clocks(adev);
-+ }
-+ return true;
-+#else
-+ return false;
-+#endif
-+}
-+
-+bool dc_service_get_system_clocks_range(struct dc_context *ctx,
-+ struct dal_system_clock_range *sys_clks)
-+{
-+ struct amdgpu_device *adev = ctx->driver_context;
-+
-+ /* Default values, in case PPLib is not compiled-in. */
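-+ /* Clock values here follow the driver's dpm convention
-+ * (10 kHz units), so 80000 corresponds to 800 MHz. */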
-+ sys_clks->max_mclk = 80000;
-+ sys_clks->min_mclk = 80000;
-+
-+ sys_clks->max_sclk = 60000;
-+ sys_clks->min_sclk = 30000;
-+
-+#ifdef CONFIG_DRM_AMD_POWERPLAY
-+ if (adev->pm.dpm_enabled) {
-+ sys_clks->max_mclk = amdgpu_dpm_get_mclk(adev, false);
-+ sys_clks->min_mclk = amdgpu_dpm_get_mclk(adev, true);
-+
-+ sys_clks->max_sclk = amdgpu_dpm_get_sclk(adev, false);
-+ sys_clks->min_sclk = amdgpu_dpm_get_sclk(adev, true);
-+ }
-+#endif
-+
-+ return true;
-+}
-+
-+
-+bool dc_service_pp_set_display_clock(struct dc_context *ctx,
-+ struct dal_to_power_dclk *dclk)
-+{
-+ /* TODO: need power component to provide appropriate interface */
-+ return false;
-+}
-+
-+/* end of calls to power component */
-+
-+/* Calls to notification */
-+
-+void dal_notify_setmode_complete(struct dc_context *ctx,
-+ uint32_t h_total,
-+ uint32_t v_total,
-+ uint32_t h_active,
-+ uint32_t v_active,
-+ uint32_t pix_clk_in_khz)
-+{
-+ /*TODO*/
-+}
-+/* End of calls to notification */
-+
-+long dal_get_pid(void)
-+{
-+ return current->pid;
-+}
-+
-+long dal_get_tgid(void)
-+{
-+ return current->tgid;
-+}
-diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dc_helpers.c b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dc_helpers.c
-new file mode 100644
-index 0000000..beaef70
---- /dev/null
-+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dc_helpers.c
-@@ -0,0 +1,350 @@
-+/*
-+ * Copyright 2015 Advanced Micro Devices, Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: AMD
-+ *
-+ */
-+
-+#include <linux/string.h>
-+#include <linux/acpi.h>
-+
-+#include <drm/drmP.h>
-+#include <drm/drm_crtc_helper.h>
-+#include <drm/amdgpu_drm.h>
-+#include <drm/drm_edid.h>
-+
-+#include "dc_types.h"
-+#include "amdgpu.h"
-+#include "dc.h"
-+#include "dc_services.h"
-+
-+#include "amdgpu_dm.h"
-+#include "amdgpu_dm_irq.h"
-+#include "amdgpu_dm_types.h"
-+
-+/* dc_helpers_parse_edid_caps
-+ *
-+ * Parse edid caps
-+ *
-+ * @edid: [in] pointer to edid
-+ * @edid_caps: [out] pointer to edid caps to be filled in
-+ *
-+ * @return
-+ * enum dc_edid_status, EDID_OK on success
-+ */
-+enum dc_edid_status dc_helpers_parse_edid_caps(
-+ struct dc_context *ctx,
-+ const struct dc_edid *edid,
-+ struct dc_edid_caps *edid_caps)
-+{
-+ struct edid *edid_buf;
-+ struct cea_sad *sads;
-+ int sad_count = -1;
-+ int sadb_count = -1;
-+ int i = 0;
-+ int j = 0;
-+ uint8_t *sadb = NULL;
-+
-+ enum dc_edid_status result = EDID_OK;
-+
-+ if (!edid_caps || !edid)
-+ return EDID_BAD_INPUT;
-+
-+ edid_buf = (struct edid *) edid->raw_edid;
-+
-+ if (!drm_edid_is_valid(edid_buf))
-+ result = EDID_BAD_CHECKSUM;
-+
-+ edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
-+ ((uint16_t) edid_buf->mfg_id[1])<<8;
-+ edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
-+ ((uint16_t) edid_buf->prod_code[1])<<8;
-+ edid_caps->serial_number = edid_buf->serial;
-+ edid_caps->manufacture_week = edid_buf->mfg_week;
-+ edid_caps->manufacture_year = edid_buf->mfg_year;
-+
-+ /* One of the four detailed_timings may store the monitor name
-+ * (descriptor type 0xFC). The name is at most 13 bytes and is
-+ * terminated early by a '\n'. */
-+ for (i = 0; i < 4; i++) {
-+ if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
-+ while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
-+ if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
-+ break;
-+
-+ edid_caps->display_name[j] =
-+ edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
-+ j++;
-+ }
-+ }
-+ }
-+
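-+ /* drm_edid_to_sad() allocates the SAD array on success;
-+ * it is kfree()d at the end of this function. */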
-+ sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
-+ if (sad_count <= 0) {
-+ DRM_INFO("SAD count is %d, nothing to read\n",
-+ sad_count);
-+ return result;
-+ }
-+
-+ edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
-+ for (i = 0; i < edid_caps->audio_mode_count; ++i) {
-+ struct cea_sad *sad = &sads[i];
-+
-+ edid_caps->audio_modes[i].format_code = sad->format;
-+ edid_caps->audio_modes[i].channel_count = sad->channels;
-+ edid_caps->audio_modes[i].sample_rate = sad->freq;
-+ edid_caps->audio_modes[i].sample_size = sad->byte2;
-+ }
-+
-+ sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);
-+
-+ if (sadb_count < 0) {
-+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
-+ sadb_count = 0;
-+ }
-+
-+ if (sadb_count)
-+ edid_caps->speaker_flags = sadb[0];
-+ else
-+ edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
-+
-+ kfree(sads);
-+ kfree(sadb);
-+
-+ return result;
-+}
-+
-+
-+static struct amdgpu_connector *get_connector_for_sink(
-+ struct drm_device *dev,
-+ const struct dc_sink *sink)
-+{
-+ struct drm_connector *connector;
-+ struct amdgpu_connector *aconnector;
-+
-+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+ aconnector = to_amdgpu_connector(connector);
-+ if (aconnector->dc_sink == sink)
-+ return aconnector;
-+ }
-+
-+ /* No connector currently drives this sink. */
-+ return NULL;
-+}
-+
-+static struct amdgpu_connector *get_connector_for_link(
-+ struct drm_device *dev,
-+ const struct dc_link *link)
-+{
-+ struct drm_connector *connector;
-+ struct amdgpu_connector *aconnector;
-+
-+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+ aconnector = to_amdgpu_connector(connector);
-+ if (aconnector->dc_link == link)
-+ return aconnector;
-+ }
-+
-+ /* No connector is attached to this link. */
-+ return NULL;
-+}
-+
-+/*
-+ * Writes payload allocation table in immediate downstream device.
-+ */
-+bool dc_helpers_dp_mst_write_payload_allocation_table(
-+ struct dc_context *ctx,
-+ const struct dc_sink *sink,
-+ struct dp_mst_stream_allocation *alloc_entity,
-+ bool enable)
-+{
-+ struct amdgpu_device *adev = ctx->driver_context;
-+ struct drm_device *dev = adev->ddev;
-+ struct amdgpu_connector *aconnector;
-+ struct drm_crtc *crtc;
-+ struct drm_dp_mst_topology_mgr *mst_mgr;
-+ struct drm_dp_mst_port *mst_port;
-+ int slots = 0;
-+ bool ret;
-+ int clock;
-+ int bpp;
-+ int pbn = 0;
-+
-+ aconnector = get_connector_for_sink(dev, sink);
-+
-+ if (!aconnector || !aconnector->mst_port)
-+ return false;
-+
-+ crtc = aconnector->base.state->crtc;
-+
-+ mst_mgr = &aconnector->mst_port->mst_mgr;
-+ mst_port = aconnector->port;
-+
-+ if (enable) {
-+ clock = crtc->state->mode.clock;
-+ /* TODO remove following hardcode value */
-+ bpp = 30;
-+
-+ /* TODO need to know link rate */
-+
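-+ /* PBN (payload bandwidth number) is the DP MST unit of link
-+ * bandwidth; the topology manager converts it into VCPI
-+ * timeslots below. */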
-+ pbn = drm_dp_calc_pbn_mode(clock, bpp);
-+
-+ ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, &slots);
-+
-+ if (!ret)
-+ return false;
-+
-+ } else {
-+ drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
-+ }
-+
-+ alloc_entity->slot_count = slots;
-+ alloc_entity->pbn = pbn;
-+ alloc_entity->pbn_per_slot = mst_mgr->pbn_div;
-+
-+ ret = drm_dp_update_payload_part1(mst_mgr);
-+ if (ret)
-+ return false;
-+
-+ return true;
-+}
-+
-+/*
-+ * Polls until the sink has handled the ACT (allocation change
-+ * trigger) for the new payload allocation.
-+ */
-+bool dc_helpers_dp_mst_poll_for_allocation_change_trigger(
-+ struct dc_context *ctx,
-+ const struct dc_sink *sink)
-+{
-+ struct amdgpu_device *adev = ctx->driver_context;
-+ struct drm_device *dev = adev->ddev;
-+ struct amdgpu_connector *aconnector;
-+ struct drm_dp_mst_topology_mgr *mst_mgr;
-+ int ret;
-+
-+ aconnector = get_connector_for_sink(dev, sink);
-+
-+ if (!aconnector || !aconnector->mst_port)
-+ return false;
-+
-+ mst_mgr = &aconnector->mst_port->mst_mgr;
-+
-+ ret = drm_dp_check_act_status(mst_mgr);
-+
-+ if (ret)
-+ return false;
-+
-+ return true;
-+}
-+
-+bool dc_helpers_dp_mst_send_payload_allocation(
-+ struct dc_context *ctx,
-+ const struct dc_sink *sink,
-+ bool enable)
-+{
-+ struct amdgpu_device *adev = ctx->driver_context;
-+ struct drm_device *dev = adev->ddev;
-+ struct amdgpu_connector *aconnector;
-+ struct drm_dp_mst_topology_mgr *mst_mgr;
-+ struct drm_dp_mst_port *mst_port;
-+ int ret;
-+
-+ aconnector = get_connector_for_sink(dev, sink);
-+
-+ if (!aconnector || !aconnector->mst_port)
-+ return false;
-+
-+ mst_port = aconnector->port;
-+
-+ mst_mgr = &aconnector->mst_port->mst_mgr;
-+
-+ ret = drm_dp_update_payload_part2(mst_mgr);
-+
-+ if (ret)
-+ return false;
-+
-+ if (!enable)
-+ drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
-+
-+ return true;
-+}
-+
-+void dc_helpers_dp_mst_handle_mst_hpd_rx_irq(void *param)
-+{
-+ uint8_t esi[8] = { 0 };
-+ uint8_t dret;
-+ bool new_irq_handled = true;
-+ struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
-+
-+ /* DPCD 0x2002 - 0x2008 for downstream IRQ from MST, eDP etc. */
-+ dret = drm_dp_dpcd_read(
-+ &aconnector->dm_dp_aux.aux,
-+ DP_SINK_COUNT_ESI, esi, 8);
-+
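-+ /* Keep servicing as long as the 8-byte ESI read succeeds and
-+ * the sink keeps reporting newly handled IRQs. */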
-+ while ((dret == 8) && new_irq_handled) {
-+ uint8_t retry;
-+
-+ DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
-+
-+ /* handle HPD short pulse irq */
-+ drm_dp_mst_hpd_irq(&aconnector->mst_mgr, esi, &new_irq_handled);
-+
-+ if (new_irq_handled) {
-+ /* ACK at DPCD to notify downstream */
-+ for (retry = 0; retry < 3; retry++) {
-+ uint8_t wret;
-+
-+ wret = drm_dp_dpcd_write(
-+ &aconnector->dm_dp_aux.aux,
-+ DP_SINK_COUNT_ESI + 1,
-+ &esi[1],
-+ 3);
-+ if (wret == 3)
-+ break;
-+ }
-+
-+ /* check if there is a new irq to be handled */
-+ dret = drm_dp_dpcd_read(
-+ &aconnector->dm_dp_aux.aux,
-+ DP_SINK_COUNT_ESI, esi, 8);
-+ }
-+ }
-+}
-+
-+bool dc_helpers_dp_mst_start_top_mgr(
-+ struct dc_context *ctx,
-+ const struct dc_link *link)
-+{
-+ struct amdgpu_device *adev = ctx->driver_context;
-+ struct drm_device *dev = adev->ddev;
-+ struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
-+
-+ if (aconnector)
-+ drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
-+
-+ return true;
-+}
-+
-+void dc_helpers_dp_mst_stop_top_mgr(
-+ struct dc_context *ctx,
-+ const struct dc_link *link)
-+{
-+ struct amdgpu_device *adev = ctx->driver_context;
-+ struct drm_device *dev = adev->ddev;
-+ struct amdgpu_connector *aconnector = get_connector_for_link(dev, link);
-+
-+ if (aconnector)
-+ drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
-+}
-diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.c
-new file mode 100644
-index 0000000..37810ff
---- /dev/null
-+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.c
-@@ -0,0 +1,1318 @@
-+/*
-+ * Copyright 2015 Advanced Micro Devices, Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: AMD
-+ *
-+ */
-+
-+#include "dal_services_types.h"
-+#include "dc.h"
-+
-+#include "vid.h"
-+#include "amdgpu.h"
-+#include "atom.h"
-+#include "amdgpu_dm.h"
-+#include "amdgpu_dm_types.h"
-+
-+#include "amd_shared.h"
-+#include "amdgpu_dm_irq.h"
-+#include "dc_helpers.h"
-+
-+#include "dce/dce_11_0_d.h"
-+#include "dce/dce_11_0_sh_mask.h"
-+#include "dce/dce_11_0_enum.h"
-+#include "ivsrcid/ivsrcid_vislands30.h"
-+
-+#include "oss/oss_3_0_d.h"
-+#include "oss/oss_3_0_sh_mask.h"
-+#include "gmc/gmc_8_1_d.h"
-+#include "gmc/gmc_8_1_sh_mask.h"
-+
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+
-+#include <drm/drm_atomic.h>
-+#include <drm/drm_atomic_helper.h>
-+#include <drm/drm_dp_mst_helper.h>
-+
-+/* TODO: Remove when the mc access workaround is removed */
-+static const u32 crtc_offsets[] =
-+{
-+ CRTC0_REGISTER_OFFSET,
-+ CRTC1_REGISTER_OFFSET,
-+ CRTC2_REGISTER_OFFSET,
-+ CRTC3_REGISTER_OFFSET,
-+ CRTC4_REGISTER_OFFSET,
-+ CRTC5_REGISTER_OFFSET,
-+ CRTC6_REGISTER_OFFSET
-+};
-+/* TODO: End of the section to remove with the mc access workaround */
-+
-+/* Feature-enable parameters passed to DAL.
-+ * Disable ALL of them for HDMI light-up.
-+ * TODO: follow up on whether this mechanism is needed. */
-+struct dal_override_parameters display_param = {
-+ .bool_param_enable_mask = 0,
-+ .bool_param_values = 0,
-+ .int_param_values[DAL_PARAM_MAX_COFUNC_NON_DP_DISPLAYS] = DAL_PARAM_INVALID_INT,
-+ .int_param_values[DAL_PARAM_DRR_SUPPORT] = DAL_PARAM_INVALID_INT,
-+};
-+
-+/* Debug facilities */
-+#define AMDGPU_DM_NOT_IMPL(fmt, ...) \
-+ DRM_INFO("DM_NOT_IMPL: " fmt, ##__VA_ARGS__)
-+
-+/*
-+ * dm_vblank_get_counter
-+ *
-+ * @brief
-+ * Get counter for number of vertical blanks
-+ *
-+ * @param
-+ * struct amdgpu_device *adev - [in] desired amdgpu device
-+ * int crtc - [in] which CRTC to get the counter from
-+ *
-+ * @return
-+ * Counter for vertical blanks
-+ */
-+static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
-+{
-+ if (crtc >= adev->mode_info.num_crtc)
-+ return 0;
-+ else {
-+ struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
-+
-+ if (NULL == acrtc->target) {
-+ DRM_ERROR("dc_target is NULL for crtc '%d'!\n", crtc);
-+ return 0;
-+ }
-+
-+ return dc_target_get_vblank_counter(acrtc->target);
-+ }
-+}
-+
-+static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
-+ u32 *vbl, u32 *position)
-+{
-+ if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
-+ return -EINVAL;
-+
-+/* TODO: #DAL3 Implement scanoutpos
-+ dal_get_crtc_scanoutpos(adev->dm.dal, crtc, vbl, position);
-+*/
-+ return 0;
-+}
-+
-+static u32 dm_hpd_get_gpio_reg(struct amdgpu_device *adev)
-+{
-+ return mmDC_GPIO_HPD_A;
-+}
-+
-+
-+static bool dm_is_display_hung(struct amdgpu_device *adev)
-+{
-+ /* TODO: #DAL3 need to replace
-+ u32 crtc_hung = 0;
-+ u32 i, j, tmp;
-+
-+ crtc_hung = dal_get_connected_targets_vector(adev->dm.dal);
-+
-+ for (j = 0; j < 10; j++) {
-+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
-+ if (crtc_hung & (1 << i)) {
-+ int32_t vpos1, hpos1;
-+ int32_t vpos2, hpos2;
-+
-+ tmp = dal_get_crtc_scanoutpos(
-+ adev->dm.dal,
-+ i,
-+ &vpos1,
-+ &hpos1);
-+ udelay(10);
-+ tmp = dal_get_crtc_scanoutpos(
-+ adev->dm.dal,
-+ i,
-+ &vpos2,
-+ &hpos2);
-+
-+ if (hpos1 != hpos2 && vpos1 != vpos2)
-+ crtc_hung &= ~(1 << i);
-+ }
-+ }
-+
-+ if (crtc_hung == 0)
-+ return false;
-+ }
-+*/
-+ return true;
-+}
-+
-+/* TODO: Remove the mc access workaround */
-+static void dm_stop_mc_access(struct amdgpu_device *adev,
-+ struct amdgpu_mode_mc_save *save)
-+{
-+
-+ u32 crtc_enabled, tmp;
-+ int i;
-+
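-+ /* Save the VGA state and blank every active CRTC so display
-+ * scanout cannot race with memory-controller reprogramming. */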
-+ save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
-+ save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-+
-+ /* disable VGA render */
-+ tmp = RREG32(mmVGA_RENDER_CONTROL);
-+ tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
-+ WREG32(mmVGA_RENDER_CONTROL, tmp);
-+
-+ /* blank the display controllers */
-+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
-+ crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
-+ CRTC_CONTROL, CRTC_MASTER_EN);
-+ if (crtc_enabled) {
-+#if 0
-+ u32 frame_count;
-+ int j;
-+
-+ save->crtc_enabled[i] = true;
-+ tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
-+ if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
-+ amdgpu_display_vblank_wait(adev, i);
-+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
-+ tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
-+ WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
-+ }
-+ /* wait for the next frame */
-+ frame_count = amdgpu_display_vblank_get_counter(adev, i);
-+ for (j = 0; j < adev->usec_timeout; j++) {
-+ if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
-+ break;
-+ udelay(1);
-+ }
-+ tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-+ if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
-+ tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
-+ WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
-+ }
-+ tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]);
-+ if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
-+ tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
-+ WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
-+ }
-+#else
-+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
-+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
-+ tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
-+ tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
-+ WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
-+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
-+ save->crtc_enabled[i] = false;
-+ /* ***** */
-+#endif
-+ } else {
-+ save->crtc_enabled[i] = false;
-+ }
-+ }
-+}
-+
-+
-+static void dm_resume_mc_access(struct amdgpu_device *adev,
-+ struct amdgpu_mode_mc_save *save)
-+{
-+
-+ u32 tmp, frame_count;
-+ int i, j;
-+
-+ /* update crtc base addresses */
-+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
-+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
-+ upper_32_bits(adev->mc.vram_start));
-+ WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
-+ upper_32_bits(adev->mc.vram_start));
-+ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
-+ (u32)adev->mc.vram_start);
-+ WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
-+ (u32)adev->mc.vram_start);
-+
-+ if (save->crtc_enabled[i]) {
-+ tmp = RREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i]);
-+ if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
-+ tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
-+ WREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
-+ }
-+ tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-+ if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
-+ tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
-+ WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
-+ }
-+ tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]);
-+ if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
-+ tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
-+ WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
-+ }
-+ for (j = 0; j < adev->usec_timeout; j++) {
-+ tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-+ if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
-+ break;
-+ udelay(1);
-+ }
-+ tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
-+ tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
-+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
-+ WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-+ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
-+ /* wait for the next frame */
-+ frame_count = amdgpu_display_vblank_get_counter(adev, i);
-+ for (j = 0; j < adev->usec_timeout; j++) {
-+ if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
-+ break;
-+ udelay(1);
-+ }
-+ }
-+ }
-+
-+ WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
-+ WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
-+
-+ /* Unlock vga access */
-+ WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
-+ mdelay(1);
-+ WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-+}
-+
-+/* End of TODO: Remove the mc access workaround */
-+
-+static bool dm_is_idle(void *handle)
-+{
-+ /* XXX todo */
-+ return true;
-+}
-+
-+static int dm_wait_for_idle(void *handle)
-+{
-+ /* XXX todo */
-+ return 0;
-+}
-+
-+static void dm_print_status(void *handle)
-+{
-+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-+
-+ dev_info(adev->dev, "DCE 11.x registers\n");
-+ /* XXX todo */
-+}
-+
-+static int dm_soft_reset(void *handle)
-+{
-+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-+ u32 srbm_soft_reset = 0, tmp;
-+
-+ if (dm_is_display_hung(adev))
-+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
-+
-+ if (srbm_soft_reset) {
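-+ /* Pulse the DC soft-reset bit: assert, wait, de-assert.
-+ * The read-backs post the register writes. */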
-+ dm_print_status(adev);
-+
-+ tmp = RREG32(mmSRBM_SOFT_RESET);
-+ tmp |= srbm_soft_reset;
-+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
-+ WREG32(mmSRBM_SOFT_RESET, tmp);
-+ tmp = RREG32(mmSRBM_SOFT_RESET);
-+
-+ udelay(50);
-+
-+ tmp &= ~srbm_soft_reset;
-+ WREG32(mmSRBM_SOFT_RESET, tmp);
-+ tmp = RREG32(mmSRBM_SOFT_RESET);
-+
-+ /* Wait a little for things to settle down */
-+ udelay(50);
-+ dm_print_status(adev);
-+ }
-+ return 0;
-+}
-+
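-+/* Page-flip completion handler: sends the pending vblank event to
-+ * userspace and schedules the unpin work for the old framebuffer. */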
-+static void dm_pflip_high_irq(void *interrupt_params)
-+{
-+ struct amdgpu_flip_work *works;
-+ struct amdgpu_crtc *amdgpu_crtc;
-+ struct common_irq_params *irq_params = interrupt_params;
-+ struct amdgpu_device *adev = irq_params->adev;
-+ unsigned long flags;
-+ const struct dc *dc = irq_params->adev->dm.dc;
-+ const struct dc_target *dc_target =
-+ dc_get_target_on_irq_source(dc, irq_params->irq_src);
-+ uint8_t link_index = 0;
-+
-+ /* TODO: #flip address all tags together*/
-+ if (dc_target != NULL)
-+ link_index = dc_target_get_link_index(dc_target);
-+
-+ amdgpu_crtc = adev->mode_info.crtcs[link_index];
-+
-+ /* The IRQ can arrive during the initialization stage. */
-+ if (amdgpu_crtc == NULL)
-+ return;
-+
-+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
-+ works = amdgpu_crtc->pflip_works;
-+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
-+ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
-+ "AMDGPU_FLIP_SUBMITTED(%d)\n",
-+ amdgpu_crtc->pflip_status,
-+ AMDGPU_FLIP_SUBMITTED);
-+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
-+ return;
-+ }
-+
-+ /* page flip completed. clean up */
-+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
-+ amdgpu_crtc->pflip_works = NULL;
-+
-+ /* wake up userspace */
-+ if (works->event)
-+ drm_send_vblank_event(
-+ adev->ddev,
-+ amdgpu_crtc->crtc_id,
-+ works->event);
-+
-+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
-+
-+ drm_crtc_vblank_put(&amdgpu_crtc->base);
-+ schedule_work(&works->unpin_work);
-+}
-+
-+static void dm_crtc_high_irq(void *interrupt_params)
-+{
-+ struct common_irq_params *irq_params = interrupt_params;
-+ struct amdgpu_device *adev = irq_params->adev;
-+ const struct dc *dc = irq_params->adev->dm.dc;
-+ const struct dc_target *dc_target =
-+ dc_get_target_on_irq_source(dc, irq_params->irq_src);
-+ uint8_t link_index = 0;
-+
-+ /* TODO: #flip fix all tags together*/
-+ if (dc_target != NULL)
-+ link_index = dc_target_get_link_index(dc_target);
-+
-+ drm_handle_vblank(adev->ddev, link_index);
-+}
-+
-+static int dm_set_clockgating_state(void *handle,
-+ enum amd_clockgating_state state)
-+{
-+ return 0;
-+}
-+
-+static int dm_set_powergating_state(void *handle,
-+ enum amd_powergating_state state)
-+{
-+ return 0;
-+}
-+
-+/* Prototypes of private functions */
-+static int dm_early_init(void *handle);
-+
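-+/* Run sink detection on every link DC reports so displays already
-+ * attached at init/resume time are picked up. */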
-+static void detect_on_all_dc_links(struct amdgpu_display_manager *dm)
-+{
-+ uint32_t i;
-+ const struct dc_link *dc_link;
-+ struct dc_caps caps = { 0 };
-+
-+ dc_get_caps(dm->dc, &caps);
-+
-+ for (i = 0; i < caps.max_links; i++) {
-+ dc_link = dc_get_link_at_index(dm->dc, i);
-+ dc_link_detect(dc_link);
-+ }
-+}
-+
-+/* Init display KMS
-+ *
-+ * Returns 0 on success
-+ */
-+int amdgpu_dm_init(struct amdgpu_device *adev)
-+{
-+ struct dal_init_data init_data;
-+ struct drm_device *ddev = adev->ddev;
-+
-+ adev->dm.ddev = adev->ddev;
-+ adev->dm.adev = adev;
-+
-+ /* Zero all the fields */
-+ memset(&init_data, 0, sizeof(init_data));
-+
-+ /* initialize DAL's lock (for SYNC context use) */
-+ spin_lock_init(&adev->dm.dal_lock);
-+
-+ /* initialize DAL's mutex */
-+ mutex_init(&adev->dm.dal_mutex);
-+
-+ if (amdgpu_dm_irq_init(adev)) {
-+ DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
-+ goto error;
-+ }
-+
-+ if (ddev->pdev) {
-+ init_data.bdf_info.DEVICE_NUMBER = PCI_SLOT(ddev->pdev->devfn);
-+ init_data.bdf_info.FUNCTION_NUMBER =
-+ PCI_FUNC(ddev->pdev->devfn);
-+ if (ddev->pdev->bus)
-+ init_data.bdf_info.BUS_NUMBER = ddev->pdev->bus->number;
-+ }
-+
-+ init_data.display_param = display_param;
-+
-+ init_data.asic_id.chip_family = adev->family;
-+
-+ init_data.asic_id.pci_revision_id = adev->rev_id;
-+ init_data.asic_id.hw_internal_rev = adev->external_rev_id;
-+
-+ init_data.asic_id.vram_width = adev->mc.vram_width;
-+ /* TODO: initialize init_data.asic_id.vram_type here!!!! */
-+ init_data.asic_id.atombios_base_address =
-+ adev->mode_info.atom_context->bios;
-+ init_data.asic_id.runtime_flags.flags.bits.SKIP_POWER_DOWN_ON_RESUME = 1;
-+
-+ if (adev->asic_type == CHIP_CARRIZO)
-+ init_data.asic_id.runtime_flags.flags.bits.GNB_WAKEUP_SUPPORTED = 1;
-+
-+ init_data.driver = adev;
-+
-+ adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
-+
-+ if (!adev->dm.cgs_device) {
-+ DRM_ERROR("amdgpu: failed to create cgs device.\n");
-+ goto error;
-+ }
-+
-+ init_data.cgs_device = adev->dm.cgs_device;
-+
-+ adev->dm.dal = NULL;
-+
-+ /* enable gpu scaling in DAL */
-+ init_data.display_param.bool_param_enable_mask |=
-+ 1 << DAL_PARAM_ENABLE_GPU_SCALING;
-+ init_data.display_param.bool_param_values |=
-+ 1 << DAL_PARAM_ENABLE_GPU_SCALING;
-+
-+ /* Display Core create. */
-+ adev->dm.dc = dc_create(&init_data);
-+ if (!adev->dm.dc) {
-+ DRM_ERROR("amdgpu: failed to create display core.\n");
-+ goto error;
-+ }
-+
-+ if (amdgpu_dm_initialize_drm_device(adev)) {
-+ DRM_ERROR(
-+ "amdgpu: failed to initialize sw for display support.\n");
-+ goto error;
-+ }
-+
-+ /* Update the actual used number of crtc */
-+ adev->mode_info.num_crtc = adev->dm.display_indexes_num;
-+
-+ /* TODO: Add_display_info? */
-+
-+ /* TODO use dynamic cursor width */
-+ adev->ddev->mode_config.cursor_width = 128;
-+ adev->ddev->mode_config.cursor_height = 128;
-+
-+ if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
-+ DRM_ERROR(
-+ "amdgpu: failed to initialize vblank support.\n");
-+ goto error;
-+ }
-+
-+ DRM_INFO("KMS initialized.\n");
-+
-+ return 0;
-+error:
-+ amdgpu_dm_fini(adev);
-+
-+ return -1;
-+}
-+
-+void amdgpu_dm_fini(struct amdgpu_device *adev)
-+{
-+ /*
-+ * TODO: pageflip, vlank interrupt
-+ *
-+ * amdgpu_dm_destroy_drm_device(&adev->dm);
-+ * amdgpu_dm_irq_fini(adev);
-+ */
-+
-+ if (adev->dm.cgs_device) {
-+ amdgpu_cgs_destroy_device(adev->dm.cgs_device);
-+ adev->dm.cgs_device = NULL;
-+ }
-+
-+ /* DC Destroy TODO: Replace destroy DAL */
-+ dc_destroy(&adev->dm.dc);
-+}
-+
-+/* moved from amdgpu_dm_kms.c */
-+void amdgpu_dm_destroy(void)
-+{
-+}
-+
-+static int dm_sw_init(void *handle)
-+{
-+ return 0;
-+}
-+
-+static int dm_sw_fini(void *handle)
-+{
-+ return 0;
-+}
-+
-+static int dm_hw_init(void *handle)
-+{
-+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-+ /* Create DAL display manager */
-+ amdgpu_dm_init(adev);
-+
-+ amdgpu_dm_hpd_init(adev);
-+
-+ return 0;
-+}
-+
-+static int dm_hw_fini(void *handle)
-+{
-+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-+
-+ amdgpu_dm_hpd_fini(adev);
-+
-+ amdgpu_dm_irq_fini(adev);
-+
-+ return 0;
-+}
-+
-+static int dm_suspend(void *handle)
-+{
-+ struct amdgpu_device *adev = handle;
-+ struct amdgpu_display_manager *dm = &adev->dm;
-+ struct drm_crtc *crtc;
-+
-+ dc_set_power_state(
-+ dm->dc,
-+ DC_ACPI_CM_POWER_STATE_D3,
-+ DC_VIDEO_POWER_SUSPEND);
-+
-+ amdgpu_dm_irq_suspend(adev);
-+
-+ list_for_each_entry(crtc, &dm->ddev->mode_config.crtc_list, head) {
-+ crtc->mode.clock = 0;
-+ }
-+
-+ return 0;
-+}
-+
-+static int dm_resume(void *handle)
-+{
-+ int ret = 0;
-+ struct amdgpu_device *adev = handle;
-+ struct amdgpu_display_manager *dm = &adev->dm;
-+
-+ dc_set_power_state(
-+ dm->dc,
-+ DC_ACPI_CM_POWER_STATE_D0,
-+ DC_VIDEO_POWER_ON);
-+
-+ amdgpu_dm_irq_resume(adev);
-+
-+ dc_resume(dm->dc);
-+
-+ detect_on_all_dc_links(dm);
-+
-+ drm_mode_config_reset(adev->ddev);
-+
-+ return ret;
-+}
-+
-+const struct amd_ip_funcs amdgpu_dm_funcs = {
-+ .early_init = dm_early_init,
-+ .late_init = NULL,
-+ .sw_init = dm_sw_init,
-+ .sw_fini = dm_sw_fini,
-+ .hw_init = dm_hw_init,
-+ .hw_fini = dm_hw_fini,
-+ .suspend = dm_suspend,
-+ .resume = dm_resume,
-+ .is_idle = dm_is_idle,
-+ .wait_for_idle = dm_wait_for_idle,
-+ .soft_reset = dm_soft_reset,
-+ .print_status = dm_print_status,
-+ .set_clockgating_state = dm_set_clockgating_state,
-+ .set_powergating_state = dm_set_powergating_state,
-+};
-+
-+/* TODO: it is temporary non-const, should fixed later */
-+static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
-+ .atomic_check = amdgpu_dm_atomic_check,
-+ .atomic_commit = amdgpu_dm_atomic_commit
-+};
-+
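-+/* Resolve which dc_sink on the link the given connector drives. A link
-+ * may expose several sinks (e.g. DP MST), in which case the connector
-+ * is needed to disambiguate. */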
-+static bool dm_get_sink_from_link(const struct dc_link *link,
-+ struct amdgpu_connector *aconnector,
-+ const struct dc_sink **sink)
-+{
-+ int i;
-+ *sink = NULL;
-+
-+ if (!link->sink_count) {
-+ DRM_INFO("No sinks on link!\n");
-+ return true;
-+ } else if (link->sink_count > 1 && !aconnector) {
-+ DRM_ERROR("Multi sink link but no connector given!\n");
-+ return false;
-+ }
-+
-+ if (link->sink_count == 1) {
-+ *sink = link->sink[0];
-+ return true;
-+ }
-+
-+ for (i = 0; i < link->sink_count; i++)
-+ if (aconnector->dc_sink == link->sink[i])
-+ *sink = aconnector->dc_sink;
-+
-+ return true;
-+}
-+
-+void amdgpu_dm_update_connector_after_detect(
-+ struct amdgpu_connector *aconnector)
-+{
-+ struct drm_connector *connector = &aconnector->base;
-+ struct drm_device *dev = connector->dev;
-+ const struct dc_link *dc_link = aconnector->dc_link;
-+ const struct dc_sink *sink;
-+
-+ /* MST handled by drm_mst framework */
-+ if (aconnector->mst_mgr.mst_state)
-+ return;
-+
-+ if (!dm_get_sink_from_link(dc_link, aconnector, &sink))
-+ return;
-+
-+ if (aconnector->dc_sink == sink) {
-+ /* We got a DP short pulse (Link Loss, DP CTS, etc...).
-+ * Do nothing!! */
-+ DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
-+ aconnector->connector_id);
-+ return;
-+ }
-+
-+ DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
-+ aconnector->connector_id, aconnector->dc_sink, sink);
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+
-+ /* 1. Update status of the drm connector
-+ * 2. Send an event and let userspace tell us what to do */
-+ if (sink) {
-+ /* TODO: check if we still need the S3 mode update workaround.
-+ * If yes, put it here. */
-+
-+ aconnector->dc_sink = sink;
-+ if (sink->dc_edid.length == 0)
-+ aconnector->edid = NULL;
-+ else {
-+ aconnector->edid =
-+ (struct edid *) sink->dc_edid.raw_edid;
-+ drm_mode_connector_update_edid_property(connector,
-+ aconnector->edid);
-+ amdgpu_dm_connector_get_modes(&aconnector->base);
-+ }
-+ } else {
-+ drm_mode_connector_update_edid_property(connector, NULL);
-+ aconnector->num_modes = 0;
-+ aconnector->dc_sink = NULL;
-+ }
-+
-+ mutex_unlock(&dev->mode_config.mutex);
-+}
-+
-+static void handle_hpd_irq(void *param)
-+{
-+ struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
-+ struct drm_connector *connector = &aconnector->base;
-+ struct drm_device *dev = connector->dev;
-+
-+ dc_link_detect(aconnector->dc_link);
-+ amdgpu_dm_update_connector_after_detect(aconnector);
-+ drm_helper_hpd_irq_event(dev);
-+}
-+
-+static void handle_hpd_rx_irq(void *param)
-+{
-+ struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
-+ struct drm_connector *connector = &aconnector->base;
-+ struct drm_device *dev = connector->dev;
-+
-+ if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
-+ !aconnector->mst_mgr.mst_state) {
-+ /* Downstream Port status changed. */
-+ dc_link_detect(aconnector->dc_link);
-+ amdgpu_dm_update_connector_after_detect(aconnector);
-+ drm_helper_hpd_irq_event(dev);
-+ }
-+
-+ if (aconnector->mst_mgr.mst_state)
-+ dc_helpers_dp_mst_handle_mst_hpd_rx_irq(param);
-+}
-+
-+static void register_hpd_handlers(struct amdgpu_device *adev)
-+{
-+ struct drm_device *dev = adev->ddev;
-+ struct drm_connector *connector;
-+ struct amdgpu_connector *aconnector;
-+ const struct dc_link *dc_link;
-+ struct dc_interrupt_params int_params = {0};
-+
-+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
-+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
-+
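-+ /* Register an HPD handler per connector, plus an HPD RX handler
-+ * for DP short-pulse interrupts where the link provides one. */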
-+ list_for_each_entry(connector,
-+ &dev->mode_config.connector_list, head) {
-+
-+ aconnector = to_amdgpu_connector(connector);
-+ dc_link = aconnector->dc_link;
-+
-+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
-+ int_params.irq_source = dc_link->irq_source_hpd;
-+
-+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
-+ handle_hpd_irq,
-+ (void *) aconnector);
-+
-+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
-+
-+ /* Also register for DP short pulse (hpd_rx). */
-+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
-+ int_params.irq_source = dc_link->irq_source_hpd_rx;
-+
-+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
-+ handle_hpd_rx_irq,
-+ (void *) aconnector);
-+ }
-+ }
-+}
-+
-+/* Register IRQ sources and initialize IRQ callbacks */
-+static int dce110_register_irq_handlers(struct amdgpu_device *adev)
-+{
-+ struct dc *dc = adev->dm.dc;
-+ struct common_irq_params *c_irq_params;
-+ struct dc_interrupt_params int_params = {0};
-+ int r;
-+ int i;
-+ struct dc_caps caps = { 0 };
-+
-+ dc_get_caps(dc, &caps);
-+
-+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
-+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
-+
-+ /* Actions of amdgpu_irq_add_id():
-+ * 1. Register a set() function with base driver.
-+ * Base driver will call set() function to enable/disable an
-+ * interrupt in DC hardware.
-+ * 2. Register amdgpu_dm_irq_handler().
-+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
-+ * coming from DC hardware.
-+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
-+ * for acknowledging and handling. */
-+
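-+ /* The VUPDATE and PFLIP interrupt source IDs interleave per
-+ * CRTC, hence the stride of 2 in the loops below. */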
-+ for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT;
-+ i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
-+ r = amdgpu_irq_add_id(adev, i, &adev->crtc_irq);
-+ if (r) {
-+ DRM_ERROR("Failed to add crtc irq id!\n");
-+ return r;
-+ }
-+
-+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
-+ int_params.irq_source =
-+ dc_interrupt_to_irq_source(dc, i, 0);
-+
-+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
-+
-+ c_irq_params->adev = adev;
-+ c_irq_params->irq_src = int_params.irq_source;
-+
-+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
-+ dm_crtc_high_irq, c_irq_params);
-+ }
-+
-+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
-+ i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
-+ r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
-+ if (r) {
-+ DRM_ERROR("Failed to add page flip irq id!\n");
-+ return r;
-+ }
-+
-+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
-+ int_params.irq_source =
-+ dc_interrupt_to_irq_source(dc, i, 0);
-+
-+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
-+
-+ c_irq_params->adev = adev;
-+ c_irq_params->irq_src = int_params.irq_source;
-+
-+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
-+ dm_pflip_high_irq, c_irq_params);
-+
-+ }
-+
-+ /* HPD */
-+ r = amdgpu_irq_add_id(adev, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A,
-+ &adev->hpd_irq);
-+ if (r) {
-+ DRM_ERROR("Failed to add hpd irq id!\n");
-+ return r;
-+ }
-+
-+ register_hpd_handlers(adev);
-+
-+ /* This is a part of HPD initialization. */
-+ drm_kms_helper_poll_init(adev->ddev);
-+
-+ return 0;
-+}
-+
-+static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
-+{
-+ int r;
-+
-+ adev->mode_info.mode_config_initialized = true;
-+
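-+ /* Reuse the base driver's fb_create and output_poll_changed
-+ * hooks; only the atomic check/commit paths are DM-specific. */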
-+ amdgpu_dm_mode_funcs.fb_create =
-+ amdgpu_mode_funcs.fb_create;
-+ amdgpu_dm_mode_funcs.output_poll_changed =
-+ amdgpu_mode_funcs.output_poll_changed;
-+
-+ adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
-+
-+ adev->ddev->mode_config.max_width = 16384;
-+ adev->ddev->mode_config.max_height = 16384;
-+
-+ adev->ddev->mode_config.preferred_depth = 24;
-+ adev->ddev->mode_config.prefer_shadow = 1;
-+
-+ adev->ddev->mode_config.fb_base = adev->mc.aper_base;
-+
-+ r = amdgpu_modeset_create_props(adev);
-+ if (r)
-+ return r;
-+
-+ return 0;
-+}
-+
-+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
-+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-+
-+static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
-+{
-+ struct amdgpu_display_manager *dm = bl_get_data(bd);
-+
-+ if (dc_link_set_backlight_level(dm->backlight_link,
-+ bd->props.brightness))
-+ return 0;
-+ else
-+ return 1;
-+}
-+
-+static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
-+{
-+ return bd->props.brightness;
-+}
-+
-+static const struct backlight_ops amdgpu_dm_backlight_ops = {
-+ .get_brightness = amdgpu_dm_backlight_get_brightness,
-+ .update_status = amdgpu_dm_backlight_update_status,
-+};
-+
-+void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
-+{
-+ char bl_name[16];
-+ struct backlight_properties props = { 0 };
-+
-+ props.max_brightness = AMDGPU_MAX_BL_LEVEL;
-+ props.type = BACKLIGHT_RAW;
-+
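-+ /* The device shows up as /sys/class/backlight/amdgpu_bl<N>. */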
-+ snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
-+ dm->adev->ddev->primary->index);
-+
-+ dm->backlight_dev = backlight_device_register(bl_name,
-+ dm->adev->ddev->dev,
-+ dm,
-+ &amdgpu_dm_backlight_ops,
-+ &props);
-+
-+ if (IS_ERR(dm->backlight_dev)) {
-+ dm->backlight_dev = NULL;
-+ DRM_ERROR("DM: Backlight registration failed!\n");
-+ } else
-+ DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
-+}
-+
-+#endif
-+
-+/* In this architecture, the association
-+ * connector -> encoder -> crtc
-+ * is not really required. The crtc and connector will hold the
-+ * display_index as an abstraction to use with the DAL component.
-+ *
-+ * Returns 0 on success
-+ */
-+int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
-+{
-+ struct amdgpu_display_manager *dm = &adev->dm;
-+ uint32_t link_index;
-+ struct drm_connector *connector;
-+ struct amdgpu_connector *aconnector;
-+ struct amdgpu_encoder *aencoder;
-+ struct amdgpu_crtc *acrtc;
-+ struct dc_caps caps = { 0 };
-+ uint32_t link_cnt;
-+
-+ dc_get_caps(dm->dc, &caps);
-+ link_cnt = caps.max_links;
-+
-+ if (amdgpu_dm_mode_config_init(dm->adev)) {
-+ DRM_ERROR("DM: Failed to initialize mode config\n");
-+ return -1;
-+ }
-+
-+ /* loops over all connectors on the board */
-+ for (link_index = 0; link_index < link_cnt; link_index++) {
-+
-+ if (link_index > AMDGPU_DM_MAX_DISPLAY_INDEX) {
-+ DRM_ERROR(
-+ "KMS: Cannot support more than %d display indices\n",
-+ AMDGPU_DM_MAX_DISPLAY_INDEX);
-+ continue;
-+ }
-+
-+ aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
-+ if (!aconnector)
-+ goto fail_connector;
-+
-+ aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
-+ if (!aencoder)
-+ goto fail_encoder;
-+
-+ acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
-+ if (!acrtc)
-+ goto fail_crtc;
-+
-+ if (amdgpu_dm_crtc_init(
-+ dm,
-+ acrtc,
-+ link_index)) {
-+ DRM_ERROR("KMS: Failed to initialize crtc\n");
-+ goto fail;
-+ }
-+
-+ if (amdgpu_dm_encoder_init(
-+ dm->ddev,
-+ aencoder,
-+ link_index,
-+ acrtc)) {
-+ DRM_ERROR("KMS: Failed to initialize encoder\n");
-+ goto fail;
-+ }
-+
-+ if (amdgpu_dm_connector_init(
-+ dm,
-+ aconnector,
-+ link_index,
-+ aencoder)) {
-+ DRM_ERROR("KMS: Failed to initialize connector\n");
-+ goto fail;
-+ }
-+ }
-+
-+ dm->display_indexes_num = link_cnt;
-+
-+ detect_on_all_dc_links(&adev->dm);
-+ list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head)
-+ amdgpu_dm_update_connector_after_detect(to_amdgpu_connector(connector));
-+
-+ /* Software is initialized. Now we can register interrupt handlers. */
-+ switch (adev->asic_type) {
-+ case CHIP_CARRIZO:
-+ if (dce110_register_irq_handlers(dm->adev)) {
-+ DRM_ERROR("DM: Failed to initialize IRQ\n");
-+ return -1;
-+ }
-+ break;
-+ default:
-+ DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
-+ return -1;
-+ }
-+
-+ drm_mode_config_reset(dm->ddev);
-+
-+ return 0;
-+
-+fail:
-+ /* clean up any dangling drm structures for the last (corrupted)
-+ display target */
-+ amdgpu_dm_crtc_destroy(&acrtc->base);
-+fail_crtc:
-+ amdgpu_dm_encoder_destroy(&aencoder->base);
-+fail_encoder:
-+ amdgpu_dm_connector_destroy(&aconnector->base);
-+fail_connector:
-+ if (dm->backlight_dev) {
-+ backlight_device_unregister(dm->backlight_dev);
-+ dm->backlight_dev = NULL;
-+ }
-+ return -1;
-+}
-+
-+void amdgpu_dm_destroy_drm_device(
-+ struct amdgpu_display_manager *dm)
-+{
-+ drm_mode_config_cleanup(dm->ddev);
-+ /*switch_dev_unregister(&dm->hdmi_audio_dev);*/
-+ return;
-+}
-+
-+/******************************************************************************
-+ * amdgpu_display_funcs functions
-+ *****************************************************************************/
-+
-+
-+static void dm_set_vga_render_state(struct amdgpu_device *adev,
-+ bool render)
-+{
-+ u32 tmp;
-+
-+ /* Lock out or restore access through the VGA aperture */
-+ tmp = RREG32(mmVGA_HDP_CONTROL);
-+ if (render)
-+ tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
-+ else
-+ tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
-+ WREG32(mmVGA_HDP_CONTROL, tmp);
-+
-+ /* enable or disable VGA render */
-+ tmp = RREG32(mmVGA_RENDER_CONTROL);
-+ if (render)
-+ tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
-+ else
-+ tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
-+ WREG32(mmVGA_RENDER_CONTROL, tmp);
-+}
-+
-+/**
-+ * dm_bandwidth_update - program display watermarks
-+ *
-+ * @adev: amdgpu_device pointer
-+ *
-+ * Calculate and program the display watermarks and line buffer allocation.
-+ */
-+static void dm_bandwidth_update(struct amdgpu_device *adev)
-+{
-+ AMDGPU_DM_NOT_IMPL("%s\n", __func__);
-+}
-+
-+static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
-+ u8 level)
-+{
-+ /* TODO: translate amdgpu_encoder to display_index and call DAL */
-+ AMDGPU_DM_NOT_IMPL("%s\n", __func__);
-+}
-+
-+static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
-+{
-+ /* TODO: translate amdgpu_encoder to display_index and call DAL */
-+ AMDGPU_DM_NOT_IMPL("%s\n", __func__);
-+ return 0;
-+}
-+
-+/******************************************************************************
-+ * Page Flip functions
-+ ******************************************************************************/
-+
-+void amdgpu_dm_flip_cleanup(
-+ struct amdgpu_device *adev,
-+ struct amdgpu_crtc *acrtc)
-+{
-+ int r;
-+ struct amdgpu_flip_work *works = acrtc->pflip_works;
-+
-+ acrtc->pflip_works = NULL;
-+ acrtc->pflip_status = AMDGPU_FLIP_NONE;
-+
-+ if (works) {
-+ if (works->event)
-+ drm_send_vblank_event(
-+ adev->ddev,
-+ acrtc->crtc_id,
-+ works->event);
-+
-+ r = amdgpu_bo_reserve(works->old_rbo, false);
-+ if (likely(r == 0)) {
-+ r = amdgpu_bo_unpin(works->old_rbo);
-+ if (unlikely(r != 0)) {
-+ DRM_ERROR("failed to unpin buffer after flip\n");
-+ }
-+ amdgpu_bo_unreserve(works->old_rbo);
-+ } else
-+ DRM_ERROR("failed to reserve buffer after flip\n");
-+
-+ drm_gem_object_unreference_unlocked(&works->old_rbo->gem_base);
-+ kfree(works->shared);
-+ kfree(works);
-+ }
-+}
-+
-+/**
-+ * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
-+ * via DRM IOCTL, by user mode.
-+ *
-+ * @adev: amdgpu_device pointer
-+ * @crtc_id: crtc to program the pageflip on
-+ * @crtc_base: new address of the crtc (GPU MC address)
-+ *
-+ * Does the actual pageflip (surface address update).
-+ */
-+static void dm_page_flip(struct amdgpu_device *adev,
-+ int crtc_id, u64 crtc_base)
-+{
-+ struct amdgpu_crtc *acrtc;
-+ struct dc_target *target;
-+ struct dc_flip_addrs addr = { {0} };
-+
-+ /*
-+ * TODO risk of concurrency issues
-+ *
-+ * This should be guarded by the dal_mutex but we can't do this since the
-+ * caller uses a spin_lock on event_lock.
-+ *
-+ * If we wait on the dal_mutex a second page flip interrupt might come,
-+ * spin on the event_lock, disabling interrupts while it does so. At
-+ * this point the core can no longer be pre-empted and return to the
-+ * thread that waited on the dal_mutex and we're deadlocked.
-+ *
-+ * With multiple cores the same essentially happens but might just take
-+ * a little longer to lock up all cores.
-+ *
-+ * The reason we should lock on dal_mutex is so that we can be sure
-+ * nobody messes with acrtc->target after we read and check its value.
-+ *
-+ * We might be able to fix our concurrency issues with a work queue
-+ * where we schedule all work items (mode_set, page_flip, etc.) and
-+ * execute them one by one. Care needs to be taken to still deal with
-+ * any potential concurrency issues arising from interrupt calls.
-+ */
-+
-+ acrtc = adev->mode_info.crtcs[crtc_id];
-+ target = acrtc->target;
-+
-+ /*
-+ * Received a page flip call after the display has been reset. Make sure
-+ * we return the buffers.
-+ */
-+ if (!target) {
-+ amdgpu_dm_flip_cleanup(adev, acrtc);
-+ return;
-+ }
-+
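-+ /* Split the 64-bit GPU MC address into the low/high parts the
-+ * DC flip-programming interface expects. */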
-+ addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
-+ addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
-+
-+ dc_flip_surface_addrs(
-+ adev->dm.dc,
-+ dc_target_get_status(target)->surfaces,
-+ &addr, 1);
-+}
-+
-+static const struct amdgpu_display_funcs display_funcs = {
-+ .set_vga_render_state = dm_set_vga_render_state,
-+ .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
-+ .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
-+ .vblank_wait = NULL, /* not called anywhere */
-+ .is_display_hung = dm_is_display_hung,/* called unconditionally */
-+ .backlight_set_level =
-+ dm_set_backlight_level,/* called unconditionally */
-+ .backlight_get_level =
-+ dm_get_backlight_level,/* called unconditionally */
-+ .hpd_sense = NULL,/* called unconditionally */
-+ .hpd_set_polarity = NULL, /* called unconditionally */
-+ .hpd_get_gpio_reg = dm_hpd_get_gpio_reg,/* called unconditionally */
-+ .page_flip = dm_page_flip, /* called unconditionally */
-+ .page_flip_get_scanoutpos =
-+ dm_crtc_get_scanoutpos,/* called unconditionally */
-+ .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
-+ .add_connector = NULL, /* VBIOS parsing. DAL does it. */
-+ .stop_mc_access = dm_stop_mc_access, /* called unconditionally */
-+ .resume_mc_access = dm_resume_mc_access, /* called unconditionally */
-+};
-+
-+static void set_display_funcs(struct amdgpu_device *adev)
-+{
-+ if (adev->mode_info.funcs == NULL)
-+ adev->mode_info.funcs = &display_funcs;
-+}
-+
-+static int dm_early_init(void *handle)
-+{
-+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-+
-+	set_display_funcs(adev);
-+ amdgpu_dm_set_irq_funcs(adev);
-+
-+ switch (adev->asic_type) {
-+ case CHIP_CARRIZO:
-+ adev->mode_info.num_crtc = 3;
-+ adev->mode_info.num_hpd = 6;
-+ adev->mode_info.num_dig = 9;
-+ break;
-+ default:
-+		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
-+ return -EINVAL;
-+ }
-+
-+ /* Note: Do NOT change adev->audio_endpt_rreg and
-+ * adev->audio_endpt_wreg because they are initialised in
-+ * amdgpu_device_init() */
-+
-+ return 0;
-+}
-+
-+bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
-+{
-+ /* TODO */
-+ return true;
-+}
-+
-+bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
-+{
-+ /* TODO */
-+ return true;
-+}
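-+
-+/*
-+ * Illustrative sketch only -- not part of this patch. One way the two
-+ * TODO stubs above could be filled in, assuming dm->dal_mutex is the
-+ * lock they are meant to take. Note this variant may sleep and could
-+ * therefore not be used from IRQ context (dm->dal_lock would be needed
-+ * there instead).
-+ */
-+#if 0
-+bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
-+{
-+	mutex_lock(&dm->dal_mutex);
-+	return true;
-+}
-+
-+bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
-+{
-+	mutex_unlock(&dm->dal_mutex);
-+	return true;
-+}
-+#endif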
-diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.h
-new file mode 100644
-index 0000000..57e9c45
---- /dev/null
-+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.h
-@@ -0,0 +1,166 @@
-+/*
-+ * Copyright 2015 Advanced Micro Devices, Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: AMD
-+ *
-+ */
-+
-+#ifndef __AMDGPU_DM_H__
-+#define __AMDGPU_DM_H__
-+
-+/*
-+#include "linux/switch.h"
-+*/
-+
-+/*
-+ * This file contains the definition for amdgpu_display_manager
-+ * and its API for amdgpu driver's use.
-+ * This component provides all the display related functionality,
-+ * and it is the only component that calls the DAL API.
-+ * The API contained here is intended for amdgpu driver use.
-+ * The API that is called directly from the KMS framework is located
-+ * in the amdgpu_dm_kms.h file.
-+ */
-+
-+#define AMDGPU_DM_MAX_DISPLAY_INDEX 31
-+/*
-+#include "include/amdgpu_dal_power_if.h"
-+#include "amdgpu_dm_irq.h"
-+*/
-+
-+#include "irq_types.h"
-+
-+/* Forward declarations */
-+struct amdgpu_device;
-+struct drm_device;
-+struct amdgpu_dm_irq_handler_data;
-+
-+struct amdgpu_dm_prev_state {
-+ struct drm_framebuffer *fb;
-+ int32_t x;
-+ int32_t y;
-+ struct drm_display_mode mode;
-+};
-+
-+struct common_irq_params {
-+ struct amdgpu_device *adev;
-+ enum dc_irq_source irq_src;
-+};
-+
-+struct irq_list_head {
-+ struct list_head head;
-+	/* In case this interrupt needs post-processing, 'work' will be queued */
-+ struct work_struct work;
-+};
-+
-+struct amdgpu_display_manager {
-+ struct dal *dal;
-+ struct dc *dc;
-+ void *cgs_device;
-+ /* lock to be used when DAL is called from SYNC IRQ context */
-+ spinlock_t dal_lock;
-+
-+ struct amdgpu_device *adev; /*AMD base driver*/
-+ struct drm_device *ddev; /*DRM base driver*/
-+ u16 display_indexes_num;
-+
-+ struct amdgpu_dm_prev_state prev_state;
-+
-+ /*
-+ * 'irq_source_handler_table' holds a list of handlers
-+ * per (DAL) IRQ source.
-+ *
-+	 * Each IRQ source may need to be handled in different contexts.
-+ * By 'context' we mean, for example:
-+ * - The ISR context, which is the direct interrupt handler.
-+ * - The 'deferred' context - this is the post-processing of the
-+ * interrupt, but at a lower priority.
-+ *
-+ * Note that handlers are called in the same order as they were
-+ * registered (FIFO).
-+ */
-+ struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
-+ struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
-+
-+ struct common_irq_params
-+ pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
-+
-+ struct common_irq_params
-+ vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];
-+
-+ /* this spin lock synchronizes access to 'irq_handler_list_table' */
-+ spinlock_t irq_handler_list_table_lock;
-+
-+ /* Timer-related data. */
-+ struct list_head timer_handler_list;
-+ struct workqueue_struct *timer_workqueue;
-+
-+	/* Use dal_mutex for any activity which is NOT synchronized by
-+ * DRM mode setting locks.
-+ * For example: amdgpu_dm_hpd_low_irq() calls into DAL *without*
-+ * DRM mode setting locks being acquired. This is where dal_mutex
-+ * is acquired before calling into DAL. */
-+ struct mutex dal_mutex;
-+
-+ struct backlight_device *backlight_dev;
-+
-+ const struct dc_link *backlight_link;
-+};
-+
-+
-+/* basic init/fini API */
-+int amdgpu_dm_init(struct amdgpu_device *adev);
-+
-+void amdgpu_dm_fini(struct amdgpu_device *adev);
-+
-+void amdgpu_dm_destroy(void);
-+
-+/* Initializes drm_device display related structures, based on the
-+ * information provided by DAL. The drm structures are: drm_crtc,
-+ * drm_connector, drm_encoder, drm_mode_config.
-+ *
-+ * Returns 0 on success
-+ */
-+int amdgpu_dm_initialize_drm_device(
-+ struct amdgpu_device *adev);
-+
-+/* removes and deallocates the drm structures, created by the above function */
-+void amdgpu_dm_destroy_drm_device(
-+ struct amdgpu_display_manager *dm);
-+
-+/* Locking/Mutex */
-+bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm);
-+
-+bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm);
-+
-+/* Register "Backlight device" accessible by user-mode. */
-+void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm);
-+
-+void amdgpu_dm_flip_cleanup(
-+ struct amdgpu_device *adev,
-+ struct amdgpu_crtc *acrtc);
-+
-+extern const struct amd_ip_funcs amdgpu_dm_funcs;
-+
-+void amdgpu_dm_update_connector_after_detect(
-+ struct amdgpu_connector *aconnector);
-+
-+#endif /* __AMDGPU_DM_H__ */
-diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.c
-new file mode 100644
-index 0000000..9491fd0
---- /dev/null
-+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.c
-@@ -0,0 +1,814 @@
-+/*
-+ * Copyright 2015 Advanced Micro Devices, Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: AMD
-+ *
-+ */
-+
-+#include <drm/drmP.h>
-+
-+#include "dal_services_types.h"
-+#include "dc.h"
-+
-+#include "amdgpu.h"
-+#include "amdgpu_dm.h"
-+#include "amdgpu_dm_irq.h"
-+
-+
-+/******************************************************************************
-+ * Private declarations.
-+ *****************************************************************************/
-+
-+struct handler_common_data {
-+ struct list_head list;
-+ interrupt_handler handler;
-+ void *handler_arg;
-+
-+ /* DM which this handler belongs to */
-+ struct amdgpu_display_manager *dm;
-+};
-+
-+struct amdgpu_dm_irq_handler_data {
-+ struct handler_common_data hcd;
-+ /* DAL irq source which registered for this interrupt. */
-+ enum dc_irq_source irq_source;
-+};
-+
-+struct amdgpu_dm_timer_handler_data {
-+ struct handler_common_data hcd;
-+ struct delayed_work d_work;
-+};
-+
-+
-+#define DM_IRQ_TABLE_LOCK(adev, flags) \
-+ spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)
-+
-+#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
-+ spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)
-+
-+/******************************************************************************
-+ * Private functions.
-+ *****************************************************************************/
-+
-+static void init_handler_common_data(
-+ struct handler_common_data *hcd,
-+ void (*ih)(void *),
-+ void *args,
-+ struct amdgpu_display_manager *dm)
-+{
-+ hcd->handler = ih;
-+ hcd->handler_arg = args;
-+ hcd->dm = dm;
-+}
-+
-+/**
-+ * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper.
-+ *
-+ * @work: work struct
-+ */
-+static void dm_irq_work_func(struct work_struct *work)
-+{
-+ struct list_head *entry;
-+ struct irq_list_head *irq_list_head =
-+ container_of(work, struct irq_list_head, work);
-+ struct list_head *handler_list = &irq_list_head->head;
-+ struct amdgpu_dm_irq_handler_data *handler_data;
-+
-+ list_for_each(entry, handler_list) {
-+ handler_data =
-+ list_entry(
-+ entry,
-+ struct amdgpu_dm_irq_handler_data,
-+ hcd.list);
-+
-+		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
-+				handler_data->irq_source);
-+
-+ handler_data->hcd.handler(handler_data->hcd.handler_arg);
-+ }
-+
-+ /* Call a DAL subcomponent which registered for interrupt notification
-+ * at INTERRUPT_LOW_IRQ_CONTEXT.
-+ * (The most common use is HPD interrupt) */
-+}
-+
-+/**
-+ * Remove a handler and return a pointer to the handler list from which the
-+ * handler was removed.
-+ */
-+static struct list_head *remove_irq_handler(
-+ struct amdgpu_device *adev,
-+ void *ih,
-+ const struct dc_interrupt_params *int_params)
-+{
-+ struct list_head *hnd_list;
-+ struct list_head *entry, *tmp;
-+ struct amdgpu_dm_irq_handler_data *handler;
-+ unsigned long irq_table_flags;
-+ bool handler_removed = false;
-+ enum dc_irq_source irq_source;
-+
-+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
-+
-+ irq_source = int_params->irq_source;
-+
-+ switch (int_params->int_context) {
-+ case INTERRUPT_HIGH_IRQ_CONTEXT:
-+ hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
-+ break;
-+ case INTERRUPT_LOW_IRQ_CONTEXT:
-+ default:
-+ hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
-+ break;
-+ }
-+
-+ list_for_each_safe(entry, tmp, hnd_list) {
-+
-+ handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
-+ hcd.list);
-+
-+ if (ih == handler) {
-+ /* Found our handler. Remove it from the list. */
-+ list_del(&handler->hcd.list);
-+ handler_removed = true;
-+ break;
-+ }
-+ }
-+
-+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-+
-+ if (handler_removed == false) {
-+ /* Not necessarily an error - caller may not
-+ * know the context. */
-+ return NULL;
-+ }
-+
-+ kfree(handler);
-+
-+ DRM_DEBUG_KMS(
-+ "DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
-+ ih, int_params->irq_source, int_params->int_context);
-+
-+ return hnd_list;
-+}
-+
-+/* If 'handler_in == NULL' then remove ALL handlers. */
-+static void remove_timer_handler(
-+ struct amdgpu_device *adev,
-+ struct amdgpu_dm_timer_handler_data *handler_in)
-+{
-+ struct amdgpu_dm_timer_handler_data *handler_temp;
-+ struct list_head *handler_list;
-+ struct list_head *entry, *tmp;
-+ unsigned long irq_table_flags;
-+ bool handler_removed = false;
-+
-+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
-+
-+ handler_list = &adev->dm.timer_handler_list;
-+
-+ list_for_each_safe(entry, tmp, handler_list) {
-+ /* Note that list_for_each_safe() guarantees that
-+ * handler_temp is NOT null. */
-+ handler_temp = list_entry(entry,
-+ struct amdgpu_dm_timer_handler_data, hcd.list);
-+
-+ if (handler_in == NULL || handler_in == handler_temp) {
-+ list_del(&handler_temp->hcd.list);
-+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-+
-+ DRM_DEBUG_KMS("DM_IRQ: removing timer handler: %p\n",
-+ handler_temp);
-+
-+ if (handler_in == NULL) {
-+ /* Since it is still in the queue, it must
-+ * be cancelled. */
-+ cancel_delayed_work_sync(&handler_temp->d_work);
-+ }
-+
-+ kfree(handler_temp);
-+ handler_removed = true;
-+
-+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
-+ }
-+
-+ if (handler_in == NULL) {
-+ /* Remove ALL handlers. */
-+ continue;
-+ }
-+
-+ if (handler_in == handler_temp) {
-+ /* Remove a SPECIFIC handler.
-+ * Found our handler - we can stop here. */
-+ break;
-+ }
-+ }
-+
-+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-+
-+ if (handler_in != NULL && handler_removed == false) {
-+ DRM_ERROR("DM_IRQ: handler: %p is not in the list!\n",
-+ handler_in);
-+ }
-+}
-+
-+/**
-+ * dm_timer_work_func - Handle a timer.
-+ *
-+ * @work: work struct
-+ */
-+static void dm_timer_work_func(
-+ struct work_struct *work)
-+{
-+ struct amdgpu_dm_timer_handler_data *handler_data =
-+ container_of(work, struct amdgpu_dm_timer_handler_data,
-+ d_work.work);
-+
-+ DRM_DEBUG_KMS("DM_IRQ: work_func: handler_data=%p\n", handler_data);
-+
-+ /* Call a DAL subcomponent which registered for timer notification. */
-+ handler_data->hcd.handler(handler_data->hcd.handler_arg);
-+
-+ /* We support only "single shot" timers. That means we must delete
-+ * the handler after it was called. */
-+ remove_timer_handler(handler_data->hcd.dm->adev, handler_data);
-+}
-+
-+static bool validate_irq_registration_params(
-+ struct dc_interrupt_params *int_params,
-+ void (*ih)(void *))
-+{
-+ if (NULL == int_params || NULL == ih) {
-+ DRM_ERROR("DM_IRQ: invalid input!\n");
-+ return false;
-+ }
-+
-+ if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
-+ DRM_ERROR("DM_IRQ: invalid context: %d!\n",
-+ int_params->int_context);
-+ return false;
-+ }
-+
-+ if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
-+ DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
-+ int_params->irq_source);
-+ return false;
-+ }
-+
-+ return true;
-+}
-+
-+static bool validate_irq_unregistration_params(
-+ enum dc_irq_source irq_source,
-+ irq_handler_idx handler_idx)
-+{
-+ if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
-+ DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
-+ return false;
-+ }
-+
-+ if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
-+ DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
-+ return false;
-+ }
-+
-+ return true;
-+}
-+/******************************************************************************
-+ * Public functions.
-+ *
-+ * Note: caller is responsible for input validation.
-+ *****************************************************************************/
-+
-+void *amdgpu_dm_irq_register_interrupt(
-+ struct amdgpu_device *adev,
-+ struct dc_interrupt_params *int_params,
-+ void (*ih)(void *),
-+ void *handler_args)
-+{
-+ struct list_head *hnd_list;
-+ struct amdgpu_dm_irq_handler_data *handler_data;
-+ unsigned long irq_table_flags;
-+ enum dc_irq_source irq_source;
-+
-+ if (false == validate_irq_registration_params(int_params, ih))
-+ return DAL_INVALID_IRQ_HANDLER_IDX;
-+
-+ handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
-+ if (!handler_data) {
-+ DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
-+ return DAL_INVALID_IRQ_HANDLER_IDX;
-+ }
-+
-+ init_handler_common_data(&handler_data->hcd, ih, handler_args,
-+ &adev->dm);
-+
-+ irq_source = int_params->irq_source;
-+
-+ handler_data->irq_source = irq_source;
-+
-+ /* Lock the list, add the handler. */
-+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
-+
-+ switch (int_params->int_context) {
-+ case INTERRUPT_HIGH_IRQ_CONTEXT:
-+ hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
-+ break;
-+ case INTERRUPT_LOW_IRQ_CONTEXT:
-+ default:
-+ hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
-+ break;
-+ }
-+
-+ list_add_tail(&handler_data->hcd.list, hnd_list);
-+
-+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-+
-+	/* This pointer will be stored by the code which requested the
-+	 * interrupt registration; the same pointer is later needed to
-+	 * unregister the interrupt (see the usage sketch below). */
-+
-+ DRM_DEBUG_KMS(
-+ "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
-+ handler_data,
-+ irq_source,
-+ int_params->int_context);
-+
-+ return handler_data;
-+}
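-+
-+/*
-+ * Illustrative usage sketch only -- not part of this patch. The handler
-+ * and caller below (my_vupdate_handler, example_register) are
-+ * hypothetical; the cookie round-trip mirrors the code above.
-+ */
-+#if 0
-+static void my_vupdate_handler(void *arg)
-+{
-+	/* Post-processing runs here at INTERRUPT_LOW_IRQ_CONTEXT. */
-+}
-+
-+static void example_register(struct amdgpu_device *adev)
-+{
-+	struct dc_interrupt_params int_params = {
-+		.int_context = INTERRUPT_LOW_IRQ_CONTEXT,
-+		.irq_source = DC_IRQ_SOURCE_VUPDATE1,
-+	};
-+	void *cookie = amdgpu_dm_irq_register_interrupt(
-+			adev, &int_params, my_vupdate_handler, adev);
-+
-+	if (cookie == DAL_INVALID_IRQ_HANDLER_IDX)
-+		return;
-+
-+	/* ... later, tear down with the same cookie ... */
-+	amdgpu_dm_irq_unregister_interrupt(adev, int_params.irq_source,
-+			cookie);
-+}
-+#endif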
-+
-+void amdgpu_dm_irq_unregister_interrupt(
-+ struct amdgpu_device *adev,
-+ enum dc_irq_source irq_source,
-+ void *ih)
-+{
-+ struct list_head *handler_list;
-+ struct dc_interrupt_params int_params;
-+ int i;
-+
-+ if (false == validate_irq_unregistration_params(irq_source, ih))
-+ return;
-+
-+ memset(&int_params, 0, sizeof(int_params));
-+
-+ int_params.irq_source = irq_source;
-+
-+ for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {
-+
-+ int_params.int_context = i;
-+
-+ handler_list = remove_irq_handler(adev, ih, &int_params);
-+
-+ if (handler_list != NULL)
-+ break;
-+ }
-+
-+ if (handler_list == NULL) {
-+ /* If we got here, it means we searched all irq contexts
-+ * for this irq source, but the handler was not found. */
-+ DRM_ERROR(
-+ "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
-+ ih, irq_source);
-+ }
-+}
-+
-+int amdgpu_dm_irq_init(
-+ struct amdgpu_device *adev)
-+{
-+ int src;
-+ struct irq_list_head *lh;
-+
-+ DRM_DEBUG_KMS("DM_IRQ\n");
-+
-+ spin_lock_init(&adev->dm.irq_handler_list_table_lock);
-+
-+ for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
-+ /* low context handler list init */
-+ lh = &adev->dm.irq_handler_list_low_tab[src];
-+ INIT_LIST_HEAD(&lh->head);
-+ INIT_WORK(&lh->work, dm_irq_work_func);
-+
-+ /* high context handler init */
-+ INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
-+ }
-+
-+ INIT_LIST_HEAD(&adev->dm.timer_handler_list);
-+
-+ /* allocate and initialize the workqueue for DM timer */
-+ adev->dm.timer_workqueue = create_singlethread_workqueue(
-+ "dm_timer_queue");
-+ if (adev->dm.timer_workqueue == NULL) {
-+ DRM_ERROR("DM_IRQ: unable to create timer queue!\n");
-+		return -ENOMEM;
-+ }
-+
-+ return 0;
-+}
-+
-+void amdgpu_dm_irq_register_timer(
-+ struct amdgpu_device *adev,
-+ struct dc_timer_interrupt_params *int_params,
-+ interrupt_handler ih,
-+ void *args)
-+{
-+ unsigned long jf_delay;
-+ struct list_head *handler_list;
-+ struct amdgpu_dm_timer_handler_data *handler_data;
-+ unsigned long irq_table_flags;
-+
-+ handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
-+ if (!handler_data) {
-+ DRM_ERROR("DM_IRQ: failed to allocate timer handler!\n");
-+ return;
-+ }
-+
-+ init_handler_common_data(&handler_data->hcd, ih, args, &adev->dm);
-+
-+ INIT_DELAYED_WORK(&handler_data->d_work, dm_timer_work_func);
-+
-+ /* Lock the list, add the handler. */
-+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
-+
-+ handler_list = &adev->dm.timer_handler_list;
-+
-+ list_add_tail(&handler_data->hcd.list, handler_list);
-+
-+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-+
-+ jf_delay = usecs_to_jiffies(int_params->micro_sec_interval);
-+
-+ queue_delayed_work(adev->dm.timer_workqueue, &handler_data->d_work,
-+ jf_delay);
-+
-+ DRM_DEBUG_KMS("DM_IRQ: added handler:%p with micro_sec_interval=%llu\n",
-+ handler_data, int_params->micro_sec_interval);
-+}
-+
-+/* DM IRQ and timer resource release */
-+void amdgpu_dm_irq_fini(
-+ struct amdgpu_device *adev)
-+{
-+ int src;
-+	struct irq_list_head *lh;
-+
-+	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
-+
-+ for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
-+
-+		/* The handler was removed from the table, which means
-+		 * it is safe to flush all the 'work' (because no code
-+		 * can schedule a new one). */
-+ lh = &adev->dm.irq_handler_list_low_tab[src];
-+ flush_work(&lh->work);
-+ }
-+
-+ /* Cancel ALL timers and release handlers (if any). */
-+ remove_timer_handler(adev, NULL);
-+ /* Release the queue itself. */
-+ destroy_workqueue(adev->dm.timer_workqueue);
-+}
-+
-+int amdgpu_dm_irq_suspend(
-+ struct amdgpu_device *adev)
-+{
-+ int src;
-+ struct list_head *hnd_list_h;
-+ struct list_head *hnd_list_l;
-+ unsigned long irq_table_flags;
-+
-+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
-+
-+ DRM_DEBUG_KMS("DM_IRQ: suspend\n");
-+
-+ /* disable HW interrupt */
-+ for (src = DC_IRQ_SOURCE_HPD1; src < DAL_IRQ_SOURCES_NUMBER; src++) {
-+ hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
-+ hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
-+ if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
-+ dc_interrupt_set(adev->dm.dc, src, false);
-+
-+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-+ flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
-+
-+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
-+ }
-+
-+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-+
-+ return 0;
-+}
-+
-+int amdgpu_dm_irq_resume(
-+ struct amdgpu_device *adev)
-+{
-+ int src;
-+ struct list_head *hnd_list_h, *hnd_list_l;
-+ unsigned long irq_table_flags;
-+
-+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
-+
-+ DRM_DEBUG_KMS("DM_IRQ: resume\n");
-+
-+ /* re-enable HW interrupt */
-+ for (src = DC_IRQ_SOURCE_HPD1; src < DAL_IRQ_SOURCES_NUMBER; src++) {
-+ hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
-+ hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
-+ if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
-+ dc_interrupt_set(adev->dm.dc, src, true);
-+ }
-+
-+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-+
-+ return 0;
-+}
-+
-+
-+/**
-+ * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
-+ * "irq_source".
-+ */
-+static void amdgpu_dm_irq_schedule_work(
-+ struct amdgpu_device *adev,
-+ enum dc_irq_source irq_source)
-+{
-+ unsigned long irq_table_flags;
-+
-+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
-+
-+	/* Since the caller is interested in the 'work_struct', the irq
-+	 * will be post-processed at "INTERRUPT_LOW_IRQ_CONTEXT". */
-+
-+ schedule_work(&adev->dm.irq_handler_list_low_tab[irq_source].work);
-+
-+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-+}
-+
-+/** amdgpu_dm_irq_immediate_work
-+ * Call the registered high-context handlers immediately instead of
-+ * queueing work.
-+ */
-+static void amdgpu_dm_irq_immediate_work(
-+ struct amdgpu_device *adev,
-+ enum dc_irq_source irq_source)
-+{
-+ struct amdgpu_dm_irq_handler_data *handler_data;
-+ struct list_head *entry;
-+ unsigned long irq_table_flags;
-+
-+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
-+
-+ list_for_each(
-+ entry,
-+ &adev->dm.irq_handler_list_high_tab[irq_source]) {
-+
-+ handler_data =
-+ list_entry(
-+ entry,
-+ struct amdgpu_dm_irq_handler_data,
-+ hcd.list);
-+
-+ /* Call a subcomponent which registered for immediate
-+ * interrupt notification */
-+ handler_data->hcd.handler(handler_data->hcd.handler_arg);
-+ }
-+
-+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-+}
-+
-+/*
-+ * amdgpu_dm_irq_handler
-+ *
-+ * Generic IRQ handler: calls all registered high-context handlers
-+ * immediately and schedules work for the low-context handlers.
-+ */
-+int amdgpu_dm_irq_handler(
-+ struct amdgpu_device *adev,
-+ struct amdgpu_irq_src *source,
-+ struct amdgpu_iv_entry *entry)
-+{
-+	enum dc_irq_source src =
-+ dc_interrupt_to_irq_source(
-+ adev->dm.dc,
-+ entry->src_id,
-+ entry->src_data);
-+
-+ dc_interrupt_ack(adev->dm.dc, src);
-+
-+ /* Call high irq work immediately */
-+ amdgpu_dm_irq_immediate_work(adev, src);
-+	/* Schedule low irq work */
-+ amdgpu_dm_irq_schedule_work(adev, src);
-+
-+ return 0;
-+}
-+
-+static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
-+{
-+ switch (type) {
-+ case AMDGPU_HPD_1:
-+ return DC_IRQ_SOURCE_HPD1;
-+ case AMDGPU_HPD_2:
-+ return DC_IRQ_SOURCE_HPD2;
-+ case AMDGPU_HPD_3:
-+ return DC_IRQ_SOURCE_HPD3;
-+ case AMDGPU_HPD_4:
-+ return DC_IRQ_SOURCE_HPD4;
-+ case AMDGPU_HPD_5:
-+ return DC_IRQ_SOURCE_HPD5;
-+ case AMDGPU_HPD_6:
-+ return DC_IRQ_SOURCE_HPD6;
-+ default:
-+ return DC_IRQ_SOURCE_INVALID;
-+ }
-+}
-+
-+static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
-+ struct amdgpu_irq_src *source,
-+ unsigned type,
-+ enum amdgpu_interrupt_state state)
-+{
-+ enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
-+ bool st = (state == AMDGPU_IRQ_STATE_ENABLE);
-+
-+ dc_interrupt_set(adev->dm.dc, src, st);
-+ return 0;
-+}
-+
-+static inline int dm_irq_state(
-+ struct amdgpu_device *adev,
-+ struct amdgpu_irq_src *source,
-+ unsigned crtc_id,
-+ enum amdgpu_interrupt_state state,
-+ const enum irq_type dal_irq_type,
-+ const char *func)
-+{
-+ bool st;
-+ enum dc_irq_source irq_source;
-+
-+ struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];
-+
-+ if (!acrtc->target) {
-+ DRM_INFO(
-+ "%s: target is null for crtc %d, talk to David R\n",
-+ func,
-+ crtc_id);
-+ WARN_ON(true);
-+ return 0;
-+ }
-+
-+ irq_source = dc_target_get_irq_src(acrtc->target, dal_irq_type);
-+
-+ st = (state == AMDGPU_IRQ_STATE_ENABLE);
-+
-+ dc_interrupt_set(adev->dm.dc, irq_source, st);
-+ return 0;
-+}
-+
-+static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
-+ struct amdgpu_irq_src *source,
-+ unsigned crtc_id,
-+ enum amdgpu_interrupt_state state)
-+{
-+ return dm_irq_state(
-+ adev,
-+ source,
-+ crtc_id,
-+ state,
-+ IRQ_TYPE_PFLIP,
-+ __func__);
-+}
-+
-+static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
-+ struct amdgpu_irq_src *source,
-+ unsigned crtc_id,
-+ enum amdgpu_interrupt_state state)
-+{
-+ return dm_irq_state(
-+ adev,
-+ source,
-+ crtc_id,
-+ state,
-+ IRQ_TYPE_VUPDATE,
-+ __func__);
-+}
-+
-+static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
-+ .set = amdgpu_dm_set_crtc_irq_state,
-+ .process = amdgpu_dm_irq_handler,
-+};
-+
-+static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
-+ .set = amdgpu_dm_set_pflip_irq_state,
-+ .process = amdgpu_dm_irq_handler,
-+};
-+
-+static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
-+ .set = amdgpu_dm_set_hpd_irq_state,
-+ .process = amdgpu_dm_irq_handler,
-+};
-+
-+void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
-+{
-+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
-+ adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
-+
-+ adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
-+ adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
-+
-+ adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
-+ adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
-+}
-+
-+/**
-+ * amdgpu_dm_hpd_init - hpd setup callback.
-+ *
-+ * @adev: amdgpu_device pointer
-+ *
-+ * Setup the hpd pins used by the card (evergreen+).
-+ * Enable the pin, set the polarity, and enable the hpd interrupts.
-+ */
-+void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
-+{
-+ struct drm_device *dev = adev->ddev;
-+ struct drm_connector *connector;
-+
-+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+ struct amdgpu_connector *amdgpu_connector =
-+ to_amdgpu_connector(connector);
-+ enum dc_irq_source src =
-+ amdgpu_dm_hpd_to_dal_irq_source(
-+ amdgpu_connector->hpd.hpd);
-+ const struct dc_link *dc_link = amdgpu_connector->dc_link;
-+
-+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
-+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
-+			/* Don't try to enable hpd on eDP or LVDS, to avoid
-+			 * breaking the aux dp channel on imac; this helps
-+			 * (but does not completely fix)
-+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
-+			 * and also avoids interrupt storms during dpms.
-+			 */
-+ continue;
-+ }
-+
-+ dc_interrupt_set(adev->dm.dc, src, true);
-+ amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
-+
-+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
-+ dc_interrupt_set(adev->dm.dc,
-+ dc_link->irq_source_hpd_rx,
-+ true);
-+ }
-+ }
-+}
-+
-+/**
-+ * amdgpu_dm_hpd_fini - hpd tear down callback.
-+ *
-+ * @adev: amdgpu_device pointer
-+ *
-+ * Tear down the hpd pins used by the card (evergreen+).
-+ * Disable the hpd interrupts.
-+ */
-+void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
-+{
-+ struct drm_device *dev = adev->ddev;
-+ struct drm_connector *connector;
-+
-+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-+ struct amdgpu_connector *amdgpu_connector =
-+ to_amdgpu_connector(connector);
-+ enum dc_irq_source src =
-+ amdgpu_dm_hpd_to_dal_irq_source(
-+ amdgpu_connector->hpd.hpd);
-+
-+ dc_interrupt_set(adev->dm.dc, src, false);
-+ amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
-+ }
-+}
-diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.h
-new file mode 100644
-index 0000000..afedb50
---- /dev/null
-+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.h
-@@ -0,0 +1,122 @@
-+/*
-+ * Copyright 2015 Advanced Micro Devices, Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ */
-+
-+#ifndef __AMDGPU_DM_IRQ_H__
-+#define __AMDGPU_DM_IRQ_H__
-+
-+#include "irq_types.h" /* DAL irq definitions */
-+
-+/*
-+ * Display Manager IRQ-related interfaces (for use by DAL).
-+ */
-+
-+/**
-+ * amdgpu_dm_irq_init - Initialize internal structures of 'amdgpu_dm_irq'.
-+ *
-+ * This function should be called exactly once - during DM initialization.
-+ *
-+ * Returns:
-+ * 0 - success
-+ * non-zero - error
-+ */
-+int amdgpu_dm_irq_init(
-+ struct amdgpu_device *adev);
-+
-+/**
-+ * amdgpu_dm_irq_fini - deallocate internal structures of 'amdgpu_dm_irq'.
-+ *
-+ * This function should be called exactly once - during DM destruction.
-+ *
-+ */
-+void amdgpu_dm_irq_fini(
-+ struct amdgpu_device *adev);
-+
-+/**
-+ * amdgpu_dm_irq_register_interrupt - register irq handler for Display block.
-+ *
-+ * @adev: AMD DRM device
-+ * @int_params: parameters for the irq
-+ * @ih: pointer to the irq handler function
-+ * @handler_args: arguments which will be passed to ih
-+ *
-+ * Returns:
-+ * IRQ Handler Index on success.
-+ * DAL_INVALID_IRQ_HANDLER_IDX on failure.
-+ *
-+ * Cannot be called from an interrupt handler.
-+ */
-+void *amdgpu_dm_irq_register_interrupt(
-+ struct amdgpu_device *adev,
-+ struct dc_interrupt_params *int_params,
-+ void (*ih)(void *),
-+ void *handler_args);
-+
-+/**
-+ * amdgpu_dm_irq_unregister_interrupt - unregister handler which was registered
-+ * by amdgpu_dm_irq_register_interrupt().
-+ *
-+ * @adev: AMD DRM device.
-+ * @ih_index: irq handler index which was returned by
-+ * amdgpu_dm_irq_register_interrupt
-+ */
-+void amdgpu_dm_irq_unregister_interrupt(
-+ struct amdgpu_device *adev,
-+ enum dc_irq_source irq_source,
-+ void *ih_index);
-+
-+void amdgpu_dm_irq_register_timer(
-+ struct amdgpu_device *adev,
-+ struct dc_timer_interrupt_params *int_params,
-+ interrupt_handler ih,
-+ void *args);
-+
-+/**
-+ * amdgpu_dm_irq_handler
-+ * Generic IRQ handler: calls all registered high-context handlers
-+ * immediately and schedules work for the low-context handlers.
-+ */
-+int amdgpu_dm_irq_handler(
-+ struct amdgpu_device *adev,
-+ struct amdgpu_irq_src *source,
-+ struct amdgpu_iv_entry *entry);
-+
-+void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev);
-+
-+void amdgpu_dm_hpd_init(struct amdgpu_device *adev);
-+void amdgpu_dm_hpd_fini(struct amdgpu_device *adev);
-+
-+/**
-+ * amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend.
-+ *
-+ */
-+int amdgpu_dm_irq_suspend(
-+ struct amdgpu_device *adev);
-+
-+/**
-+ * amdgpu_dm_irq_resume - enable ASIC interrupt during resume.
-+ *
-+ */
-+int amdgpu_dm_irq_resume(
-+ struct amdgpu_device *adev);
-+
-+#endif /* __AMDGPU_DM_IRQ_H__ */
-diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.c
-new file mode 100644
-index 0000000..6d9ee15
---- /dev/null
-+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.c
-@@ -0,0 +1,353 @@
-+/*
-+ * Copyright 2012-15 Advanced Micro Devices, Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: AMD
-+ *
-+ */
-+
-+#include "dal_services.h"
-+
-+#include "amdgpu.h"
-+
-+#include "amdgpu_dm_types.h"
-+
-+#include "amdgpu_dm_mst_types.h"
-+
-+#include "dc.h"
-+
-+static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
-+{
-+ struct pci_dev *pdev = to_pci_dev(aux->dev);
-+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
-+ struct amdgpu_device *adev = drm_dev->dev_private;
-+ struct dc *dc = adev->dm.dc;
-+
-+ switch (msg->request) {
-+ case DP_AUX_NATIVE_READ:
-+ dc_read_dpcd(
-+ dc,
-+ TO_DM_AUX(aux)->link_index,
-+ msg->address,
-+ msg->buffer,
-+ msg->size);
-+ break;
-+ case DP_AUX_NATIVE_WRITE:
-+ dc_write_dpcd(
-+ dc,
-+ TO_DM_AUX(aux)->link_index,
-+ msg->address,
-+ msg->buffer,
-+ msg->size);
-+ break;
-+ default:
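-+		/* Other request types (e.g. I2C-over-AUX) are not
-+		 * handled here; zero bytes are reported back. */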
-+ return 0;
-+ }
-+
-+ return msg->size;
-+}
-+
-+static enum drm_connector_status
-+dm_dp_mst_detect(struct drm_connector *connector, bool force)
-+{
-+ struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
-+ struct amdgpu_connector *master = aconnector->mst_port;
-+
-+ return drm_dp_mst_detect_port(connector, &master->mst_mgr, aconnector->port);
-+}
-+
-+static void
-+dm_dp_mst_connector_destroy(struct drm_connector *connector)
-+{
-+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-+ struct amdgpu_encoder *amdgpu_encoder = amdgpu_connector->mst_encoder;
-+
-+ drm_encoder_cleanup(&amdgpu_encoder->base);
-+ kfree(amdgpu_encoder);
-+ drm_connector_cleanup(connector);
-+ kfree(amdgpu_connector);
-+}
-+
-+static int dm_dp_mst_connector_dpms(struct drm_connector *connector, int mode)
-+{
-+ DRM_DEBUG_KMS("\n");
-+ return 0;
-+}
-+
-+static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
-+ .dpms = dm_dp_mst_connector_dpms,
-+ .detect = dm_dp_mst_detect,
-+ .fill_modes = drm_helper_probe_single_connector_modes,
-+ .destroy = dm_dp_mst_connector_destroy,
-+ .reset = amdgpu_dm_connector_funcs_reset,
-+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
-+ .atomic_destroy_state = amdgpu_dm_connector_atomic_destroy_state,
-+ .atomic_set_property = amdgpu_dm_connector_atomic_set_property
-+};
-+
-+static const struct dc_sink *dm_dp_mst_add_mst_sink(
-+ struct dc_link *dc_link,
-+ uint8_t *edid,
-+ uint16_t len)
-+{
-+ struct dc_sink *dc_sink;
-+ struct sink_init_data init_params = {
-+ .link = dc_link,
-+ .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST};
-+
-+ if (len > MAX_EDID_BUFFER_SIZE) {
-+ DRM_ERROR("Max EDID buffer size breached!\n");
-+ return NULL;
-+ }
-+
-+ /*
-+ * TODO make dynamic-ish?
-+ * dc_link->connector_signal;
-+ */
-+
-+ dc_sink = sink_create(&init_params);
-+
-+ if (!dc_sink)
-+ return NULL;
-+
-+ dc_service_memmove(dc_sink->dc_edid.raw_edid, edid, len);
-+ dc_sink->dc_edid.length = len;
-+
-+ if (!dc_link_add_sink(
-+ dc_link,
-+ dc_sink))
-+ goto fail;
-+
-+ /* dc_sink_retain(&core_sink->public); */
-+
-+ return dc_sink;
-+
-+fail:
-+ dc_link_remove_sink(dc_link, dc_sink);
-+ return NULL;
-+}
-+
-+static int dm_dp_mst_get_modes(struct drm_connector *connector)
-+{
-+ struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
-+ struct amdgpu_connector *master = aconnector->mst_port;
-+ struct edid *edid;
-+ const struct dc_sink *sink;
-+ int ret = 0;
-+
-+ edid = drm_dp_mst_get_edid(connector, &master->mst_mgr, aconnector->port);
-+
-+ if (!edid) {
-+ drm_mode_connector_update_edid_property(
-+ &aconnector->base,
-+ NULL);
-+
-+ return ret;
-+ }
-+
-+ aconnector->edid = edid;
-+
-+ if (!aconnector->dc_sink) {
-+ sink = dm_dp_mst_add_mst_sink(
-+ (struct dc_link *)aconnector->dc_link,
-+ (uint8_t *)edid,
-+ (edid->extensions + 1) * EDID_LENGTH);
-+ aconnector->dc_sink = sink;
-+ }
-+
-+ DRM_DEBUG_KMS("edid retrieved %p\n", edid);
-+
-+ drm_mode_connector_update_edid_property(
-+ &aconnector->base,
-+ aconnector->edid);
-+
-+ ret = drm_add_edid_modes(&aconnector->base, aconnector->edid);
-+
-+ drm_edid_to_eld(&aconnector->base, aconnector->edid);
-+
-+ return ret;
-+}
-+
-+static enum drm_mode_status
-+dm_dp_mst_mode_valid(struct drm_connector *connector,
-+ struct drm_display_mode *mode)
-+{
-+ return MODE_OK;
-+}
-+
-+static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector)
-+{
-+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-+
-+ return &amdgpu_connector->mst_encoder->base;
-+}
-+
-+static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
-+ .get_modes = dm_dp_mst_get_modes,
-+ .mode_valid = dm_dp_mst_mode_valid,
-+ .best_encoder = dm_mst_best_encoder,
-+};
-+
-+static struct amdgpu_encoder *
-+dm_dp_create_fake_mst_encoder(struct amdgpu_connector *connector)
-+{
-+ struct drm_device *dev = connector->base.dev;
-+ struct amdgpu_device *adev = dev->dev_private;
-+ struct amdgpu_encoder *amdgpu_encoder;
-+ struct drm_encoder *encoder;
-+ const struct drm_connector_helper_funcs *connector_funcs =
-+ connector->base.helper_private;
-+ struct drm_encoder *enc_master =
-+ connector_funcs->best_encoder(&connector->base);
-+
-+ DRM_DEBUG_KMS("enc master is %p\n", enc_master);
-+ amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
-+ if (!amdgpu_encoder)
-+ return NULL;
-+
-+ encoder = &amdgpu_encoder->base;
-+ switch (adev->mode_info.num_crtc) {
-+ case 1:
-+ encoder->possible_crtcs = 0x1;
-+ break;
-+ case 2:
-+ default:
-+ encoder->possible_crtcs = 0x3;
-+ break;
-+ case 4:
-+ encoder->possible_crtcs = 0xf;
-+ break;
-+ case 6:
-+ encoder->possible_crtcs = 0x3f;
-+ break;
-+ }
-+
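-+	/* NOTE: this assignment overrides the switch above, restricting
-+	 * every fake MST encoder to CRTC 0 for now. */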
-+ encoder->possible_crtcs = 0x1;
-+
-+ drm_encoder_init(
-+ dev,
-+ &amdgpu_encoder->base,
-+ NULL,
-+ DRM_MODE_ENCODER_DPMST,
-+ NULL);
-+
-+ drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
-+
-+ return amdgpu_encoder;
-+}
-+
-+static struct drm_connector *dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
-+ struct drm_dp_mst_port *port,
-+ const char *pathprop)
-+{
-+ struct amdgpu_connector *master = container_of(mgr, struct amdgpu_connector, mst_mgr);
-+ struct drm_device *dev = master->base.dev;
-+ struct amdgpu_device *adev = dev->dev_private;
-+ struct amdgpu_connector *aconnector;
-+ struct drm_connector *connector;
-+
-+ aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
-+ if (!aconnector)
-+ return NULL;
-+
-+ aconnector->is_mst_connector = true;
-+ connector = &aconnector->base;
-+ aconnector->port = port;
-+ aconnector->mst_port = master;
-+ aconnector->dc_link = master->dc_link;
-+
-+	/* Initialize connector state before adding the connector to the
-+	 * drm and framebuffer lists */
-+ amdgpu_dm_connector_funcs_reset(connector);
-+
-+ drm_connector_init(dev, connector, &dm_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
-+ drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);
-+ aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
-+
-+ drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
-+ drm_mode_connector_set_path_property(connector, pathprop);
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
-+ mutex_unlock(&dev->mode_config.mutex);
-+
-+ drm_connector_register(connector);
-+
-+ DRM_DEBUG_KMS(":%d\n", connector->base.id);
-+
-+ return connector;
-+}
-+
-+static void dm_dp_destroy_mst_connector(
-+ struct drm_dp_mst_topology_mgr *mgr,
-+ struct drm_connector *connector)
-+{
-+ struct amdgpu_connector *master =
-+ container_of(mgr, struct amdgpu_connector, mst_mgr);
-+ struct drm_device *dev = master->base.dev;
-+ struct amdgpu_device *adev = dev->dev_private;
-+ drm_connector_unregister(connector);
-+ /* need to nuke the connector */
-+ mutex_lock(&dev->mode_config.mutex);
-+ /* dpms off */
-+ drm_fb_helper_remove_one_connector(
-+ &adev->mode_info.rfbdev->helper,
-+ connector);
-+
-+ drm_connector_cleanup(connector);
-+ mutex_unlock(&dev->mode_config.mutex);
-+
-+ kfree(connector);
-+ DRM_DEBUG_KMS("\n");
-+}
-+
-+static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
-+{
-+ struct amdgpu_connector *master = container_of(mgr, struct amdgpu_connector, mst_mgr);
-+ struct drm_device *dev = master->base.dev;
-+
-+ drm_kms_helper_hotplug_event(dev);
-+}
-+
-+struct drm_dp_mst_topology_cbs dm_mst_cbs = {
-+ .add_connector = dm_dp_add_mst_connector,
-+ .destroy_connector = dm_dp_destroy_mst_connector,
-+ .hotplug = dm_dp_mst_hotplug,
-+};
-+
-+void amdgpu_dm_initialize_mst_connector(
-+ struct amdgpu_display_manager *dm,
-+ struct amdgpu_connector *aconnector)
-+{
-+ aconnector->dm_dp_aux.aux.name = "dmdc";
-+ aconnector->dm_dp_aux.aux.dev = dm->adev->dev;
-+ aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
-+ aconnector->dm_dp_aux.link_index = aconnector->connector_id;
-+
-+ drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
-+
-+ aconnector->mst_mgr.cbs = &dm_mst_cbs;
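-+	/* The arguments below follow the drm_dp_mst_topology_mgr_init()
-+	 * signature of this kernel generation (believed to be): manager,
-+	 * device, aux channel, max DPCD transaction size in bytes (16),
-+	 * max payload streams (4) and the connector id as base. */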
-+ drm_dp_mst_topology_mgr_init(
-+ &aconnector->mst_mgr,
-+ dm->adev->dev,
-+ &aconnector->dm_dp_aux.aux,
-+ 16,
-+ 4,
-+ aconnector->connector_id);
-+}
-diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.h
-new file mode 100644
-index 0000000..6130d62
---- /dev/null
-+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_mst_types.h
-@@ -0,0 +1,36 @@
-+/*
-+ * Copyright 2012-15 Advanced Micro Devices, Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: AMD
-+ *
-+ */
-+
-+#ifndef __DAL_AMDGPU_DM_MST_TYPES_H__
-+#define __DAL_AMDGPU_DM_MST_TYPES_H__
-+
-+struct amdgpu_display_manager;
-+struct amdgpu_connector;
-+
-+void amdgpu_dm_initialize_mst_connector(
-+ struct amdgpu_display_manager *dm,
-+ struct amdgpu_connector *aconnector);
-+
-+#endif
-diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.c b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.c
-new file mode 100644
-index 0000000..bfff48c
---- /dev/null
-+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.c
-@@ -0,0 +1,2390 @@
-+/*
-+ * Copyright 2012-13 Advanced Micro Devices, Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: AMD
-+ *
-+ */
-+
-+#include "dal_services_types.h"
-+
-+#include <linux/types.h>
-+#include <drm/drmP.h>
-+#include <drm/drm_atomic_helper.h>
-+#include <drm/drm_fb_helper.h>
-+#include <drm/drm_atomic.h>
-+#include "amdgpu.h"
-+#include "amdgpu_pm.h"
-+/* We need to #undef FRAME_SIZE and DEPRECATED because they conflict
-+ * with ptrace-abi.h's #define's of them. */
-+#undef FRAME_SIZE
-+#undef DEPRECATED
-+
-+#include "mode_query_interface.h"
-+#include "dcs_types.h"
-+#include "mode_manager_types.h"
-+
-+/*#include "amdgpu_buffer.h"*/
-+
-+#include "dce/dce_11_0_d.h"
-+#include "dce/dce_11_0_sh_mask.h"
-+#include "dce/dce_11_0_enum.h"
-+
-+#include "dc.h"
-+
-+#include "amdgpu_dm_types.h"
-+#include "amdgpu_dm_mst_types.h"
-+
-+struct dm_connector_state {
-+ struct drm_connector_state base;
-+
-+ enum amdgpu_rmx_type scaling;
-+ uint8_t underscan_vborder;
-+ uint8_t underscan_hborder;
-+ bool underscan_enable;
-+};
-+
-+#define to_dm_connector_state(x)\
-+ container_of((x), struct dm_connector_state, base)
-+
-+#define AMDGPU_CRTC_MODE_PRIVATE_FLAGS_GAMMASET 1
-+
-+void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
-+{
-+ drm_encoder_cleanup(encoder);
-+ kfree(encoder);
-+}
-+
-+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
-+ .destroy = amdgpu_dm_encoder_destroy,
-+};
-+
-+static void dm_set_cursor(
-+ struct amdgpu_crtc *amdgpu_crtc,
-+ uint64_t gpu_addr,
-+ uint32_t width,
-+ uint32_t height)
-+{
-+	struct dc_cursor_attributes attributes;
-+
-+	amdgpu_crtc->cursor_width = width;
-+ amdgpu_crtc->cursor_height = height;
-+
-+ attributes.address.high_part = upper_32_bits(gpu_addr);
-+ attributes.address.low_part = lower_32_bits(gpu_addr);
-+	attributes.width = width - 1;
-+	attributes.height = height - 1;
-+ attributes.x_hot = 0;
-+ attributes.y_hot = 0;
-+ attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
-+ attributes.rotation_angle = 0;
-+ attributes.attribute_flags.value = 0;
-+
-+ if (!dc_target_set_cursor_attributes(
-+ amdgpu_crtc->target,
-+ &attributes)) {
-+ DRM_ERROR("DC failed to set cursor attributes\n");
-+ }
-+}
-+
-+static int dm_crtc_unpin_cursor_bo_old(
-+ struct amdgpu_crtc *amdgpu_crtc)
-+{
-+ struct amdgpu_bo *robj;
-+ int ret = 0;
-+
-+ if (NULL != amdgpu_crtc && NULL != amdgpu_crtc->cursor_bo) {
-+ robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-+
-+ ret = amdgpu_bo_reserve(robj, false);
-+
-+ if (likely(ret == 0)) {
-+ amdgpu_bo_unpin(robj);
-+ amdgpu_bo_unreserve(robj);
-+ }
-+ } else {
-+		DRM_ERROR("dm_crtc_unpin_cursor_bo_old bo %x, leaked %p\n",
-+ ret,
-+ amdgpu_crtc->cursor_bo);
-+ }
-+
-+ drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
-+ amdgpu_crtc->cursor_bo = NULL;
-+
-+ return ret;
-+}
-+
-+static int dm_crtc_pin_cursor_bo_new(
-+ struct drm_crtc *crtc,
-+ struct drm_file *file_priv,
-+ uint32_t handle,
-+ struct amdgpu_bo **ret_obj,
-+ uint64_t *gpu_addr)
-+{
-+ struct amdgpu_crtc *amdgpu_crtc;
-+ struct amdgpu_bo *robj;
-+ struct drm_gem_object *obj;
-+	int ret = -EINVAL;
-+
-+ if (NULL != crtc) {
-+ amdgpu_crtc = to_amdgpu_crtc(crtc);
-+
-+ obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
-+
-+ if (!obj) {
-+ DRM_ERROR(
-+ "Cannot find cursor object %x for crtc %d\n",
-+ handle,
-+ amdgpu_crtc->crtc_id);
-+ goto release;
-+ }
-+ robj = gem_to_amdgpu_bo(obj);
-+
-+ ret = amdgpu_bo_reserve(robj, false);
-+
-+ if (unlikely(ret != 0)) {
-+ drm_gem_object_unreference_unlocked(obj);
-+ DRM_ERROR("dm_crtc_pin_cursor_bo_new ret %x, handle %x\n",
-+ ret, handle);
-+ goto release;
-+ }
-+
-+ ret = amdgpu_bo_pin(robj, AMDGPU_GEM_DOMAIN_VRAM, NULL);
-+
-+ if (ret == 0) {
-+ *gpu_addr = amdgpu_bo_gpu_offset(robj);
-+ *ret_obj = robj;
-+ }
-+ amdgpu_bo_unreserve(robj);
-+ if (ret)
-+ drm_gem_object_unreference_unlocked(obj);
-+
-+ }
-+release:
-+
-+ return ret;
-+}
-+
-+static int dm_crtc_cursor_set(
-+ struct drm_crtc *crtc,
-+ struct drm_file *file_priv,
-+ uint32_t handle,
-+ uint32_t width,
-+ uint32_t height)
-+{
-+ struct amdgpu_bo *new_cursor_bo;
-+ uint64_t gpu_addr;
-+ struct dc_cursor_position position;
-+
-+ int ret;
-+
-+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-+
-+	ret = -EINVAL;
-+ new_cursor_bo = NULL;
-+ gpu_addr = 0;
-+
-+ DRM_DEBUG_KMS(
-+ "%s: crtc_id=%d with handle %d and size %d to %d, bo_object %p\n",
-+ __func__,
-+ amdgpu_crtc->crtc_id,
-+ handle,
-+ width,
-+ height,
-+ amdgpu_crtc->cursor_bo);
-+
-+ if (!handle) {
-+ /* turn off cursor */
-+ position.enable = false;
-+ position.x = 0;
-+ position.y = 0;
-+ position.hot_spot_enable = false;
-+
-+ if (amdgpu_crtc->target) {
-+			/* set cursor visibility to false */
-+ dc_target_set_cursor_position(
-+ amdgpu_crtc->target,
-+ &position);
-+ }
-+		/* unpin old cursor buffer and update cache */
-+ ret = dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);
-+ goto release;
-+
-+ }
-+
-+ if ((width > amdgpu_crtc->max_cursor_width) ||
-+ (height > amdgpu_crtc->max_cursor_height)) {
-+ DRM_ERROR(
-+ "%s: bad cursor width or height %d x %d\n",
-+ __func__,
-+ width,
-+ height);
-+ goto release;
-+ }
-+	/* try to pin the new cursor bo */
-+	ret = dm_crtc_pin_cursor_bo_new(crtc, file_priv, handle,
-+		&new_cursor_bo, &gpu_addr);
-+	/* if the pin was not successful then return an error */
-+	if (ret)
-+		goto release;
-+
-+	/* program the new cursor bo to hardware */
-+	dm_set_cursor(amdgpu_crtc, gpu_addr, width, height);
-+
-+	/* unpin the old, no longer used cursor bo and
-+	 * return its memory and mapping */
-+	dm_crtc_unpin_cursor_bo_old(amdgpu_crtc);
-+
-+	/* assign the new cursor bo to our internal cache */
-+	amdgpu_crtc->cursor_bo = &new_cursor_bo->gem_base;
-+
-+release:
-+ return ret;
-+
-+}
-+
-+static int dm_crtc_cursor_move(struct drm_crtc *crtc,
-+ int x, int y)
-+{
-+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-+ int xorigin = 0, yorigin = 0;
-+ struct dc_cursor_position position;
-+
-+	/* avivo cursors are offset into the total surface */
-+ x += crtc->primary->state->src_x >> 16;
-+ y += crtc->primary->state->src_y >> 16;
-+
-+ /*
-+ * TODO: for cursor debugging unguard the following
-+ */
-+#if 0
-+ DRM_DEBUG_KMS(
-+ "%s: x %d y %d c->x %d c->y %d\n",
-+ __func__,
-+ x,
-+ y,
-+ crtc->x,
-+ crtc->y);
-+#endif
-+
-+ if (x < 0) {
-+ xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
-+ x = 0;
-+ }
-+ if (y < 0) {
-+ yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
-+ y = 0;
-+ }
-+
-+ position.enable = true;
-+ position.x = x;
-+ position.y = y;
-+
-+ position.hot_spot_enable = true;
-+ position.x_origin = xorigin;
-+ position.y_origin = yorigin;
-+
-+ if (!dc_target_set_cursor_position(
-+ amdgpu_crtc->target,
-+ &position)) {
-+ DRM_ERROR("DC failed to set cursor position\n");
-+ return -EINVAL;
-+ }
-+
-+#if BUILD_FEATURE_TIMING_SYNC
-+ {
-+ struct drm_device *dev = crtc->dev;
-+ struct amdgpu_device *adev = dev->dev_private;
-+ struct amdgpu_display_manager *dm = &adev->dm;
-+
-+ dc_print_sync_report(dm->dc);
-+ }
-+#endif
-+ return 0;
-+}
-+
-+static void dm_crtc_cursor_reset(struct drm_crtc *crtc)
-+{
-+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-+
-+ DRM_DEBUG_KMS(
-+ "%s: with cursor_bo %p\n",
-+ __func__,
-+ amdgpu_crtc->cursor_bo);
-+
-+ if (amdgpu_crtc->cursor_bo && amdgpu_crtc->target) {
-+ dm_set_cursor(
-+ amdgpu_crtc,
-+ amdgpu_bo_gpu_offset(gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo)),
-+ amdgpu_crtc->cursor_width,
-+ amdgpu_crtc->cursor_height);
-+ }
-+}
-+
-+static bool fill_rects_from_plane_state(
-+ struct drm_plane_state *state,
-+ struct dc_surface *surface)
-+{
-+ surface->src_rect.x = state->src_x >> 16;
-+ surface->src_rect.y = state->src_y >> 16;
-+	/* src_* values are 16.16 fixed point; we ignore the fractional
-+	 * part for now (see the example after this function) :( */
-+ surface->src_rect.width = state->src_w >> 16;
-+
-+ if (surface->src_rect.width == 0)
-+ return false;
-+
-+ surface->src_rect.height = state->src_h >> 16;
-+ if (surface->src_rect.height == 0)
-+ return false;
-+
-+ surface->dst_rect.x = state->crtc_x;
-+ surface->dst_rect.y = state->crtc_y;
-+
-+ if (state->crtc_w == 0)
-+ return false;
-+
-+ surface->dst_rect.width = state->crtc_w;
-+
-+ if (state->crtc_h == 0)
-+ return false;
-+
-+ surface->dst_rect.height = state->crtc_h;
-+
-+ surface->clip_rect = surface->dst_rect;
-+
-+ switch (state->rotation) {
-+ case DRM_ROTATE_0:
-+ surface->rotation = ROTATION_ANGLE_0;
-+ break;
-+ case DRM_ROTATE_90:
-+ surface->rotation = ROTATION_ANGLE_90;
-+ break;
-+ case DRM_ROTATE_180:
-+ surface->rotation = ROTATION_ANGLE_180;
-+ break;
-+ case DRM_ROTATE_270:
-+ surface->rotation = ROTATION_ANGLE_270;
-+ break;
-+ default:
-+ surface->rotation = ROTATION_ANGLE_0;
-+ break;
-+ }
-+
-+ return true;
-+}
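-+
-+/*
-+ * Example only -- not part of this patch. DRM plane source coordinates
-+ * are Q16.16 fixed point, hence the '>> 16' conversions above, e.g.:
-+ *
-+ *	state->src_w = 1920 << 16;			(1920.0 in 16.16)
-+ *	surface->src_rect.width = state->src_w >> 16;	(== 1920)
-+ */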
-+
-+static bool get_fb_info(
-+ struct amdgpu_framebuffer *amdgpu_fb,
-+ uint64_t *tiling_flags,
-+ uint64_t *fb_location)
-+{
-+ struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-+	int r = amdgpu_bo_reserve(rbo, false);
-+
-+	if (unlikely(r != 0)) {
-+		DRM_ERROR("Unable to reserve buffer\n");
-+		return false;
-+	}
-+
-+ if (fb_location)
-+ *fb_location = amdgpu_bo_gpu_offset(rbo);
-+
-+ if (tiling_flags)
-+ amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
-+
-+ amdgpu_bo_unreserve(rbo);
-+
-+ return true;
-+}
-+
-+static void fill_plane_attributes_from_fb(
-+ struct dc_surface *surface,
-+ struct amdgpu_framebuffer *amdgpu_fb)
-+{
-+ uint64_t tiling_flags;
-+ uint64_t fb_location;
-+ struct drm_framebuffer *fb = &amdgpu_fb->base;
-+
-+ get_fb_info(
-+ amdgpu_fb,
-+ &tiling_flags,
-+ &fb_location);
-+
-+ surface->address.type = PLN_ADDR_TYPE_GRAPHICS;
-+ surface->address.grph.addr.low_part = lower_32_bits(fb_location);
-+ surface->address.grph.addr.high_part = upper_32_bits(fb_location);
-+
-+ switch (fb->pixel_format) {
-+ case DRM_FORMAT_C8:
-+ surface->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
-+ break;
-+ case DRM_FORMAT_RGB565:
-+ surface->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
-+ break;
-+ case DRM_FORMAT_XRGB8888:
-+ case DRM_FORMAT_ARGB8888:
-+ surface->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
-+ break;
-+ default:
-+ DRM_ERROR("Unsupported screen depth %d\n", fb->bits_per_pixel);
-+ return;
-+ }
-+
-+ surface->tiling_info.value = 0;
-+
-+	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) ==
-+			ARRAY_2D_TILED_THIN1) {
-+ unsigned bankw, bankh, mtaspect, tile_split, num_banks;
-+
-+ bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
-+ bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
-+ mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
-+ tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
-+ num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
-+
-+ /* XXX fix me for VI */
-+ surface->tiling_info.grph.NUM_BANKS = num_banks;
-+ surface->tiling_info.grph.ARRAY_MODE =
-+ ARRAY_2D_TILED_THIN1;
-+ surface->tiling_info.grph.TILE_SPLIT = tile_split;
-+ surface->tiling_info.grph.BANK_WIDTH = bankw;
-+ surface->tiling_info.grph.BANK_HEIGHT = bankh;
-+ surface->tiling_info.grph.TILE_ASPECT = mtaspect;
-+ surface->tiling_info.grph.TILE_MODE =
-+ ADDR_SURF_MICRO_TILING_DISPLAY;
-+ } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
-+ == ARRAY_1D_TILED_THIN1) {
-+ surface->tiling_info.grph.ARRAY_MODE = ARRAY_1D_TILED_THIN1;
-+ }
-+
-+ surface->tiling_info.grph.PIPE_CONFIG =
-+ AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
-+
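-+ /* DC expects surface_pitch in pixels, while the DRM fb pitches[] are
-+ * in bytes, hence the division by bytes-per-pixel below. */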
-+ surface->plane_size.grph.surface_size.x = 0;
-+ surface->plane_size.grph.surface_size.y = 0;
-+ surface->plane_size.grph.surface_size.width = fb->width;
-+ surface->plane_size.grph.surface_size.height = fb->height;
-+ surface->plane_size.grph.surface_pitch =
-+ fb->pitches[0] / (fb->bits_per_pixel / 8);
-+
-+ surface->enabled = true;
-+ surface->scaling_quality.h_taps_c = 2;
-+ surface->scaling_quality.v_taps_c = 2;
-+
-+ /* TODO: unhardcode */
-+ surface->colorimetry.limited_range = false;
-+ surface->colorimetry.color_space = SURFACE_COLOR_SPACE_SRGB;
-+ surface->scaling_quality.h_taps = 4;
-+ surface->scaling_quality.v_taps = 4;
-+ surface->stereo_format = PLANE_STEREO_FORMAT_NONE;
-+
-+}
-+
-+static void fill_gamma_from_crtc(
-+ struct drm_crtc *crtc,
-+ struct dc_surface *dc_surface)
-+{
-+ int i;
-+ struct gamma_ramp *gamma;
-+ uint16_t *red, *green, *blue;
-+ int end = (crtc->gamma_size > NUM_OF_RAW_GAMMA_RAMP_RGB_256) ?
-+ NUM_OF_RAW_GAMMA_RAMP_RGB_256 : crtc->gamma_size;
-+
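-+ /* The legacy DRM gamma LUT lives in gamma_store as three consecutive
-+ * arrays of gamma_size u16 entries (red, then green, then blue); copy
-+ * it into DC's 256-entry RGB ramp, clamping the size. */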
-+ red = crtc->gamma_store;
-+ green = red + crtc->gamma_size;
-+ blue = green + crtc->gamma_size;
-+
-+ gamma = &dc_surface->gamma_correction;
-+
-+ for (i = 0; i < end; i++) {
-+ gamma->gamma_ramp_rgb256x3x16.red[i] =
-+ (unsigned short) red[i];
-+ gamma->gamma_ramp_rgb256x3x16.green[i] =
-+ (unsigned short) green[i];
-+ gamma->gamma_ramp_rgb256x3x16.blue[i] =
-+ (unsigned short) blue[i];
-+ }
-+
-+ gamma->type = GAMMA_RAMP_RBG256X3X16;
-+ gamma->size = sizeof(gamma->gamma_ramp_rgb256x3x16);
-+}
-+
-+static void fill_plane_attributes(
-+ struct dc_surface *surface,
-+ struct drm_crtc *crtc)
-+{
-+ struct amdgpu_framebuffer *amdgpu_fb =
-+ to_amdgpu_framebuffer(crtc->primary->state->fb);
-+ fill_rects_from_plane_state(crtc->primary->state, surface);
-+ fill_plane_attributes_from_fb(
-+ surface,
-+ amdgpu_fb);
-+
-+ /* In case of gamma set, update gamma value */
-+ if (crtc->mode.private_flags &
-+ AMDGPU_CRTC_MODE_PRIVATE_FLAGS_GAMMASET) {
-+ fill_gamma_from_crtc(crtc, surface);
-+ /* reset trigger of gamma */
-+ crtc->mode.private_flags &=
-+ ~AMDGPU_CRTC_MODE_PRIVATE_FLAGS_GAMMASET;
-+ }
-+}
-+
-+/*****************************************************************************/
-+
-+struct amdgpu_connector *aconnector_from_drm_crtc(
-+ struct drm_crtc *crtc,
-+ struct drm_atomic_state *state)
-+{
-+ struct drm_connector *connector;
-+ struct amdgpu_connector *aconnector;
-+ struct drm_connector_state *conn_state;
-+ uint8_t i;
-+
-+ for_each_connector_in_state(state, connector, conn_state, i) {
-+ aconnector = to_amdgpu_connector(connector);
-+ if (connector->state->crtc == crtc)
-+ return aconnector;
-+ }
-+
-+ /* If we get here, not found. */
-+ return NULL;
-+}
-+
-+struct amdgpu_connector *aconnector_from_drm_crtc_id(
-+ const struct drm_crtc *crtc)
-+{
-+ struct drm_device *dev = crtc->dev;
-+ struct drm_connector *connector;
-+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
-+ struct amdgpu_connector *aconnector;
-+
-+ list_for_each_entry(connector,
-+ &dev->mode_config.connector_list, head) {
-+
-+ aconnector = to_amdgpu_connector(connector);
-+
-+ /* acrtc->crtc_id means display_index */
-+ if (aconnector->connector_id != acrtc->crtc_id)
-+ continue;
-+
-+ /* Found the connector */
-+ return aconnector;
-+ }
-+
-+ /* If we get here, not found. */
-+ return NULL;
-+}
-+
-+static void dm_dc_surface_commit(
-+ struct dc *dc,
-+ struct drm_crtc *crtc,
-+ struct amdgpu_framebuffer *afb)
-+{
-+ struct dc_surface *dc_surface;
-+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
-+ struct dc_target *dc_target = acrtc->target;
-+
-+ if (!dc_target) {
-+ dal_error(
-+ "%s: Failed to obtain target on crtc (%d)!\n",
-+ __func__,
-+ acrtc->crtc_id);
-+ goto fail;
-+ }
-+
-+ dc_surface = dc_create_surface(dc);
-+
-+ if (!dc_surface) {
-+ dal_error(
-+ "%s: Failed to create a surface!\n",
-+ __func__);
-+ goto fail;
-+ }
-+
-+ /* Surface programming */
-+
-+ fill_plane_attributes(dc_surface, crtc);
-+
-+ if (false == dc_commit_surfaces_to_target(
-+ dc,
-+ &dc_surface,
-+ 1,
-+ dc_target)) {
-+ dal_error(
-+ "%s: Failed to attach surface!\n",
-+ __func__);
-+ }
-+
-+ dc_surface_release(dc_surface);
-+fail:
-+ return;
-+}
-+
-+static enum dc_color_depth convert_color_depth_from_display_info(
-+ const struct drm_connector *connector)
-+{
-+ uint32_t bpc = connector->display_info.bpc;
-+
-+ /* Limit color depth to 8 bpc for now.
-+ * TODO: still need to handle deep color. */
-+ if (bpc > 8)
-+ bpc = 8;
-+
-+ switch (bpc) {
-+ case 0:
-+ /* Temporary workaround: DRM doesn't parse color depth for
-+ * EDID revisions before 1.4.
-+ * TODO: fix EDID parsing.
-+ */
-+ return COLOR_DEPTH_888;
-+ case 6:
-+ return COLOR_DEPTH_666;
-+ case 8:
-+ return COLOR_DEPTH_888;
-+ case 10:
-+ return COLOR_DEPTH_101010;
-+ case 12:
-+ return COLOR_DEPTH_121212;
-+ case 14:
-+ return COLOR_DEPTH_141414;
-+ case 16:
-+ return COLOR_DEPTH_161616;
-+ default:
-+ return COLOR_DEPTH_UNDEFINED;
-+ }
-+}
-+
-+static enum dc_aspect_ratio get_aspect_ratio(
-+ const struct drm_display_mode *mode_in)
-+{
-+ int32_t width = mode_in->crtc_hdisplay * 9;
-+ int32_t height = mode_in->crtc_vdisplay * 16;
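-+
-+ /* Cross-multiply to compare aspect ratios without floating point:
-+ * h/v == 16/9 exactly when h*9 == v*16; the +/-10 tolerance absorbs
-+ * rounding in near-16:9 modes. */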
-+ if ((width - height) < 10 && (width - height) > -10)
-+ return ASPECT_RATIO_16_9;
-+ else
-+ return ASPECT_RATIO_4_3;
-+}
-+
-+/*****************************************************************************/
-+
-+static void dc_timing_from_drm_display_mode(
-+ struct dc_crtc_timing *timing_out,
-+ const struct drm_display_mode *mode_in,
-+ const struct drm_connector *connector)
-+{
-+ memset(timing_out, 0, sizeof(struct dc_crtc_timing));
-+
-+ timing_out->h_border_left = 0;
-+ timing_out->h_border_right = 0;
-+ timing_out->v_border_top = 0;
-+ timing_out->v_border_bottom = 0;
-+ /* TODO: un-hardcode */
-+ timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
-+ timing_out->timing_standard = TIMING_STANDARD_HDMI;
-+ timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
-+ timing_out->display_color_depth = convert_color_depth_from_display_info(
-+ connector);
-+ timing_out->scan_type = SCANNING_TYPE_NODATA;
-+ timing_out->hdmi_vic = 0;
-+ timing_out->vic = drm_match_cea_mode(mode_in);
-+
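-+ /* Derive the DC timing from the DRM crtc_* fields: the front porch
-+ * is sync_start - display, the sync width is sync_end - sync_start. */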
-+ timing_out->h_addressable = mode_in->crtc_hdisplay;
-+ timing_out->h_total = mode_in->crtc_htotal;
-+ timing_out->h_sync_width =
-+ mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
-+ timing_out->h_front_porch =
-+ mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
-+ timing_out->v_total = mode_in->crtc_vtotal;
-+ timing_out->v_addressable = mode_in->crtc_vdisplay;
-+ timing_out->v_front_porch =
-+ mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
-+ timing_out->v_sync_width =
-+ mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
-+ timing_out->pix_clk_khz = mode_in->crtc_clock;
-+ timing_out->aspect_ratio = get_aspect_ratio(mode_in);
-+}
-+
-+static void fill_audio_info(
-+ struct audio_info *audio_info,
-+ const struct drm_connector *drm_connector,
-+ const struct dc_sink *dc_sink)
-+{
-+ int i = 0;
-+ int cea_revision = 0;
-+ const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
-+
-+ audio_info->manufacture_id = edid_caps->manufacturer_id;
-+ audio_info->product_id = edid_caps->product_id;
-+
-+ cea_revision = drm_connector->display_info.cea_rev;
-+
-+ while (i < AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS &&
-+ edid_caps->display_name[i]) {
-+ audio_info->display_name[i] = edid_caps->display_name[i];
-+ i++;
-+ }
-+
-+ if (cea_revision >= 3) {
-+ audio_info->mode_count = edid_caps->audio_mode_count;
-+
-+ for (i = 0; i < audio_info->mode_count; ++i) {
-+ audio_info->modes[i].format_code =
-+ (enum audio_format_code)
-+ (edid_caps->audio_modes[i].format_code);
-+ audio_info->modes[i].channel_count =
-+ edid_caps->audio_modes[i].channel_count;
-+ audio_info->modes[i].sample_rates.all =
-+ edid_caps->audio_modes[i].sample_rate;
-+ audio_info->modes[i].sample_size =
-+ edid_caps->audio_modes[i].sample_size;
-+ }
-+ }
-+
-+ audio_info->flags.all = edid_caps->speaker_flags;
-+
-+ /* TODO: we only check progressive mode; check interlaced mode too */
-+ if (drm_connector->latency_present[0]) {
-+ audio_info->video_latency = drm_connector->video_latency[0];
-+ audio_info->audio_latency = drm_connector->audio_latency[0];
-+ }
-+
-+ /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
-+
-+}
-+
-+/*TODO: move these defines elsewhere*/
-+#define DAL_MAX_CONTROLLERS 4
-+
-+static void calculate_stream_scaling_settings(
-+ const struct drm_display_mode *mode,
-+ enum amdgpu_rmx_type rmx_type,
-+ struct dc_stream *stream,
-+ uint8_t underscan_vborder,
-+ uint8_t underscan_hborder,
-+ bool underscan_enable)
-+{
-+ /* Full screen scaling by default */
-+ stream->src.width = mode->hdisplay;
-+ stream->src.height = mode->vdisplay;
-+ stream->dst.width = stream->timing.h_addressable;
-+ stream->dst.height = stream->timing.v_addressable;
-+
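-+ /* Preserve the source aspect ratio by cross-multiplication:
-+ * src_w * dst_h < src_h * dst_w means the destination is wider than
-+ * the source, so shrink the destination width (pillarbox); otherwise
-+ * shrink the destination height (letterbox). */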
-+ if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
-+ if (stream->src.width * stream->dst.height <
-+ stream->src.height * stream->dst.width) {
-+ /* height needs less upscaling/more downscaling */
-+ stream->dst.width = stream->src.width *
-+ stream->dst.height / stream->src.height;
-+ } else {
-+ /* width needs less upscaling/more downscaling */
-+ stream->dst.height = stream->src.height *
-+ stream->dst.width / stream->src.width;
-+ }
-+ } else if (rmx_type == RMX_CENTER) {
-+ stream->dst = stream->src;
-+ }
-+
-+ stream->dst.x = (stream->timing.h_addressable - stream->dst.width) / 2;
-+ stream->dst.y = (stream->timing.v_addressable - stream->dst.height) / 2;
-+
-+ if (underscan_enable) {
-+ stream->dst.x += underscan_hborder / 2;
-+ stream->dst.y += underscan_vborder / 2;
-+ stream->dst.width -= underscan_hborder;
-+ stream->dst.height -= underscan_vborder;
-+ }
-+}
-+
-+
-+static void copy_crtc_timing_for_drm_display_mode(
-+ const struct drm_display_mode *src_mode,
-+ struct drm_display_mode *dst_mode)
-+{
-+ dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
-+ dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
-+ dst_mode->crtc_clock = src_mode->crtc_clock;
-+ dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
-+ dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
-+ dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
-+ dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
-+ dst_mode->crtc_htotal = src_mode->crtc_htotal;
-+ dst_mode->crtc_hskew = src_mode->crtc_hskew;
-+ dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
-+ dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
-+ dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
-+ dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
-+ dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
-+}
-+
-+static void decide_crtc_timing_for_drm_display_mode(
-+ struct drm_display_mode *drm_mode,
-+ const struct drm_display_mode *native_mode,
-+ bool scale_enabled)
-+{
-+ if (scale_enabled) {
-+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
-+ } else if (native_mode->clock == drm_mode->clock &&
-+ native_mode->htotal == drm_mode->htotal &&
-+ native_mode->vtotal == drm_mode->vtotal) {
-+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
-+ } else {
-+ /* neither scaling nor an amdgpu-inserted common mode: no need to patch the timing */
-+ }
-+}
-+
-+
-+static struct dc_target *create_target_for_sink(
-+ const struct amdgpu_connector *aconnector,
-+ struct drm_display_mode *drm_mode)
-+{
-+ struct drm_display_mode *preferred_mode = NULL;
-+ const struct drm_connector *drm_connector;
-+ struct dm_connector_state *dm_state;
-+ struct dc_target *target = NULL;
-+ struct dc_stream *stream;
-+ struct drm_display_mode mode = *drm_mode;
-+ bool native_mode_found = false;
-+
-+ if (NULL == aconnector) {
-+ DRM_ERROR("aconnector is NULL!\n");
-+ goto drm_connector_null;
-+ }
-+
-+ drm_connector = &aconnector->base;
-+ dm_state = to_dm_connector_state(drm_connector->state);
-+ stream = dc_create_stream_for_sink(aconnector->dc_sink);
-+
-+ if (NULL == stream) {
-+ DRM_ERROR("Failed to create stream for sink!\n");
-+ goto stream_create_fail;
-+ }
-+
-+ list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
-+ /* Search for preferred mode */
-+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
-+ native_mode_found = true;
-+ break;
-+ }
-+ }
-+ if (!native_mode_found)
-+ preferred_mode = list_first_entry_or_null(
-+ &aconnector->base.modes,
-+ struct drm_display_mode,
-+ head);
-+
-+ decide_crtc_timing_for_drm_display_mode(
-+ &mode, preferred_mode,
-+ dm_state->scaling != RMX_OFF);
-+
-+ dc_timing_from_drm_display_mode(&stream->timing,
-+ &mode, &aconnector->base);
-+
-+ calculate_stream_scaling_settings(&mode, dm_state->scaling, stream,
-+ dm_state->underscan_vborder,
-+ dm_state->underscan_hborder,
-+ dm_state->underscan_enable);
-+
-+
-+ fill_audio_info(
-+ &stream->audio_info,
-+ drm_connector,
-+ aconnector->dc_sink);
-+
-+ target = dc_create_target_for_streams(&stream, 1);
-+ dc_stream_release(stream);
-+
-+ if (NULL == target) {
-+ DRM_ERROR("Failed to create target with streams!\n");
-+ goto target_create_fail;
-+ }
-+
-+drm_connector_null:
-+target_create_fail:
-+stream_create_fail:
-+ return target;
-+}
-+
-+void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
-+{
-+ drm_crtc_cleanup(crtc);
-+ kfree(crtc);
-+}
-+
-+static void amdgpu_dm_atomic_crtc_gamma_set(
-+ struct drm_crtc *crtc,
-+ u16 *red,
-+ u16 *green,
-+ u16 *blue,
-+ uint32_t start,
-+ uint32_t size)
-+{
-+ struct drm_device *dev = crtc->dev;
-+ struct drm_property *prop = dev->mode_config.prop_crtc_id;
-+
-+ crtc->mode.private_flags |= AMDGPU_CRTC_MODE_PRIVATE_FLAGS_GAMMASET;
-+
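-+ /* Setting a property here appears intended only to force an atomic
-+ * commit, which then picks up the GAMMASET flag set above; the
-+ * property value itself is ignored. */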
-+ drm_atomic_helper_crtc_set_property(crtc, prop, 0);
-+}
-+
-+static int dm_crtc_funcs_atomic_set_property(
-+ struct drm_crtc *crtc,
-+ struct drm_crtc_state *state,
-+ struct drm_property *property,
-+ uint64_t val)
-+{
-+ struct drm_crtc_state *new_crtc_state;
-+ struct drm_crtc *new_crtc;
-+ int i;
-+
-+ for_each_crtc_in_state(state->state, new_crtc, new_crtc_state, i) {
-+ if (new_crtc == crtc) {
-+ struct drm_plane_state *plane_state;
-+
-+ new_crtc_state->planes_changed = true;
-+
-+ /*
-+ * Bit of magic done here. We need to ensure
-+ * that planes get updated after the mode is
-+ * set, so we add the primary plane to the
-+ * state; this way atomic_update will be
-+ * called for it.
-+ */
-+ plane_state =
-+ drm_atomic_get_plane_state(
-+ state->state,
-+ crtc->primary);
-+
-+ if (!plane_state)
-+ return -EINVAL;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+/* Only the options currently available to the driver are implemented */
-+static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
-+ .reset = drm_atomic_helper_crtc_reset,
-+ .cursor_set = dm_crtc_cursor_set,
-+ .cursor_move = dm_crtc_cursor_move,
-+ .destroy = amdgpu_dm_crtc_destroy,
-+ .gamma_set = amdgpu_dm_atomic_crtc_gamma_set,
-+ .set_config = drm_atomic_helper_set_config,
-+ .page_flip = drm_atomic_helper_page_flip,
-+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
-+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
-+ .atomic_set_property = dm_crtc_funcs_atomic_set_property
-+};
-+
-+static enum drm_connector_status
-+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
-+{
-+ bool connected;
-+ struct amdgpu_connector *aconnector =
-+ to_amdgpu_connector(connector);
-+
-+ /*
-+ * TODO: check whether we should lock here for mst_mgr.lock
-+ */
-+ /* set root connector to disconnected */
-+ if (aconnector->mst_mgr.mst_state)
-+ return connector_status_disconnected;
-+
-+ connected = (NULL != aconnector->dc_sink);
-+ return (connected ? connector_status_connected :
-+ connector_status_disconnected);
-+}
-+
-+int amdgpu_dm_connector_atomic_set_property(
-+ struct drm_connector *connector,
-+ struct drm_connector_state *state,
-+ struct drm_property *property,
-+ uint64_t val)
-+{
-+ struct drm_device *dev = connector->dev;
-+ struct amdgpu_device *adev = dev->dev_private;
-+ struct dm_connector_state *dm_old_state =
-+ to_dm_connector_state(connector->state);
-+ struct dm_connector_state *dm_new_state =
-+ to_dm_connector_state(state);
-+
-+ if (property == dev->mode_config.scaling_mode_property) {
-+ struct drm_crtc_state *new_crtc_state;
-+ struct drm_crtc *crtc;
-+ int i;
-+ enum amdgpu_rmx_type rmx_type;
-+
-+ switch (val) {
-+ case DRM_MODE_SCALE_CENTER:
-+ rmx_type = RMX_CENTER;
-+ break;
-+ case DRM_MODE_SCALE_ASPECT:
-+ rmx_type = RMX_ASPECT;
-+ break;
-+ case DRM_MODE_SCALE_FULLSCREEN:
-+ rmx_type = RMX_FULL;
-+ break;
-+ case DRM_MODE_SCALE_NONE:
-+ default:
-+ rmx_type = RMX_OFF;
-+ break;
-+ }
-+
-+ if (dm_old_state->scaling == rmx_type)
-+ return 0;
-+
-+ dm_new_state->scaling = rmx_type;
-+
-+ for_each_crtc_in_state(state->state, crtc, new_crtc_state, i) {
-+ if (crtc == state->crtc) {
-+ struct drm_plane_state *plane_state;
-+
-+ new_crtc_state->mode_changed = true;
-+
-+ /*
-+ * Bit of magic done here. We need to ensure
-+ * that planes get updated after the mode is
-+ * set, so we add the primary plane to the
-+ * state; this way atomic_update will be
-+ * called for it.
-+ */
-+ plane_state =
-+ drm_atomic_get_plane_state(
-+ state->state,
-+ crtc->primary);
-+
-+ if (!plane_state)
-+ return -EINVAL;
-+ }
-+ }
-+
-+ return 0;
-+ } else if (property == adev->mode_info.underscan_hborder_property) {
-+ dm_new_state->underscan_hborder = val;
-+ return 0;
-+ } else if (property == adev->mode_info.underscan_vborder_property) {
-+ dm_new_state->underscan_vborder = val;
-+ return 0;
-+ } else if (property == adev->mode_info.underscan_property) {
-+ struct drm_crtc_state *new_crtc_state;
-+ struct drm_crtc *crtc;
-+ int i;
-+
-+ dm_new_state->underscan_enable = val;
-+ for_each_crtc_in_state(state->state, crtc, new_crtc_state, i) {
-+ if (crtc == state->crtc) {
-+ struct drm_plane_state *plane_state;
-+
-+ new_crtc_state->mode_changed = true;
-+
-+ /*
-+ * Bit of magic done here. We need to ensure
-+ * that planes get updated after the mode is
-+ * set, so we add the primary plane to the
-+ * state; this way atomic_update will be
-+ * called for it.
-+ */
-+ plane_state =
-+ drm_atomic_get_plane_state(
-+ state->state,
-+ crtc->primary);
-+
-+ if (!plane_state)
-+ return -EINVAL;
-+ }
-+ }
-+
-+ return 0;
-+ }
-+
-+ return -EINVAL;
-+}
-+
-+void amdgpu_dm_connector_destroy(struct drm_connector *connector)
-+{
-+ /*drm_sysfs_connector_remove(connector);*/
-+ drm_connector_cleanup(connector);
-+ kfree(connector);
-+}
-+
-+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
-+{
-+ struct dm_connector_state *state =
-+ to_dm_connector_state(connector->state);
-+
-+ kfree(state);
-+
-+ state = kzalloc(sizeof(*state), GFP_KERNEL);
-+
-+ if (state) {
-+ state->scaling = RMX_OFF;
-+
-+ connector->state = &state->base;
-+ connector->state->connector = connector;
-+ }
-+}
-+
-+struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
-+ struct drm_connector *connector)
-+{
-+ struct dm_connector_state *state =
-+ to_dm_connector_state(connector->state);
-+
-+ struct dm_connector_state *new_state =
-+ kzalloc(sizeof(*new_state), GFP_KERNEL);
-+
-+ if (new_state) {
-+ *new_state = *state;
-+
-+ return &new_state->base;
-+ }
-+
-+ return NULL;
-+}
-+
-+void amdgpu_dm_connector_atomic_destroy_state(
-+ struct drm_connector *connector,
-+ struct drm_connector_state *state)
-+{
-+ struct dm_connector_state *dm_state =
-+ to_dm_connector_state(state);
-+
-+ __drm_atomic_helper_connector_destroy_state(connector, state);
-+
-+ kfree(dm_state);
-+}
-+
-+static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
-+ .dpms = drm_atomic_helper_connector_dpms,
-+ .reset = amdgpu_dm_connector_funcs_reset,
-+ .detect = amdgpu_dm_connector_detect,
-+ .fill_modes = drm_helper_probe_single_connector_modes,
-+ .set_property = drm_atomic_helper_connector_set_property,
-+ .destroy = amdgpu_dm_connector_destroy,
-+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
-+ .atomic_destroy_state = amdgpu_dm_connector_atomic_destroy_state,
-+ .atomic_set_property = amdgpu_dm_connector_atomic_set_property
-+};
-+
-+static struct drm_encoder *best_encoder(struct drm_connector *connector)
-+{
-+ int enc_id = connector->encoder_ids[0];
-+ struct drm_mode_object *obj;
-+ struct drm_encoder *encoder;
-+
-+ DRM_DEBUG_KMS("Finding the best encoder\n");
-+
-+ /* pick the encoder ids */
-+ if (enc_id) {
-+ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
-+ if (!obj) {
-+ DRM_ERROR("Couldn't find a matching encoder for our connector\n");
-+ return NULL;
-+ }
-+ encoder = obj_to_encoder(obj);
-+ return encoder;
-+ }
-+ DRM_ERROR("No encoder id\n");
-+ return NULL;
-+}
-+
-+static int get_modes(struct drm_connector *connector)
-+{
-+ struct amdgpu_connector *amdgpu_connector =
-+ to_amdgpu_connector(connector);
-+ return amdgpu_connector->num_modes;
-+}
-+
-+static int mode_valid(struct drm_connector *connector,
-+ struct drm_display_mode *mode)
-+{
-+ int result = MODE_ERROR;
-+ const struct dc_sink *dc_sink =
-+ to_amdgpu_connector(connector)->dc_sink;
-+ struct amdgpu_device *adev = connector->dev->dev_private;
-+ struct dc_validation_set val_set = { 0 };
-+ /* TODO: Unhardcode stream count */
-+ struct dc_stream *streams[1];
-+ struct dc_target *target;
-+
-+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
-+ (mode->flags & DRM_MODE_FLAG_DBLSCAN))
-+ return result;
-+
-+ if (NULL == dc_sink) {
-+ DRM_ERROR("dc_sink is NULL!\n");
-+ goto stream_create_fail;
-+ }
-+
-+ streams[0] = dc_create_stream_for_sink(dc_sink);
-+
-+ if (NULL == streams[0]) {
-+ DRM_ERROR("Failed to create stream for sink!\n");
-+ goto stream_create_fail;
-+ }
-+
-+ drm_mode_set_crtcinfo(mode, 0);
-+ dc_timing_from_drm_display_mode(&streams[0]->timing, mode, connector);
-+
-+ target = dc_create_target_for_streams(streams, 1);
-+ val_set.target = target;
-+
-+ if (NULL == val_set.target) {
-+ DRM_ERROR("Failed to create target with stream!\n");
-+ goto target_create_fail;
-+ }
-+
-+ val_set.surface_count = 0;
-+ streams[0]->src.width = mode->hdisplay;
-+ streams[0]->src.height = mode->vdisplay;
-+ streams[0]->dst = streams[0]->src;
-+
-+ if (dc_validate_resources(adev->dm.dc, &val_set, 1))
-+ result = MODE_OK;
-+
-+ dc_target_release(target);
-+target_create_fail:
-+ dc_stream_release(streams[0]);
-+stream_create_fail:
-+ /* TODO: error handling*/
-+ return result;
-+}
-+
-+
-+static const struct drm_connector_helper_funcs
-+amdgpu_dm_connector_helper_funcs = {
-+ /*
-+ * If a second, bigger display is hotplugged in FB console mode, its
-+ * higher-resolution modes are filtered out by drm_mode_validate_size()
-+ * and are missing after the user starts lightdm. So we need to renew
-+ * the mode list in the get_modes callback, not just return the mode
-+ * count.
-+ */
-+ .get_modes = get_modes,
-+ .mode_valid = mode_valid,
-+ .best_encoder = best_encoder
-+};
-+
-+static void dm_crtc_helper_disable(struct drm_crtc *crtc)
-+{
-+}
-+
-+static int dm_crtc_helper_atomic_check(
-+ struct drm_crtc *crtc,
-+ struct drm_crtc_state *state)
-+{
-+ return 0;
-+}
-+
-+static bool dm_crtc_helper_mode_fixup(
-+ struct drm_crtc *crtc,
-+ const struct drm_display_mode *mode,
-+ struct drm_display_mode *adjusted_mode)
-+{
-+ return true;
-+}
-+
-+static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
-+ .disable = dm_crtc_helper_disable,
-+ .atomic_check = dm_crtc_helper_atomic_check,
-+ .mode_fixup = dm_crtc_helper_mode_fixup
-+};
-+
-+static void dm_encoder_helper_disable(struct drm_encoder *encoder)
-+{
-+}
-+
-+static int dm_encoder_helper_atomic_check(
-+ struct drm_encoder *encoder,
-+ struct drm_crtc_state *crtc_state,
-+ struct drm_connector_state *conn_state)
-+{
-+ return 0;
-+}
-+
-+const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
-+ .disable = dm_encoder_helper_disable,
-+ .atomic_check = dm_encoder_helper_atomic_check
-+};
-+
-+static const struct drm_plane_funcs dm_plane_funcs = {
-+ .reset = drm_atomic_helper_plane_reset,
-+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
-+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state
-+};
-+
-+static void clear_unrelated_fields(struct drm_plane_state *state)
-+{
-+ state->crtc = NULL;
-+ state->fb = NULL;
-+ state->state = NULL;
-+ state->fence = NULL;
-+}
-+
-+static bool page_flip_needed(
-+ struct drm_plane_state *new_state,
-+ struct drm_plane_state *old_state)
-+{
-+ struct drm_plane_state old_state_tmp;
-+ struct drm_plane_state new_state_tmp;
-+
-+ struct amdgpu_framebuffer *amdgpu_fb_old;
-+ struct amdgpu_framebuffer *amdgpu_fb_new;
-+
-+ uint64_t old_tiling_flags;
-+ uint64_t new_tiling_flags;
-+
-+ if (!old_state)
-+ return false;
-+
-+ if (!old_state->fb)
-+ return false;
-+
-+ if (!new_state)
-+ return false;
-+
-+ if (!new_state->fb)
-+ return false;
-+
-+ old_state_tmp = *old_state;
-+ new_state_tmp = *new_state;
-+
-+ if (!new_state->crtc->state->event)
-+ return false;
-+
-+ amdgpu_fb_old = to_amdgpu_framebuffer(old_state->fb);
-+ amdgpu_fb_new = to_amdgpu_framebuffer(new_state->fb);
-+
-+ if (!get_fb_info(amdgpu_fb_old, &old_tiling_flags, NULL))
-+ return false;
-+
-+ if (!get_fb_info(amdgpu_fb_new, &new_tiling_flags, NULL))
-+ return false;
-+
-+ if (old_tiling_flags != new_tiling_flags)
-+ return false;
-+
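-+ /* Zero out the fields that legitimately differ across a pure flip
-+ * (crtc, fb, state and fence pointers), then memcmp the rest: any
-+ * other difference means a full surface commit is needed instead of
-+ * a page flip. */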
-+ clear_unrelated_fields(&old_state_tmp);
-+ clear_unrelated_fields(&new_state_tmp);
-+
-+ return memcmp(&old_state_tmp, &new_state_tmp, sizeof(old_state_tmp)) == 0;
-+}
-+
-+static int dm_plane_helper_prepare_fb(
-+ struct drm_plane *plane,
-+ const struct drm_plane_state *new_state)
-+{
-+ struct amdgpu_framebuffer *afb;
-+ struct drm_gem_object *obj;
-+ struct amdgpu_bo *rbo;
-+ int r;
-+
-+ if (!new_state->fb) {
-+ DRM_DEBUG_KMS("No FB bound\n");
-+ return 0;
-+ }
-+
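-+ /* A scanout buffer must stay at a fixed GPU address while the
-+ * display controller reads it, so pin the BO into VRAM here; it is
-+ * unpinned in cleanup_fb once we have flipped away from it. */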
-+ afb = to_amdgpu_framebuffer(new_state->fb);
-+
-+ DRM_DEBUG_KMS("Pin new framebuffer: %p\n", afb);
-+ obj = afb->obj;
-+ rbo = gem_to_amdgpu_bo(obj);
-+ r = amdgpu_bo_reserve(rbo, false);
-+ if (unlikely(r != 0))
-+ return r;
-+
-+ r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, NULL);
-+
-+ amdgpu_bo_unreserve(rbo);
-+
-+ if (unlikely(r != 0)) {
-+ DRM_ERROR("Failed to pin framebuffer\n");
-+ return r;
-+ }
-+
-+ return 0;
-+}
-+
-+static void dm_plane_helper_cleanup_fb(
-+ struct drm_plane *plane,
-+ const struct drm_plane_state *old_state)
-+{
-+ struct amdgpu_bo *rbo;
-+ struct amdgpu_framebuffer *afb;
-+ int r;
-+
-+ if (!old_state->fb)
-+ return;
-+
-+ afb = to_amdgpu_framebuffer(old_state->fb);
-+ DRM_DEBUG_KMS("Unpin old framebuffer: %p\n", afb);
-+ rbo = gem_to_amdgpu_bo(afb->obj);
-+ r = amdgpu_bo_reserve(rbo, false);
-+ if (unlikely(r)) {
-+ DRM_ERROR("failed to reserve rbo before unpin\n");
-+ return;
-+ } else {
-+ amdgpu_bo_unpin(rbo);
-+ amdgpu_bo_unreserve(rbo);
-+ }
-+}
-+
-+int dm_create_validation_set_for_target(struct drm_connector *connector,
-+ struct drm_display_mode *mode, struct dc_validation_set *val_set)
-+{
-+ int result = MODE_ERROR;
-+ const struct dc_sink *dc_sink =
-+ to_amdgpu_connector(connector)->dc_sink;
-+ /* TODO: Unhardcode stream count */
-+ struct dc_stream *streams[1];
-+ struct dc_target *target;
-+
-+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
-+ (mode->flags & DRM_MODE_FLAG_DBLSCAN))
-+ return result;
-+
-+ if (NULL == dc_sink) {
-+ DRM_ERROR("dc_sink is NULL!\n");
-+ return result;
-+ }
-+
-+ streams[0] = dc_create_stream_for_sink(dc_sink);
-+
-+ if (NULL == streams[0]) {
-+ DRM_ERROR("Failed to create stream for sink!\n");
-+ return result;
-+ }
-+
-+ drm_mode_set_crtcinfo(mode, 0);
-+ dc_timing_from_drm_display_mode(&streams[0]->timing, mode, connector);
-+
-+ target = dc_create_target_for_streams(streams, 1);
-+ val_set->target = target;
-+
-+ if (NULL == val_set->target) {
-+ DRM_ERROR("Failed to create target with stream!\n");
-+ goto fail;
-+ }
-+
-+ streams[0]->src.width = mode->hdisplay;
-+ streams[0]->src.height = mode->vdisplay;
-+ streams[0]->dst = streams[0]->src;
-+
-+ return MODE_OK;
-+
-+fail:
-+ dc_stream_release(streams[0]);
-+ return result;
-+
-+}
-+
-+int dm_add_surface_to_validation_set(struct drm_plane *plane,
-+ struct drm_plane_state *state, struct dc_surface **surface)
-+{
-+ int res;
-+
-+ struct amdgpu_framebuffer *afb;
-+ struct amdgpu_connector *aconnector;
-+ struct drm_crtc *crtc;
-+ struct drm_framebuffer *fb;
-+
-+ struct drm_device *dev;
-+ struct amdgpu_device *adev;
-+
-+ res = -EINVAL;
-+
-+ if (NULL == plane || NULL == state) {
-+ DRM_ERROR("invalid parameters dm_plane_atomic_check\n");
-+ return res;
-+ }
-+
-+ crtc = state->crtc;
-+ fb = state->fb;
-+
-+ afb = to_amdgpu_framebuffer(fb);
-+
-+ if (NULL == crtc)
-+ return res;
-+
-+ aconnector = aconnector_from_drm_crtc(crtc, state->state);
-+
-+ if (NULL == aconnector) {
-+ DRM_ERROR("Connector is NULL in dm_plane_atomic_check\n");
-+ return res;
-+ }
-+
-+ if (NULL == aconnector->dc_sink) {
-+ DRM_ERROR("dc_sink is NULL in dm_plane_atomic_check\n");
-+ return res;
-+ }
-+ dev = state->crtc->dev;
-+ adev = dev->dev_private;
-+
-+ *surface = dc_create_surface(adev->dm.dc);
-+ if (NULL == *surface) {
-+ DRM_ERROR("surface is NULL\n");
-+ return res;
-+ }
-+
-+ if (!fill_rects_from_plane_state(state, *surface)) {
-+ DRM_ERROR("Failed to fill surface!\n");
-+ goto fail;
-+ }
-+
-+ fill_plane_attributes_from_fb(*surface, afb);
-+
-+ return MODE_OK;
-+
-+fail:
-+ dc_surface_release(*surface);
-+ return res;
-+}
-+
-+static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
-+ .prepare_fb = dm_plane_helper_prepare_fb,
-+ .cleanup_fb = dm_plane_helper_cleanup_fb,
-+};
-+
-+/*
-+ * TODO: these are currently initialized to rgb formats only.
-+ * For future use cases we should either initialize them dynamically based on
-+ * plane capabilities, or initialize this array to all formats, so internal drm
-+ * check will succeed, and let DC to implement proper check
-+ */
-+static uint32_t rgb_formats[] = {
-+ DRM_FORMAT_XRGB4444,
-+ DRM_FORMAT_ARGB4444,
-+ DRM_FORMAT_RGBA4444,
-+ DRM_FORMAT_ARGB1555,
-+ DRM_FORMAT_RGB565,
-+ DRM_FORMAT_RGB888,
-+ DRM_FORMAT_XRGB8888,
-+ DRM_FORMAT_ARGB8888,
-+ DRM_FORMAT_RGBA8888,
-+};
-+
-+int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
-+ struct amdgpu_crtc *acrtc,
-+ uint32_t link_index)
-+{
-+ int res = -ENOMEM;
-+
-+ struct drm_plane *primary_plane =
-+ kzalloc(sizeof(*primary_plane), GFP_KERNEL);
-+
-+ if (!primary_plane)
-+ goto fail_plane;
-+
-+ /* this flag is used in legacy code only */
-+ primary_plane->format_default = true;
-+
-+ res = drm_universal_plane_init(
-+ dm->adev->ddev,
-+ primary_plane,
-+ 0,
-+ &dm_plane_funcs,
-+ rgb_formats,
-+ ARRAY_SIZE(rgb_formats),
-+ DRM_PLANE_TYPE_PRIMARY, NULL);
-+
-+ if (res)
-+ goto fail;
-+
-+ primary_plane->crtc = &acrtc->base;
-+
-+ drm_plane_helper_add(primary_plane, &dm_plane_helper_funcs);
-+
-+ res = drm_crtc_init_with_planes(
-+ dm->ddev,
-+ &acrtc->base,
-+ primary_plane,
-+ NULL,
-+ &amdgpu_dm_crtc_funcs, NULL);
-+
-+ if (res)
-+ goto fail;
-+
-+ drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
-+
-+ acrtc->max_cursor_width = 128;
-+ acrtc->max_cursor_height = 128;
-+
-+ acrtc->crtc_id = link_index;
-+ acrtc->base.enabled = false;
-+
-+ dm->adev->mode_info.crtcs[link_index] = acrtc;
-+ drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
-+
-+ return 0;
-+fail:
-+ kfree(primary_plane);
-+fail_plane:
-+ acrtc->crtc_id = -1;
-+ return res;
-+}
-+
-+static int to_drm_connector_type(enum signal_type st)
-+{
-+ switch (st) {
-+ case SIGNAL_TYPE_HDMI_TYPE_A:
-+ return DRM_MODE_CONNECTOR_HDMIA;
-+ case SIGNAL_TYPE_EDP:
-+ return DRM_MODE_CONNECTOR_eDP;
-+ case SIGNAL_TYPE_RGB:
-+ return DRM_MODE_CONNECTOR_VGA;
-+ case SIGNAL_TYPE_DISPLAY_PORT:
-+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
-+ return DRM_MODE_CONNECTOR_DisplayPort;
-+ case SIGNAL_TYPE_DVI_DUAL_LINK:
-+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
-+ return DRM_MODE_CONNECTOR_DVID;
-+
-+ default:
-+ return DRM_MODE_CONNECTOR_Unknown;
-+ }
-+}
-+
-+static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
-+{
-+ const struct drm_connector_helper_funcs *helper =
-+ connector->helper_private;
-+ struct drm_encoder *encoder;
-+ struct amdgpu_encoder *amdgpu_encoder;
-+
-+ encoder = helper->best_encoder(connector);
-+
-+ if (encoder == NULL)
-+ return;
-+
-+ amdgpu_encoder = to_amdgpu_encoder(encoder);
-+
-+ amdgpu_encoder->native_mode.clock = 0;
-+
-+ if (!list_empty(&connector->probed_modes)) {
-+ struct drm_display_mode *preferred_mode = NULL;
-+ list_for_each_entry(preferred_mode,
-+ &connector->probed_modes,
-+ head) {
-+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
-+ amdgpu_encoder->native_mode = *preferred_mode;
-+ }
-+ break;
-+ }
-+
-+ }
-+}
-+
-+static struct drm_display_mode *amdgpu_dm_create_common_mode(
-+ struct drm_encoder *encoder, char *name,
-+ int hdisplay, int vdisplay)
-+{
-+ struct drm_device *dev = encoder->dev;
-+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-+ struct drm_display_mode *mode = NULL;
-+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
-+
-+ mode = drm_mode_duplicate(dev, native_mode);
-+
-+ if (mode == NULL)
-+ return NULL;
-+
-+ mode->hdisplay = hdisplay;
-+ mode->vdisplay = vdisplay;
-+ mode->type &= ~DRM_MODE_TYPE_PREFERRED;
-+ strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
-+
-+ return mode;
-+}
-+
-+static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
-+ struct drm_connector *connector)
-+{
-+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-+ struct drm_display_mode *mode = NULL;
-+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
-+ struct amdgpu_connector *amdgpu_connector =
-+ to_amdgpu_connector(connector);
-+ int i;
-+ int n;
-+ struct mode_size {
-+ char name[DRM_DISPLAY_MODE_LEN];
-+ int w;
-+ int h;
-+ } common_modes[] = {
-+ { "640x480", 640, 480},
-+ { "800x600", 800, 600},
-+ { "1024x768", 1024, 768},
-+ { "1280x720", 1280, 720},
-+ { "1280x800", 1280, 800},
-+ {"1280x1024", 1280, 1024},
-+ { "1440x900", 1440, 900},
-+ {"1680x1050", 1680, 1050},
-+ {"1600x1200", 1600, 1200},
-+ {"1920x1080", 1920, 1080},
-+ {"1920x1200", 1920, 1200}
-+ };
-+
-+ n = ARRAY_SIZE(common_modes);
-+
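-+ /* Add a common mode only if it fits within the native mode, is not
-+ * identical to it, and was not already probed from the EDID. */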
-+ for (i = 0; i < n; i++) {
-+ struct drm_display_mode *curmode = NULL;
-+ bool mode_existed = false;
-+
-+ if (common_modes[i].w > native_mode->hdisplay ||
-+ common_modes[i].h > native_mode->vdisplay ||
-+ (common_modes[i].w == native_mode->hdisplay &&
-+ common_modes[i].h == native_mode->vdisplay))
-+ continue;
-+
-+ list_for_each_entry(curmode, &connector->probed_modes, head) {
-+ if (common_modes[i].w == curmode->hdisplay &&
-+ common_modes[i].h == curmode->vdisplay) {
-+ mode_existed = true;
-+ break;
-+ }
-+ }
-+
-+ if (mode_existed)
-+ continue;
-+
-+ mode = amdgpu_dm_create_common_mode(encoder,
-+ common_modes[i].name, common_modes[i].w,
-+ common_modes[i].h);
-+ if (!mode)
-+ continue;
-+
-+ drm_mode_probed_add(connector, mode);
-+ amdgpu_connector->num_modes++;
-+ }
-+}
-+
-+static void amdgpu_dm_connector_ddc_get_modes(
-+ struct drm_connector *connector,
-+ struct edid *edid)
-+{
-+ struct amdgpu_connector *amdgpu_connector =
-+ to_amdgpu_connector(connector);
-+
-+ if (edid) {
-+ /* empty probed_modes */
-+ INIT_LIST_HEAD(&connector->probed_modes);
-+ amdgpu_connector->num_modes =
-+ drm_add_edid_modes(connector, edid);
-+
-+ drm_edid_to_eld(connector, edid);
-+
-+ amdgpu_dm_get_native_mode(connector);
-+ } else
-+ amdgpu_connector->num_modes = 0;
-+}
-+
-+int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
-+{
-+ const struct drm_connector_helper_funcs *helper =
-+ connector->helper_private;
-+ struct amdgpu_connector *amdgpu_connector =
-+ to_amdgpu_connector(connector);
-+ struct drm_encoder *encoder;
-+ struct edid *edid = amdgpu_connector->edid;
-+
-+ encoder = helper->best_encoder(connector);
-+
-+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
-+ amdgpu_dm_connector_add_common_modes(encoder, connector);
-+ return amdgpu_connector->num_modes;
-+}
-+
-+/* Note: this function assumes that dc_link_detect() was called for the
-+ * dc_link which will be represented by this aconnector. */
-+int amdgpu_dm_connector_init(
-+ struct amdgpu_display_manager *dm,
-+ struct amdgpu_connector *aconnector,
-+ uint32_t link_index,
-+ struct amdgpu_encoder *aencoder)
-+{
-+ int res, connector_type;
-+ struct amdgpu_device *adev = dm->ddev->dev_private;
-+ struct dc *dc = dm->dc;
-+ const struct dc_link *link = dc_get_link_at_index(dc, link_index);
-+
-+ DRM_DEBUG_KMS("%s()\n", __func__);
-+
-+ connector_type = to_drm_connector_type(link->connector_signal);
-+
-+ res = drm_connector_init(
-+ dm->ddev,
-+ &aconnector->base,
-+ &amdgpu_dm_connector_funcs,
-+ connector_type);
-+
-+ if (res) {
-+ DRM_ERROR("connector_init failed\n");
-+ aconnector->connector_id = -1;
-+ return res;
-+ }
-+
-+ drm_connector_helper_add(
-+ &aconnector->base,
-+ &amdgpu_dm_connector_helper_funcs);
-+
-+ aconnector->connector_id = link_index;
-+ aconnector->dc_link = link;
-+ aconnector->base.interlace_allowed = true;
-+ aconnector->base.doublescan_allowed = true;
-+ aconnector->hpd.hpd = link_index; /* maps to 'enum amdgpu_hpd_id' */
-+
-+ /* Configure support for HPD hot plug. connector->polled defaults to
-+ * 0, which means HPD hot plug is not supported. */
-+ switch (connector_type) {
-+ case DRM_MODE_CONNECTOR_HDMIA:
-+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
-+ break;
-+ case DRM_MODE_CONNECTOR_DisplayPort:
-+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
-+ break;
-+ case DRM_MODE_CONNECTOR_DVID:
-+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ drm_object_attach_property(&aconnector->base.base,
-+ dm->ddev->mode_config.scaling_mode_property,
-+ DRM_MODE_SCALE_NONE);
-+
-+ drm_object_attach_property(&aconnector->base.base,
-+ adev->mode_info.underscan_property,
-+ UNDERSCAN_OFF);
-+ drm_object_attach_property(&aconnector->base.base,
-+ adev->mode_info.underscan_hborder_property,
-+ 0);
-+ drm_object_attach_property(&aconnector->base.base,
-+ adev->mode_info.underscan_vborder_property,
-+ 0);
-+
-+ /* TODO: Don't do this manually anymore
-+ aconnector->base.encoder = &aencoder->base;
-+ */
-+
-+ drm_mode_connector_attach_encoder(
-+ &aconnector->base, &aencoder->base);
-+
-+ /*drm_sysfs_connector_add(&dm_connector->base);*/
-+
-+ drm_connector_register(&aconnector->base);
-+
-+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort)
-+ amdgpu_dm_initialize_mst_connector(dm, aconnector);
-+
-+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
-+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-+
-+ /* NOTE: this currently will create backlight device even if a panel
-+ * is not connected to the eDP/LVDS connector.
-+ *
-+ * This is less than ideal but we don't have sink information at this
-+ * stage since detection happens after. We can't do detection earlier
-+ * since MST detection needs connectors to be created first.
-+ */
-+ if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
-+ /* Even if registration fails, we should continue with
-+ * DM initialization, because not having backlight control
-+ * is better than a black screen. */
-+ amdgpu_dm_register_backlight_device(dm);
-+
-+ if (dm->backlight_dev)
-+ dm->backlight_link = link;
-+ }
-+#endif
-+
-+ return 0;
-+}
-+
-+int amdgpu_dm_encoder_init(
-+ struct drm_device *dev,
-+ struct amdgpu_encoder *aencoder,
-+ uint32_t link_index,
-+ struct amdgpu_crtc *acrtc)
-+{
-+ int res = drm_encoder_init(dev,
-+ &aencoder->base,
-+ &amdgpu_dm_encoder_funcs,
-+ DRM_MODE_ENCODER_TMDS,
-+ NULL);
-+
-+ aencoder->base.possible_crtcs = 1 << link_index;
-+
-+ if (!res)
-+ aencoder->encoder_id = link_index;
-+ else
-+ aencoder->encoder_id = -1;
-+
-+ drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
-+
-+ return res;
-+}
-+
-+enum dm_commit_action {
-+ DM_COMMIT_ACTION_NOTHING,
-+ DM_COMMIT_ACTION_RESET,
-+ DM_COMMIT_ACTION_DPMS_ON,
-+ DM_COMMIT_ACTION_DPMS_OFF,
-+ DM_COMMIT_ACTION_SET
-+};
-+
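-+/*
-+ * Map a DRM CRTC state change onto a DM commit action:
-+ *   mode_changed:  enabled && active -> SET, otherwise -> RESET;
-+ *   !mode_changed: disabled -> NOTHING,
-+ *                  active_changed -> DPMS_ON or DPMS_OFF,
-+ *                  otherwise -> NOTHING.
-+ */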
-+enum dm_commit_action get_dm_commit_action(struct drm_crtc *crtc,
-+ struct drm_crtc_state *state)
-+{
-+ /* mode_changed means either the mode itself or 'enabled' changed */
-+ /* active_changed means dpms changed */
-+ if (state->mode_changed) {
-+ /* if it got disabled, do a reset mode */
-+ if (!state->enable)
-+ return DM_COMMIT_ACTION_RESET;
-+
-+ if (state->active)
-+ return DM_COMMIT_ACTION_SET;
-+ else
-+ return DM_COMMIT_ACTION_RESET;
-+ } else {
-+ /* ! mode_changed */
-+
-+ /* if it remains disabled, skip it */
-+ if (!state->enable)
-+ return DM_COMMIT_ACTION_NOTHING;
-+
-+ if (state->active_changed) {
-+ if (state->active) {
-+ return DM_COMMIT_ACTION_DPMS_ON;
-+ } else {
-+ return DM_COMMIT_ACTION_DPMS_OFF;
-+ }
-+ } else {
-+ /* ! active_changed */
-+ return DM_COMMIT_ACTION_NOTHING;
-+ }
-+ }
-+}
-+
-+static void manage_dm_interrupts(
-+ struct amdgpu_device *adev,
-+ struct amdgpu_crtc *acrtc,
-+ bool enable)
-+{
-+ if (enable) {
-+ drm_crtc_vblank_on(&acrtc->base);
-+ amdgpu_irq_get(
-+ adev,
-+ &adev->pageflip_irq,
-+ amdgpu_crtc_idx_to_irq_type(
-+ adev,
-+ acrtc->crtc_id));
-+ } else {
-+ unsigned long flags;
-+ amdgpu_irq_put(
-+ adev,
-+ &adev->pageflip_irq,
-+ amdgpu_crtc_idx_to_irq_type(
-+ adev,
-+ acrtc->crtc_id));
-+ drm_crtc_vblank_off(&acrtc->base);
-+
-+ /*
-+ * Flush here to guarantee no work is left in the queue.
-+ * As flush_work() sleeps, it would be a bug to call it
-+ * inside amdgpu_dm_flip_cleanup() with event_lock held.
-+ */
-+ if (acrtc->pflip_works) {
-+ flush_work(&acrtc->pflip_works->flip_work);
-+ flush_work(&acrtc->pflip_works->unpin_work);
-+ }
-+
-+ /*
-+ * TODO: once Vitaly's change to adjust locking in
-+ * page_flip_work_func is submitted to base driver move
-+ * lock and check to amdgpu_dm_flip_cleanup function
-+ */
-+
-+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
-+ if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
-+ /*
-+ * This is the case where, on reset, the last pending
-+ * pflip interrupt did not occur. Clean up.
-+ */
-+ amdgpu_dm_flip_cleanup(adev, acrtc);
-+ }
-+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
-+ }
-+}
-+
-+/*
-+ * Handle the headless hotplug workaround.
-+ *
-+ * On a headless hotplug, if the same monitor is plugged into the same
-+ * DDI, DRM considers the mode unchanged. We must check whether the
-+ * sink pointer changed and set mode_changed accordingly, so that the
-+ * commit does everything it needs to.
-+ */
-+static void handle_headless_hotplug(
-+ const struct amdgpu_crtc *acrtc,
-+ struct drm_crtc_state *state,
-+ struct amdgpu_connector **aconnector)
-+{
-+ struct amdgpu_connector *old_connector =
-+ aconnector_from_drm_crtc_id(&acrtc->base);
-+
-+ /*
-+ * TODO Revisit this. This code is kinda hacky and might break things.
-+ */
-+
-+ if (!old_connector)
-+ return;
-+
-+ if (!*aconnector)
-+ *aconnector = old_connector;
-+
-+ if (acrtc->target && (*aconnector)->dc_sink) {
-+ if ((*aconnector)->dc_sink !=
-+ acrtc->target->streams[0]->sink) {
-+ state->mode_changed = true;
-+ }
-+ }
-+
-+ if (!acrtc->target) {
-+ /* Headless with DPMS on: when the system wakes up with no
-+ * monitor connected, the target is NULL and no new target
-+ * is created. In that case, check whether any connector is
-+ * connected; if so, a hot plug happened after wake-up and
-+ * mode_changed must be set to true so that committing
-+ * targets does everything it needs to.
-+ */
-+ state->mode_changed =
-+ (*aconnector)->base.status ==
-+ connector_status_connected;
-+ } else {
-+ /* On a headless hotplug, if the same monitor is plugged
-+ * into the same DDI, DRM considers the mode unchanged;
-+ * check whether the sink pointer changed and set
-+ * mode_changed accordingly so the commit does everything
-+ * it needs to.
-+ */
-+ /* check if the sink has changed since the last commit */
-+ if ((*aconnector)->dc_sink && (*aconnector)->dc_sink !=
-+ acrtc->target->streams[0]->sink)
-+ state->mode_changed = true;
-+ }
-+}
-+
-+int amdgpu_dm_atomic_commit(
-+ struct drm_device *dev,
-+ struct drm_atomic_state *state,
-+ bool async)
-+{
-+ struct amdgpu_device *adev = dev->dev_private;
-+ struct amdgpu_display_manager *dm = &adev->dm;
-+ struct drm_plane *plane;
-+ struct drm_plane_state *old_plane_state;
-+ uint32_t i;
-+ int32_t ret;
-+ uint32_t commit_targets_count = 0;
-+ uint32_t new_crtcs_count = 0;
-+ struct drm_crtc *crtc;
-+ struct drm_crtc_state *old_crtc_state;
-+
-+ struct dc_target *commit_targets[DAL_MAX_CONTROLLERS];
-+ struct amdgpu_crtc *new_crtcs[DAL_MAX_CONTROLLERS];
-+
-+ /* In this step all new fb would be pinned */
-+
-+ ret = drm_atomic_helper_prepare_planes(dev, state);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * This is the point of no return - everything below never fails except
-+ * when the hw goes bonghits. Which means we can commit the new state on
-+ * the software side now.
-+ */
-+
-+ drm_atomic_helper_swap_state(dev, state);
-+
-+ /*
-+ * From this point state become old state really. New state is
-+ * initialized to appropriate objects and could be accessed from there
-+ */
-+
-+ /*
-+ * there is no fences usage yet in state. We can skip the following line
-+ * wait_for_fences(dev, state);
-+ */
-+
-+ /* update changed items */
-+ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
-+ struct amdgpu_crtc *acrtc;
-+ struct amdgpu_connector *aconnector;
-+ enum dm_commit_action action;
-+ struct drm_crtc_state *new_state = crtc->state;
-+
-+ acrtc = to_amdgpu_crtc(crtc);
-+ aconnector = aconnector_from_drm_crtc(crtc, state);
-+
-+ /* handles headless hotplug case, updating new_state and
-+ * aconnector as needed
-+ */
-+ handle_headless_hotplug(acrtc, new_state, &aconnector);
-+
-+ action = get_dm_commit_action(crtc, new_state);
-+
-+ if (!aconnector) {
-+ DRM_ERROR("Can't find connector for crtc %d\n", acrtc->crtc_id);
-+ break;
-+ }
-+
-+ switch (action) {
-+ case DM_COMMIT_ACTION_DPMS_ON:
-+ case DM_COMMIT_ACTION_SET: {
-+ const struct drm_connector_helper_funcs *connector_funcs;
-+ struct dc_target *new_target =
-+ create_target_for_sink(
-+ aconnector,
-+ &crtc->state->mode);
-+
-+ if (!new_target) {
-+ /*
-+ * This can happen because of issues with delivery
-+ * of userspace notifications: userspace tries to
-+ * set a mode on a display that is in fact
-+ * disconnected, so the aconnector's dc_sink is
-+ * NULL. We expect a reset mode to come soon.
-+ *
-+ * This can also happen when an unplug is done
-+ * at the end of the resume sequence.
-+ */
-+ new_state->planes_changed = false;
-+ break;
-+ }
-+
-+ if (acrtc->target) {
-+ /*
-+ * Disable vblank and pflip interrupts on the
-+ * CRTC that is about to change.
-+ */
-+ manage_dm_interrupts(adev, acrtc, false);
-+ /* this is the update mode case */
-+ dc_target_release(acrtc->target);
-+ acrtc->target = NULL;
-+ }
-+
-+ /*
-+ * Remember the CRTCs that got a new mode; their
-+ * vblanks are enabled only after all resources
-+ * are acquired in DC by dc_commit_targets().
-+ */
-+ new_crtcs[new_crtcs_count] = acrtc;
-+ new_crtcs_count++;
-+
-+ acrtc->target = new_target;
-+ acrtc->enabled = true;
-+ acrtc->base.enabled = true;
-+ connector_funcs = aconnector->base.helper_private;
-+ aconnector->base.encoder =
-+ connector_funcs->best_encoder(
-+ &aconnector->base);
-+ break;
-+ }
-+
-+ case DM_COMMIT_ACTION_NOTHING:
-+ break;
-+
-+ case DM_COMMIT_ACTION_DPMS_OFF:
-+ case DM_COMMIT_ACTION_RESET:
-+ /* i.e. reset mode */
-+ if (acrtc->target) {
-+ manage_dm_interrupts(adev, acrtc, false);
-+
-+ dc_target_release(acrtc->target);
-+ acrtc->target = NULL;
-+ acrtc->enabled = false;
-+ acrtc->base.enabled = false;
-+ aconnector->base.encoder = NULL;
-+ }
-+ break;
-+ } /* switch() */
-+ } /* for_each_crtc_in_state() */
-+
-+ commit_targets_count = 0;
-+
-+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-+
-+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
-+
-+ if (acrtc->target) {
-+ commit_targets[commit_targets_count] = acrtc->target;
-+ ++commit_targets_count;
-+ }
-+ }
-+
-+ /* DC is optimized not to do anything if 'targets' didn't change. */
-+ dc_commit_targets(dm->dc, commit_targets, commit_targets_count);
-+
-+ /* update planes when needed */
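-+ /* If only the framebuffer changed and a flip event is pending, take
-+ * the page-flip path; otherwise re-program the surface through DC. */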
-+ for_each_plane_in_state(state, plane, old_plane_state, i) {
-+ struct drm_plane_state *plane_state = plane->state;
-+ struct drm_crtc *crtc = plane_state->crtc;
-+ struct drm_framebuffer *fb = plane_state->fb;
-+
-+ if (fb && crtc) {
-+ if (!crtc->state->planes_changed)
-+ continue;
-+
-+ if (page_flip_needed(
-+ plane_state,
-+ old_plane_state))
-+ amdgpu_crtc_page_flip(
-+ crtc,
-+ fb,
-+ crtc->state->event,
-+ 0);
-+ else
-+ dm_dc_surface_commit(
-+ dm->dc,
-+ crtc,
-+ to_amdgpu_framebuffer(fb));
-+ }
-+ }
-+
-+ /* enable interrupts on the CRTCs that got a new mode */
-+ for (i = 0; i < new_crtcs_count; i++) {
-+ struct amdgpu_crtc *acrtc = new_crtcs[i];
-+
-+ manage_dm_interrupts(adev, acrtc, true);
-+ dm_crtc_cursor_reset(&acrtc->base);
-+ }
-+
-+ drm_atomic_helper_wait_for_vblanks(dev, state);
-+
-+ /* In this state all old framebuffers would be unpinned */
-+
-+ drm_atomic_helper_cleanup_planes(dev, state);
-+
-+ drm_atomic_state_free(state);
-+
-+ return 0;
-+}
-+
-+int amdgpu_dm_atomic_check(struct drm_device *dev,
-+ struct drm_atomic_state *s)
-+{
-+ struct drm_crtc *crtc;
-+ struct drm_crtc_state *crtc_state;
-+ struct drm_plane *plane;
-+ struct drm_plane_state *plane_state;
-+ struct drm_connector *connector;
-+ struct drm_connector_state *conn_state;
-+ int i, j, ret, set_count;
-+ struct dc_validation_set set[MAX_TARGET_NUM] = {{ 0 }};
-+ struct amdgpu_device *adev = dev->dev_private;
-+ struct amdgpu_connector *aconnector = NULL;
-+ set_count = 0;
-+
-+ ret = drm_atomic_helper_check(dev, s);
-+
-+ if (ret) {
-+ DRM_ERROR("Atomic state validation failed with error %d!\n", ret);
-+ return ret;
-+ }
-+
-+ ret = -EINVAL;
-+
-+ if (s->num_connector > MAX_TARGET_NUM) {
-+ DRM_ERROR("Exceeded max targets number !\n");
-+ return ret;
-+ }
-+
-+
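-+ /* Build one dc_validation_set per enabled CRTC: its target plus the
-+ * surfaces of all planes assigned to it, then have DC validate the
-+ * whole configuration at once. */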
-+ for_each_crtc_in_state(s, crtc, crtc_state, i) {
-+ enum dm_commit_action action;
-+ aconnector = NULL;
-+
-+ action = get_dm_commit_action(crtc, crtc_state);
-+ if (action == DM_COMMIT_ACTION_DPMS_OFF ||
-+ action == DM_COMMIT_ACTION_RESET)
-+ continue;
-+
-+ for_each_connector_in_state(s, connector, conn_state, j) {
-+ if (conn_state->crtc && conn_state->crtc == crtc) {
-+ aconnector = to_amdgpu_connector(connector);
-+ /* assume at most one connector per CRTC */
-+ break;
-+ }
-+ }
-+
-+ /* otherwise validate against the existing connector, if any */
-+ if (!aconnector)
-+ aconnector = aconnector_from_drm_crtc(crtc, s);
-+
-+ if (!aconnector || !aconnector->dc_sink)
-+ continue;
-+
-+ set[set_count].surface_count = 0;
-+ ret = dm_create_validation_set_for_target(&aconnector->base,
-+ &crtc_state->adjusted_mode, &set[set_count]);
-+ if (ret) {
-+ DRM_ERROR("Failed to create validation set for target!\n");
-+ goto end;
-+ }
-+
-+ for_each_plane_in_state(s, plane, plane_state, j) {
-+ /* Since drm_atomic_helper_set_config() is our hook, the plane
-+ * mask is guaranteed to be in the correct state. */
-+ if (crtc_state->plane_mask & (1 << drm_plane_index(plane))) {
-+ if (set[set_count].surface_count == MAX_SURFACE_NUM) {
-+ DRM_ERROR("Exceeded max surfaces number per target!\n");
-+ ret = MODE_OK;
-+ goto end;
-+ }
-+
-+ ret = dm_add_surface_to_validation_set(plane,plane_state,
-+ (struct dc_surface **)&(set[set_count].surfaces[set[set_count].surface_count]));
-+
-+ if (ret) {
-+ DRM_ERROR("Failed to add surface for validation!\n");
-+ goto end;
-+ }
-+
-+ set[set_count].surface_count++;
-+ }
-+ }
-+
-+ set_count++;
-+ }
-+
-+ if (!set_count || dc_validate_resources(adev->dm.dc, set, set_count))
-+ ret = MODE_OK;
-+end:
-+
-+ for (i = 0; i < MAX_TARGET_NUM; i++) {
-+ if (set[i].target)
-+ dc_target_release((struct dc_target *)set[i].target);
-+ }
-+
-+ return ret;
-+}
-diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.h b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.h
-new file mode 100644
-index 0000000..bda39be
---- /dev/null
-+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.h
-@@ -0,0 +1,96 @@
-+/*
-+ * Copyright 2012-13 Advanced Micro Devices, Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: AMD
-+ *
-+ */
-+
-+
-+#ifndef __AMDGPU_DM_TYPES_H__
-+#define __AMDGPU_DM_TYPES_H__
-+
-+#include <drm/drmP.h>
-+
-+struct plane_addr_flip_info;
-+struct amdgpu_framebuffer;
-+struct amdgpu_display_manager;
-+struct dc_validation_set;
-+struct dc_surface;
-+
-+/*TODO Jodan Hersen use the one in amdgpu_dm*/
-+int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
-+ struct amdgpu_crtc *amdgpu_crtc,
-+ uint32_t link_index);
-+int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
-+ struct amdgpu_connector *amdgpu_connector,
-+ uint32_t link_index,
-+ struct amdgpu_encoder *amdgpu_encoder);
-+int amdgpu_dm_encoder_init(struct drm_device *dev,
-+ struct amdgpu_encoder *amdgpu_encoder,
-+ uint32_t link_index,
-+ struct amdgpu_crtc *amdgpu_crtc);
-+
-+void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc);
-+void amdgpu_dm_connector_destroy(struct drm_connector *connector);
-+void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder);
-+
-+void dm_add_display_info(
-+ struct drm_display_info *disp_info,
-+ struct amdgpu_display_manager *dm,
-+ uint32_t display_index);
-+
-+int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
-+
-+struct amdgpu_connector *aconnector_from_drm_crtc(
-+ struct drm_crtc *crtc,
-+ struct drm_atomic_state *state);
-+
-+int amdgpu_dm_atomic_commit(
-+ struct drm_device *dev,
-+ struct drm_atomic_state *state,
-+ bool async);
-+int amdgpu_dm_atomic_check(struct drm_device *dev,
-+ struct drm_atomic_state *state);
-+
-+int dm_create_validation_set_for_target(
-+ struct drm_connector *connector,
-+ struct drm_display_mode *mode,
-+ struct dc_validation_set *val_set);
-+int dm_add_surface_to_validation_set(
-+ struct drm_plane *plane,
-+ struct drm_plane_state *state,
-+ struct dc_surface **surface);
-+
-+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
-+struct drm_connector_state *amdgpu_dm_connector_atomic_duplicate_state(
-+ struct drm_connector *connector);
-+void amdgpu_dm_connector_atomic_destroy_state(
-+ struct drm_connector *connector,
-+ struct drm_connector_state *state);
-+int amdgpu_dm_connector_atomic_set_property(
-+ struct drm_connector *connector,
-+ struct drm_connector_state *state,
-+ struct drm_property *property,
-+ uint64_t val);
-+
-+extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
-+
-+#endif /* __AMDGPU_DM_TYPES_H__ */
---
-2.7.4
-