aboutsummaryrefslogtreecommitdiffstats
path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1815-drm-amd-display-Refactor-atomic-check.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1815-drm-amd-display-Refactor-atomic-check.patch')
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1815-drm-amd-display-Refactor-atomic-check.patch1765
1 files changed, 1765 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1815-drm-amd-display-Refactor-atomic-check.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1815-drm-amd-display-Refactor-atomic-check.patch
new file mode 100644
index 00000000..06c59b11
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1815-drm-amd-display-Refactor-atomic-check.patch
@@ -0,0 +1,1765 @@
+From 6b6344c9bfb3f1eb7389e50b8044ab40f0a7b368 Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <Andrey.Grodzovsky@amd.com>
+Date: Fri, 18 Aug 2017 10:52:20 -0400
+Subject: [PATCH 1815/4131] drm/amd/display: Refactor atomic check.
+
+Split into update crtcs and update plane functions.
+
+Change-Id: I57a739070861553de9b787f995e44a1da4e3c2d0
+Signed-off-by: Andrey Grodzovsky <Andrey.Grodzovsky@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1142 ++++++++++++---------
+ 1 file changed, 660 insertions(+), 482 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 977b21b..5d5ff98 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -23,8 +23,11 @@
+ *
+ */
+
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) || defined(OS_NAME_RHEL_7_4)
++
+ #include "dm_services_types.h"
+ #include "dc.h"
++#include "dc/inc/core_types.h"
+
+ #include "vid.h"
+ #include "amdgpu.h"
+@@ -464,14 +467,14 @@ static int dm_sw_fini(void *handle)
+
+ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
+ {
+- struct amdgpu_dm_connector *aconnector;
++ struct amdgpu_connector *aconnector;
+ struct drm_connector *connector;
+ int ret = 0;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+- aconnector = to_amdgpu_dm_connector(connector);
++ aconnector = to_amdgpu_connector(connector);
+ if (aconnector->dc_link->type == dc_connection_mst_branch) {
+ DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+ aconnector, aconnector->base.base.id);
+@@ -499,13 +502,13 @@ static int dm_late_init(void *handle)
+
+ static void s3_handle_mst(struct drm_device *dev, bool suspend)
+ {
+- struct amdgpu_dm_connector *aconnector;
++ struct amdgpu_connector *aconnector;
+ struct drm_connector *connector;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+- aconnector = to_amdgpu_dm_connector(connector);
++ aconnector = to_amdgpu_connector(connector);
+ if (aconnector->dc_link->type == dc_connection_mst_branch &&
+ !aconnector->mst_port) {
+
+@@ -571,7 +574,7 @@ static int dm_suspend(void *handle)
+ return ret;
+ }
+
+-struct amdgpu_dm_connector *amdgpu_dm_find_first_crct_matching_connector(
++struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ bool from_state_var)
+@@ -592,7 +595,7 @@ struct amdgpu_dm_connector *amdgpu_dm_find_first_crct_matching_connector(
+ connector->state->crtc;
+
+ if (crtc_from_state == crtc)
+- return to_amdgpu_dm_connector(connector);
++ return to_amdgpu_connector(connector);
+ }
+
+ return NULL;
+@@ -707,7 +710,11 @@ static int dm_display_resume(struct drm_device *ddev)
+
+ err:
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && !defined(OS_NAME_RHEL_7_4)
++ drm_atomic_state_free(state);
++#else
+ drm_atomic_state_put(state);
++#endif
+
+ return ret;
+ }
+@@ -730,7 +737,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev )
+ {
+ struct drm_device *ddev = adev->ddev;
+ struct amdgpu_display_manager *dm = &adev->dm;
+- struct amdgpu_dm_connector *aconnector;
++ struct amdgpu_connector *aconnector;
+ struct drm_connector *connector;
+ int ret = 0;
+ struct drm_crtc *crtc;
+@@ -758,7 +765,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev )
+ /* Do detection*/
+ list_for_each_entry(connector,
+ &ddev->mode_config.connector_list, head) {
+- aconnector = to_amdgpu_dm_connector(connector);
++ aconnector = to_amdgpu_connector(connector);
+
+ /*
+ * this is the case when traversing through already created
+@@ -769,10 +776,6 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev )
+
+ mutex_lock(&aconnector->hpd_lock);
+ dc_link_detect(aconnector->dc_link, false);
+-
+- if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+- aconnector->fake_enable = false;
+-
+ aconnector->dc_sink = NULL;
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ mutex_unlock(&aconnector->hpd_lock);
+@@ -819,13 +822,33 @@ struct drm_atomic_state *
+ dm_atomic_state_alloc(struct drm_device *dev)
+ {
+ struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
++ struct validate_context *new_ctx;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct dc *dc = adev->dm.dc;
+
+- if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
+- kfree(state);
++ if (!state)
+ return NULL;
+- }
++
++ if (drm_atomic_state_init(dev, &state->base) < 0)
++ goto fail;
++
++ /* copy existing configuration */
++ new_ctx = dm_alloc(sizeof(*new_ctx));
++
++ if (!new_ctx)
++ goto fail;
++
++ atomic_inc(&new_ctx->ref_count);
++
++ dc_resource_validate_ctx_copy_construct_current(dc, new_ctx);
++
++ state->context = new_ctx;
+
+ return &state->base;
++
++fail:
++ kfree(state);
++ return NULL;
+ }
+
+ static void
+@@ -864,7 +887,7 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
+ };
+
+ void amdgpu_dm_update_connector_after_detect(
+- struct amdgpu_dm_connector *aconnector)
++ struct amdgpu_connector *aconnector)
+ {
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+@@ -970,7 +993,7 @@ void amdgpu_dm_update_connector_after_detect(
+
+ static void handle_hpd_irq(void *param)
+ {
+- struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
++ struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+
+@@ -978,10 +1001,6 @@ static void handle_hpd_irq(void *param)
+ * since (for MST case) MST does this in it's own context.
+ */
+ mutex_lock(&aconnector->hpd_lock);
+-
+- if (aconnector->fake_enable)
+- aconnector->fake_enable = false;
+-
+ if (dc_link_detect(aconnector->dc_link, false)) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+@@ -997,7 +1016,7 @@ static void handle_hpd_irq(void *param)
+
+ }
+
+-static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
++static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
+ {
+ uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+ uint8_t dret;
+@@ -1076,7 +1095,7 @@ static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
+
+ static void handle_hpd_rx_irq(void *param)
+ {
+- struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
++ struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ const struct dc_link *dc_link = aconnector->dc_link;
+@@ -1115,7 +1134,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
+ {
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+- struct amdgpu_dm_connector *aconnector;
++ struct amdgpu_connector *aconnector;
+ const struct dc_link *dc_link;
+ struct dc_interrupt_params int_params = {0};
+
+@@ -1125,7 +1144,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+
+- aconnector = to_amdgpu_dm_connector(connector);
++ aconnector = to_amdgpu_connector(connector);
+ dc_link = aconnector->dc_link;
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+@@ -1365,7 +1384,11 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+ return bd->props.brightness;
+ }
+
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
++static struct backlight_ops amdgpu_dm_backlight_ops = {
++#else
+ static const struct backlight_ops amdgpu_dm_backlight_ops = {
++#endif
+ .get_brightness = amdgpu_dm_backlight_get_brightness,
+ .update_status = amdgpu_dm_backlight_update_status,
+ };
+@@ -1373,19 +1396,28 @@ static const struct backlight_ops amdgpu_dm_backlight_ops = {
+ void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
+ {
+ char bl_name[16];
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34)
+ struct backlight_properties props = { 0 };
+
+ props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ props.type = BACKLIGHT_RAW;
++#endif
+
+ snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
+ dm->adev->ddev->primary->index);
+
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34)
+ dm->backlight_dev = backlight_device_register(bl_name,
+ dm->adev->ddev->dev,
+ dm,
+ &amdgpu_dm_backlight_ops,
+ &props);
++#else
++ dm->backlight_dev = backlight_device_register(bl_name,
++ dm->adev->ddev->dev,
++ dm,
++ &amdgpu_dm_backlight_ops);
++#endif
+
+ if (NULL == dm->backlight_dev)
+ DRM_ERROR("DM: Backlight registration failed!\n");
+@@ -1406,7 +1438,7 @@ int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ {
+ struct amdgpu_display_manager *dm = &adev->dm;
+ uint32_t i;
+- struct amdgpu_dm_connector *aconnector = NULL;
++ struct amdgpu_connector *aconnector = NULL;
+ struct amdgpu_encoder *aencoder = NULL;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ uint32_t link_cnt;
+@@ -1511,8 +1543,8 @@ int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ goto fail_free_encoder;
+ }
+ /*
+- * Temporary disable until pplib/smu interaction is implemented
+- */
++ * Temporary disable until pplib/smu interaction is implemented
++ */
+ dm->dc->debug.disable_stutter = true;
+ break;
+ #endif
+@@ -1772,12 +1804,6 @@ static bool modeset_required(struct drm_crtc_state *crtc_state,
+ struct dc_stream_state *new_stream,
+ struct dc_stream_state *old_stream)
+ {
+- if (dc_is_stream_unchanged(new_stream, old_stream)) {
+- crtc_state->mode_changed = false;
+- DRM_DEBUG_KMS("Mode change not required, setting mode_changed to %d",
+- crtc_state->mode_changed);
+- }
+-
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return false;
+
+@@ -1900,7 +1926,11 @@ static int fill_plane_attributes_from_fb(
+ if (ret)
+ return ret;
+
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
++ switch (fb->pixel_format) {
++#else
+ switch (fb->format->format) {
++#endif
+ case DRM_FORMAT_C8:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+ break;
+@@ -1927,7 +1957,11 @@ static int fill_plane_attributes_from_fb(
+ break;
+ default:
+ DRM_ERROR("Unsupported screen format %s\n",
+- drm_get_format_name(fb->format->format, &format_name));
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
++ kcl_drm_get_format_name(fb->pixel_format, &format_name));
++#else
++ kcl_drm_get_format_name(fb->format->format, &format_name));
++#endif
+ return -EINVAL;
+ }
+
+@@ -1940,7 +1974,11 @@ static int fill_plane_attributes_from_fb(
+ plane_state->plane_size.grph.surface_size.width = fb->width;
+ plane_state->plane_size.grph.surface_size.height = fb->height;
+ plane_state->plane_size.grph.surface_pitch =
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
++ fb->pitches[0] / (fb->bits_per_pixel / 8);
++#else
+ fb->pitches[0] / fb->format->cpp[0];
++#endif
+ /* TODO: unhardcode */
+ plane_state->color_space = COLOR_SPACE_SRGB;
+
+@@ -2103,18 +2141,18 @@ static int fill_plane_attributes(
+
+ /*****************************************************************************/
+
+-struct amdgpu_dm_connector *aconnector_from_drm_crtc_id(
++struct amdgpu_connector *aconnector_from_drm_crtc_id(
+ const struct drm_crtc *crtc)
+ {
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+- struct amdgpu_dm_connector *aconnector;
++ struct amdgpu_connector *aconnector;
+
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+
+- aconnector = to_amdgpu_dm_connector(connector);
++ aconnector = to_amdgpu_connector(connector);
+
+ if (aconnector->base.state->crtc != &acrtc->base)
+ continue;
+@@ -2415,27 +2453,8 @@ static void decide_crtc_timing_for_drm_display_mode(
+ }
+ }
+
+-static void create_fake_sink(struct amdgpu_dm_connector *aconnector)
+-{
+- struct dc_sink *sink = NULL;
+- struct dc_sink_init_data sink_init_data = { 0 };
+-
+- sink_init_data.link = aconnector->dc_link;
+- sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
+-
+- sink = dc_sink_create(&sink_init_data);
+- if (!sink)
+- DRM_ERROR("Failed to create sink!\n");
+-
+- sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
+- aconnector->fake_enable = true;
+-
+- aconnector->dc_sink = sink;
+- aconnector->dc_link->local_sink = sink;
+-}
+-
+ static struct dc_stream_state *create_stream_for_sink(
+- struct amdgpu_dm_connector *aconnector,
++ struct amdgpu_connector *aconnector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state)
+ {
+@@ -2456,18 +2475,6 @@ static struct dc_stream_state *create_stream_for_sink(
+ }
+
+ drm_connector = &aconnector->base;
+-
+- if (!aconnector->dc_sink) {
+- /*
+- * Exclude MST from creating fake_sink
+- * TODO: need to enable MST into fake_sink feature
+- */
+- if (aconnector->mst_port)
+- goto stream_create_fail;
+-
+- create_fake_sink(aconnector);
+- }
+-
+ stream = dc_create_stream_for_sink(aconnector->dc_sink);
+
+ if (stream == NULL) {
+@@ -2522,6 +2529,88 @@ void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
+ kfree(crtc);
+ }
+
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
++static int amdgpu_atomic_helper_page_flip(struct drm_crtc *crtc,
++ struct drm_framebuffer *fb,
++ struct drm_pending_vblank_event *event,
++ uint32_t flags)
++{
++ struct drm_plane *plane = crtc->primary;
++ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
++ struct drm_atomic_state *state;
++ struct drm_plane_state *plane_state;
++ struct drm_crtc_state *crtc_state;
++ int ret = 0;
++
++ state = drm_atomic_state_alloc(plane->dev);
++ if (!state)
++ return -ENOMEM;
++
++ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
++retry:
++ crtc_state = drm_atomic_get_crtc_state(state, crtc);
++ if (IS_ERR(crtc_state)) {
++ ret = PTR_ERR(crtc_state);
++ goto fail;
++ }
++ crtc_state->event = event;
++
++ plane_state = drm_atomic_get_plane_state(state, plane);
++ if (IS_ERR(plane_state)) {
++ ret = PTR_ERR(plane_state);
++ goto fail;
++ }
++
++ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
++ if (ret != 0)
++ goto fail;
++ drm_atomic_set_fb_for_plane(plane_state, fb);
++
++ /* Make sure we don't accidentally do a full modeset. */
++ state->allow_modeset = false;
++ if (!crtc_state->active) {
++ DRM_DEBUG_ATOMIC("[CRTC:%d] disabled, rejecting legacy flip\n",
++ crtc->base.id);
++ ret = -EINVAL;
++ goto fail;
++ }
++ acrtc->flip_flags = flags;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0) && !defined(OS_NAME_RHEL_7_4)
++ ret = drm_atomic_async_commit(state);
++#else
++ ret = drm_atomic_nonblocking_commit(state);
++#endif
++ if (ret != 0)
++ goto fail;
++
++ /* Driver takes ownership of state on successful async commit. */
++ return 0;
++fail:
++ if (ret == -EDEADLK)
++ goto backoff;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && !defined(OS_NAME_RHEL_7_4)
++ drm_atomic_state_free(state);
++#else
++ drm_atomic_state_put(state);
++#endif
++
++ return ret;
++backoff:
++ drm_atomic_state_clear(state);
++ drm_atomic_legacy_backoff(state);
++
++ /*
++ * Someone might have exchanged the framebuffer while we dropped locks
++ * in the backoff code. We need to fix up the fb refcount tracking the
++ * core does for us.
++ */
++ plane->old_fb = plane->fb;
++
++ goto retry;
++}
++#endif
++
+ static void dm_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+ {
+@@ -2585,7 +2674,11 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
+ .set_config = drm_atomic_helper_set_config,
+ .set_property = drm_atomic_helper_crtc_set_property,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
++ .page_flip = amdgpu_atomic_helper_page_flip,
++#else
+ .page_flip = drm_atomic_helper_page_flip,
++#endif
+ .atomic_duplicate_state = dm_crtc_duplicate_state,
+ .atomic_destroy_state = dm_crtc_destroy_state,
+ };
+@@ -2594,15 +2687,14 @@ static enum drm_connector_status
+ amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+ {
+ bool connected;
+- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
++ struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+
+ /* Notes:
+ * 1. This interface is NOT called in context of HPD irq.
+ * 2. This interface *is called* in context of user-mode ioctl. Which
+ * makes it a bad place for *any* MST-related activit. */
+
+- if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
+- !aconnector->fake_enable)
++ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ connected = (aconnector->dc_sink != NULL);
+ else
+ connected = (aconnector->base.force == DRM_FORCE_ON);
+@@ -2614,14 +2706,16 @@ amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+ /* Compare user free sync property with immunable property free sync capable
+ * and if display is not free sync capable sets free sync property to 0
+ */
+-static int amdgpu_freesync_update_property_atomic(struct drm_connector *connector)
++static int amdgpu_freesync_update_property_atomic(
++ struct drm_connector *connector,
++ uint64_t val_capable)
+ {
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ return drm_object_property_set_value(&connector->base,
+ adev->mode_info.freesync_property,
+- 0);
++ val_capable);
+
+
+ }
+@@ -2789,7 +2883,7 @@ int amdgpu_dm_connector_atomic_get_property(
+
+ void amdgpu_dm_connector_destroy(struct drm_connector *connector)
+ {
+- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
++ struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+ const struct dc_link *link = aconnector->dc_link;
+ struct amdgpu_device *adev = connector->dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+@@ -2889,7 +2983,7 @@ static int get_modes(struct drm_connector *connector)
+ return amdgpu_dm_connector_get_modes(connector);
+ }
+
+-static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
++static void create_eml_sink(struct amdgpu_connector *aconnector)
+ {
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+@@ -2922,7 +3016,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
+ aconnector->dc_em_sink;
+ }
+
+-static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
++static void handle_edid_mgmt(struct amdgpu_connector *aconnector)
+ {
+ struct dc_link *link = (struct dc_link *)aconnector->dc_link;
+
+@@ -2948,7 +3042,7 @@ int amdgpu_dm_connector_mode_valid(
+ struct amdgpu_device *adev = connector->dev->dev_private;
+ /* TODO: Unhardcode stream count */
+ struct dc_stream_state *stream;
+- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
++ struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN))
+@@ -2961,7 +3055,7 @@ int amdgpu_dm_connector_mode_valid(
+ !aconnector->dc_em_sink)
+ handle_edid_mgmt(aconnector);
+
+- dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
++ dc_sink = to_amdgpu_connector(connector)->dc_sink;
+
+ if (dc_sink == NULL) {
+ DRM_ERROR("dc_sink is NULL!\n");
+@@ -3106,6 +3200,10 @@ void dm_drm_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+ {
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
++
++ if (dm_plane_state->dc_state)
++ dc_plane_state_release(dm_plane_state->dc_state);
++
+ drm_atomic_helper_plane_destroy_state(plane, state);
+ }
+
+@@ -3119,9 +3217,23 @@ static const struct drm_plane_funcs dm_plane_funcs = {
+ .atomic_destroy_state = dm_drm_plane_destroy_state,
+ };
+
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+ static int dm_plane_helper_prepare_fb(
+ struct drm_plane *plane,
+ struct drm_plane_state *new_state)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) || \
++ defined(OS_NAME_RHEL_6) || \
++ defined(OS_NAME_RHEL_7_3) || \
++ defined(OS_NAME_RHEL_7_4)
++static int dm_plane_helper_prepare_fb(
++ struct drm_plane *plane,
++ const struct drm_plane_state *new_state)
++#else
++static int dm_plane_helper_prepare_fb(
++ struct drm_plane *plane,
++ struct drm_framebuffer *fb,
++ const struct drm_plane_state *new_state)
++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) */
+ {
+ struct amdgpu_framebuffer *afb;
+ struct drm_gem_object *obj;
+@@ -3189,9 +3301,23 @@ static int dm_plane_helper_prepare_fb(
+ return 0;
+ }
+
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+ static void dm_plane_helper_cleanup_fb(
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) || \
++ defined(OS_NAME_RHEL_6) || \
++ defined(OS_NAME_RHEL_7_3) || \
++ defined(OS_NAME_RHEL_7_4)
++static void dm_plane_helper_cleanup_fb(
++ struct drm_plane *plane,
++ const struct drm_plane_state *old_state)
++#else
++static void dm_plane_helper_cleanup_fb(
++ struct drm_plane *plane,
++ struct drm_framebuffer *fb,
++ const struct drm_plane_state *old_state)
++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) */
+ {
+ struct amdgpu_bo *rbo;
+ struct amdgpu_framebuffer *afb;
+@@ -3218,7 +3344,7 @@ int dm_create_validation_set_for_connector(struct drm_connector *connector,
+ {
+ int result = MODE_ERROR;
+ struct dc_sink *dc_sink =
+- to_amdgpu_dm_connector(connector)->dc_sink;
++ to_amdgpu_connector(connector)->dc_sink;
+ /* TODO: Unhardcode stream count */
+ struct dc_stream_state *stream;
+
+@@ -3309,33 +3435,33 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+ case DRM_PLANE_TYPE_PRIMARY:
+ aplane->base.format_default = true;
+
+- res = drm_universal_plane_init(
++ res = kcl_drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
+ possible_crtcs,
+ &dm_plane_funcs,
+ rgb_formats,
+- ARRAY_SIZE(rgb_formats), NULL,
++ ARRAY_SIZE(rgb_formats),
+ aplane->base.type, NULL);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+- res = drm_universal_plane_init(
++ res = kcl_drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
+ possible_crtcs,
+ &dm_plane_funcs,
+ yuv_formats,
+- ARRAY_SIZE(yuv_formats), NULL,
++ ARRAY_SIZE(yuv_formats),
+ aplane->base.type, NULL);
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+- res = drm_universal_plane_init(
++ res = kcl_drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
+ possible_crtcs,
+ &dm_plane_funcs,
+ cursor_formats,
+- ARRAY_SIZE(cursor_formats), NULL,
++ ARRAY_SIZE(cursor_formats),
+ aplane->base.type, NULL);
+ break;
+ }
+@@ -3365,7 +3491,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+ if (!acrtc)
+ goto fail;
+
+- res = drm_crtc_init_with_planes(
++ res = kcl_drm_crtc_init_with_planes(
+ dm->ddev,
+ &acrtc->base,
+ plane,
+@@ -3479,8 +3605,8 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+- struct amdgpu_dm_connector *amdgpu_dm_connector =
+- to_amdgpu_dm_connector(connector);
++ struct amdgpu_connector *amdgpu_connector =
++ to_amdgpu_connector(connector);
+ int i;
+ int n;
+ struct mode_size {
+@@ -3528,7 +3654,7 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
+ common_modes[i].name, common_modes[i].w,
+ common_modes[i].h);
+ drm_mode_probed_add(connector, mode);
+- amdgpu_dm_connector->num_modes++;
++ amdgpu_connector->num_modes++;
+ }
+ }
+
+@@ -3536,41 +3662,41 @@ static void amdgpu_dm_connector_ddc_get_modes(
+ struct drm_connector *connector,
+ struct edid *edid)
+ {
+- struct amdgpu_dm_connector *amdgpu_dm_connector =
+- to_amdgpu_dm_connector(connector);
++ struct amdgpu_connector *amdgpu_connector =
++ to_amdgpu_connector(connector);
+
+ if (edid) {
+ /* empty probed_modes */
+ INIT_LIST_HEAD(&connector->probed_modes);
+- amdgpu_dm_connector->num_modes =
++ amdgpu_connector->num_modes =
+ drm_add_edid_modes(connector, edid);
+
+ drm_edid_to_eld(connector, edid);
+
+ amdgpu_dm_get_native_mode(connector);
+ } else
+- amdgpu_dm_connector->num_modes = 0;
++ amdgpu_connector->num_modes = 0;
+ }
+
+ int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+ {
+ const struct drm_connector_helper_funcs *helper =
+ connector->helper_private;
+- struct amdgpu_dm_connector *amdgpu_dm_connector =
+- to_amdgpu_dm_connector(connector);
++ struct amdgpu_connector *amdgpu_connector =
++ to_amdgpu_connector(connector);
+ struct drm_encoder *encoder;
+- struct edid *edid = amdgpu_dm_connector->edid;
++ struct edid *edid = amdgpu_connector->edid;
+
+ encoder = helper->best_encoder(connector);
+
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+- return amdgpu_dm_connector->num_modes;
++ return amdgpu_connector->num_modes;
+ }
+
+ void amdgpu_dm_connector_init_helper(
+ struct amdgpu_display_manager *dm,
+- struct amdgpu_dm_connector *aconnector,
++ struct amdgpu_connector *aconnector,
+ int connector_type,
+ struct dc_link *link,
+ int link_index)
+@@ -3698,7 +3824,7 @@ static struct amdgpu_i2c_adapter *create_i2c(
+ */
+ int amdgpu_dm_connector_init(
+ struct amdgpu_display_manager *dm,
+- struct amdgpu_dm_connector *aconnector,
++ struct amdgpu_connector *aconnector,
+ uint32_t link_index,
+ struct amdgpu_encoder *aencoder)
+ {
+@@ -3810,7 +3936,7 @@ int amdgpu_dm_encoder_init(
+ {
+ struct amdgpu_device *adev = dev->dev_private;
+
+- int res = drm_encoder_init(dev,
++ int res = kcl_drm_encoder_init(dev,
+ &aencoder->base,
+ &amdgpu_dm_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS,
+@@ -4128,9 +4254,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ continue;
+ }
+
+- if (!fb || !crtc || pcrtc != crtc || !crtc->state->active ||
+- (!crtc->state->planes_changed &&
+- !pcrtc->state->color_mgmt_changed))
++ if (!fb || !crtc || pcrtc != crtc || !crtc->state->active)
+ continue;
+
+ pflip_needed = !state->allow_modeset;
+@@ -4160,7 +4284,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ * TODO Check if it's correct
+ */
+ *wait_for_vblank =
+- acrtc_attach->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
++ acrtc_attach->flip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
+ false : true;
+
+ /* TODO: Needs rework for multiplane flip */
+@@ -4172,10 +4296,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ fb,
+ drm_crtc_vblank_count(crtc) + *wait_for_vblank);
+
+- /*TODO BUG remove ASAP in 4.12 to avoid race between worker and flip IOCTL */
++ /*TODO BUG remove ASAP in 4.12 to avoid race between worker and flip IOCTL */
+
+- /*clean up the flags for next usage*/
+- acrtc_attach->flip_flags = 0;
++ /*clean up the flags for next usage*/
++ acrtc_attach->flip_flags = 0;
+ }
+
+ }
+@@ -4252,7 +4376,7 @@ void amdgpu_dm_atomic_commit_tail(
+ struct drm_connector_state *old_conn_state;
+ struct dm_crtc_state *old_acrtc_state, *new_acrtc_state;
+
+- drm_atomic_helper_update_legacy_modeset_state(dev, state);
++ kcl_drm_atomic_helper_update_legacy_modeset_state(dev, state);
+
+ dm_state = to_dm_atomic_state(state);
+
+@@ -4264,6 +4388,7 @@ void amdgpu_dm_atomic_commit_tail(
+ new_acrtc_state = to_dm_crtc_state(new_state);
+ old_acrtc_state = to_dm_crtc_state(old_crtc_state);
+
++#if !defined(OS_NAME_RHEL_7_2)
+ DRM_DEBUG_KMS(
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+ "planes_changed:%d, mode_changed:%d,active_changed:%d,"
+@@ -4275,6 +4400,7 @@ void amdgpu_dm_atomic_commit_tail(
+ new_state->mode_changed,
+ new_state->active_changed,
+ new_state->connectors_changed);
++#endif
+
+ /* handles headless hotplug case, updating new_state and
+ * aconnector as needed
+@@ -4341,7 +4467,7 @@ void amdgpu_dm_atomic_commit_tail(
+ */
+ if (adev->dm.freesync_module) {
+ for (i = 0; i < new_crtcs_count; i++) {
+- struct amdgpu_dm_connector *aconnector = NULL;
++ struct amdgpu_connector *aconnector = NULL;
+
+ new_acrtc_state = to_dm_crtc_state(new_crtcs[i]->base.state);
+
+@@ -4385,7 +4511,7 @@ void amdgpu_dm_atomic_commit_tail(
+
+ /* Handle scaling and undersacn changes*/
+ for_each_connector_in_state(state, connector, old_conn_state, i) {
+- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
++ struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+ struct dm_connector_state *con_new_state =
+ to_dm_connector_state(aconnector->base.state);
+ struct dm_connector_state *con_old_state =
+@@ -4523,7 +4649,11 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
+
+ err:
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && !defined(OS_NAME_RHEL_7_4)
++ drm_atomic_state_free(state);
++#else
+ drm_atomic_state_put(state);
++#endif
+
+ return ret;
+ }
+@@ -4535,7 +4665,7 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
+ */
+ void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector)
+ {
+- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
++ struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+ struct amdgpu_crtc *disconnected_acrtc;
+ struct dm_crtc_state *acrtc_state;
+
+@@ -4557,77 +4687,6 @@ void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector
+ dm_force_atomic_commit(&aconnector->base);
+ }
+
+-static uint32_t add_val_sets_plane(
+- struct dc_validation_set *val_sets,
+- uint32_t set_count,
+- const struct dc_stream_state *stream,
+- struct dc_plane_state *plane_state)
+-{
+- uint32_t i = 0, j = 0;
+-
+- while (i < set_count) {
+- if (val_sets[i].stream == stream) {
+- while (val_sets[i].plane_states[j])
+- j++;
+- break;
+- }
+- ++i;
+- }
+-
+- val_sets[i].plane_states[j] = plane_state;
+- val_sets[i].plane_count++;
+-
+- return val_sets[i].plane_count;
+-}
+-
+-static uint32_t update_in_val_sets_stream(
+- struct dc_validation_set *val_sets,
+- uint32_t set_count,
+- struct dc_stream_state *old_stream,
+- struct dc_stream_state *new_stream,
+- struct drm_crtc *crtc)
+-{
+- uint32_t i = 0;
+-
+- while (i < set_count) {
+- if (val_sets[i].stream == old_stream)
+- break;
+- ++i;
+- }
+-
+- val_sets[i].stream = new_stream;
+-
+- if (i == set_count)
+- /* nothing found. add new one to the end */
+- return set_count + 1;
+-
+- return set_count;
+-}
+-
+-static uint32_t remove_from_val_sets(
+- struct dc_validation_set *val_sets,
+- uint32_t set_count,
+- const struct dc_stream_state *stream)
+-{
+- int i;
+-
+- for (i = 0; i < set_count; i++)
+- if (val_sets[i].stream == stream)
+- break;
+-
+- if (i == set_count) {
+- /* nothing found */
+- return set_count;
+- }
+-
+- set_count--;
+-
+- for (; i < set_count; i++)
+- val_sets[i] = val_sets[i + 1];
+-
+- return set_count;
+-}
+-
+ /*
+ * Grabs all modesetting locks to serialize against any blocking commits,
+ * Waits for completion of all non blocking commits.
+@@ -4678,293 +4737,410 @@ static int do_aquire_global_lock(
+ return ret < 0 ? ret : 0;
+ }
+
++static int dm_update_crtcs_state(
++ struct dc *dc,
++ struct drm_atomic_state *state,
++ bool enable,
++ bool *lock_and_validation_needed)
++{
++ struct drm_crtc *crtc;
++ struct drm_crtc_state *crtc_state;
++ int i;
++ struct dm_crtc_state *old_acrtc_state, *new_acrtc_state;
++ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
++ int ret = 0;
++
++ /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
++ /* update changed items */
++ for_each_crtc_in_state(state, crtc, crtc_state, i) {
++ struct amdgpu_crtc *acrtc = NULL;
++ struct amdgpu_connector *aconnector = NULL;
++ struct dc_stream_state *new_stream = NULL;
++ struct drm_connector_state *conn_state = NULL;
++ struct dm_connector_state *dm_conn_state = NULL;
++
++
++ old_acrtc_state = to_dm_crtc_state(crtc->state);
++ new_acrtc_state = to_dm_crtc_state(crtc_state);
++ acrtc = to_amdgpu_crtc(crtc);
++
++ aconnector = amdgpu_dm_find_first_crct_matching_connector(state, crtc, true);
++
++ /* TODO This hack should go away */
++ if (aconnector && aconnector->dc_sink) {
++ conn_state = drm_atomic_get_connector_state(state,
++ &aconnector->base);
++
++ if (IS_ERR(conn_state)) {
++ ret = PTR_ERR_OR_ZERO(conn_state);
++ break;
++ }
++
++ dm_conn_state = to_dm_connector_state(conn_state);
++
++ new_stream = create_stream_for_sink(aconnector,
++ &crtc_state->mode,
++ dm_conn_state);
++
++ /*
++ * we can have no stream on ACTION_SET if a display
++ * was disconnected during S3, in this case it is not an
++ * error, the OS will be updated after detection, and
++ * do the right thing on next atomic commit
++ */
++
++ if (!new_stream) {
++ DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
++ __func__, acrtc->base.base.id);
++ break;
++ }
++ }
++
++ if (dc_is_stream_unchanged(new_stream,
++ old_acrtc_state->stream)) {
++
++ crtc_state->mode_changed = false;
++
++ DRM_DEBUG_KMS("Mode change not required, setting mode_changed to %d",
++ crtc_state->mode_changed);
++ }
++
++
++ if (!drm_atomic_crtc_needs_modeset(crtc_state))
++ continue;
++
++#if !defined(OS_NAME_RHEL_7_2)
++ DRM_DEBUG_KMS(
++ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
++ "planes_changed:%d, mode_changed:%d,active_changed:%d,"
++ "connectors_changed:%d\n",
++ acrtc->crtc_id,
++ crtc_state->enable,
++ crtc_state->active,
++ crtc_state->planes_changed,
++ crtc_state->mode_changed,
++ crtc_state->active_changed,
++ crtc_state->connectors_changed);
++#endif
++
++ /* Remove stream for any changed/disabled CRTC */
++ if (!enable) {
++
++ if (!old_acrtc_state->stream)
++ continue;
++
++ DRM_DEBUG_KMS("Disabling DRM crtc: %d\n",
++ crtc->base.id);
++
++ /* i.e. reset mode */
++ if (!dc_remove_stream_from_ctx(
++ dc,
++ dm_state->context,
++ old_acrtc_state->stream)) {
++ ret = -EINVAL;
++ break;
++ }
++
++ dc_stream_release(old_acrtc_state->stream);
++ new_acrtc_state->stream = NULL;
++
++ *lock_and_validation_needed = true;
++
++ } else {/* Add stream for any updated/enabled CRTC */
++
++ if (modereset_required(crtc_state))
++ continue;
++
++ if (modeset_required(crtc_state, new_stream,
++ old_acrtc_state->stream)) {
++
++ WARN_ON(new_acrtc_state->stream);
++
++ new_acrtc_state->stream = new_stream;
++ dc_stream_retain(new_stream);
++
++ DRM_DEBUG_KMS("Enabling DRM crtc: %d\n",
++ crtc->base.id);
++
++ if (!dc_add_stream_to_ctx(
++ dc,
++ dm_state->context,
++ new_acrtc_state->stream)) {
++ ret = -EINVAL;
++ break;
++ }
++
++ *lock_and_validation_needed = true;
++ }
++ }
++
++ /* Release extra reference */
++ if (new_stream)
++ dc_stream_release(new_stream);
++ }
++
++ return ret;
++}
++
++static int dm_update_planes_state(
++ struct dc *dc,
++ struct drm_atomic_state *state,
++ bool enable,
++ bool *lock_and_validation_needed)
++{
++ struct drm_crtc *new_plane_crtc, *old_plane_crtc;
++ struct drm_crtc_state *new_crtc_state;
++ struct drm_plane *plane;
++ struct drm_plane_state *old_plane_state, *new_plane_state;
++ struct dm_crtc_state *new_acrtc_state, *old_acrtc_state;
++ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
++ struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
++ int i ;
++ /* TODO return page_flip_needed() function */
++ bool pflip_needed = !state->allow_modeset;
++ int ret = 0;
++
++ if (pflip_needed)
++ return ret;
++
++ /* Add new planes */
++ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
++ new_plane_crtc = new_plane_state->crtc;
++ old_plane_crtc = old_plane_state->crtc;
++ new_dm_plane_state = to_dm_plane_state(new_plane_state);
++ old_dm_plane_state = to_dm_plane_state(old_plane_state);
++
++ /*TODO Implement atomic check for cursor plane */
++ if (plane->type == DRM_PLANE_TYPE_CURSOR)
++ continue;
++
++ /* Remove any changed/removed planes */
++ if (!enable) {
++
++ if (!old_plane_crtc)
++ continue;
++
++ old_acrtc_state = to_dm_crtc_state(
++ drm_atomic_get_old_crtc_state(
++ state,
++ old_plane_crtc));
++
++ if (!old_acrtc_state->stream)
++ continue;
++
++ DRM_DEBUG_KMS("Disabling DRM plane: %d on DRM crtc %d\n",
++ plane->base.id, old_plane_crtc->base.id);
++
++ if (!dc_remove_plane_from_context(
++ dc,
++ old_acrtc_state->stream,
++ old_dm_plane_state->dc_state,
++ dm_state->context)) {
++
++ ret = -EINVAL;
++ return ret;
++ }
++
++
++ dc_plane_state_release(old_dm_plane_state->dc_state);
++ new_dm_plane_state->dc_state = NULL;
++
++ *lock_and_validation_needed = true;
++
++ } else { /* Add new planes */
++
++ if (drm_atomic_plane_disabling(plane->state, new_plane_state))
++ continue;
++
++ if (!new_plane_crtc)
++ continue;
++
++ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
++ new_acrtc_state = to_dm_crtc_state(new_crtc_state);
++
++ if (!new_acrtc_state->stream)
++ continue;
++
++
++ WARN_ON(new_dm_plane_state->dc_state);
++
++ new_dm_plane_state->dc_state = dc_create_plane_state(dc);
++
++ DRM_DEBUG_KMS("Enabling DRM plane: %d on DRM crtc %d\n",
++ plane->base.id, new_plane_crtc->base.id);
++
++ if (!new_dm_plane_state->dc_state) {
++ ret = -EINVAL;
++ return ret;
++ }
++
++ ret = fill_plane_attributes(
++ new_plane_crtc->dev->dev_private,
++ new_dm_plane_state->dc_state,
++ new_plane_state,
++ new_crtc_state,
++ false);
++ if (ret)
++ return ret;
++
++
++ if (!dc_add_plane_to_context(
++ dc,
++ new_acrtc_state->stream,
++ new_dm_plane_state->dc_state,
++ dm_state->context)) {
++
++ ret = -EINVAL;
++ return ret;
++ }
++
++ *lock_and_validation_needed = true;
++ }
++ }
++
++
++ return ret;
++}
++
+ int amdgpu_dm_atomic_check(struct drm_device *dev,
+- struct drm_atomic_state *state)
+-{
+- struct dm_atomic_state *dm_state;
+- struct drm_crtc *crtc;
+- struct drm_crtc_state *crtc_state;
+- struct drm_plane *plane;
+- struct drm_plane_state *plane_state;
+- int i, j;
+- int ret;
+- struct amdgpu_device *adev = dev->dev_private;
+- struct dc *dc = adev->dm.dc;
+- struct drm_connector *connector;
+- struct drm_connector_state *conn_state;
+- int set_count;
+- struct dc_validation_set set[MAX_STREAMS] = { { 0 } };
+- struct dm_crtc_state *old_acrtc_state, *new_acrtc_state;
+-
+- /*
+- * This bool will be set for true for any modeset/reset
+- * or plane update which implies non fast surface update.
+- */
+- bool lock_and_validation_needed = false;
+-
+- ret = drm_atomic_helper_check_modeset(dev, state);
+-
+- if (ret) {
+- DRM_ERROR("Atomic state validation failed with error :%d !\n", ret);
+- return ret;
+- }
+-
+- dm_state = to_dm_atomic_state(state);
+-
+- /* copy existing configuration */
+- set_count = 0;
+- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+-
+- old_acrtc_state = to_dm_crtc_state(crtc->state);
+-
+- if (old_acrtc_state->stream) {
+- set[set_count].stream = old_acrtc_state->stream;
+- ++set_count;
+- }
+- }
+-
+- /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
+- /* update changed items */
+- for_each_crtc_in_state(state, crtc, crtc_state, i) {
+- struct amdgpu_crtc *acrtc = NULL;
+- struct amdgpu_dm_connector *aconnector = NULL;
+- struct dc_stream_state *new_stream = NULL;
+- struct drm_connector_state *conn_state = NULL;
+- struct dm_connector_state *dm_conn_state = NULL;
+-
+- old_acrtc_state = to_dm_crtc_state(crtc->state);
+- new_acrtc_state = to_dm_crtc_state(crtc_state);
+- acrtc = to_amdgpu_crtc(crtc);
+-
+- aconnector = amdgpu_dm_find_first_crct_matching_connector(state, crtc, true);
+-
+- DRM_DEBUG_KMS(
+- "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+- "planes_changed:%d, mode_changed:%d,active_changed:%d,"
+- "connectors_changed:%d\n",
+- acrtc->crtc_id,
+- crtc_state->enable,
+- crtc_state->active,
+- crtc_state->planes_changed,
+- crtc_state->mode_changed,
+- crtc_state->active_changed,
+- crtc_state->connectors_changed);
+-
+-
+- if (modereset_required(crtc_state)) {
+-
+- /* i.e. reset mode */
+- if (new_acrtc_state->stream) {
+- set_count = remove_from_val_sets(
+- set,
+- set_count,
+- new_acrtc_state->stream);
+-
+- dc_stream_release(new_acrtc_state->stream);
+- new_acrtc_state->stream = NULL;
+-
+- lock_and_validation_needed = true;
+- }
+-
+- } else {
+-
+- /* TODO This hack should go away */
+- if (aconnector) {
+- conn_state = drm_atomic_get_connector_state(state,
+- &aconnector->base);
+-
+- if (IS_ERR(conn_state)) {
+- ret = PTR_ERR_OR_ZERO(conn_state);
+- goto fail;
+- }
+-
+- dm_conn_state = to_dm_connector_state(conn_state);
+-
+- new_stream = create_stream_for_sink(aconnector,
+- &crtc_state->mode,
+- dm_conn_state);
+-
+- /*
+- * we can have no stream on ACTION_SET if a display
+- * was disconnected during S3, in this case it not and
+- * error, the OS will be updated after detection, and
+- * do the right thing on next atomic commit
+- */
+-
+- if (!new_stream) {
+- DRM_DEBUG_KMS("%s: Failed to create new stream for crtc %d\n",
+- __func__, acrtc->base.base.id);
+- break;
+- }
+-
+-
+- }
+-
+- if (modeset_required(crtc_state, new_stream,
+- old_acrtc_state->stream)) {
+-
+- if (new_acrtc_state->stream)
+- dc_stream_release(new_acrtc_state->stream);
+-
+- new_acrtc_state->stream = new_stream;
+-
+- set_count = update_in_val_sets_stream(
+- set,
+- set_count,
+- old_acrtc_state->stream,
+- new_acrtc_state->stream,
+- crtc);
+-
+- lock_and_validation_needed = true;
+- } else {
+- /*
+- * The new stream is unused, so we release it
+- */
+- if (new_stream)
+- dc_stream_release(new_stream);
+-
+- }
+- }
+-
+-
+- /*
+- * Hack: Commit needs planes right now, specifically for gamma
+- * TODO rework commit to check CRTC for gamma change
+- */
+- if (crtc_state->color_mgmt_changed) {
+-
+- ret = drm_atomic_add_affected_planes(state, crtc);
+- if (ret)
+- goto fail;
+- }
+- }
+-
+- /* Check scaling and undersacn changes*/
+- /*TODO Removed scaling changes validation due to inability to commit
+- * new stream into context w\o causing full reset. Need to
+- * decide how to handle.
+- */
+- for_each_connector_in_state(state, connector, conn_state, i) {
+- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+- struct dm_connector_state *con_old_state =
+- to_dm_connector_state(aconnector->base.state);
+- struct dm_connector_state *con_new_state =
+- to_dm_connector_state(conn_state);
+- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(con_new_state->base.crtc);
+-
+- /* Skip any modesets/resets */
+- if (!acrtc || drm_atomic_crtc_needs_modeset(acrtc->base.state))
+- continue;
+-
+- /* Skip any thing not scale or underscan changes */
+- if (!is_scaling_state_different(con_new_state, con_old_state))
+- continue;
+-
+- lock_and_validation_needed = true;
+- }
+-
+- for_each_crtc_in_state(state, crtc, crtc_state, i) {
+- new_acrtc_state = to_dm_crtc_state(crtc_state);
+-
+- for_each_plane_in_state(state, plane, plane_state, j) {
+- struct drm_crtc *plane_crtc = plane_state->crtc;
+- struct drm_framebuffer *fb = plane_state->fb;
+- bool pflip_needed;
+- struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
+-
+- /*TODO Implement atomic check for cursor plane */
+- if (plane->type == DRM_PLANE_TYPE_CURSOR)
+- continue;
+-
+- if (!fb || !plane_crtc || crtc != plane_crtc || !crtc_state->active)
+- continue;
+-
+- WARN_ON(!new_acrtc_state->stream);
+-
+- pflip_needed = !state->allow_modeset;
+- if (!pflip_needed) {
+- struct dc_plane_state *dc_plane_state;
+-
+- dc_plane_state = dc_create_plane_state(dc);
+-
+- if (dm_plane_state->dc_state)
+- dc_plane_state_release(dm_plane_state->dc_state);
+-
+- dm_plane_state->dc_state = dc_plane_state;
+-
+- ret = fill_plane_attributes(
+- plane_crtc->dev->dev_private,
+- dc_plane_state,
+- plane_state,
+- crtc_state,
+- false);
+- if (ret)
+- goto fail;
+-
+- add_val_sets_plane(set,
+- set_count,
+- new_acrtc_state->stream,
+- dc_plane_state);
+-
+- lock_and_validation_needed = true;
+- }
+- }
+- }
+-
+- /* Run this here since we want to validate the streams we created */
+- ret = drm_atomic_helper_check_planes(dev, state);
+- if (ret)
+- goto fail;
+-
+- /*
+- * For full updates case when
+- * removing/adding/updating streams on once CRTC while flipping
+- * on another CRTC,
+- * acquiring global lock will guarantee that any such full
+- * update commit
+- * will wait for completion of any outstanding flip using DRMs
+- * synchronization events.
+- */
+-
+- if (lock_and_validation_needed) {
+-
+- ret = do_aquire_global_lock(dev, state);
+- if (ret)
+- goto fail;
+- WARN_ON(dm_state->context);
+- dm_state->context = dc_get_validate_context(dc, set, set_count);
+- if (!dm_state->context) {
+- ret = -EINVAL;
+- goto fail;
+- }
+- }
+-
+- /* Must be success */
+- WARN_ON(ret);
+- return ret;
++ struct drm_atomic_state *state)
++{
++ int i;
++ int ret;
++ struct amdgpu_device *adev = dev->dev_private;
++ struct dc *dc = adev->dm.dc;
++ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
++ struct drm_connector *connector;
++ struct drm_connector_state *conn_state;
++ struct drm_crtc *crtc;
++ struct drm_crtc_state *crtc_state;
++
++ /*
++ * This bool will be set for true for any modeset/reset
++ * or plane update which implies non fast surface update.
++ */
++ bool lock_and_validation_needed = false;
++
++ ret = drm_atomic_helper_check_modeset(dev, state);
++
++ if (ret) {
++ DRM_ERROR("Atomic state validation failed with error :%d !\n", ret);
++ return ret;
++ }
++
++ /*
++ * Hack: Commit needs planes right now, specifically for gamma
++ * TODO rework commit to check CRTC for gamma change
++ */
++ for_each_crtc_in_state(state, crtc, crtc_state, i) {
++ if (crtc_state->color_mgmt_changed) {
++ ret = drm_atomic_add_affected_planes(state, crtc);
++ if (ret)
++ goto fail;
++ }
++ }
++
++ /* Remove existing planes if they are modified */
++ ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
++ if (ret) {
++ goto fail;
++ }
++
++ /* Disable all crtcs which require disable */
++ ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
++ if (ret) {
++ goto fail;
++ }
++
++ /* Enable all crtcs which require enable */
++ ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
++ if (ret) {
++ goto fail;
++ }
++
++ /* Add new/modified planes */
++ ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
++ if (ret) {
++ goto fail;
++ }
++
++ /* Run this here since we want to validate the streams we created */
++ ret = drm_atomic_helper_check_planes(dev, state);
++ if (ret)
++ goto fail;
++
++ /* Check scaling and underscan changes */
++ /*TODO Removed scaling changes validation due to inability to commit
++ * new stream into context w\o causing full reset. Need to
++ * decide how to handle.
++ */
++ for_each_connector_in_state(state, connector, conn_state, i) {
++ struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
++ struct dm_connector_state *con_old_state =
++ to_dm_connector_state(aconnector->base.state);
++ struct dm_connector_state *con_new_state =
++ to_dm_connector_state(conn_state);
++ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(con_new_state->base.crtc);
++
++ /* Skip any modesets/resets */
++ if (!acrtc || drm_atomic_crtc_needs_modeset(acrtc->base.state))
++ continue;
++
++ /* Skip any thing not scale or underscan changes */
++ if (!is_scaling_state_different(con_new_state, con_old_state))
++ continue;
++
++ lock_and_validation_needed = true;
++ }
++
++ /*
++ * For full updates case when
++ * removing/adding/updating streams on one CRTC while flipping
++ * on another CRTC,
++ * acquiring global lock will guarantee that any such full
++ * update commit
++ * will wait for completion of any outstanding flip using DRMs
++ * synchronization events.
++ */
++
++ if (lock_and_validation_needed) {
++
++ ret = do_aquire_global_lock(dev, state);
++ if (ret)
++ goto fail;
++
++ if (!dc_validate_global_state(dc, dm_state->context)) {
++ ret = -EINVAL;
++ goto fail;
++ }
++ }
++
++ /* Must be success */
++ WARN_ON(ret);
++ return ret;
+
+ fail:
+- if (ret == -EDEADLK)
+- DRM_DEBUG_KMS("Atomic check stopped due to to deadlock.\n");
+- else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
+- DRM_DEBUG_KMS("Atomic check stopped due to to signal.\n");
+- else
+- DRM_ERROR("Atomic check failed with err: %d .\n", ret);
++ if (ret == -EDEADLK)
++ DRM_DEBUG_KMS("Atomic check stopped due to to deadlock.\n");
++ else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
++ DRM_DEBUG_KMS("Atomic check stopped due to to signal.\n");
++ else
++ DRM_ERROR("Atomic check failed with err: %d \n", ret);
+
+- return ret;
++ return ret;
+ }
+
+ static bool is_dp_capable_without_timing_msa(
+ struct dc *dc,
+- struct amdgpu_dm_connector *amdgpu_dm_connector)
++ struct amdgpu_connector *amdgpu_connector)
+ {
+ uint8_t dpcd_data;
+ bool capable = false;
+
+- if (amdgpu_dm_connector->dc_link &&
++ if (amdgpu_connector->dc_link &&
+ dm_helpers_dp_read_dpcd(
+ NULL,
+- amdgpu_dm_connector->dc_link,
++ amdgpu_connector->dc_link,
+ DP_DOWN_STREAM_PORT_COUNT,
+ &dpcd_data,
+ sizeof(dpcd_data))) {
+@@ -4983,14 +5159,14 @@ void amdgpu_dm_add_sink_to_freesync_module(
+ struct detailed_timing *timing;
+ struct detailed_non_pixel *data;
+ struct detailed_data_monitor_range *range;
+- struct amdgpu_dm_connector *amdgpu_dm_connector =
+- to_amdgpu_dm_connector(connector);
++ struct amdgpu_connector *amdgpu_connector =
++ to_amdgpu_connector(connector);
+
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ edid_check_required = false;
+- if (!amdgpu_dm_connector->dc_sink) {
++ if (!amdgpu_connector->dc_sink) {
+ DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
+ return;
+ }
+@@ -5000,11 +5176,11 @@ void amdgpu_dm_add_sink_to_freesync_module(
+ * if edid non zero restrict freesync only for dp and edp
+ */
+ if (edid) {
+- if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
+- || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
++ if (amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
++ || amdgpu_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ edid_check_required = is_dp_capable_without_timing_msa(
+ adev->dm.dc,
+- amdgpu_dm_connector);
++ amdgpu_connector);
+ }
+ }
+ val_capable = 0;
+@@ -5029,53 +5205,55 @@ void amdgpu_dm_add_sink_to_freesync_module(
+ if (range->flags != 1)
+ continue;
+
+- amdgpu_dm_connector->min_vfreq = range->min_vfreq;
+- amdgpu_dm_connector->max_vfreq = range->max_vfreq;
+- amdgpu_dm_connector->pixel_clock_mhz =
++ amdgpu_connector->min_vfreq = range->min_vfreq;
++ amdgpu_connector->max_vfreq = range->max_vfreq;
++ amdgpu_connector->pixel_clock_mhz =
+ range->pixel_clock_mhz * 10;
+ break;
+ }
+
+- if (amdgpu_dm_connector->max_vfreq -
+- amdgpu_dm_connector->min_vfreq > 10) {
+- amdgpu_dm_connector->caps.supported = true;
+- amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
+- amdgpu_dm_connector->min_vfreq * 1000000;
+- amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
+- amdgpu_dm_connector->max_vfreq * 1000000;
++ if (amdgpu_connector->max_vfreq -
++ amdgpu_connector->min_vfreq > 10) {
++ amdgpu_connector->caps.supported = true;
++ amdgpu_connector->caps.min_refresh_in_micro_hz =
++ amdgpu_connector->min_vfreq * 1000000;
++ amdgpu_connector->caps.max_refresh_in_micro_hz =
++ amdgpu_connector->max_vfreq * 1000000;
+ val_capable = 1;
+ }
+ }
+ drm_object_property_set_value(&connector->base,
+ adev->mode_info.freesync_capable_property,
+ val_capable);
+- amdgpu_freesync_update_property_atomic(connector);
++ amdgpu_freesync_update_property_atomic(connector, val_capable);
+
+ }
+
+ void amdgpu_dm_remove_sink_from_freesync_module(
+ struct drm_connector *connector)
+ {
+- struct amdgpu_dm_connector *amdgpu_dm_connector =
+- to_amdgpu_dm_connector(connector);
++ struct amdgpu_connector *amdgpu_connector =
++ to_amdgpu_connector(connector);
+
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+- if (!amdgpu_dm_connector->dc_sink || !adev->dm.freesync_module) {
++ if (!amdgpu_connector->dc_sink || !adev->dm.freesync_module) {
+ DRM_ERROR("dc_sink NULL or no free_sync module.\n");
+ return;
+ }
+
+- amdgpu_dm_connector->min_vfreq = 0;
+- amdgpu_dm_connector->max_vfreq = 0;
+- amdgpu_dm_connector->pixel_clock_mhz = 0;
++ amdgpu_connector->min_vfreq = 0;
++ amdgpu_connector->max_vfreq = 0;
++ amdgpu_connector->pixel_clock_mhz = 0;
+
+- memset(&amdgpu_dm_connector->caps, 0, sizeof(amdgpu_dm_connector->caps));
++ memset(&amdgpu_connector->caps, 0, sizeof(amdgpu_connector->caps));
+
+ drm_object_property_set_value(&connector->base,
+ adev->mode_info.freesync_capable_property,
+ 0);
+- amdgpu_freesync_update_property_atomic(connector);
++ amdgpu_freesync_update_property_atomic(connector, 0);
+
+ }
++
++#endif
+--
+2.7.4
+