Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.14.71/4419-drm-amd-display-Add-harvest-IP-support-for-Vega20.patch')
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.14.71/4419-drm-amd-display-Add-harvest-IP-support-for-Vega20.patch | 247
1 file changed, 247 insertions(+), 0 deletions(-)
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4419-drm-amd-display-Add-harvest-IP-support-for-Vega20.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4419-drm-amd-display-Add-harvest-IP-support-for-Vega20.patch
new file mode 100644
index 00000000..f8a42bfc
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4419-drm-amd-display-Add-harvest-IP-support-for-Vega20.patch
@@ -0,0 +1,247 @@
+From 65af737105bd64feb320ebbad85e3fdf8d0d4afd Mon Sep 17 00:00:00 2001
+From: "Jerry (Fangzhi) Zuo" <Jerry.Zuo@amd.com>
+Date: Mon, 5 Mar 2018 16:12:23 -0500
+Subject: [PATCH 4419/5725] drm/amd/display: Add harvest IP support for Vega20
+
+Retrieve fuses to determine the availability of pipes, and
+eliminate pipes that cannot be used.
+
+Signed-off-by: Jerry (Fangzhi) Zuo <Jerry.Zuo@amd.com>
+Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Feifei Xu <Feifei.Xu@amd.com>
+---
+ .../drm/amd/display/dc/dce120/dce120_resource.c | 208 +++++++++++++++++++++
+ 1 file changed, 208 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index fda0157..545f35f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -814,6 +814,213 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
+ dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
+ }
+
++#ifdef CONFIG_DRM_AMD_DC_VG20
++static uint32_t read_pipe_fuses(struct dc_context *ctx)
++{
++ uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0);
++ /* VG20 supports at most 6 pipes */
++ value = value & 0x3f;
++ return value;
++}
++
++static bool construct(
++ uint8_t num_virtual_links,
++ struct dc *dc,
++ struct dce110_resource_pool *pool)
++{
++ unsigned int i;
++ int j;
++ struct dc_context *ctx = dc->ctx;
++ struct irq_service_init_data irq_init_data;
++ bool harvest_enabled = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev);
++ uint32_t pipe_fuses;
++
++ ctx->dc_bios->regs = &bios_regs;
++
++ pool->base.res_cap = &res_cap;
++ pool->base.funcs = &dce120_res_pool_funcs;
++
++ /* TODO: Fill more data from GreenlandAsicCapability.cpp */
++ pool->base.pipe_count = res_cap.num_timing_generator;
++ pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator;
++ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
++
++ dc->caps.max_downscale_ratio = 200;
++ dc->caps.i2c_speed_in_khz = 100;
++ dc->caps.max_cursor_size = 128;
++ dc->caps.dual_link_dvi = true;
++
++ dc->debug = debug_defaults;
++
++ /*************************************************
++ * Create resources *
++ *************************************************/
++
++ pool->base.clock_sources[DCE120_CLK_SRC_PLL0] =
++ dce120_clock_source_create(ctx, ctx->dc_bios,
++ CLOCK_SOURCE_COMBO_PHY_PLL0,
++ &clk_src_regs[0], false);
++ pool->base.clock_sources[DCE120_CLK_SRC_PLL1] =
++ dce120_clock_source_create(ctx, ctx->dc_bios,
++ CLOCK_SOURCE_COMBO_PHY_PLL1,
++ &clk_src_regs[1], false);
++ pool->base.clock_sources[DCE120_CLK_SRC_PLL2] =
++ dce120_clock_source_create(ctx, ctx->dc_bios,
++ CLOCK_SOURCE_COMBO_PHY_PLL2,
++ &clk_src_regs[2], false);
++ pool->base.clock_sources[DCE120_CLK_SRC_PLL3] =
++ dce120_clock_source_create(ctx, ctx->dc_bios,
++ CLOCK_SOURCE_COMBO_PHY_PLL3,
++ &clk_src_regs[3], false);
++ pool->base.clock_sources[DCE120_CLK_SRC_PLL4] =
++ dce120_clock_source_create(ctx, ctx->dc_bios,
++ CLOCK_SOURCE_COMBO_PHY_PLL4,
++ &clk_src_regs[4], false);
++ pool->base.clock_sources[DCE120_CLK_SRC_PLL5] =
++ dce120_clock_source_create(ctx, ctx->dc_bios,
++ CLOCK_SOURCE_COMBO_PHY_PLL5,
++ &clk_src_regs[5], false);
++ pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL;
++
++ pool->base.dp_clock_source =
++ dce120_clock_source_create(ctx, ctx->dc_bios,
++ CLOCK_SOURCE_ID_DP_DTO,
++ &clk_src_regs[0], true);
++
++ for (i = 0; i < pool->base.clk_src_count; i++) {
++ if (pool->base.clock_sources[i] == NULL) {
++ dm_error("DC: failed to create clock sources!\n");
++ BREAK_TO_DEBUGGER();
++ goto clk_src_create_fail;
++ }
++ }
++
++ pool->base.display_clock = dce120_disp_clk_create(ctx);
++ if (pool->base.display_clock == NULL) {
++ dm_error("DC: failed to create display clock!\n");
++ BREAK_TO_DEBUGGER();
++ goto disp_clk_create_fail;
++ }
++
++ pool->base.dmcu = dce_dmcu_create(ctx,
++ &dmcu_regs,
++ &dmcu_shift,
++ &dmcu_mask);
++ if (pool->base.dmcu == NULL) {
++ dm_error("DC: failed to create dmcu!\n");
++ BREAK_TO_DEBUGGER();
++ goto res_create_fail;
++ }
++
++ pool->base.abm = dce_abm_create(ctx,
++ &abm_regs,
++ &abm_shift,
++ &abm_mask);
++ if (pool->base.abm == NULL) {
++ dm_error("DC: failed to create abm!\n");
++ BREAK_TO_DEBUGGER();
++ goto res_create_fail;
++ }
++
++ irq_init_data.ctx = dc->ctx;
++ pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data);
++ if (!pool->base.irqs)
++ goto irqs_create_fail;
++
++ /* retrieve valid pipe fuses */
++ if (harvest_enabled)
++ pipe_fuses = read_pipe_fuses(ctx);
++
++ /* index to valid pipe resource */
++ j = 0;
++ for (i = 0; i < pool->base.pipe_count; i++) {
++ if (harvest_enabled) {
++ if ((pipe_fuses & (1 << i)) != 0) {
++ dm_error("DC: skip invalid pipe %d!\n", i);
++ continue;
++ }
++ }
++
++ pool->base.timing_generators[j] =
++ dce120_timing_generator_create(
++ ctx,
++ i,
++ &dce120_tg_offsets[i]);
++ if (pool->base.timing_generators[j] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error("DC: failed to create tg!\n");
++ goto controller_create_fail;
++ }
++
++ pool->base.mis[j] = dce120_mem_input_create(ctx, i);
++
++ if (pool->base.mis[j] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC: failed to create memory input!\n");
++ goto controller_create_fail;
++ }
++
++ pool->base.ipps[j] = dce120_ipp_create(ctx, i);
++ if (pool->base.ipps[j] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC: failed to create input pixel processor!\n");
++ goto controller_create_fail;
++ }
++
++ pool->base.transforms[j] = dce120_transform_create(ctx, i);
++ if (pool->base.transforms[j] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC: failed to create transform!\n");
++ goto res_create_fail;
++ }
++
++ pool->base.opps[j] = dce120_opp_create(
++ ctx,
++ i);
++ if (pool->base.opps[j] == NULL) {
++ BREAK_TO_DEBUGGER();
++ dm_error(
++ "DC: failed to create output pixel processor!\n");
++ }
++
++ /* check next valid pipe */
++ j++;
++ }
++
++ /* valid pipe num */
++ pool->base.pipe_count = j;
++ pool->base.timing_generator_count = j;
++
++ if (!resource_construct(num_virtual_links, dc, &pool->base,
++ &res_create_funcs))
++ goto res_create_fail;
++
++ /* Create hardware sequencer */
++ if (!dce120_hw_sequencer_create(dc))
++ goto controller_create_fail;
++
++ dc->caps.max_planes = pool->base.pipe_count;
++
++ bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
++
++ bw_calcs_data_update_from_pplib(dc);
++
++ return true;
++
++irqs_create_fail:
++controller_create_fail:
++disp_clk_create_fail:
++clk_src_create_fail:
++res_create_fail:
++
++ destruct(pool);
++
++ return false;
++}
++#else
+ static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+@@ -988,6 +1195,7 @@ static bool construct(
+
+ return false;
+ }
++#endif
+
+ struct resource_pool *dce120_create_resource_pool(
+ uint8_t num_virtual_links,
+--
+2.7.4
+
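For reference, the harvesting logic above reduces to masking the fuse register down to six pipe-disable bits and compacting the surviving pipes into contiguous resource indices (the running index j). The following is a minimal standalone sketch of that compaction, assuming, as the patch does for Vega20, that a set fuse bit marks a pipe that is fused off and that at most six pipes exist; the 0x3f mask and the skip-on-set-bit behaviour are taken from the patch, while every name below is illustrative rather than part of the kernel code.

#include <stdint.h>
#include <stdio.h>

#define MAX_PIPES 6  /* Vega20 exposes at most 6 display pipes */

/*
 * Walk physical pipe indices 0..num_pipes-1, skip any pipe whose fuse
 * bit is set (harvested), and record each surviving physical index in
 * the next free logical slot j. Returns the number of usable pipes,
 * which is what the patch finally stores in pool->base.pipe_count.
 */
static int compact_pipes(uint32_t pipe_fuses, int num_pipes,
                         int phys_of_logical[MAX_PIPES])
{
        int i, j = 0;

        pipe_fuses &= 0x3f;  /* only the low 6 bits are meaningful on VG20 */

        for (i = 0; i < num_pipes && i < MAX_PIPES; i++) {
                if (pipe_fuses & (1u << i))
                        continue;  /* pipe i is fused off, skip it */
                phys_of_logical[j++] = i;
        }
        return j;
}

int main(void)
{
        int map[MAX_PIPES];
        /* example: pipes 2 and 5 harvested -> fuse mask 0b100100 = 0x24 */
        int count = compact_pipes(0x24, MAX_PIPES, map);

        printf("%d usable pipes\n", count);
        for (int j = 0; j < count; j++)
                printf("logical pipe %d -> physical pipe %d\n", j, map[j]);
        return 0;
}

The kernel code performs the same compaction inline: j only advances for pipes that are actually instantiated, and the final value of j becomes both pipe_count and timing_generator_count for the pool.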