Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c           | 378
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h           |   3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c     |  10
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c   | 800
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c      |  37
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c   |  20
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c |  21
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c    |  30
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c  |  25
9 files changed, 964 insertions(+), 360 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 710edc70e37e..e4b33c67b634 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -94,6 +94,12 @@
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
+#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
+#endif
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@@ -696,7 +702,7 @@ static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
adev->mode_info.audio.enabled = false;
}
-void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
+static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
struct drm_audio_component *acomp = adev->dm.audio_component;
@@ -1079,6 +1085,10 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_RENOIR:
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+#endif
return 0;
case CHIP_NAVI12:
fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
@@ -1175,6 +1185,16 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
dmub_asic = DMUB_ASIC_DCN21;
fw_name_dmub = FIRMWARE_RENOIR_DMUB;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case CHIP_SIENNA_CICHLID:
+ dmub_asic = DMUB_ASIC_DCN30;
+ fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
+ break;
+ case CHIP_NAVY_FLOUNDER:
+ dmub_asic = DMUB_ASIC_DCN30;
+ fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+ break;
+#endif
default:
/* ASIC doesn't support DMUB. */
@@ -1237,10 +1257,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
region_params.vbios_size = adev->bios_size;
- region_params.fw_bss_data =
+ region_params.fw_bss_data = region_params.bss_data_size ?
adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
- le32_to_cpu(hdr->inst_const_bytes);
+ le32_to_cpu(hdr->inst_const_bytes) : NULL;
region_params.fw_inst_const =
adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
@@ -1314,15 +1334,11 @@ static int dm_sw_fini(void *handle)
adev->dm.dmub_srv = NULL;
}
- if (adev->dm.dmub_fw) {
- release_firmware(adev->dm.dmub_fw);
- adev->dm.dmub_fw = NULL;
- }
+ release_firmware(adev->dm.dmub_fw);
+ adev->dm.dmub_fw = NULL;
- if(adev->dm.fw_dmcu) {
- release_firmware(adev->dm.fw_dmcu);
- adev->dm.fw_dmcu = NULL;
- }
+ release_firmware(adev->dm.fw_dmcu);
+ adev->dm.fw_dmcu = NULL;
return 0;
}
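Note on the hunk above: release_firmware() treats a NULL pointer as a no-op, so dropping the explicit checks changes nothing functionally. A generic sketch of the idiom (the firmware name and device pointer are placeholders, not taken from this patch):

    const struct firmware *fw = NULL;

    /* request_firmware() leaves fw NULL on failure... */
    if (request_firmware(&fw, "amdgpu/example_fw.bin", dev) == 0) {
            /* ...consume fw->data / fw->size here... */
    }

    /* ...and release_firmware() is safe to call either way. */
    release_firmware(fw);
    fw = NULL;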
@@ -1365,7 +1381,7 @@ static int dm_late_init(void *handle)
unsigned int linear_lut[16];
int i;
struct dmcu *dmcu = NULL;
- bool ret;
+ bool ret = true;
if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
return detect_mst_link_for_all_connectors(adev->ddev);
@@ -1386,7 +1402,14 @@ static int dm_late_init(void *handle)
*/
params.min_abm_backlight = 0x28F;
- ret = dmcu_load_iram(dmcu, params);
+ /* In the case where abm is implemented on dmcub,
+ * dmcu object will be null.
+ * ABM 2.4 and up are implemented on dmcub.
+ */
+ if (dmcu)
+ ret = dmcu_load_iram(dmcu, params);
+ else if (adev->dm.dc->ctx->dmub_srv)
+ ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);
if (!ret)
return -EINVAL;
@@ -1475,23 +1498,12 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
return 0;
}
- mutex_lock(&smu->mutex);
-
- /* pass data to smu controller */
- if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
- !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
- ret = smu_write_watermarks_table(smu);
-
- if (ret) {
- mutex_unlock(&smu->mutex);
- DRM_ERROR("Failed to update WMTABLE!\n");
- return ret;
- }
- smu->watermarks_bitmap |= WATERMARKS_LOADED;
+ ret = smu_write_watermarks_table(smu);
+ if (ret) {
+ DRM_ERROR("Failed to update WMTABLE!\n");
+ return ret;
}
- mutex_unlock(&smu->mutex);
-
return 0;
}
@@ -1582,7 +1594,7 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
}
-enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
+static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
struct dc_state *context = NULL;
enum dc_status res = DC_ERROR_UNEXPECTED;
@@ -2707,7 +2719,7 @@ static int dm_atomic_get_state(struct drm_atomic_state *state,
return 0;
}
-struct dm_atomic_state *
+static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -2725,7 +2737,7 @@ dm_atomic_get_new_state(struct drm_atomic_state *state)
return NULL;
}
-struct dm_atomic_state *
+static struct dm_atomic_state *
dm_atomic_get_old_state(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -3219,6 +3231,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_RENOIR:
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+#endif
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
@@ -3373,6 +3389,10 @@ static int dm_early_init(void *handle)
#endif
case CHIP_NAVI10:
case CHIP_NAVI12:
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+#endif
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
@@ -3693,6 +3713,10 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
adev->asic_type == CHIP_NAVI10 ||
adev->asic_type == CHIP_NAVI14 ||
adev->asic_type == CHIP_NAVI12 ||
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ adev->asic_type == CHIP_SIENNA_CICHLID ||
+ adev->asic_type == CHIP_NAVY_FLOUNDER ||
+#endif
adev->asic_type == CHIP_RENOIR ||
adev->asic_type == CHIP_RAVEN) {
/* Fill GFX9 params */
@@ -3712,6 +3736,11 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
tiling_info->gfx9.shaderEnable = 1;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ if (adev->asic_type == CHIP_SIENNA_CICHLID ||
+ adev->asic_type == CHIP_NAVY_FLOUNDER)
+ tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
+#endif
ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
plane_size, tiling_info,
tiling_flags, dcc, address,
@@ -4518,7 +4547,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
#if defined(CONFIG_DRM_AMD_DC_DCN)
dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
- aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
&dsc_caps);
#endif
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
@@ -4547,26 +4576,20 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
- if (stream->link->psr_settings.psr_feature_enabled) {
- struct dc *core_dc = stream->link->ctx->dc;
-
- if (dc_is_dmcu_initialized(core_dc)) {
- //
- // should decide stream support vsc sdp colorimetry capability
- // before building vsc info packet
- //
- stream->use_vsc_sdp_for_colorimetry = false;
- if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- stream->use_vsc_sdp_for_colorimetry =
- aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
- } else {
- if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
- stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
- stream->use_vsc_sdp_for_colorimetry = true;
- }
- }
- mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
+ if (stream->link->psr_settings.psr_feature_enabled) {
+ //
+ // should decide stream support vsc sdp colorimetry capability
+ // before building vsc info packet
+ //
+ stream->use_vsc_sdp_for_colorimetry = false;
+ if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ stream->use_vsc_sdp_for_colorimetry =
+ aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
+ } else {
+ if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
+ stream->use_vsc_sdp_for_colorimetry = true;
}
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
}
finish:
dc_sink_release(sink);
@@ -4607,9 +4630,7 @@ static void dm_crtc_reset_state(struct drm_crtc *crtc)
if (WARN_ON(!state))
return;
- crtc->state = &state->base;
- crtc->state->crtc = crtc;
-
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
}
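The open-coded reset removed above did the CRTC/state linking by hand; __drm_atomic_helper_crtc_reset() performs the same linking while leaving any additional core bookkeeping to DRM. Sketch of the equivalence (illustrative only):

    /* Before (open-coded): */
    crtc->state = &state->base;
    crtc->state->crtc = crtc;

    /* After (core helper does the linking): */
    __drm_atomic_helper_crtc_reset(crtc, &state->base);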
static struct drm_crtc_state *
@@ -4634,7 +4655,6 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
}
state->active_planes = cur->active_planes;
- state->interrupts_enabled = cur->interrupts_enabled;
state->vrr_params = cur->vrr_params;
state->vrr_infopacket = cur->vrr_infopacket;
state->abm_level = cur->abm_level;
@@ -5054,11 +5074,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
dc_result = dc_validate_stream(adev->dm.dc, stream);
if (dc_result != DC_OK) {
- DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
+ DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
drm_mode->hdisplay,
drm_mode->vdisplay,
drm_mode->clock,
- dc_result);
+ dc_result,
+ dc_status_to_str(dc_result));
dc_stream_release(stream);
stream = NULL;
@@ -5294,29 +5315,19 @@ static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
return num_active;
}
-/*
- * Sets whether interrupts should be enabled on a specific CRTC.
- * We require that the stream be enabled and that there exist active
- * DC planes on the stream.
- */
-static void
-dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
- struct drm_crtc_state *new_crtc_state)
+static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state)
{
struct dm_crtc_state *dm_new_crtc_state =
to_dm_crtc_state(new_crtc_state);
dm_new_crtc_state->active_planes = 0;
- dm_new_crtc_state->interrupts_enabled = false;
if (!dm_new_crtc_state->stream)
return;
dm_new_crtc_state->active_planes =
count_crtc_active_planes(new_crtc_state);
-
- dm_new_crtc_state->interrupts_enabled =
- dm_new_crtc_state->active_planes > 0;
}
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
@@ -5327,13 +5338,7 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
int ret = -EINVAL;
- /*
- * Update interrupt state for the CRTC. This needs to happen whenever
- * the CRTC has changed or whenever any of its planes have changed.
- * Atomic check satisfies both of these requirements since the CRTC
- * is added to the state by DRM during drm_atomic_helper_check_planes.
- */
- dm_update_crtc_interrupt_state(crtc, state);
+ dm_update_crtc_active_planes(crtc, state);
if (unlikely(!dm_crtc_state->stream &&
modeset_required(state, NULL, dm_crtc_state->stream))) {
@@ -5439,7 +5444,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
mst_mgr,
mst_port,
dm_new_connector_state->pbn,
- 0);
+ dm_mst_get_pbn_divider(aconnector->dc_link));
if (dm_new_connector_state->vcpi_slots < 0) {
DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
return dm_new_connector_state->vcpi_slots;
@@ -5551,7 +5556,7 @@ dm_drm_plane_duplicate_state(struct drm_plane *plane)
return &dm_plane_state->base;
}
-void dm_drm_plane_destroy_state(struct drm_plane *plane,
+static void dm_drm_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
@@ -5680,6 +5685,17 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
amdgpu_bo_unref(&rbo);
}
+static int dm_plane_helper_check_state(struct drm_plane_state *state,
+ struct drm_crtc_state *new_crtc_state)
+{
+ int max_downscale = 0;
+ int max_upscale = INT_MAX;
+
+ /* TODO: These should be checked against DC plane caps */
+ return drm_atomic_helper_check_plane_state(
+ state, new_crtc_state, max_downscale, max_upscale, true, true);
+}
+
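For context on the helper above: the third and fourth arguments of drm_atomic_helper_check_plane_state() are the minimum and maximum src:dst scaling factors in 16.16 fixed point, so the 0/INT_MAX pair effectively disables both bounds until the TODO about DC plane caps is addressed. A brief sketch contrasting the two extremes (DRM_PLANE_HELPER_NO_SCALING is the fixed-point encoding of 1.0):

    /* As used above: scaling is completely unrestricted. */
    drm_atomic_helper_check_plane_state(state, new_crtc_state,
                                        0, INT_MAX, true, true);

    /* For comparison, a 1:1-only plane would pin both bounds to 1.0. */
    drm_atomic_helper_check_plane_state(state, new_crtc_state,
                                        DRM_PLANE_HELPER_NO_SCALING,
                                        DRM_PLANE_HELPER_NO_SCALING,
                                        true, true);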
static int dm_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -5687,6 +5703,7 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
struct dc *dc = adev->dm.dc;
struct dm_plane_state *dm_plane_state;
struct dc_scaling_info scaling_info;
+ struct drm_crtc_state *new_crtc_state;
int ret;
dm_plane_state = to_dm_plane_state(state);
@@ -5694,6 +5711,15 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
if (!dm_plane_state->dc_state)
return 0;
+ new_crtc_state =
+ drm_atomic_get_new_crtc_state(state->state, state->crtc);
+ if (!new_crtc_state)
+ return -EINVAL;
+
+ ret = dm_plane_helper_check_state(state, new_crtc_state);
+ if (ret)
+ return ret;
+
ret = fill_dc_scaling_info(state, &scaling_info);
if (ret)
return ret;
@@ -5837,6 +5863,7 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
uint32_t formats[32];
int num_formats;
int res = -EPERM;
+ unsigned int supported_rotations;
num_formats = get_plane_formats(plane, plane_cap, formats,
ARRAY_SIZE(formats));
@@ -5871,6 +5898,13 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
}
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
+
+ drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
+ supported_rotations);
+
drm_plane_helper_add(plane, &dm_plane_helper_funcs);
/* Create (reset) the plane state */
@@ -6202,7 +6236,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
if (connector_type == DRM_MODE_CONNECTOR_eDP &&
- dc_is_dmcu_initialized(adev->dm.dc)) {
+ (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
drm_object_attach_property(&aconnector->base.base,
adev->mode_info.abm_level_property, 0);
}
@@ -6415,8 +6449,10 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
bool enable)
{
/*
- * this is not correct translation but will work as soon as VBLANK
- * constant is the same as PFLIP
+ * We have no guarantee that the frontend index maps to the same
+ * backend index - some even map to more than one.
+ *
+ * TODO: Use a different interrupt or check DC itself for the mapping.
*/
int irq_type =
amdgpu_display_crtc_idx_to_irq_type(
@@ -6439,6 +6475,19 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
}
}
+static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc)
+{
+ int irq_type =
+ amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
+
+ /**
+ * This reads the current state for the IRQ and force reapplies
+ * the setting to hardware.
+ */
+ amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
+}
+
static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
const struct dm_connector_state *old_dm_state)
@@ -7023,7 +7072,16 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
usleep_range(1000, 1100);
}
- if (acrtc_attach->base.state->event) {
+ /**
+ * Prepare the flip event for the pageflip interrupt to handle.
+ *
+ * This only works in the case where we've already turned on the
+ * appropriate hardware blocks (eg. HUBP) so in the transition case
+ * from 0 -> n planes we have to skip a hardware generated event
+ * and rely on sending it from software.
+ */
+ if (acrtc_attach->base.state->event &&
+ acrtc_state->active_planes > 0) {
drm_crtc_vblank_get(pcrtc);
spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
@@ -7092,6 +7150,24 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
&bundle->stream_update,
dc_state);
+ /**
+ * Enable or disable the interrupts on the backend.
+ *
+ * Most pipes are put into power gating when unused.
+ *
+ * When power gating is enabled on a pipe we lose the
+ * interrupt enablement state when power gating is disabled.
+ *
+ * So we need to update the IRQ control state in hardware
+ * whenever the pipe turns on (since it could be previously
+ * power gated) or off (since some pipes can't be power gated
+ * on some ASICs).
+ */
+ if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
+ dm_update_pflip_irq_state(
+ (struct amdgpu_device *)dev->dev_private,
+ acrtc_attach);
+
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
@@ -7193,64 +7269,6 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
}
/*
- * Enable interrupts on CRTCs that are newly active, undergone
- * a modeset, or have active planes again.
- *
- * Done in two passes, based on the for_modeset flag:
- * Pass 1: For CRTCs going through modeset
- * Pass 2: For CRTCs going from 0 to n active planes
- *
- * Interrupts can only be enabled after the planes are programmed,
- * so this requires a two-pass approach since we don't want to
- * just defer the interrupts until after commit planes every time.
- */
-static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool for_modeset)
-{
- struct amdgpu_device *adev = dev->dev_private;
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- int i;
-#ifdef CONFIG_DEBUG_FS
- enum amdgpu_dm_pipe_crc_source source;
-#endif
-
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- struct dm_crtc_state *dm_new_crtc_state =
- to_dm_crtc_state(new_crtc_state);
- struct dm_crtc_state *dm_old_crtc_state =
- to_dm_crtc_state(old_crtc_state);
- bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
- bool run_pass;
-
- run_pass = (for_modeset && modeset) ||
- (!for_modeset && !modeset &&
- !dm_old_crtc_state->interrupts_enabled);
-
- if (!run_pass)
- continue;
-
- if (!dm_new_crtc_state->interrupts_enabled)
- continue;
-
- manage_dm_interrupts(adev, acrtc, true);
-
-#ifdef CONFIG_DEBUG_FS
- /* The stream has changed so CRC capture needs to re-enabled. */
- source = dm_new_crtc_state->crc_src;
- if (amdgpu_dm_is_valid_crc_source(source)) {
- amdgpu_dm_crtc_configure_crc_source(
- crtc, dm_new_crtc_state,
- dm_new_crtc_state->crc_src);
- }
-#endif
- }
-}
-
-/*
* amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
* @crtc_state: the DRM CRTC state
* @stream_state: the DC stream state.
@@ -7289,12 +7307,10 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
* in atomic check.
*/
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
- struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- if (dm_old_crtc_state->interrupts_enabled &&
- (!dm_new_crtc_state->interrupts_enabled ||
+ if (old_crtc_state->active &&
+ (!new_crtc_state->active ||
drm_atomic_crtc_needs_modeset(new_crtc_state)))
manage_dm_interrupts(adev, acrtc, false);
}
@@ -7573,8 +7589,34 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state);
}
- /* Enable interrupts for CRTCs going through a modeset. */
- amdgpu_dm_enable_crtc_interrupts(dev, state, true);
+ /**
+ * Enable interrupts for CRTCs that are newly enabled or went through
+ * a modeset. It was intentionally deferred until after the front end
+ * state was modified to wait until the OTG was on and so the IRQ
+ * handlers didn't access stale or invalid state.
+ */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ if (new_crtc_state->active &&
+ (!old_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ manage_dm_interrupts(adev, acrtc, true);
+#ifdef CONFIG_DEBUG_FS
+ /**
+ * Frontend may have changed so reapply the CRC capture
+ * settings for the stream.
+ */
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
+ amdgpu_dm_crtc_configure_crc_source(
+ crtc, dm_new_crtc_state,
+ dm_new_crtc_state->crc_src);
+ }
+#endif
+ }
+ }
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
if (new_crtc_state->async_flip)
@@ -7589,9 +7631,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm, crtc, wait_for_vblank);
}
- /* Enable interrupts for CRTCs going from 0 to n active planes. */
- amdgpu_dm_enable_crtc_interrupts(dev, state, false);
-
/* Update audio instances for each connector. */
amdgpu_dm_commit_audio(dev, state);
@@ -8210,6 +8249,10 @@ static int dm_update_plane_state(struct dc *dc,
if (!needs_reset)
return 0;
+ ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
+ if (ret)
+ return ret;
+
WARN_ON(dm_new_plane_state->dc_state);
dc_new_plane_state = dc_create_plane_state(dc);
@@ -8429,7 +8472,7 @@ cleanup:
*out_type = update_type;
return ret;
}
-
+#if defined(CONFIG_DRM_AMD_DC_DCN)
static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
struct drm_connector *connector;
@@ -8452,6 +8495,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
}
+#endif
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
@@ -8492,7 +8536,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_plane_state *old_plane_state, *new_plane_state;
enum surface_update_type update_type = UPDATE_TYPE_FAST;
enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
-
+ enum dc_status status;
int ret, i;
/*
@@ -8505,6 +8549,30 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ /* Check connector changes */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+
+ /* Skip connectors that are disabled or part of modeset already. */
+ if (!old_con_state->crtc && !new_con_state->crtc)
+ continue;
+
+ if (!new_con_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
+ if (IS_ERR(new_crtc_state)) {
+ ret = PTR_ERR(new_crtc_state);
+ goto fail;
+ }
+
+ if (dm_old_con_state->abm_level !=
+ dm_new_con_state->abm_level)
+ new_crtc_state->connectors_changed = true;
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (adev->asic_type >= CHIP_NAVI10) {
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
@@ -8514,7 +8582,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
}
-
+#endif
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed &&
@@ -8704,8 +8772,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
ret = drm_dp_mst_atomic_check(state);
if (ret)
goto fail;
-
- if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
+ status = dc_validate_global_state(dc, dm_state->context, false);
+ if (status != DC_OK) {
+ DC_LOG_WARNING("DC global validation failure: %s (%d)",
+ dc_status_to_str(status), status);
ret = -EINVAL;
goto fail;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 648180ccdc2e..dd1559c743c2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -142,10 +142,12 @@ struct amdgpu_dm_backlight_caps {
* @backlight_link: Link on which to control backlight
* @backlight_caps: Capabilities of the backlight device
* @freesync_module: Module handling freesync calculations
+ * @hdcp_workqueue: AMDGPU content protection queue
* @fw_dmcu: Reference to DMCU firmware
* @dmcu_fw_version: Version of the DMCU firmware
* @soc_bounding_box: SOC bounding box values provided by gpu_info FW
* @cached_state: Caches device atomic state for suspend/resume
+ * @cached_dc_state: Cached state of content streams
* @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
*/
struct amdgpu_display_manager {
@@ -412,7 +414,6 @@ struct dm_crtc_state {
int update_type;
int active_planes;
- bool interrupts_enabled;
int crc_skip_count;
enum amdgpu_dm_pipe_crc_source crc_src;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index 4dfb6b55bb2e..b321ff654df4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -195,10 +195,13 @@ static int __set_legacy_tf(struct dc_transfer_func *func,
bool has_rom)
{
struct dc_gamma *gamma = NULL;
+ struct calculate_buffer cal_buffer = {0};
bool res;
ASSERT(lut && lut_size == MAX_COLOR_LEGACY_LUT_ENTRIES);
+ cal_buffer.buffer_index = -1;
+
gamma = dc_create_gamma();
if (!gamma)
return -ENOMEM;
@@ -208,7 +211,7 @@ static int __set_legacy_tf(struct dc_transfer_func *func,
__drm_lut_to_dc_gamma(lut, gamma, true);
res = mod_color_calculate_regamma_params(func, gamma, true, has_rom,
- NULL);
+ NULL, &cal_buffer);
dc_gamma_release(&gamma);
@@ -221,10 +224,13 @@ static int __set_output_tf(struct dc_transfer_func *func,
bool has_rom)
{
struct dc_gamma *gamma = NULL;
+ struct calculate_buffer cal_buffer = {0};
bool res;
ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES);
+ cal_buffer.buffer_index = -1;
+
gamma = dc_create_gamma();
if (!gamma)
return -ENOMEM;
@@ -248,7 +254,7 @@ static int __set_output_tf(struct dc_transfer_func *func,
*/
gamma->type = GAMMA_CS_TFM_1D;
res = mod_color_calculate_regamma_params(func, gamma, false,
- has_rom, NULL);
+ has_rom, NULL, &cal_buffer);
}
dc_gamma_release(&gamma);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 1d692f4f42f3..e5a6d9115949 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -33,6 +33,9 @@
#include "amdgpu_dm_debugfs.h"
#include "dm_helpers.h"
#include "dmub/dmub_srv.h"
+#include "resource.h"
+#include "dsc.h"
+#include "dc_link_dp.h"
struct dmub_debugfs_trace_header {
uint32_t entry_count;
@@ -46,6 +49,89 @@ struct dmub_debugfs_trace_entry {
uint32_t param1;
};
+
+/* parse_write_buffer_into_params - Helper function to parse debugfs write buffer into an array
+ *
+ * Function takes in attributes passed to debugfs write entry
+ * and writes into param array.
+ * The user passes max_param_num to identify maximum number of
+ * parameters that could be parsed.
+ *
+ */
+static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
+ long *param, const char __user *buf,
+ int max_param_num,
+ uint8_t *param_nums)
+{
+ char *wr_buf_ptr = NULL;
+ uint32_t wr_buf_count = 0;
+ int r;
+ char *sub_str = NULL;
+ const char delimiter[3] = {' ', '\n', '\0'};
+ uint8_t param_index = 0;
+
+ *param_nums = 0;
+
+ wr_buf_ptr = wr_buf;
+
+ r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
+
+ /* r is bytes not be copied */
+ if (r >= wr_buf_size) {
+ DRM_DEBUG_DRIVER("user data not be read\n");
+ return -EINVAL;
+ }
+
+ /* check number of parameters. isspace could not differ space and \n */
+ while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) {
+ /* skip space*/
+ while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ if (wr_buf_count == wr_buf_size)
+ break;
+
+ /* skip non-space*/
+ while ((!isspace(*wr_buf_ptr)) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ (*param_nums)++;
+
+ if (wr_buf_count == wr_buf_size)
+ break;
+ }
+
+ if (*param_nums > max_param_num)
+ *param_nums = max_param_num;
+;
+
+ wr_buf_ptr = wr_buf; /* reset buf pointer */
+ wr_buf_count = 0; /* number of char already checked */
+
+ while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ while (param_index < *param_nums) {
+ /* after strsep, wr_buf_ptr will be moved to after space */
+ sub_str = strsep(&wr_buf_ptr, delimiter);
+
+ r = kstrtol(sub_str, 16, &(param[param_index]));
+
+ if (r)
+ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+ param_index++;
+ }
+
+ return 0;
+}
+
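A condensed sketch of how the rewritten debugfs write handlers below consume this helper, mirroring dp_link_settings_write() (buffer size and parameter count vary per handler):

    char *wr_buf;
    const uint32_t wr_buf_size = 40;
    long param[2];
    uint8_t param_nums = 0;

    wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
    if (!wr_buf)
            return -ENOSPC;

    if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
                                       param, buf, 2, &param_nums) ||
        param_nums <= 0) {
            kfree(wr_buf);
            return -EINVAL;
    }

    /* param[0], param[1], ... now hold the hex values the user wrote. */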
/* function description
* get/ set DP configuration: lane_count, link_rate, spread_spectrum
*
@@ -161,15 +247,11 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
struct dc *dc = (struct dc *)link->dc;
struct dc_link_settings prefer_link_settings;
char *wr_buf = NULL;
- char *wr_buf_ptr = NULL;
const uint32_t wr_buf_size = 40;
- int r;
- int bytes_from_user;
- char *sub_str;
/* 0: lane_count; 1: link_rate */
- uint8_t param_index = 0;
+ int max_param_num = 2;
+ uint8_t param_nums = 0;
long param[2];
- const char delimiter[3] = {' ', '\n', '\0'};
bool valid_input = false;
if (size == 0)
@@ -177,35 +259,20 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
if (!wr_buf)
- return -EINVAL;
- wr_buf_ptr = wr_buf;
-
- r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
+ return -ENOSPC;
- /* r is bytes not be copied */
- if (r >= wr_buf_size) {
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
kfree(wr_buf);
- DRM_DEBUG_DRIVER("user data not read\n");
return -EINVAL;
}
- bytes_from_user = wr_buf_size - r;
-
- while (isspace(*wr_buf_ptr))
- wr_buf_ptr++;
-
- while ((*wr_buf_ptr != '\0') && (param_index < 2)) {
-
- sub_str = strsep(&wr_buf_ptr, delimiter);
-
- r = kstrtol(sub_str, 16, &param[param_index]);
-
- if (r)
- DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
-
- param_index++;
- while (isspace(*wr_buf_ptr))
- wr_buf_ptr++;
+ if (param_nums <= 0) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data not be read\n");
+ return -EINVAL;
}
switch (param[0]) {
@@ -233,7 +300,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
if (!valid_input) {
kfree(wr_buf);
DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n");
- return bytes_from_user;
+ return size;
}
/* save user force lane_count, link_rate to preferred settings
@@ -246,7 +313,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
kfree(wr_buf);
- return bytes_from_user;
+ return size;
}
/* function: get current DP PHY settings: voltage swing, pre-emphasis,
@@ -337,51 +404,34 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
struct dc_link *link = connector->dc_link;
struct dc *dc = (struct dc *)link->dc;
char *wr_buf = NULL;
- char *wr_buf_ptr = NULL;
uint32_t wr_buf_size = 40;
- int r;
- int bytes_from_user;
- char *sub_str;
- uint8_t param_index = 0;
long param[3];
- const char delimiter[3] = {' ', '\n', '\0'};
bool use_prefer_link_setting;
struct link_training_settings link_lane_settings;
+ int max_param_num = 3;
+ uint8_t param_nums = 0;
+ int r = 0;
+
if (size == 0)
- return 0;
+ return -EINVAL;
wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
if (!wr_buf)
- return 0;
- wr_buf_ptr = wr_buf;
-
- r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
+ return -ENOSPC;
- /* r is bytes not be copied */
- if (r >= wr_buf_size) {
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
kfree(wr_buf);
- DRM_DEBUG_DRIVER("user data not be read\n");
- return 0;
+ return -EINVAL;
}
- bytes_from_user = wr_buf_size - r;
-
- while (isspace(*wr_buf_ptr))
- wr_buf_ptr++;
-
- while ((*wr_buf_ptr != '\0') && (param_index < 3)) {
-
- sub_str = strsep(&wr_buf_ptr, delimiter);
-
- r = kstrtol(sub_str, 16, &param[param_index]);
-
- if (r)
- DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
-
- param_index++;
- while (isspace(*wr_buf_ptr))
- wr_buf_ptr++;
+ if (param_nums <= 0) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data not be read\n");
+ return -EINVAL;
}
if ((param[0] > VOLTAGE_SWING_MAX_LEVEL) ||
@@ -389,7 +439,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
(param[2] > POST_CURSOR2_MAX_LEVEL)) {
kfree(wr_buf);
DRM_DEBUG_DRIVER("Invalid Input No HW will be programmed\n");
- return bytes_from_user;
+ return size;
}
/* get link settings: lane count, link rate */
@@ -429,7 +479,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
dc_link_set_drive_settings(dc, &link_lane_settings, link);
kfree(wr_buf);
- return bytes_from_user;
+ return size;
}
/* function description
@@ -496,19 +546,13 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
struct dc_link *link = connector->dc_link;
char *wr_buf = NULL;
- char *wr_buf_ptr = NULL;
uint32_t wr_buf_size = 100;
- uint32_t wr_buf_count = 0;
- int r;
- int bytes_from_user;
- char *sub_str = NULL;
- uint8_t param_index = 0;
- uint8_t param_nums = 0;
long param[11] = {0x0};
- const char delimiter[3] = {' ', '\n', '\0'};
+ int max_param_num = 11;
enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
bool disable_hpd = false;
bool valid_test_pattern = false;
+ uint8_t param_nums = 0;
/* init with defalut 80bit custom pattern */
uint8_t custom_pattern[10] = {
0x1f, 0x7c, 0xf0, 0xc1, 0x07,
@@ -522,70 +566,26 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
int i;
if (size == 0)
- return 0;
+ return -EINVAL;
wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
if (!wr_buf)
- return 0;
- wr_buf_ptr = wr_buf;
+ return -ENOSPC;
- r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
-
- /* r is bytes not be copied */
- if (r >= wr_buf_size) {
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
kfree(wr_buf);
- DRM_DEBUG_DRIVER("user data not be read\n");
- return 0;
- }
-
- bytes_from_user = wr_buf_size - r;
-
- /* check number of parameters. isspace could not differ space and \n */
- while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) {
- /* skip space*/
- while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
- wr_buf_ptr++;
- wr_buf_count++;
- }
-
- if (wr_buf_count == wr_buf_size)
- break;
-
- /* skip non-space*/
- while ((!isspace(*wr_buf_ptr)) && (wr_buf_count < wr_buf_size)) {
- wr_buf_ptr++;
- wr_buf_count++;
- }
-
- param_nums++;
-
- if (wr_buf_count == wr_buf_size)
- break;
+ return -EINVAL;
}
- /* max 11 parameters */
- if (param_nums > 11)
- param_nums = 11;
-
- wr_buf_ptr = wr_buf; /* reset buf pinter */
- wr_buf_count = 0; /* number of char already checked */
-
- while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
- wr_buf_ptr++;
- wr_buf_count++;
+ if (param_nums <= 0) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data not be read\n");
+ return -EINVAL;
}
- while (param_index < param_nums) {
- /* after strsep, wr_buf_ptr will be moved to after space */
- sub_str = strsep(&wr_buf_ptr, delimiter);
-
- r = kstrtol(sub_str, 16, &param[param_index]);
-
- if (r)
- DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
-
- param_index++;
- }
test_pattern = param[0];
@@ -618,7 +618,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
if (!valid_test_pattern) {
kfree(wr_buf);
DRM_DEBUG_DRIVER("Invalid Test Pattern Parameters\n");
- return bytes_from_user;
+ return size;
}
if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
@@ -685,7 +685,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
kfree(wr_buf);
- return bytes_from_user;
+ return size;
}
/**
@@ -820,24 +820,6 @@ unlock:
return res;
}
-/*
- * Returns the min and max vrr vfreq through the connector's debugfs file.
- * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range
- */
-static int vrr_range_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
- seq_printf(m, "Min: %u\n", (unsigned int)aconnector->min_vfreq);
- seq_printf(m, "Max: %u\n", (unsigned int)aconnector->max_vfreq);
-
- return 0;
-}
-
#ifdef CONFIG_DRM_AMD_DC_HDCP
/*
* Returns the HDCP capability of the Display (1.4 for now).
@@ -859,8 +841,8 @@ static int hdcp_sink_capability_show(struct seq_file *m, void *data)
seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id);
- hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link);
- hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link);
+ hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link, aconnector->dc_sink->sink_signal);
+ hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link, aconnector->dc_sink->sink_signal);
if (hdcp_cap)
@@ -998,14 +980,517 @@ static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
return read_size - r;
}
+static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state dsc_state = {0};
+ const uint32_t rd_buf_size = 10;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+ int i, r, str_len = 30;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx)
+ return -ENXIO;
+
+ dsc = pipe_ctx->stream_res.dsc;
+ if (dsc)
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ snprintf(rd_buf_ptr, str_len,
+ "%d\n",
+ dsc_state.dsc_clock_en);
+ rd_buf_ptr += str_len;
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r)
+ return r; /* r = -EFAULT */
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
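The seven dp_dsc_*_read() callbacks that follow are identical apart from the dcn_dsc_state field they print. A hypothetical way to factor out the shared pipe lookup (the helper name is invented for illustration and is not part of this patch; returning NULL also makes the not-found case explicit, which the open-coded loops only approximate):

    /* Hypothetical helper: find the pipe_ctx driving this connector's link. */
    static struct pipe_ctx *dp_get_pipe_ctx(struct amdgpu_dm_connector *aconnector)
    {
            struct dc *dc = aconnector->dc_link->dc;
            int i;

            for (i = 0; i < MAX_PIPES; i++) {
                    struct pipe_ctx *pipe_ctx =
                            &dc->current_state->res_ctx.pipe_ctx[i];

                    if (pipe_ctx->stream &&
                        pipe_ctx->stream->link == aconnector->dc_link)
                            return pipe_ctx;
            }

            return NULL;
    }

Each read callback would then reduce to the lookup, an optional dsc_read_state() call, and the snprintf()/put_user() copy-out loop.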
+static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state dsc_state = {0};
+ const uint32_t rd_buf_size = 100;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+ int i, r, str_len = 30;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx)
+ return -ENXIO;
+
+ dsc = pipe_ctx->stream_res.dsc;
+ if (dsc)
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ snprintf(rd_buf_ptr, str_len,
+ "%d\n",
+ dsc_state.dsc_slice_width);
+ rd_buf_ptr += str_len;
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r)
+ return r; /* r = -EFAULT */
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state dsc_state = {0};
+ const uint32_t rd_buf_size = 100;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+ int i, r, str_len = 30;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx)
+ return -ENXIO;
+
+ dsc = pipe_ctx->stream_res.dsc;
+ if (dsc)
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ snprintf(rd_buf_ptr, str_len,
+ "%d\n",
+ dsc_state.dsc_slice_height);
+ rd_buf_ptr += str_len;
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r)
+ return r; /* r = -EFAULT */
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state dsc_state = {0};
+ const uint32_t rd_buf_size = 100;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+ int i, r, str_len = 30;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx)
+ return -ENXIO;
+
+ dsc = pipe_ctx->stream_res.dsc;
+ if (dsc)
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ snprintf(rd_buf_ptr, str_len,
+ "%d\n",
+ dsc_state.dsc_bits_per_pixel);
+ rd_buf_ptr += str_len;
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r)
+ return r; /* r = -EFAULT */
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state dsc_state = {0};
+ const uint32_t rd_buf_size = 100;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+ int i, r, str_len = 30;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx)
+ return -ENXIO;
+
+ dsc = pipe_ctx->stream_res.dsc;
+ if (dsc)
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ snprintf(rd_buf_ptr, str_len,
+ "%d\n",
+ dsc_state.dsc_pic_width);
+ rd_buf_ptr += str_len;
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r)
+ return r; /* r = -EFAULT */
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state dsc_state = {0};
+ const uint32_t rd_buf_size = 100;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+ int i, r, str_len = 30;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx)
+ return -ENXIO;
+
+ dsc = pipe_ctx->stream_res.dsc;
+ if (dsc)
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ snprintf(rd_buf_ptr, str_len,
+ "%d\n",
+ dsc_state.dsc_pic_height);
+ rd_buf_ptr += str_len;
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r)
+ return r; /* r = -EFAULT */
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state dsc_state = {0};
+ const uint32_t rd_buf_size = 100;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+ int i, r, str_len = 30;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx)
+ return -ENXIO;
+
+ dsc = pipe_ctx->stream_res.dsc;
+ if (dsc)
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ snprintf(rd_buf_ptr, str_len,
+ "%d\n",
+ dsc_state.dsc_chunk_size);
+ rd_buf_ptr += str_len;
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r)
+ return r; /* r = -EFAULT */
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
+static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *rd_buf = NULL;
+ char *rd_buf_ptr = NULL;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state dsc_state = {0};
+ const uint32_t rd_buf_size = 100;
+ struct pipe_ctx *pipe_ctx;
+ ssize_t result = 0;
+ int i, r, str_len = 30;
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!rd_buf)
+ return -ENOMEM;
+
+ rd_buf_ptr = rd_buf;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream &&
+ pipe_ctx->stream->link == aconnector->dc_link)
+ break;
+ }
+
+ if (!pipe_ctx)
+ return -ENXIO;
+
+ dsc = pipe_ctx->stream_res.dsc;
+ if (dsc)
+ dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+ snprintf(rd_buf_ptr, str_len,
+ "%d\n",
+ dsc_state.dsc_slice_bpg_offset);
+ rd_buf_ptr += str_len;
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+
+ r = put_user(*(rd_buf + result), buf);
+ if (r)
+ return r; /* r = -EFAULT */
+
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
DEFINE_SHOW_ATTRIBUTE(output_bpc);
-DEFINE_SHOW_ATTRIBUTE(vrr_range);
#ifdef CONFIG_DRM_AMD_DC_HDCP
DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
#endif
+static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dsc_clock_en_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_slice_width_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dsc_slice_width_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_slice_height_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dsc_slice_height_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_bits_per_pixel_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dsc_bits_per_pixel_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_pic_width_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dsc_pic_width_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_pic_height_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dsc_pic_height_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_chunk_size_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dsc_chunk_size_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dsc_slice_bpg_offset_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dsc_slice_bpg_offset_read,
+ .llseek = default_llseek
+};
+
static const struct file_operations dp_link_settings_debugfs_fops = {
.owner = THIS_MODULE,
.read = dp_link_settings_read,
@@ -1058,14 +1543,21 @@ static const struct {
{"link_settings", &dp_link_settings_debugfs_fops},
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"test_pattern", &dp_phy_test_pattern_fops},
- {"vrr_range", &vrr_range_fops},
#ifdef CONFIG_DRM_AMD_DC_HDCP
{"hdcp_sink_capability", &hdcp_sink_capability_fops},
#endif
{"sdp_message", &sdp_message_fops},
{"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
{"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
- {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
+ {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops},
+ {"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops},
+ {"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops},
+ {"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops},
+ {"dsc_bits_per_pixel", &dp_dsc_bits_per_pixel_debugfs_fops},
+ {"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops},
+ {"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops},
+ {"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops},
+ {"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops}
};
#ifdef CONFIG_DRM_AMD_DC_HDCP
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 949d10ef8304..694c5bc93665 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -390,6 +390,42 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
kfree(hdcp_work);
}
+
+static bool enable_assr(void *handle, struct dc_link *link)
+{
+
+ struct hdcp_workqueue *hdcp_work = handle;
+ struct mod_hdcp hdcp = hdcp_work->hdcp;
+ struct psp_context *psp = hdcp.config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ bool res = true;
+
+ if (!psp->dtm_context.dtm_initialized) {
+ DRM_INFO("Failed to enable ASSR, DTM TA is not initialized.");
+ return false;
+ }
+
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+
+ mutex_lock(&psp->dtm_context.mutex);
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;
+ dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = link->link_enc_hw_inst;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+ DRM_INFO("Failed to enable ASSR");
+ res = false;
+ }
+
+ mutex_unlock(&psp->dtm_context.mutex);
+
+ return res;
+}
+
static void update_config(void *handle, struct cp_psp_stream_config *config)
{
struct hdcp_workqueue *hdcp_work = handle;
@@ -601,6 +637,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
}
cp_psp->funcs.update_stream_config = update_config;
+ cp_psp->funcs.enable_assr = enable_assr;
cp_psp->handle = hdcp_work;
/* File created at /sys/class/drm/card0/device/hdcp_srm*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index b086d5c906e0..d839eb14e3f0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -627,3 +627,23 @@ void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
/* TODO: something */
}
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+
+void *dm_helpers_allocate_gpu_mem(
+ struct dc_context *ctx,
+ enum dc_gpu_mem_alloc_type type,
+ size_t size,
+ long long *addr)
+{
+ // TODO
+ return NULL;
+}
+
+void dm_helpers_free_gpu_mem(
+ struct dc_context *ctx,
+ enum dc_gpu_mem_alloc_type type,
+ void *pvMem)
+{
+ // TODO
+}
+#endif
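The two helpers above are intentionally left as TODO stubs in this patch. Purely as an illustration of one possible shape (it assumes ctx->driver_context points at the amdgpu_device, always allocates from GTT, and omits the bookkeeping needed to free the buffer later; the implementation that eventually fills these in may differ):

    void *dm_helpers_allocate_gpu_mem(
            struct dc_context *ctx,
            enum dc_gpu_mem_alloc_type type,
            size_t size,
            long long *addr)
    {
            struct amdgpu_device *adev = ctx->driver_context;
            struct amdgpu_bo *bo = NULL;
            void *cpu_addr = NULL;
            u64 gpu_addr = 0;

            /* Domain selection based on 'type' is skipped; GTT is assumed. */
            if (amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
                                        AMDGPU_GEM_DOMAIN_GTT,
                                        &bo, &gpu_addr, &cpu_addr))
                    return NULL;

            *addr = gpu_addr;

            /* The bo handle must be kept somewhere so that
             * dm_helpers_free_gpu_mem() can hand it back to
             * amdgpu_bo_free_kernel(); that bookkeeping is omitted here.
             */
            return cpu_addr;
    }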
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index e5ecc5affa1e..e85b58f0f416 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -521,6 +521,8 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
+ pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link);
+
for (i = 0; i < count; i++) {
if (vars[i].dsc_enabled) {
initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
@@ -532,9 +534,6 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
}
}
- pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link,
- dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54);
-
while (remaining_to_increase) {
next_index = -1;
min_initial_slack = -1;
@@ -563,7 +562,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ pbn_per_timeslot) < 0)
return;
if (!drm_dp_mst_atomic_check(state)) {
vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
@@ -573,7 +572,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ pbn_per_timeslot) < 0)
return;
}
} else {
@@ -582,7 +581,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ pbn_per_timeslot) < 0)
return;
if (!drm_dp_mst_atomic_check(state)) {
vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
@@ -592,7 +591,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ pbn_per_timeslot) < 0)
return;
}
}
@@ -646,7 +645,7 @@ static void try_disable_dsc(struct drm_atomic_state *state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn,
- 0) < 0)
+ dm_mst_get_pbn_divider(dc_link)) < 0)
return;
if (!drm_dp_mst_atomic_check(state)) {
@@ -717,7 +716,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[i].port->mgr,
params[i].port,
vars[i].pbn,
- 0) < 0)
+ dm_mst_get_pbn_divider(dc_link)) < 0)
return false;
}
if (!drm_dp_mst_atomic_check(state)) {
@@ -745,7 +744,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[i].port->mgr,
params[i].port,
vars[i].pbn,
- 0) < 0)
+ dm_mst_get_pbn_divider(dc_link)) < 0)
return false;
}
}
@@ -807,7 +806,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
stream = dc_state->streams[i];
if (stream->timing.flags.DSC == 1)
- dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream);
+ dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
}
return true;
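The recurring change in this file is that every drm_dp_atomic_find_vcpi_slots() call now receives a real PBN-per-timeslot divider instead of 0, and increase_dsc_bpp() computes it once per link rather than on every iteration. An indicative sketch of what that divider boils down to (written here as an assumption about dm_mst_get_pbn_divider(), not copied from this patch):

    /* PBN per MST timeslot, derived from the link's trained bandwidth. */
    static int example_pbn_divider(struct dc_link *link)
    {
            if (!link)
                    return 0;

            return dc_link_bandwidth_kbps(link, dc_link_get_link_cap(link)) /
                   (8 * 1000 * 54);
    }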
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index a2e1a73f66b8..c5f2216e59c4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -106,7 +106,7 @@ bool dm_pp_apply_display_requirements(
adev->powerplay.pp_funcs->display_configuration_change(
adev->powerplay.pp_handle,
&adev->pm.pm_display_cfg);
- else
+ else if (adev->smu.ppt_funcs)
smu_display_configuration_change(smu,
&adev->pm.pm_display_cfg);
@@ -530,6 +530,8 @@ bool dm_pp_get_static_clocks(
&pp_clk_info);
else if (adev->smu.ppt_funcs)
ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
+ else
+ return false;
if (ret)
return false;
@@ -590,7 +592,7 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
&wm_with_clock_ranges);
- else
+ else if (adev->smu.ppt_funcs)
smu_set_watermarks_for_clock_ranges(&adev->smu,
&wm_with_clock_ranges);
}
@@ -660,7 +662,7 @@ void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
}
-enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
+static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges)
{
const struct dc_context *ctx = pp->dm;
@@ -728,7 +730,7 @@ enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp)
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
+static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
@@ -744,7 +746,8 @@ enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
+static enum pp_smu_status
+pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
@@ -760,7 +763,7 @@ enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
+static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
struct pp_smu *pp, int mhz)
{
const struct dc_context *ctx = pp->dm;
@@ -783,7 +786,8 @@ enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
+static enum pp_smu_status
+pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
@@ -805,7 +809,7 @@ enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_pstate_handshake_support(
+static enum pp_smu_status pp_nv_set_pstate_handshake_support(
struct pp_smu *pp, BOOLEAN pstate_handshake_supported)
{
const struct dc_context *ctx = pp->dm;
@@ -818,7 +822,7 @@ enum pp_smu_status pp_nv_set_pstate_handshake_support(
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
+static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
enum pp_smu_nv_clock_id clock_id, int mhz)
{
const struct dc_context *ctx = pp->dm;
@@ -853,7 +857,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
+static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
{
const struct dc_context *ctx = pp->dm;
@@ -872,7 +876,7 @@ enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
return PP_SMU_RESULT_FAIL;
}
-enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
+static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
unsigned int *clock_values_in_khz, unsigned int *num_states)
{
const struct dc_context *ctx = pp->dm;
@@ -892,7 +896,7 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
return PP_SMU_RESULT_FAIL;
}
-enum pp_smu_status pp_rn_get_dpm_clock_table(
+static enum pp_smu_status pp_rn_get_dpm_clock_table(
struct pp_smu *pp, struct dpm_clocks *clock_table)
{
const struct dc_context *ctx = pp->dm;
@@ -911,7 +915,7 @@ enum pp_smu_status pp_rn_get_dpm_clock_table(
return PP_SMU_RESULT_FAIL;
}
-enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
+static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges)
{
const struct dc_context *ctx = pp->dm;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 022da5d45d4d..51f57420fadd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -47,29 +47,4 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line)
{
}
-bool dm_write_persistent_data(struct dc_context *ctx,
- const struct dc_sink *sink,
- const char *module_name,
- const char *key_name,
- void *params,
- unsigned int size,
- struct persistent_data_flag *flag)
-{
- /*TODO implement*/
- return false;
-}
-
-bool dm_read_persistent_data(struct dc_context *ctx,
- const struct dc_sink *sink,
- const char *module_name,
- const char *key_name,
- void *params,
- unsigned int size,
- struct persistent_data_flag *flag)
-{
- /*TODO implement*/
- return false;
-}
-
/**** power component interfaces ****/
-