aboutsummaryrefslogtreecommitdiffstats
path: root/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0471-drm-amdgpu-Remove-FW_LOAD_DIRECT-type-support-on-VI.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0471-drm-amdgpu-Remove-FW_LOAD_DIRECT-type-support-on-VI.patch')
-rw-r--r--meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0471-drm-amdgpu-Remove-FW_LOAD_DIRECT-type-support-on-VI.patch422
1 files changed, 422 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0471-drm-amdgpu-Remove-FW_LOAD_DIRECT-type-support-on-VI.patch b/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0471-drm-amdgpu-Remove-FW_LOAD_DIRECT-type-support-on-VI.patch
new file mode 100644
index 00000000..04f2bc79
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0471-drm-amdgpu-Remove-FW_LOAD_DIRECT-type-support-on-VI.patch
@@ -0,0 +1,422 @@
+From 517dc4b1c9aeda4694606d42466b4e130cc5c0b6 Mon Sep 17 00:00:00 2001
+From: Rex Zhu <Rex.Zhu@amd.com>
+Date: Sat, 29 Sep 2018 20:09:00 +0800
+Subject: [PATCH 0471/2940] drm/amdgpu: Remove FW_LOAD_DIRECT type support on
+ VI
+
+AMDGPU_FW_LOAD_DIRECT was used for bring up.
+Now it doesn't work any more, so remove the support.
+
+v2: Add warning message if user select
+ AMDGPU_FW_LOAD_DIRECT/AMDGPU_FW_LOAD_PSP on VI.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 7 +-
+ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 249 +++++-----------------
+ drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 57 +----
+ 3 files changed, 59 insertions(+), 254 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index 1fa8bc337859..987821232c42 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -297,10 +297,9 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+- if (!load_type)
+- return AMDGPU_FW_LOAD_DIRECT;
+- else
+- return AMDGPU_FW_LOAD_SMU;
++ if (load_type != AMDGPU_FW_LOAD_SMU)
++ pr_warning("%d is not supported on VI\n", load_type);
++ return AMDGPU_FW_LOAD_SMU;
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+ case CHIP_VEGA12:
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index f9e0a21435f8..6b1954eaf275 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -1173,64 +1173,61 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
+ }
+ }
+
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
+- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
+- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
+- info->fw = adev->gfx.pfp_fw;
+- header = (const struct common_firmware_header *)info->fw->data;
+- adev->firmware.fw_size +=
+- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+-
+- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
+- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
+- info->fw = adev->gfx.me_fw;
+- header = (const struct common_firmware_header *)info->fw->data;
+- adev->firmware.fw_size +=
+- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+-
+- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
+- info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
+- info->fw = adev->gfx.ce_fw;
+- header = (const struct common_firmware_header *)info->fw->data;
+- adev->firmware.fw_size +=
+- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
++ info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
++ info->fw = adev->gfx.pfp_fw;
++ header = (const struct common_firmware_header *)info->fw->data;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++
++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
++ info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
++ info->fw = adev->gfx.me_fw;
++ header = (const struct common_firmware_header *)info->fw->data;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++
++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
++ info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
++ info->fw = adev->gfx.ce_fw;
++ header = (const struct common_firmware_header *)info->fw->data;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++
++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
++ info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
++ info->fw = adev->gfx.rlc_fw;
++ header = (const struct common_firmware_header *)info->fw->data;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++
++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
++ info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
++ info->fw = adev->gfx.mec_fw;
++ header = (const struct common_firmware_header *)info->fw->data;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++
++ /* we need account JT in */
++ cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+
+- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
+- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
+- info->fw = adev->gfx.rlc_fw;
+- header = (const struct common_firmware_header *)info->fw->data;
++ if (amdgpu_sriov_vf(adev)) {
++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
++ info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
++ info->fw = adev->gfx.mec_fw;
+ adev->firmware.fw_size +=
+- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++ ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
++ }
+
+- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
+- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
+- info->fw = adev->gfx.mec_fw;
++ if (adev->gfx.mec2_fw) {
++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
++ info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
++ info->fw = adev->gfx.mec2_fw;
+ header = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+-
+- /* we need account JT in */
+- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+- adev->firmware.fw_size +=
+- ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+-
+- if (amdgpu_sriov_vf(adev)) {
+- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
+- info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
+- info->fw = adev->gfx.mec_fw;
+- adev->firmware.fw_size +=
+- ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
+- }
+-
+- if (adev->gfx.mec2_fw) {
+- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
+- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
+- info->fw = adev->gfx.mec2_fw;
+- header = (const struct common_firmware_header *)info->fw->data;
+- adev->firmware.fw_size +=
+- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+- }
+-
+ }
+
+ out:
+@@ -4181,45 +4178,11 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
+ udelay(50);
+ }
+
+-static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
+-{
+- const struct rlc_firmware_header_v2_0 *hdr;
+- const __le32 *fw_data;
+- unsigned i, fw_size;
+-
+- if (!adev->gfx.rlc_fw)
+- return -EINVAL;
+-
+- hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+- amdgpu_ucode_print_rlc_hdr(&hdr->header);
+-
+- fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+-
+- WREG32(mmRLC_GPM_UCODE_ADDR, 0);
+- for (i = 0; i < fw_size; i++)
+- WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
+- WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
+-
+- return 0;
+-}
+-
+ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
+ {
+- int r;
+-
+ gfx_v8_0_rlc_stop(adev);
+ gfx_v8_0_rlc_reset(adev);
+ gfx_v8_0_init_pg(adev);
+-
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+- /* legacy rlc firmware loading */
+- r = gfx_v8_0_rlc_load_microcode(adev);
+- if (r)
+- return r;
+- }
+-
+ gfx_v8_0_rlc_start(adev);
+
+ return 0;
+@@ -4245,63 +4208,6 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+ udelay(50);
+ }
+
+-static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
+-{
+- const struct gfx_firmware_header_v1_0 *pfp_hdr;
+- const struct gfx_firmware_header_v1_0 *ce_hdr;
+- const struct gfx_firmware_header_v1_0 *me_hdr;
+- const __le32 *fw_data;
+- unsigned i, fw_size;
+-
+- if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
+- return -EINVAL;
+-
+- pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
+- adev->gfx.pfp_fw->data;
+- ce_hdr = (const struct gfx_firmware_header_v1_0 *)
+- adev->gfx.ce_fw->data;
+- me_hdr = (const struct gfx_firmware_header_v1_0 *)
+- adev->gfx.me_fw->data;
+-
+- amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
+- amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
+- amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
+-
+- gfx_v8_0_cp_gfx_enable(adev, false);
+-
+- /* PFP */
+- fw_data = (const __le32 *)
+- (adev->gfx.pfp_fw->data +
+- le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+- fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+- WREG32(mmCP_PFP_UCODE_ADDR, 0);
+- for (i = 0; i < fw_size; i++)
+- WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+- WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
+-
+- /* CE */
+- fw_data = (const __le32 *)
+- (adev->gfx.ce_fw->data +
+- le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+- fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+- WREG32(mmCP_CE_UCODE_ADDR, 0);
+- for (i = 0; i < fw_size; i++)
+- WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+- WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
+-
+- /* ME */
+- fw_data = (const __le32 *)
+- (adev->gfx.me_fw->data +
+- le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+- fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+- WREG32(mmCP_ME_RAM_WADDR, 0);
+- for (i = 0; i < fw_size; i++)
+- WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+- WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
+-
+- return 0;
+-}
+-
+ static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
+ {
+ u32 count = 0;
+@@ -4501,52 +4407,6 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
+ udelay(50);
+ }
+
+-static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
+-{
+- const struct gfx_firmware_header_v1_0 *mec_hdr;
+- const __le32 *fw_data;
+- unsigned i, fw_size;
+-
+- if (!adev->gfx.mec_fw)
+- return -EINVAL;
+-
+- gfx_v8_0_cp_compute_enable(adev, false);
+-
+- mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+- amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
+-
+- fw_data = (const __le32 *)
+- (adev->gfx.mec_fw->data +
+- le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
+- fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
+-
+- /* MEC1 */
+- WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
+- for (i = 0; i < fw_size; i++)
+- WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data+i));
+- WREG32(mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
+-
+- /* Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
+- if (adev->gfx.mec2_fw) {
+- const struct gfx_firmware_header_v1_0 *mec2_hdr;
+-
+- mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+- amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
+-
+- fw_data = (const __le32 *)
+- (adev->gfx.mec2_fw->data +
+- le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
+- fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
+-
+- WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
+- for (i = 0; i < fw_size; i++)
+- WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data+i));
+- WREG32(mmCP_MEC_ME2_UCODE_ADDR, adev->gfx.mec2_fw_version);
+- }
+-
+- return 0;
+-}
+-
+ /* KIQ functions */
+ static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
+ {
+@@ -4980,17 +4840,6 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
+ if (!(adev->flags & AMD_IS_APU))
+ gfx_v8_0_enable_gui_idle_interrupt(adev, false);
+
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+- /* legacy firmware loading */
+- r = gfx_v8_0_cp_gfx_load_microcode(adev);
+- if (r)
+- return r;
+-
+- r = gfx_v8_0_cp_compute_load_microcode(adev);
+- if (r)
+- return r;
+- }
+-
+ r = gfx_v8_0_kiq_resume(adev);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+index 6d5c8ac64874..6fb3edaba0ec 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+@@ -318,14 +318,13 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
+ if (adev->sdma.instance[i].feature_version >= 20)
+ adev->sdma.instance[i].burst_nop = true;
+
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
+- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
+- info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
+- info->fw = adev->sdma.instance[i].fw;
+- header = (const struct common_firmware_header *)info->fw->data;
+- adev->firmware.fw_size +=
+- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+- }
++ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
++ info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
++ info->fw = adev->sdma.instance[i].fw;
++ header = (const struct common_firmware_header *)info->fw->data;
++ adev->firmware.fw_size +=
++ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
++
+ }
+ out:
+ if (err) {
+@@ -777,42 +776,6 @@ static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
+ return 0;
+ }
+
+-/**
+- * sdma_v3_0_load_microcode - load the sDMA ME ucode
+- *
+- * @adev: amdgpu_device pointer
+- *
+- * Loads the sDMA0/1 ucode.
+- * Returns 0 for success, -EINVAL if the ucode is not available.
+- */
+-static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
+-{
+- const struct sdma_firmware_header_v1_0 *hdr;
+- const __le32 *fw_data;
+- u32 fw_size;
+- int i, j;
+-
+- /* halt the MEs */
+- sdma_v3_0_enable(adev, false);
+-
+- for (i = 0; i < adev->sdma.num_instances; i++) {
+- if (!adev->sdma.instance[i].fw)
+- return -EINVAL;
+- hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+- amdgpu_ucode_print_sdma_hdr(&hdr->header);
+- fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+- fw_data = (const __le32 *)
+- (adev->sdma.instance[i].fw->data +
+- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+- WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
+- for (j = 0; j < fw_size; j++)
+- WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
+- WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
+- }
+-
+- return 0;
+-}
+-
+ /**
+ * sdma_v3_0_start - setup and start the async dma engines
+ *
+@@ -825,12 +788,6 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
+ {
+ int r;
+
+- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+- r = sdma_v3_0_load_microcode(adev);
+- if (r)
+- return r;
+- }
+-
+ /* disable sdma engine before programing it */
+ sdma_v3_0_ctx_switch_enable(adev, false);
+ sdma_v3_0_enable(adev, false);
+--
+2.17.1
+