Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c  651
1 file changed, 531 insertions(+), 120 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index fac77a86c04b..65997ffaed45 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -55,6 +55,7 @@
* 2. Async ring
*/
#define GFX10_NUM_GFX_RINGS_NV1X 1
+#define GFX10_NUM_GFX_RINGS_Sienna_Cichlid 1
#define GFX10_MEC_HPD_SIZE 2048
#define F32_CE_PROGRAM_RAM_SIZE 65536
@@ -62,6 +63,54 @@
#define mmCGTT_GS_NGG_CLK_CTRL 0x5087
#define mmCGTT_GS_NGG_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SPI_RA0_CLK_CTRL 0x507a
+#define mmCGTT_SPI_RA0_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SPI_RA1_CLK_CTRL 0x507b
+#define mmCGTT_SPI_RA1_CLK_CTRL_BASE_IDX 1
+
+#define GB_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define GB_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+
+#define mmCP_MEC_CNTL_Sienna_Cichlid 0x0f55
+#define mmCP_MEC_CNTL_Sienna_Cichlid_BASE_IDX 0
+#define mmRLC_SAFE_MODE_Sienna_Cichlid 0x4ca0
+#define mmRLC_SAFE_MODE_Sienna_Cichlid_BASE_IDX 1
+#define mmRLC_CP_SCHEDULERS_Sienna_Cichlid 0x4ca1
+#define mmRLC_CP_SCHEDULERS_Sienna_Cichlid_BASE_IDX 1
+#define mmSPI_CONFIG_CNTL_Sienna_Cichlid 0x11ec
+#define mmSPI_CONFIG_CNTL_Sienna_Cichlid_BASE_IDX 0
+#define mmVGT_ESGS_RING_SIZE_Sienna_Cichlid 0x0fc1
+#define mmVGT_ESGS_RING_SIZE_Sienna_Cichlid_BASE_IDX 0
+#define mmVGT_GSVS_RING_SIZE_Sienna_Cichlid 0x0fc2
+#define mmVGT_GSVS_RING_SIZE_Sienna_Cichlid_BASE_IDX 0
+#define mmVGT_TF_RING_SIZE_Sienna_Cichlid 0x0fc3
+#define mmVGT_TF_RING_SIZE_Sienna_Cichlid_BASE_IDX 0
+#define mmVGT_HS_OFFCHIP_PARAM_Sienna_Cichlid 0x0fc4
+#define mmVGT_HS_OFFCHIP_PARAM_Sienna_Cichlid_BASE_IDX 0
+#define mmVGT_TF_MEMORY_BASE_Sienna_Cichlid 0x0fc5
+#define mmVGT_TF_MEMORY_BASE_Sienna_Cichlid_BASE_IDX 0
+#define mmVGT_TF_MEMORY_BASE_HI_Sienna_Cichlid 0x0fc6
+#define mmVGT_TF_MEMORY_BASE_HI_Sienna_Cichlid_BASE_IDX 0
+#define GRBM_STATUS2__RLC_BUSY_Sienna_Cichlid__SHIFT 0x1a
+#define GRBM_STATUS2__RLC_BUSY_Sienna_Cichlid_MASK 0x04000000L
+#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_Sienna_Cichlid_MASK 0x00000FFCL
+#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_Sienna_Cichlid__SHIFT 0x2
+#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_Sienna_Cichlid_MASK 0x00000FFCL
+#define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580
+#define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0
+
+#define mmCP_HYP_PFP_UCODE_ADDR 0x5814
+#define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1
+#define mmCP_HYP_PFP_UCODE_DATA 0x5815
+#define mmCP_HYP_PFP_UCODE_DATA_BASE_IDX 1
+#define mmCP_HYP_CE_UCODE_ADDR 0x5818
+#define mmCP_HYP_CE_UCODE_ADDR_BASE_IDX 1
+#define mmCP_HYP_CE_UCODE_DATA 0x5819
+#define mmCP_HYP_CE_UCODE_DATA_BASE_IDX 1
+#define mmCP_HYP_ME_UCODE_ADDR 0x5816
+#define mmCP_HYP_ME_UCODE_ADDR_BASE_IDX 1
+#define mmCP_HYP_ME_UCODE_DATA 0x5817
+#define mmCP_HYP_ME_UCODE_DATA_BASE_IDX 1
MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
@@ -89,6 +138,20 @@ MODULE_FIRMWARE("amdgpu/navi12_mec.bin");
MODULE_FIRMWARE("amdgpu/navi12_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi12_rlc.bin");
+MODULE_FIRMWARE("amdgpu/sienna_cichlid_ce.bin");
+MODULE_FIRMWARE("amdgpu/sienna_cichlid_pfp.bin");
+MODULE_FIRMWARE("amdgpu/sienna_cichlid_me.bin");
+MODULE_FIRMWARE("amdgpu/sienna_cichlid_mec.bin");
+MODULE_FIRMWARE("amdgpu/sienna_cichlid_mec2.bin");
+MODULE_FIRMWARE("amdgpu/sienna_cichlid_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/navy_flounder_ce.bin");
+MODULE_FIRMWARE("amdgpu/navy_flounder_pfp.bin");
+MODULE_FIRMWARE("amdgpu/navy_flounder_me.bin");
+MODULE_FIRMWARE("amdgpu/navy_flounder_mec.bin");
+MODULE_FIRMWARE("amdgpu/navy_flounder_mec2.bin");
+MODULE_FIRMWARE("amdgpu/navy_flounder_rlc.bin");
+
static const struct soc15_reg_golden golden_settings_gc_10_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
@@ -3013,6 +3076,93 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
};
+static const struct soc15_reg_golden golden_settings_gc_10_3[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003fffff, 0x00280400),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x10f80988),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000820),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffffffff, 0x00070104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xe07df47f, 0x00180070),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER0_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER1_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER10_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER11_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER12_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER13_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER14_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER15_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER2_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER3_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER4_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER5_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER6_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000)
+};
+
+static const struct soc15_reg_golden golden_settings_gc_10_3_sienna_cichlid[] =
+{
+ /* Pending on emulation bring up */
+};
+
+static const struct soc15_reg_golden golden_settings_gc_10_3_2[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003fffff, 0x00280400),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x00f80988),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000820),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffffffff, 0x00070104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_START_PHASE, 0x000000ff, 0x00000004),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xe07df47f, 0x00180070),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER0_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER1_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER10_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER11_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER12_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER13_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER14_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER15_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER2_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER3_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER4_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER5_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER6_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
+};
+
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -3193,6 +3343,20 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_rlc_spm_10_1_2_nv12,
(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
break;
+ case CHIP_SIENNA_CICHLID:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_10_3,
+ (const u32)ARRAY_SIZE(golden_settings_gc_10_3));
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_10_3_sienna_cichlid,
+ (const u32)ARRAY_SIZE(golden_settings_gc_10_3_sienna_cichlid));
+ break;
+ case CHIP_NAVY_FLOUNDER:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_10_3_2,
+ (const u32)ARRAY_SIZE(golden_settings_gc_10_3_2));
+ break;
+
default:
break;
}
@@ -3373,6 +3537,10 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
(adev->gfx.mec_feature_version >= 27))
adev->gfx.cp_fw_write_wait = true;
break;
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ adev->gfx.cp_fw_write_wait = true;
+ break;
default:
break;
}
@@ -3463,6 +3631,12 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
case CHIP_NAVI12:
chip_name = "navi12";
break;
+ case CHIP_SIENNA_CICHLID:
+ chip_name = "sienna_cichlid";
+ break;
+ case CHIP_NAVY_FLOUNDER:
+ chip_name = "navy_flounder";
+ break;
default:
BUG();
}
@@ -3992,6 +4166,17 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
break;
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ adev->gfx.config.max_hw_contexts = 8;
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+ gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
+ adev->gfx.config.gb_addr_config_fields.num_pkrs =
+ 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
+ break;
default:
BUG();
break;
@@ -4104,6 +4289,15 @@ static int gfx_v10_0_sw_init(void *handle)
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
break;
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ adev->gfx.me.num_me = 1;
+ adev->gfx.me.num_pipe_per_me = 1;
+ adev->gfx.me.num_queue_per_pipe = 1;
+ adev->gfx.mec.num_mec = 2;
+ adev->gfx.mec.num_pipe_per_mec = 4;
+ adev->gfx.mec.num_queue_per_pipe = 4;
+ break;
default:
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
@@ -4273,12 +4467,6 @@ static int gfx_v10_0_sw_fini(void *handle)
return 0;
}
-
-static void gfx_v10_0_tiling_mode_table_init(struct amdgpu_device *adev)
-{
- /* TODO */
-}
-
static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 instance)
{
@@ -4355,6 +4543,12 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade
uint32_t num_packer_per_sc;
uint32_t pa_sc_tile_steering_override;
+ /* for ASICs that integrate GFX v10.3
+ * pa_sc_tile_steering_override should be set to 0 */
+ if (adev->asic_type == CHIP_SIENNA_CICHLID ||
+ adev->asic_type == CHIP_NAVY_FLOUNDER)
+ return 0;
+
/* init num_sc */
num_sc = adev->gfx.config.max_shader_engines * adev->gfx.config.max_sh_per_se *
adev->gfx.config.num_sc_per_sh;
@@ -4380,8 +4574,6 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade
}
#define DEFAULT_SH_MEM_BASES (0x6000)
-#define FIRST_COMPUTE_VMID (8)
-#define LAST_COMPUTE_VMID (16)
static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
{
@@ -4397,7 +4589,7 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
mutex_lock(&adev->srbm_mutex);
- for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
nv_grbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
@@ -4408,7 +4600,7 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
/* Initialize all compute VMIDs to have no GDS, GWS, or OA
access. These should be enabled by FW for target VMIDs. */
- for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
@@ -4523,8 +4715,6 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
- gfx_v10_0_tiling_mode_table_init(adev);
-
gfx_v10_0_setup_rb(adev);
gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
gfx_v10_0_get_tcc_info(adev);
@@ -4558,7 +4748,12 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable)
{
- u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
+ u32 tmp;
+
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
enable ? 1 : 0);
@@ -4577,12 +4772,19 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* csib */
- WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
- adev->gfx.rlc.clear_state_gpu_addr >> 32);
- WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
- adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
- WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
-
+ if (adev->asic_type == CHIP_NAVI12) {
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
+ adev->gfx.rlc.clear_state_gpu_addr >> 32);
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
+ adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
+ } else {
+ WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
+ adev->gfx.rlc.clear_state_gpu_addr >> 32);
+ WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
+ adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
+ WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
+ }
return 0;
}
@@ -5190,7 +5392,12 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
- WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
+
+ if (adev->asic_type == CHIP_NAVI12) {
+ WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
+ } else {
+ WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
+ }
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
@@ -5271,6 +5478,14 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI,
upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
+ WREG32_SOC15(GC, 0, mmCP_HYP_PFP_UCODE_ADDR, 0);
+
+ for (i = 0; i < pfp_hdr->jt_size; i++)
+ WREG32_SOC15(GC, 0, mmCP_HYP_PFP_UCODE_DATA,
+ le32_to_cpup(fw_data + pfp_hdr->jt_offset + i));
+
+ WREG32_SOC15(GC, 0, mmCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
+
return 0;
}
@@ -5340,6 +5555,14 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI,
upper_32_bits(adev->gfx.ce.ce_fw_gpu_addr));
+ WREG32_SOC15(GC, 0, mmCP_HYP_CE_UCODE_ADDR, 0);
+
+ for (i = 0; i < ce_hdr->jt_size; i++)
+ WREG32_SOC15(GC, 0, mmCP_HYP_CE_UCODE_DATA,
+ le32_to_cpup(fw_data + ce_hdr->jt_offset + i));
+
+ WREG32_SOC15(GC, 0, mmCP_HYP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
+
return 0;
}
@@ -5409,6 +5632,14 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI,
upper_32_bits(adev->gfx.me.me_fw_gpu_addr));
+ WREG32_SOC15(GC, 0, mmCP_HYP_ME_UCODE_ADDR, 0);
+
+ for (i = 0; i < me_hdr->jt_size; i++)
+ WREG32_SOC15(GC, 0, mmCP_HYP_ME_UCODE_DATA,
+ le32_to_cpup(fw_data + me_hdr->jt_offset + i));
+
+ WREG32_SOC15(GC, 0, mmCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version);
+
return 0;
}
@@ -5549,12 +5780,25 @@ static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
DOORBELL_EN, 0);
}
WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
- tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
- DOORBELL_RANGE_LOWER, ring->doorbell_index);
- WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
+ DOORBELL_RANGE_LOWER_Sienna_Cichlid, ring->doorbell_index);
+ WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
+
+ WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
+ CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_Sienna_Cichlid_MASK);
+ break;
+ default:
+ tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
+ DOORBELL_RANGE_LOWER, ring->doorbell_index);
+ WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
- WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
- CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
+ WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
+ CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
+ break;
+ }
}
static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
@@ -5669,11 +5913,29 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
if (enable) {
- WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid, 0);
+ break;
+ default:
+ WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
+ break;
+ }
} else {
- WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
- (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
- CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid,
+ (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
+ CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+ break;
+ default:
+ WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
+ (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
+ CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+ break;
+ }
adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
@@ -5755,12 +6017,25 @@ static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
/* tell RLC which is KIQ queue */
- tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
- tmp &= 0xffffff00;
- tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
+ tmp &= 0xffffff00;
+ tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
+ tmp |= 0x80;
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
+ break;
+ default:
+ tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
+ tmp &= 0xffffff00;
+ tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ tmp |= 0x80;
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ break;
+ }
}
static int gfx_v10_0_gfx_mqd_init(struct amdgpu_ring *ring)
@@ -6446,18 +6721,34 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
/* check if mmVGT_ESGS_RING_SIZE_UMD
* has been remapped to mmVGT_ESGS_RING_SIZE */
- data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE);
-
- WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, 0);
-
- WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid);
+ WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, 0);
+ WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);
+
+ if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid) == pattern) {
+ WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data);
+ return true;
+ } else {
+ WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, data);
+ return false;
+ }
+ break;
+ default:
+ data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE);
+ WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, 0);
+ WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);
- if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE) == pattern) {
- WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data);
- return true;
- } else {
- WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data);
- return false;
+ if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE) == pattern) {
+ WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data);
+ return true;
+ } else {
+ WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data);
+ return false;
+ }
+ break;
}
}
@@ -6469,59 +6760,120 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
* index will auto-inc after each data writing */
WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0);
- /* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
- data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
- GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
- (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE) <<
- GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
- WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
- WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
-
- /* mmVGT_TF_MEMORY_BASE_UMD -> mmVGT_TF_MEMORY_BASE */
- data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_UMD) <<
- GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
- (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE) <<
- GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
- WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
- WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
-
- /* mmVGT_TF_MEMORY_BASE_HI_UMD -> mmVGT_TF_MEMORY_BASE_HI */
- data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI_UMD) <<
- GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
- (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI) <<
- GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
- WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
- WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
-
- /* mmVGT_HS_OFFCHIP_PARAM_UMD -> mmVGT_HS_OFFCHIP_PARAM */
- data = (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM_UMD) <<
- GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
- (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM) <<
- GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
- WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
- WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
-
- /* mmVGT_ESGS_RING_SIZE_UMD -> mmVGT_ESGS_RING_SIZE */
- data = (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE_UMD) <<
- GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
- (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE) <<
- GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
- WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
- WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
-
- /* mmVGT_GSVS_RING_SIZE_UMD -> mmVGT_GSVS_RING_SIZE */
- data = (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE_UMD) <<
- GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
- (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE) <<
- GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
- WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
- WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ /* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
+ data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
+ GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
+ (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_Sienna_Cichlid) <<
+ GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
+
+ /* mmVGT_TF_MEMORY_BASE_UMD -> mmVGT_TF_MEMORY_BASE */
+ data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_UMD) <<
+ GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
+ (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_Sienna_Cichlid) <<
+ GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
+
+ /* mmVGT_TF_MEMORY_BASE_HI_UMD -> mmVGT_TF_MEMORY_BASE_HI */
+ data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI_UMD) <<
+ GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
+ (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI_Sienna_Cichlid) <<
+ GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
+
+ /* mmVGT_HS_OFFCHIP_PARAM_UMD -> mmVGT_HS_OFFCHIP_PARAM */
+ data = (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM_UMD) <<
+ GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
+ (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM_Sienna_Cichlid) <<
+ GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
+
+ /* mmVGT_ESGS_RING_SIZE_UMD -> mmVGT_ESGS_RING_SIZE */
+ data = (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE_UMD) <<
+ GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
+ (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid) <<
+ GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
+
+ /* mmVGT_GSVS_RING_SIZE_UMD -> mmVGT_GSVS_RING_SIZE */
+ data = (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE_UMD) <<
+ GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
+ (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE_Sienna_Cichlid) <<
+ GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
+
+ /* mmSPI_CONFIG_CNTL_REMAP -> mmSPI_CONFIG_CNTL */
+ data = (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_REMAP) <<
+ GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
+ (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_Sienna_Cichlid) <<
+ GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
+ break;
+ default:
+ /* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
+ data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
+ GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
+ (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE) <<
+ GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
+
+ /* mmVGT_TF_MEMORY_BASE_UMD -> mmVGT_TF_MEMORY_BASE */
+ data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_UMD) <<
+ GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
+ (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE) <<
+ GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
+
+ /* mmVGT_TF_MEMORY_BASE_HI_UMD -> mmVGT_TF_MEMORY_BASE_HI */
+ data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI_UMD) <<
+ GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
+ (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI) <<
+ GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
+
+ /* mmVGT_HS_OFFCHIP_PARAM_UMD -> mmVGT_HS_OFFCHIP_PARAM */
+ data = (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM_UMD) <<
+ GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
+ (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM) <<
+ GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
+
+ /* mmVGT_ESGS_RING_SIZE_UMD -> mmVGT_ESGS_RING_SIZE */
+ data = (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE_UMD) <<
+ GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
+ (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE) <<
+ GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
+
+ /* mmVGT_GSVS_RING_SIZE_UMD -> mmVGT_GSVS_RING_SIZE */
+ data = (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE_UMD) <<
+ GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
+ (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE) <<
+ GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
+ WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
+
+ /* mmSPI_CONFIG_CNTL_REMAP -> mmSPI_CONFIG_CNTL */
+ data = (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_REMAP) <<
+ GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
+ (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL) <<
+ GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
+ break;
+ }
- /* mmSPI_CONFIG_CNTL_REMAP -> mmSPI_CONFIG_CNTL */
- data = (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_REMAP) <<
- GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
- (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL) <<
- GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT);
WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0);
WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data);
}
@@ -6602,6 +6954,7 @@ static int gfx_v10_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
+ uint32_t tmp;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -6616,6 +6969,11 @@ static int gfx_v10_0_hw_fini(void *handle)
DRM_ERROR("KCQ disable failed\n");
if (amdgpu_sriov_vf(adev)) {
gfx_v10_0_cp_gfx_enable(adev, false);
+ /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
+ tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
+ tmp &= 0xffffff00;
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+
return 0;
}
gfx_v10_0_cp_enable(adev, false);
@@ -6693,10 +7051,23 @@ static int gfx_v10_0_soft_reset(void *handle)
/* GRBM_STATUS2 */
tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
- if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
- grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
- GRBM_SOFT_RESET, SOFT_RESET_RLC,
- 1);
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY_Sienna_Cichlid))
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
+ GRBM_SOFT_RESET,
+ SOFT_RESET_RLC,
+ 1);
+ break;
+ default:
+ if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
+ GRBM_SOFT_RESET,
+ SOFT_RESET_RLC,
+ 1);
+ break;
+ }
if (grbm_soft_reset) {
/* stop the rlc */
@@ -6774,7 +7145,19 @@ static int gfx_v10_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
+ break;
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_Sienna_Cichlid;
+ break;
+ default:
+ break;
+ }
adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
@@ -6819,13 +7202,31 @@ static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
data = RLC_SAFE_MODE__CMD_MASK;
data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
- WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
- /* wait for RLC_SAFE_MODE */
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
- break;
- udelay(1);
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data);
+
+ /* wait for RLC_SAFE_MODE */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid),
+ RLC_SAFE_MODE, CMD))
+ break;
+ udelay(1);
+ }
+ break;
+ default:
+ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+
+ /* wait for RLC_SAFE_MODE */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE),
+ RLC_SAFE_MODE, CMD))
+ break;
+ udelay(1);
+ }
+ break;
}
}
@@ -6834,7 +7235,15 @@ static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev)
uint32_t data;
data = RLC_SAFE_MODE__CMD_MASK;
- WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data);
+ break;
+ default:
+ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+ break;
+ }
}
static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -7118,6 +7527,9 @@ static int gfx_v10_0_set_powergating_state(void *handle,
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
@@ -7138,6 +7550,8 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
gfx_v10_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
@@ -7153,12 +7567,12 @@ static void gfx_v10_0_get_clockgating_state(void *handle, u32 *flags)
int data;
/* AMD_CG_SUPPORT_GFX_MGCG */
- data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
*flags |= AMD_CG_SUPPORT_GFX_MGCG;
/* AMD_CG_SUPPORT_GFX_CGCG */
- data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CGCG;
@@ -7167,17 +7581,17 @@ static void gfx_v10_0_get_clockgating_state(void *handle, u32 *flags)
*flags |= AMD_CG_SUPPORT_GFX_CGLS;
/* AMD_CG_SUPPORT_GFX_RLC_LS */
- data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
/* AMD_CG_SUPPORT_GFX_CP_LS */
- data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
/* AMD_CG_SUPPORT_GFX_3D_CGCG */
- data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
@@ -7355,14 +7769,9 @@ static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags)
{
- struct amdgpu_device *adev = ring->adev;
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
- /* Interrupt not work fine on GFX10.1 model yet. Use fallback instead */
- if (adev->pdev->device == 0x50)
- int_sel = false;
-
/* RELEASE_MEM - flush caches, send int */
amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
@@ -8243,6 +8652,8 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
break;
case CHIP_NAVI12: