path: root/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
Diffstat (limited to 'drivers/gpu/drm/msm/adreno/a6xx_gpu.c')
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 623
1 file changed, 559 insertions, 64 deletions
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 52da3795b175..f6c614764017 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -21,7 +21,7 @@ static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
/* Check that the GMU is idle */
- if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
+ if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_isidle(&a6xx_gpu->gmu))
return false;
/* Check that the CX master is idle */
@@ -252,6 +252,56 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
a6xx_flush(gpu, ring);
}
+const struct adreno_reglist a612_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000081},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01202222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05522022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
/* For a615 family (a615, a616, a618 and a619) */
const struct adreno_reglist a615_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
@@ -588,6 +638,63 @@ const struct adreno_reglist a660_hwcg[] = {
{},
};
+const struct adreno_reglist a690_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_CNTL, 0x8AA8AA82},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, 0x20200},
+ {REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, 0x10111},
+ {REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, 0x5555},
+ {}
+};
+
static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -602,6 +709,8 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
if (adreno_is_a630(adreno_gpu))
clock_cntl_on = 0x8aa8aa02;
+ else if (adreno_is_a610(adreno_gpu))
+ clock_cntl_on = 0xaaa8aa82;
else
clock_cntl_on = 0x8aa8aa82;
@@ -612,13 +721,15 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
return;
/* Disable SP clock before programming HWCG registers */
- gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
+ if (!adreno_is_a610(adreno_gpu))
+ gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
gpu_write(gpu, reg->offset, state ? reg->value : 0);
/* Enable SP clock */
- gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
+ if (!adreno_is_a610(adreno_gpu))
+ gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
}
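For context, each hwcg table above ends with an empty {} entry; the loop in a6xx_set_hwcg() relies on that zero-offset sentinel to walk adreno_gpu->info->hwcg without a separate length field. A minimal sketch of the same idiom (illustrative only; the offsets and values are made up):

struct adreno_reglist { u32 offset; u32 value; };

static const struct adreno_reglist example_hwcg[] = {
	{ 0x0100, 0x22222222 },		/* hypothetical offset/value pairs */
	{ 0x0104, 0x00000002 },
	{},				/* offset == 0: sentinel, ends the walk */
};

static void example_program_hwcg(struct msm_gpu *gpu, bool state)
{
	const struct adreno_reglist *reg;
	int i;

	/* Same loop shape as a6xx_set_hwcg(): stop at the zero-offset sentinel */
	for (i = 0; (reg = &example_hwcg[i], reg->offset); i++)
		gpu_write(gpu, reg->offset, state ? reg->value : 0);
}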
@@ -747,6 +858,45 @@ static const u32 a660_protect[] = {
A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
};
+/* These are for a690 */
+static const u32 a690_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x004ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x00001),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x002f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x00000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x00000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x00000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x00082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x00008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x00024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x000ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x0004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x00272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x00001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x0000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x000c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x01fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x001cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x00000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x00007),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x0001f),
+ A6XX_PROTECT_NORDWR(0x08e80, 0x0027f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x001db),
+ A6XX_PROTECT_NORDWR(0x09e60, 0x00011),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x00187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x001cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x00000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x0012f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x00000),
+ A6XX_PROTECT_NORDWR(0x0b608, 0x00006),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x00001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x0015f),
+ A6XX_PROTECT_NORDWR(0x0d000, 0x005ff),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x00bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x01fff),
+ A6XX_PROTECT_NORDWR(0x11c00, 0x00000), /* note: infinite range */
+};
+
static void a6xx_set_cp_protect(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -758,6 +908,11 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
count = ARRAY_SIZE(a650_protect);
count_max = 48;
BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
+ } else if (adreno_is_a690(adreno_gpu)) {
+ regs = a690_protect;
+ count = ARRAY_SIZE(a690_protect);
+ count_max = 48;
+ BUILD_BUG_ON(ARRAY_SIZE(a690_protect) > 48);
} else if (adreno_is_a660_family(adreno_gpu)) {
regs = a660_protect;
count = ARRAY_SIZE(a660_protect);
@@ -786,39 +941,77 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- u32 lower_bit = 2;
- u32 amsbc = 0;
+ /* Unknown, introduced with A650 family, related to UBWC mode/ver 4 */
u32 rgb565_predicator = 0;
+ /* Unknown, introduced with A650 family */
u32 uavflagprd_inv = 0;
+ /* Whether the minimum access length is 64 bits */
+ u32 min_acc_len = 0;
+ /* Entirely magic, per-GPU-gen value */
+ u32 ubwc_mode = 0;
+ /*
+ * The Highest Bank Bit value represents the bit of the highest DDR bank.
+ * We then subtract 13 from it (13 is the minimum value allowed by hw) and
+ * write the lowest two bits of the remaining value as hbb_lo and the
+ * one above it as hbb_hi to the hardware. This should ideally use DRAM
+ * type detection.
+ */
+ u32 hbb_hi = 0;
+ u32 hbb_lo = 2;
+ /* Unknown, introduced with A640/680 */
+ u32 amsbc = 0;
+
+ if (adreno_is_a610(adreno_gpu)) {
+ /* HBB = 14 */
+ hbb_lo = 1;
+ min_acc_len = 1;
+ ubwc_mode = 1;
+ }
/* a618 is using the hw default values */
if (adreno_is_a618(adreno_gpu))
return;
+ if (adreno_is_a619_holi(adreno_gpu))
+ hbb_lo = 0;
+
if (adreno_is_a640_family(adreno_gpu))
amsbc = 1;
if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) {
/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
- lower_bit = 3;
+ hbb_lo = 3;
+ amsbc = 1;
+ rgb565_predicator = 1;
+ uavflagprd_inv = 2;
+ }
+
+ if (adreno_is_a690(adreno_gpu)) {
+ hbb_lo = 2;
amsbc = 1;
rgb565_predicator = 1;
uavflagprd_inv = 2;
}
if (adreno_is_7c3(adreno_gpu)) {
- lower_bit = 1;
+ hbb_lo = 1;
amsbc = 1;
rgb565_predicator = 1;
uavflagprd_inv = 2;
}
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
- rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
- gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
- gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
- uavflagprd_inv << 4 | lower_bit << 1);
- gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
+ rgb565_predicator << 11 | hbb_hi << 10 | amsbc << 4 |
+ min_acc_len << 3 | hbb_lo << 1 | ubwc_mode);
+
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, hbb_hi << 4 |
+ min_acc_len << 3 | hbb_lo << 1 | ubwc_mode);
+
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, hbb_hi << 10 |
+ uavflagprd_inv << 4 | min_acc_len << 3 |
+ hbb_lo << 1 | ubwc_mode);
+
+ gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, min_acc_len << 23 | hbb_lo << 21);
}
static int a6xx_cp_init(struct msm_gpu *gpu)
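To make the HBB comment above concrete: the value programmed into the registers is the highest bank bit minus 13, split into a low two-bit field and a single high bit. A minimal sketch of that arithmetic (encode_hbb() is a hypothetical helper, not part of the driver; it assumes 13 <= hbb <= 16):

static void encode_hbb(unsigned int hbb, u32 *hbb_hi, u32 *hbb_lo)
{
	unsigned int adj = hbb - 13;	/* 13 is the minimum the hw accepts */

	*hbb_lo = adj & 3;		/* lowest two bits */
	*hbb_hi = (adj >> 2) & 1;	/* the bit above them */
}

For example, A610 uses HBB = 14, so adj = 1, giving hbb_hi = 0 and hbb_lo = 1, which matches the a610 branch above.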
@@ -997,17 +1190,32 @@ static int hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
int ret;
- /* Make sure the GMU keeps the GPU on while we set it up */
- a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ if (!adreno_has_gmu_wrapper(adreno_gpu)) {
+ /* Make sure the GMU keeps the GPU on while we set it up */
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ }
/* Clear GBIF halt in case GX domain was not collapsed */
- if (a6xx_has_gbif(adreno_gpu))
+ if (adreno_is_a619_holi(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
+ gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, 0);
+ /* Let's make extra sure that the GPU can access the memory.. */
+ mb();
+ } else if (a6xx_has_gbif(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
+ /* Let's make extra sure that the GPU can access the memory.. */
+ mb();
+ }
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
+ if (adreno_is_a619_holi(adreno_gpu))
+ a6xx_sptprac_enable(gmu);
+
/*
* Disable the trusted memory range - we don't actually support secure
* memory rendering at this point in time and we don't want to block off
@@ -1034,13 +1242,13 @@ static int hw_init(struct msm_gpu *gpu)
a6xx_set_hwcg(gpu, true);
/* VBIF/GBIF start */
- if (adreno_is_a640_family(adreno_gpu) ||
+ if (adreno_is_a610(adreno_gpu) ||
+ adreno_is_a640_family(adreno_gpu) ||
adreno_is_a650_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
- gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
} else {
gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
@@ -1068,28 +1276,40 @@ static int hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
- if (adreno_is_a640_family(adreno_gpu) ||
- adreno_is_a650_family(adreno_gpu))
+ if (adreno_is_a640_family(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
- else
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
+ } else if (adreno_is_a610(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
+ } else {
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
- gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
+ }
if (adreno_is_a660_family(adreno_gpu))
gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
/* Setting the mem pool size */
- gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
+ if (adreno_is_a610(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48);
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47);
+ } else
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
/* Setting the primFifo thresholds default values,
* and vccCacheSkipDis=1 bit (0x200) for A640 and newer
*/
- if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
+ if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu) || adreno_is_a690(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
+ else if (adreno_is_a619(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00018000);
+ else if (adreno_is_a610(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00080000);
else
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
@@ -1105,8 +1325,12 @@ static int hw_init(struct msm_gpu *gpu)
a6xx_set_ubwc_config(gpu);
/* Enable fault detection */
- gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
- (1 << 30) | 0x1fffff);
+ if (adreno_is_a619(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff);
+ else if (adreno_is_a610(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3ffff);
+ else
+ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x1fffff);
gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
@@ -1123,6 +1347,13 @@ static int hw_init(struct msm_gpu *gpu)
0x3f0243f0);
}
+ /* Set up the CX GMU counter 0 to count busy ticks */
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
+
+ /* Enable the power counter */
+ gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, BIT(5));
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+
/* Protect registers from the CP */
a6xx_set_cp_protect(gpu);
@@ -1212,6 +1443,8 @@ static int hw_init(struct msm_gpu *gpu)
}
out:
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return ret;
/*
* Tell the GMU that we are done touching the GPU and it can start power
* management
@@ -1246,9 +1479,6 @@ static void a6xx_dump(struct msm_gpu *gpu)
adreno_dump(gpu);
}
-#define VBIF_RESET_ACK_TIMEOUT 100
-#define VBIF_RESET_ACK_MASK 0x00f0
-
static void a6xx_recover(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -1274,12 +1504,6 @@ static void a6xx_recover(struct msm_gpu *gpu)
/* Halt SQE first */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
- /*
- * Turn off keep alive that might have been enabled by the hang
- * interrupt
- */
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
-
pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
/* active_submit won't change until we make a submission */
@@ -1292,6 +1516,15 @@ static void a6xx_recover(struct msm_gpu *gpu)
*/
gpu->active_submits = 0;
+ if (adreno_has_gmu_wrapper(adreno_gpu)) {
+ /* Drain the outstanding traffic on memory buses */
+ a6xx_bus_clear_pending_transactions(adreno_gpu, true);
+
+ /* Reset the GPU to a clean state */
+ a6xx_gpu_sw_reset(gpu, true);
+ a6xx_gpu_sw_reset(gpu, false);
+ }
+
reinit_completion(&gmu->pd_gate);
dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb);
dev_pm_genpd_synced_poweroff(gmu->cxpd);
@@ -1442,7 +1675,8 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
* Force the GPU to stay on until after we finish
* collecting information
*/
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
+ if (!adreno_has_gmu_wrapper(adreno_gpu))
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
DRM_DEV_ERROR(&gpu->pdev->dev,
"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
@@ -1573,6 +1807,10 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
{
+ /* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
+ if (adreno_has_gmu_wrapper(&a6xx_gpu->base))
+ return;
+
llcc_slice_putd(a6xx_gpu->llc_slice);
llcc_slice_putd(a6xx_gpu->htw_llc_slice);
}
@@ -1582,6 +1820,10 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
{
struct device_node *phandle;
+ /* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
+ if (adreno_has_gmu_wrapper(&a6xx_gpu->base))
+ return;
+
/*
* There is a different programming path for targets with an mmu500
* attached, so detect if that is the case
@@ -1603,7 +1845,66 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
}
-static int a6xx_pm_resume(struct msm_gpu *gpu)
+#define GBIF_CLIENT_HALT_MASK BIT(0)
+#define GBIF_ARB_HALT_MASK BIT(1)
+#define VBIF_XIN_HALT_CTRL0_MASK GENMASK(3, 0)
+#define VBIF_RESET_ACK_MASK 0xF0
+#define GPR0_GBIF_HALT_REQUEST 0x1E0
+
+void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (adreno_is_a619_holi(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, GPR0_GBIF_HALT_REQUEST);
+ spin_until((gpu_read(gpu, REG_A6XX_RBBM_VBIF_GX_RESET_STATUS) &
+ (VBIF_RESET_ACK_MASK)) == VBIF_RESET_ACK_MASK);
+ } else if (!a6xx_has_gbif(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, VBIF_XIN_HALT_CTRL0_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+ (VBIF_XIN_HALT_CTRL0_MASK)) == VBIF_XIN_HALT_CTRL0_MASK);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ return;
+ }
+
+ if (gx_off) {
+ /* Halt the gx side of GBIF */
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
+ spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+ }
+
+ /* Halt new client requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+ /* Halt all AXI requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+ /* The GBIF halt needs to be explicitly cleared */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
+void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert)
+{
+ /* 11nm chips (e.g. ones with A610) have hw issues with the reset line! */
+ if (adreno_is_a610(to_adreno_gpu(gpu)))
+ return;
+
+ gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, assert);
+ /* Perform a bogus read and add a brief delay to ensure ordering. */
+ gpu_read(gpu, REG_A6XX_RBBM_SW_RESET_CMD);
+ udelay(1);
+
+ /* The reset line needs to be asserted for at least 100 us */
+ if (assert)
+ udelay(100);
+}
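In use, the reset is pulsed by calling the helper twice, as the recovery path above does:

	/* Reset the GPU to a clean state: assert, then deassert */
	a6xx_gpu_sw_reset(gpu, true);
	a6xx_gpu_sw_reset(gpu, false);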
+
+static int a6xx_gmu_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -1623,10 +1924,61 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
a6xx_llc_activate(a6xx_gpu);
- return 0;
+ return ret;
}
-static int a6xx_pm_suspend(struct msm_gpu *gpu)
+static int a6xx_pm_resume(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned long freq = gpu->fast_rate;
+ struct dev_pm_opp *opp;
+ int ret;
+
+ gpu->needs_hw_init = true;
+
+ trace_msm_gpu_resume(0);
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+
+ opp = dev_pm_opp_find_freq_ceil(&gpu->pdev->dev, &freq);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto err_set_opp;
+ }
+ dev_pm_opp_put(opp);
+
+ /* Set the core clock and bus bw, having VDD scaling in mind */
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
+
+ pm_runtime_resume_and_get(gmu->dev);
+ pm_runtime_resume_and_get(gmu->gxpd);
+
+ ret = clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
+ if (ret)
+ goto err_bulk_clk;
+
+ if (adreno_is_a619_holi(adreno_gpu))
+ a6xx_sptprac_enable(gmu);
+
+ /* If anything goes south, tear the GPU down piece by piece.. */
+ if (ret) {
+err_bulk_clk:
+ pm_runtime_put(gmu->gxpd);
+ pm_runtime_put(gmu->dev);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
+ }
+err_set_opp:
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+
+ if (!ret)
+ msm_devfreq_resume(gpu);
+
+ return ret;
+}
+
+static int a6xx_gmu_pm_suspend(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -1653,7 +2005,43 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
return 0;
}
-static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+static int a6xx_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int i;
+
+ trace_msm_gpu_suspend(0);
+
+ msm_devfreq_suspend(gpu);
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+
+ /* Drain the outstanding traffic on memory buses */
+ a6xx_bus_clear_pending_transactions(adreno_gpu, true);
+
+ if (adreno_is_a619_holi(adreno_gpu))
+ a6xx_sptprac_disable(gmu);
+
+ clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
+
+ pm_runtime_put_sync(gmu->gxpd);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
+ pm_runtime_put_sync(gmu->dev);
+
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+
+ if (a6xx_gpu->shadow_bo)
+ for (i = 0; i < gpu->nr_rings; i++)
+ a6xx_gpu->shadow[i] = 0;
+
+ gpu->suspend_count++;
+
+ return 0;
+}
+
+static int a6xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -1672,6 +2060,12 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
return 0;
}
+static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER);
+ return 0;
+}
+
static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -1697,9 +2091,7 @@ static void a6xx_destroy(struct msm_gpu *gpu)
a6xx_llc_slices_destroy(a6xx_gpu);
- mutex_lock(&a6xx_gpu->gmu.lock);
a6xx_gmu_remove(a6xx_gpu);
- mutex_unlock(&a6xx_gpu->gmu.lock);
adreno_gpu_cleanup(adreno_gpu);
@@ -1744,7 +2136,8 @@ a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
* This allows GPU to set the bus attributes required to use system
* cache on behalf of the iommu page table walker.
*/
- if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
+ if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice) &&
+ !device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY))
quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
return adreno_iommu_create_address_space(gpu, pdev, quirks);
@@ -1809,6 +2202,30 @@ static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return progress;
}
+static u32 a610_get_speed_bin(u32 fuse)
+{
+ /*
+ * There are (at least) three SoCs implementing A610: SM6125 (trinket),
+ * SM6115 (bengal) and SM6225 (khaje). Trinket does not have speedbinning,
+ * as only a single SKU exists and we don't support khaje upstream yet.
+ * Hence, this matching table is only valid for bengal and can be easily
+ * expanded if need be.
+ */
+
+ if (fuse == 0)
+ return 0;
+ else if (fuse == 206)
+ return 1;
+ else if (fuse == 200)
+ return 2;
+ else if (fuse == 157)
+ return 3;
+ else if (fuse == 127)
+ return 4;
+
+ return UINT_MAX;
+}
+
static u32 a618_get_speed_bin(u32 fuse)
{
if (fuse == 0)
@@ -1821,6 +2238,34 @@ static u32 a618_get_speed_bin(u32 fuse)
return UINT_MAX;
}
+static u32 a619_holi_get_speed_bin(u32 fuse)
+{
+ /*
+ * There are (at least) two SoCs implementing A619_holi: SM4350 (holi)
+ * and SM6375 (blair). Limit the fuse matching to the corresponding
+ * SoC to prevent bogus frequency setting (as improbable as it may be,
+ * given that unexpected fuse values are... unexpected! But still possible.)
+ */
+
+ if (fuse == 0)
+ return 0;
+
+ if (of_machine_is_compatible("qcom,sm4350")) {
+ if (fuse == 138)
+ return 1;
+ else if (fuse == 92)
+ return 2;
+ } else if (of_machine_is_compatible("qcom,sm6375")) {
+ if (fuse == 190)
+ return 1;
+ else if (fuse == 177)
+ return 2;
+ } else
+ pr_warn("Unknown SoC implementing A619_holi!\n");
+
+ return UINT_MAX;
+}
+
static u32 a619_get_speed_bin(u32 fuse)
{
if (fuse == 0)
@@ -1874,23 +2319,29 @@ static u32 adreno_7c3_get_speed_bin(u32 fuse)
return UINT_MAX;
}
-static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse)
+static u32 fuse_to_supp_hw(struct device *dev, struct adreno_gpu *adreno_gpu, u32 fuse)
{
u32 val = UINT_MAX;
- if (adreno_cmp_rev(ADRENO_REV(6, 1, 8, ANY_ID), rev))
+ if (adreno_is_a610(adreno_gpu))
+ val = a610_get_speed_bin(fuse);
+
+ if (adreno_is_a618(adreno_gpu))
val = a618_get_speed_bin(fuse);
- if (adreno_cmp_rev(ADRENO_REV(6, 1, 9, ANY_ID), rev))
+ else if (adreno_is_a619_holi(adreno_gpu))
+ val = a619_holi_get_speed_bin(fuse);
+
+ else if (adreno_is_a619(adreno_gpu))
val = a619_get_speed_bin(fuse);
- if (adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), rev))
+ else if (adreno_is_7c3(adreno_gpu))
val = adreno_7c3_get_speed_bin(fuse);
- if (adreno_cmp_rev(ADRENO_REV(6, 4, 0, ANY_ID), rev))
+ else if (adreno_is_a640(adreno_gpu))
val = a640_get_speed_bin(fuse);
- if (adreno_cmp_rev(ADRENO_REV(6, 5, 0, ANY_ID), rev))
+ else if (adreno_is_a650(adreno_gpu))
val = a650_get_speed_bin(fuse);
if (val == UINT_MAX) {
@@ -1903,7 +2354,7 @@ static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse)
return (1 << val);
}
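To make the speedbin flow concrete, a worked example (assuming an SM6115 whose A610 fuse reads 206, per the table in a610_get_speed_bin()):

	u32 fuse = 206;				/* assumed value read from the speedbin nvmem cell */
	u32 bin = a610_get_speed_bin(fuse);	/* -> 1 */
	u32 supp_hw = 1 << bin;			/* -> BIT(1) */

The resulting mask is handed to devm_pm_opp_set_supported_hw(), so only OPP table entries whose opp-supported-hw value has bit 1 set remain enabled.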
-static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev)
+static int a6xx_set_supported_hw(struct device *dev, struct adreno_gpu *adreno_gpu)
{
u32 supp_hw;
u32 speedbin;
@@ -1922,7 +2373,7 @@ static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev)
return ret;
}
- supp_hw = fuse_to_supp_hw(dev, rev, speedbin);
+ supp_hw = fuse_to_supp_hw(dev, adreno_gpu, speedbin);
ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
if (ret)
@@ -1937,8 +2388,8 @@ static const struct adreno_gpu_funcs funcs = {
.set_param = adreno_set_param,
.hw_init = a6xx_hw_init,
.ucode_load = a6xx_ucode_load,
- .pm_suspend = a6xx_pm_suspend,
- .pm_resume = a6xx_pm_resume,
+ .pm_suspend = a6xx_gmu_pm_suspend,
+ .pm_resume = a6xx_gmu_pm_resume,
.recover = a6xx_recover,
.submit = a6xx_submit,
.active_ring = a6xx_active_ring,
@@ -1959,6 +2410,35 @@ static const struct adreno_gpu_funcs funcs = {
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
+ .get_timestamp = a6xx_gmu_get_timestamp,
+};
+
+static const struct adreno_gpu_funcs funcs_gmuwrapper = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a6xx_hw_init,
+ .ucode_load = a6xx_ucode_load,
+ .pm_suspend = a6xx_pm_suspend,
+ .pm_resume = a6xx_pm_resume,
+ .recover = a6xx_recover,
+ .submit = a6xx_submit,
+ .active_ring = a6xx_active_ring,
+ .irq = a6xx_irq,
+ .destroy = a6xx_destroy,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .show = a6xx_show,
+#endif
+ .gpu_busy = a6xx_gpu_busy,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .gpu_state_get = a6xx_gpu_state_get,
+ .gpu_state_put = a6xx_gpu_state_put,
+#endif
+ .create_address_space = a6xx_create_address_space,
+ .create_private_address_space = a6xx_create_private_address_space,
+ .get_rptr = a6xx_get_rptr,
+ .progress = a6xx_progress,
+ },
.get_timestamp = a6xx_get_timestamp,
};
@@ -1985,26 +2465,44 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = NULL;
+ /* Check if there is a GMU phandle and set it up */
+ node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
+ /* FIXME: How do we gracefully handle this? */
+ BUG_ON(!node);
+
+ adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper");
+
/*
* We need to know the platform type before calling into adreno_gpu_init
* so that the hw_apriv flag can be correctly set. Snoop into the info
* and grab the revision number
*/
info = adreno_info(config->rev);
+ if (!info)
+ return ERR_PTR(-EINVAL);
- if (info && (info->revn == 650 || info->revn == 660 ||
- adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
- adreno_gpu->base.hw_apriv = true;
+ /* Assign these early so that we can use the is_aXYZ helpers */
+ /* Numeric revision IDs (e.g. 630) */
+ adreno_gpu->revn = info->revn;
+ /* New-style ADRENO_REV()-only */
+ adreno_gpu->rev = info->rev;
+ /* Quirk data */
+ adreno_gpu->info = info;
+
+ adreno_gpu->base.hw_apriv = !!(info->quirks & ADRENO_QUIRK_HAS_HW_APRIV);
a6xx_llc_slices_init(pdev, a6xx_gpu);
- ret = a6xx_set_supported_hw(&pdev->dev, config->rev);
+ ret = a6xx_set_supported_hw(&pdev->dev, adreno_gpu);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
return ERR_PTR(ret);
}
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
+ else
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
return ERR_PTR(ret);
@@ -2017,13 +2515,10 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu))
priv->gpu_clamp_to_idle = true;
- /* Check if there is a GMU phandle and set it up */
- node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
-
- /* FIXME: How do we gracefully handle this? */
- BUG_ON(!node);
-
- ret = a6xx_gmu_init(a6xx_gpu, node);
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ ret = a6xx_gmu_wrapper_init(a6xx_gpu, node);
+ else
+ ret = a6xx_gmu_init(a6xx_gpu, node);
of_node_put(node);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));