author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:35:05 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:31 +0000
commit     85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
tree       76267dbc9b9a130337be3640948fe397b04ac629 /drivers/gpu/drm/msm/adreno
parent     Adding upstream version 6.6.15. (diff)
download   linux-85c675d0d09a45a135bddd15d7b385f8758c32fb.tar.xz
           linux-85c675d0d09a45a135bddd15d7b385f8758c32fb.zip

Adding upstream version 6.7.7. (tag: upstream/6.7.7)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/msm/adreno')
 -rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx.xml.h       |   9
 -rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c       | 209
 -rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h       |   3
 -rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h   |   8
 -rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c       | 676
 -rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c |  69
 -rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h |  65
 -rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.c       |  88
 -rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c  |  37
 -rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c     |   7
 -rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h     |  32
 11 files changed, 1062 insertions(+), 141 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index 1c051535fd..863b5e3b0e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -1114,6 +1114,12 @@ enum a6xx_tex_type {
#define REG_A6XX_CP_MISC_CNTL 0x00000840
#define REG_A6XX_CP_APRIV_CNTL 0x00000844
+#define A6XX_CP_APRIV_CNTL_CDWRITE 0x00000040
+#define A6XX_CP_APRIV_CNTL_CDREAD 0x00000020
+#define A6XX_CP_APRIV_CNTL_RBRPWB 0x00000008
+#define A6XX_CP_APRIV_CNTL_RBPRIVLEVEL 0x00000004
+#define A6XX_CP_APRIV_CNTL_RBFETCH 0x00000002
+#define A6XX_CP_APRIV_CNTL_ICACHE 0x00000001
#define REG_A6XX_CP_PREEMPT_THRESHOLD 0x000008c0
@@ -1939,6 +1945,8 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00
#define REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE 0x00000122
+#define REG_A7XX_RBBM_CLOCK_HYST2_VFD 0x0000012f
+
#define REG_A6XX_RBBM_LPAC_GBIF_CLIENT_QOS_CNTL 0x000005ff
#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_A 0x00000600
@@ -8252,5 +8260,6 @@ static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1 0x00000002
+#define REG_A7XX_CX_MISC_TCM_RET_CNTL 0x00000039
#endif /* A6XX_XML */
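
Aside (illustrative snippet, not part of the patch): the single-bit CP_APRIV_CNTL defines added above are meant to be OR'd together; a minimal standalone C sketch of how they compose into the A7xx privilege masks that a6xx_gpu.c builds later in this diff:

#include <stdint.h>
#include <stdio.h>

/* Single-bit fields of CP_APRIV_CNTL, as defined in a6xx.xml.h above */
#define A6XX_CP_APRIV_CNTL_CDWRITE     0x00000040
#define A6XX_CP_APRIV_CNTL_CDREAD      0x00000020
#define A6XX_CP_APRIV_CNTL_RBRPWB      0x00000008
#define A6XX_CP_APRIV_CNTL_RBPRIVLEVEL 0x00000004
#define A6XX_CP_APRIV_CNTL_RBFETCH     0x00000002
#define A6XX_CP_APRIV_CNTL_ICACHE      0x00000001

/* Composed the same way as A7XX_APRIV_MASK / A7XX_BR_APRIVMASK in a6xx_gpu.c */
#define A7XX_APRIV_MASK (A6XX_CP_APRIV_CNTL_ICACHE | \
			 A6XX_CP_APRIV_CNTL_RBFETCH | \
			 A6XX_CP_APRIV_CNTL_RBPRIVLEVEL | \
			 A6XX_CP_APRIV_CNTL_RBRPWB)

#define A7XX_BR_APRIVMASK (A7XX_APRIV_MASK | \
			   A6XX_CP_APRIV_CNTL_CDREAD | \
			   A6XX_CP_APRIV_CNTL_CDWRITE)

int main(void)
{
	/* Expect 0x0f for the BV/LPAC mask and 0x6f for the BR mask */
	printf("APRIV=0x%02x BR_APRIV=0x%02x\n",
	       (unsigned int)A7XX_APRIV_MASK, (unsigned int)A7XX_BR_APRIVMASK);
	return 0;
}
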
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 7923129363..8c4900444b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/of_platform.h>
@@ -202,9 +203,10 @@ int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ u32 mask, reset_val, val;
int ret;
- u32 val;
- u32 mask, reset_val;
val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
if (val <= 0x20010004) {
@@ -220,7 +222,11 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
/* Set the log wptr index
* note: downstream saves the value in poweroff and restores it here
*/
- gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
+ if (adreno_is_a7xx(adreno_gpu))
+ gmu_write(gmu, REG_A6XX_GMU_GENERAL_9, 0);
+ else
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
+
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
@@ -513,6 +519,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct platform_device *pdev = to_platform_device(gmu->dev);
void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
+ u32 seqmem0_drv0_reg = REG_A6XX_RSCC_SEQ_MEM_0_DRV0;
void __iomem *seqptr = NULL;
uint32_t pdc_address_offset;
bool pdc_in_aop = false;
@@ -520,7 +527,9 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
if (IS_ERR(pdcptr))
goto err;
- if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
+ if (adreno_is_a650(adreno_gpu) ||
+ adreno_is_a660_family(adreno_gpu) ||
+ adreno_is_a7xx(adreno_gpu))
pdc_in_aop = true;
else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
pdc_address_offset = 0x30090;
@@ -544,20 +553,26 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
- gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4,
+ adreno_is_a740_family(adreno_gpu) ? 0x80000021 : 0x80000000);
gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
+ /* The second spin of A7xx GPUs messed with some register offsets.. */
+ if (adreno_is_a740_family(adreno_gpu))
+ seqmem0_drv0_reg = REG_A7XX_RSCC_SEQ_MEM_0_DRV0_A740;
+
/* Load RSC sequencer uCode for sleep and wakeup */
- if (adreno_is_a650_family(adreno_gpu)) {
- gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
- gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
- gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
- gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
- gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
+ if (adreno_is_a650_family(adreno_gpu) ||
+ adreno_is_a7xx(adreno_gpu)) {
+ gmu_write_rscc(gmu, seqmem0_drv0_reg, 0xeaaae5a0);
+ gmu_write_rscc(gmu, seqmem0_drv0_reg + 1, 0xe1a1ebab);
+ gmu_write_rscc(gmu, seqmem0_drv0_reg + 2, 0xa2e0a581);
+ gmu_write_rscc(gmu, seqmem0_drv0_reg + 3, 0xecac82e2);
+ gmu_write_rscc(gmu, seqmem0_drv0_reg + 4, 0x0020edad);
} else {
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
@@ -637,11 +652,18 @@ err:
/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+
/* Disable GMU WB/RB buffer */
gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
+ /* A7xx knows better by default! */
+ if (adreno_is_a7xx(adreno_gpu))
+ return;
+
gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
switch (gmu->idle_level) {
@@ -698,7 +720,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
u32 itcm_base = 0x00000000;
u32 dtcm_base = 0x00040000;
- if (adreno_is_a650_family(adreno_gpu))
+ if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu))
dtcm_base = 0x10004000;
if (gmu->legacy) {
@@ -747,14 +769,22 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ u32 fence_range_lower, fence_range_upper;
+ u32 chipid, chipid_min = 0;
int ret;
- u32 chipid;
- if (adreno_is_a650_family(adreno_gpu)) {
+ /* Vote veto for FAL10 */
+ if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu)) {
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
}
+ /* Turn on TCM (Tightly Coupled Memory) retention */
+ if (adreno_is_a7xx(adreno_gpu))
+ a6xx_llc_write(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL, 1);
+ else
+ gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
+
if (state == GMU_WARM_BOOT) {
ret = a6xx_rpmh_start(gmu);
if (ret)
@@ -764,9 +794,6 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
"GMU firmware is not loaded\n"))
return -ENOENT;
- /* Turn on register retention */
- gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
-
ret = a6xx_rpmh_start(gmu);
if (ret)
return ret;
@@ -776,6 +803,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
return ret;
}
+ /* Clear init result to make sure we are getting a fresh value */
gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
@@ -783,8 +811,18 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
+ if (adreno_is_a7xx(adreno_gpu)) {
+ fence_range_upper = 0x32;
+ fence_range_lower = 0x8a0;
+ } else {
+ fence_range_upper = 0xa;
+ fence_range_lower = 0xa0;
+ }
+
gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
- (1 << 31) | (0xa << 18) | (0xa0));
+ BIT(31) |
+ FIELD_PREP(GENMASK(30, 18), fence_range_upper) |
+ FIELD_PREP(GENMASK(17, 0), fence_range_lower));
/*
* Snapshots toggle the NMI bit which will result in a jump to the NMI
@@ -792,21 +830,49 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
*/
gmu_write(gmu, REG_A6XX_GMU_CM3_CFG, 0x4052);
- /*
- * Note that the GMU has a slightly different layout for
- * chip_id, for whatever reason, so a bit of massaging
- * is needed. The upper 16b are the same, but minor and
- * patchid are packed in four bits each with the lower
- * 8b unused:
- */
- chipid = adreno_gpu->chip_id & 0xffff0000;
- chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */
- chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */
+ /* NOTE: A730 may also fall in this if-condition with a future GMU fw update. */
+ if (adreno_is_a7xx(adreno_gpu) && !adreno_is_a730(adreno_gpu)) {
+ /* A7xx GPUs have obfuscated chip IDs. Use constant maj = 7 */
+ chipid = FIELD_PREP(GENMASK(31, 24), 0x7);
+
+ /*
+ * The min part has a 1-1 mapping for each GPU SKU.
+ * This chipid that the GMU expects corresponds to the "GENX_Y_Z" naming,
+ * where X = major, Y = minor, Z = patchlevel, e.g. GEN7_2_1 for prod A740.
+ */
+ if (adreno_is_a740(adreno_gpu))
+ chipid_min = 2;
+ else
+ return -EINVAL;
+
+ chipid |= FIELD_PREP(GENMASK(23, 16), chipid_min);
- gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
+ /* Get the patchid (which may vary) from the device tree */
+ chipid |= FIELD_PREP(GENMASK(15, 8), adreno_patchid(adreno_gpu));
+ } else {
+ /*
+ * Note that the GMU has a slightly different layout for
+ * chip_id, for whatever reason, so a bit of massaging
+ * is needed. The upper 16b are the same, but minor and
+ * patchid are packed in four bits each with the lower
+ * 8b unused:
+ */
+ chipid = adreno_gpu->chip_id & 0xffff0000;
+ chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */
+ chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */
+ }
- gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
- gmu->log.iova | (gmu->log.size / SZ_4K - 1));
+ if (adreno_is_a7xx(adreno_gpu)) {
+ gmu_write(gmu, REG_A6XX_GMU_GENERAL_10, chipid);
+ gmu_write(gmu, REG_A6XX_GMU_GENERAL_8,
+ (gmu->log.iova & GENMASK(31, 12)) |
+ ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0)));
+ } else {
+ gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
+
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
+ gmu->log.iova | (gmu->log.size / SZ_4K - 1));
+ }
/* Set up the lowest idle level on the GMU */
a6xx_gmu_power_config(gmu);
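
Aside (illustrative snippet, not part of the patch; the helper name and patchid value are ours): the A7xx chipid layout programmed in a6xx_gmu_fw_start() above can be reproduced with plain shifts, since FIELD_PREP(GENMASK(h, l), v) reduces to (v << l) masked to the field:

#include <stdint.h>
#include <stdio.h>

/* maj in bits [31:24] (constant 7), min in [23:16], patchid in [15:8];
 * the low byte is left unused, mirroring the FIELD_PREP() chain above.
 */
static uint32_t a7xx_gmu_chipid(uint32_t chipid_min, uint32_t patchid)
{
	return (0x7u << 24) | ((chipid_min & 0xff) << 16) | ((patchid & 0xff) << 8);
}

int main(void)
{
	/* GEN7_2_1, i.e. production A740, with an illustrative patchid of 1 */
	printf("chipid = 0x%08x\n", a7xx_gmu_chipid(2, 1)); /* 0x07020100 */
	return 0;
}
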
@@ -857,17 +923,23 @@ static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
- u32 val;
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ u32 val, seqmem_off = 0;
+
+ /* The second spin of A7xx GPUs messed with some register offsets.. */
+ if (adreno_is_a740_family(adreno_gpu))
+ seqmem_off = 4;
/* Make sure there are no outstanding RPMh votes */
- gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
- (val & 1), 100, 10000);
- gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
- (val & 1), 100, 10000);
- gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
- (val & 1), 100, 10000);
- gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
- (val & 1), 100, 1000);
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 1000);
}
/* Force the GMU off in case it isn't responsive */
@@ -950,6 +1022,14 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
gmu->hung = false;
+ /* Notify AOSS about the ACD state (unimplemented for now => disable it) */
+ if (!IS_ERR(gmu->qmp)) {
+ ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}",
+ 0 /* Hardcode ACD to be disabled for now */);
+ if (ret)
+ dev_err(gmu->dev, "failed to send GPU ACD state\n");
+ }
+
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
@@ -963,7 +1043,8 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
/* Use a known rate to bring up the GMU */
clk_set_rate(gmu->core_clk, 200000000);
- clk_set_rate(gmu->hub_clk, 150000000);
+ clk_set_rate(gmu->hub_clk, adreno_is_a740_family(adreno_gpu) ?
+ 200000000 : 150000000);
ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
if (ret) {
pm_runtime_put(gmu->gxpd);
@@ -980,15 +1061,19 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
enable_irq(gmu->gmu_irq);
/* Check to see if we are doing a cold or warm boot */
- status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
- GMU_WARM_BOOT : GMU_COLD_BOOT;
-
- /*
- * Warm boot path does not work on newer GPUs
- * Presumably this is because icache/dcache regions must be restored
- */
- if (!gmu->legacy)
+ if (adreno_is_a7xx(adreno_gpu)) {
+ status = a6xx_llc_read(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL) == 1 ?
+ GMU_WARM_BOOT : GMU_COLD_BOOT;
+ } else if (gmu->legacy) {
+ status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
+ GMU_WARM_BOOT : GMU_COLD_BOOT;
+ } else {
+ /*
+ * Warm boot path does not work on newer A6xx GPUs
+ * Presumably this is because icache/dcache regions must be restored
+ */
status = GMU_COLD_BOOT;
+ }
ret = a6xx_gmu_fw_start(gmu, status);
if (ret)
@@ -1473,6 +1558,9 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
dev_pm_domain_detach(gmu->gxpd, false);
}
+ if (!IS_ERR_OR_NULL(gmu->qmp))
+ qmp_put(gmu->qmp);
+
iounmap(gmu->mmio);
if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
iounmap(gmu->rscc);
@@ -1569,6 +1657,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
struct platform_device *pdev = of_find_device_by_node(node);
+ struct device_link *link;
int ret;
if (!pdev)
@@ -1600,7 +1689,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
* are otherwise unused by a660.
*/
gmu->dummy.size = SZ_4K;
- if (adreno_is_a660_family(adreno_gpu)) {
+ if (adreno_is_a660_family(adreno_gpu) ||
+ adreno_is_a7xx(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
0x60400000, "debug");
if (ret)
@@ -1616,7 +1706,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto err_memory;
/* Note that a650 family also includes a660 family: */
- if (adreno_is_a650_family(adreno_gpu)) {
+ if (adreno_is_a650_family(adreno_gpu) ||
+ adreno_is_a7xx(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
SZ_16M - SZ_16K, 0x04000, "icache");
if (ret)
@@ -1664,7 +1755,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto err_memory;
}
- if (adreno_is_a650_family(adreno_gpu)) {
+ if (adreno_is_a650_family(adreno_gpu) ||
+ adreno_is_a7xx(adreno_gpu)) {
gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
if (IS_ERR(gmu->rscc)) {
ret = -ENODEV;
@@ -1689,12 +1781,18 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto err_mmio;
}
- if (!device_link_add(gmu->dev, gmu->cxpd,
- DL_FLAG_PM_RUNTIME)) {
+ link = device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME);
+ if (!link) {
ret = -ENODEV;
goto detach_cxpd;
}
+ gmu->qmp = qmp_get(gmu->dev);
+ if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) {
+ ret = PTR_ERR(gmu->qmp);
+ goto remove_device_link;
+ }
+
init_completion(&gmu->pd_gate);
complete_all(&gmu->pd_gate);
gmu->pd_nb.notifier_call = cxpd_notifier_cb;
@@ -1718,6 +1816,9 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
return 0;
+remove_device_link:
+ device_link_del(link);
+
detach_cxpd:
dev_pm_domain_detach(gmu->cxpd, false);
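
Aside (illustrative snippet, not part of the patch; the GENMASK/field_prep helpers and the log IOVA/size values are ours): a standalone sketch of two packing idioms used in this file, the AHB fence range write and the A7xx GMU log register:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))

/* Userspace stand-in for FIELD_PREP(): shift the value to the mask's
 * lowest set bit and clamp it to the mask.
 */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	unsigned int shift = 0;

	while (!(mask & (1u << shift)))
		shift++;
	return (val << shift) & mask;
}

int main(void)
{
	/* AHB fence range as programmed for A7xx in a6xx_gmu_fw_start() */
	uint32_t fence_range = BIT(31) |
			       field_prep(GENMASK(30, 18), 0x32) |
			       field_prep(GENMASK(17, 0), 0x8a0);

	/* GMU_GENERAL_8: 4K-aligned log IOVA plus a (size / 4K - 1) page count;
	 * the IOVA and size used here are purely illustrative.
	 */
	uint32_t log_iova = 0x60000000, log_size = 16 * 4096;
	uint32_t general_8 = (log_iova & GENMASK(31, 12)) |
			     ((log_size / 4096 - 1) & GENMASK(7, 0));

	printf("fence_range=0x%08x general_8=0x%08x\n", fence_range, general_8);
	return 0;
}
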
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 236f81a43c..592b296aab 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -8,6 +8,7 @@
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
+#include <linux/soc/qcom/qcom_aoss.h>
#include "msm_drv.h"
#include "a6xx_hfi.h"
@@ -96,6 +97,8 @@ struct a6xx_gmu {
/* For power domain callback */
struct notifier_block pd_nb;
struct completion pd_gate;
+
+ struct qmp *qmp;
};
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
index fcd9eb53ba..5b66efafc9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -360,6 +360,12 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_GMU_GENERAL_7 0x000051cc
+#define REG_A6XX_GMU_GENERAL_8 0x000051cd
+
+#define REG_A6XX_GMU_GENERAL_9 0x000051ce
+
+#define REG_A6XX_GMU_GENERAL_10 0x000051cf
+
#define REG_A6XX_GMU_ISENSE_CTRL 0x0000515d
#define REG_A6XX_GPU_CS_ENABLE_REG 0x00008920
@@ -471,6 +477,8 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00000101
+#define REG_A7XX_RSCC_SEQ_MEM_0_DRV0_A740 0x00000154
+
#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00000180
#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00000346
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 522ca7fe67..500ed2d183 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -103,6 +103,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
struct msm_ringbuffer *ring, struct msm_file_private *ctx)
{
bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
phys_addr_t ttbr;
u32 asid;
u64 memptr = rbmemptr(ring, ttbr0);
@@ -114,9 +115,11 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
return;
if (!sysprof) {
- /* Turn off protected mode to write to special registers */
- OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
- OUT_RING(ring, 0);
+ if (!adreno_is_a7xx(adreno_gpu)) {
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+ }
OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
OUT_RING(ring, 1);
@@ -142,6 +145,16 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));
/*
+ * Sync both threads after switching pagetables and enable BR only
+ * to make sure BV doesn't race ahead while BR is still switching
+ * pagetables.
+ */
+ if (adreno_is_a7xx(&a6xx_gpu->base)) {
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
+ }
+
+ /*
* And finally, trigger a uche flush to be sure there isn't anything
* lingering in that part of the GPU
*/
@@ -163,9 +176,11 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1));
OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));
- /* Re-enable protected mode: */
- OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
- OUT_RING(ring, 1);
+ if (!adreno_is_a7xx(adreno_gpu)) {
+ /* Re-enable protected mode: */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+ }
}
}
@@ -252,6 +267,133 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
a6xx_flush(gpu, ring);
}
+static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i, ibs = 0;
+
+ /*
+ * Toggle concurrent binning for pagetable switch and set the thread to
+ * BR since only it can execute the pagetable switch packets.
+ */
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
+
+ a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
+
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
+ rbmemptr_stats(ring, index, cpcycles_start));
+ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
+ rbmemptr_stats(ring, index, alwayson_start));
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_SET_THREAD_BOTH);
+
+ OUT_PKT7(ring, CP_SET_MARKER, 1);
+ OUT_RING(ring, 0x101); /* IFPC disable */
+
+ OUT_PKT7(ring, CP_SET_MARKER, 1);
+ OUT_RING(ring, 0x00d); /* IB1LIST start */
+
+ /* Submit the commands */
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
+ break;
+ }
+
+ /*
+ * Periodically update shadow-wptr if needed, so that we
+ * can see partial progress of submits with large # of
+ * cmds.. otherwise we could needlessly stall waiting for
+ * ringbuffer state, simply due to looking at a shadow
+ * rptr value that has not been updated
+ */
+ if ((ibs % 32) == 0)
+ update_shadow_rptr(gpu, ring);
+ }
+
+ OUT_PKT7(ring, CP_SET_MARKER, 1);
+ OUT_RING(ring, 0x00e); /* IB1LIST end */
+
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
+ rbmemptr_stats(ring, index, cpcycles_end));
+ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
+ rbmemptr_stats(ring, index, alwayson_end));
+
+ /* Write the fence to the scratch register */
+ OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->seqno);
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_SET_THREAD_BR);
+
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, CCU_INVALIDATE_DEPTH);
+
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, CCU_INVALIDATE_COLOR);
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_SET_THREAD_BV);
+
+ /*
+ * Make sure the timestamp is committed once BV pipe is
+ * completely done with this submission.
+ */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CACHE_CLEAN | BIT(27));
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, bv_fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence)));
+ OUT_RING(ring, submit->seqno);
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_SET_THREAD_BR);
+
+ /*
+ * This makes sure that BR doesn't race ahead and commit
+ * timestamp to memstore while BV is still processing
+ * this submission.
+ */
+ OUT_PKT7(ring, CP_WAIT_TIMESTAMP, 4);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, bv_fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence)));
+ OUT_RING(ring, submit->seqno);
+
+ /* write the ringbuffer timestamp */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CACHE_CLEAN | CP_EVENT_WRITE_0_IRQ | BIT(27));
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, submit->seqno);
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_SET_THREAD_BOTH);
+
+ OUT_PKT7(ring, CP_SET_MARKER, 1);
+ OUT_RING(ring, 0x100); /* IFPC enable */
+
+ trace_msm_gpu_submit_flush(submit,
+ gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));
+
+ a6xx_flush(gpu, ring);
+}
+
const struct adreno_reglist a612_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
@@ -695,6 +837,121 @@ const struct adreno_reglist a690_hwcg[] = {
{}
};
+const struct adreno_reglist a730_hwcg[] = {
+ { REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf },
+ { REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222220 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00222222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777 },
+ { REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777 },
+ { REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777 },
+ { REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111 },
+ { REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111 },
+ { REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111 },
+ { REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220 },
+ { REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x44000f00 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00555555 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011 },
+ { REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00440044 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222 },
+ { REG_A7XX_RBBM_CLOCK_MODE2_GRAS, 0x00000222 },
+ { REG_A7XX_RBBM_CLOCK_MODE_BV_GRAS, 0x00222222 },
+ { REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x02222223 },
+ { REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222 },
+ { REG_A7XX_RBBM_CLOCK_MODE_BV_GPC, 0x00222222 },
+ { REG_A7XX_RBBM_CLOCK_MODE_BV_VFD, 0x00002222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004 },
+ { REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222 },
+ { REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002 },
+ { REG_A7XX_RBBM_CLOCK_MODE_BV_LRZ, 0x55555552 },
+ { REG_A7XX_RBBM_CLOCK_MODE_CP, 0x00000223 },
+ { REG_A6XX_RBBM_CLOCK_CNTL, 0x8aa8aa82 },
+ { REG_A6XX_RBBM_ISDB_CNT, 0x00000182 },
+ { REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000 },
+ { REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111 },
+ { REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555 },
+ {},
+};
+
+const struct adreno_reglist a740_hwcg[] = {
+ { REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x22022222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x003cf3cf },
+ { REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222220 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00222222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777 },
+ { REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777 },
+ { REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777 },
+ { REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111 },
+ { REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111 },
+ { REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111 },
+ { REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x00222222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000444 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220 },
+ { REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x44000f00 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022 },
+ { REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00555555 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011 },
+ { REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00440044 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222 },
+ { REG_A7XX_RBBM_CLOCK_MODE2_GRAS, 0x00000222 },
+ { REG_A7XX_RBBM_CLOCK_MODE_BV_GRAS, 0x00222222 },
+ { REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x02222223 },
+ { REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00222222 },
+ { REG_A7XX_RBBM_CLOCK_MODE_BV_GPC, 0x00222222 },
+ { REG_A7XX_RBBM_CLOCK_MODE_BV_VFD, 0x00002222 },
+ { REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004 },
+ { REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000 },
+ { REG_A7XX_RBBM_CLOCK_MODE_BV_LRZ, 0x55555552 },
+ { REG_A7XX_RBBM_CLOCK_HYST2_VFD, 0x00000000 },
+ { REG_A7XX_RBBM_CLOCK_MODE_CP, 0x00000222 },
+ { REG_A6XX_RBBM_CLOCK_CNTL, 0x8aa8aa82 },
+ { REG_A6XX_RBBM_ISDB_CNT, 0x00000182 },
+ { REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000 },
+ { REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000 },
+ { REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222 },
+ { REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111 },
+ { REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555 },
+ {},
+};
+
static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -702,7 +959,7 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
const struct adreno_reglist *reg;
unsigned int i;
- u32 val, clock_cntl_on;
+ u32 val, clock_cntl_on, cgc_mode;
if (!adreno_gpu->info->hwcg)
return;
@@ -714,6 +971,17 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
else
clock_cntl_on = 0x8aa8aa82;
+ if (adreno_is_a7xx(adreno_gpu)) {
+ cgc_mode = adreno_is_a740_family(adreno_gpu) ? 0x20222 : 0x20000;
+
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
+ state ? cgc_mode : 0);
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
+ state ? 0x10111 : 0);
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
+ state ? 0x5555 : 0);
+ }
+
val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
/* Don't re-program the registers if they are already correct */
@@ -721,14 +989,14 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
return;
/* Disable SP clock before programming HWCG registers */
- if (!adreno_is_a610(adreno_gpu))
+ if (!adreno_is_a610(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
gpu_write(gpu, reg->offset, state ? reg->value : 0);
/* Enable SP clock */
- if (!adreno_is_a610(adreno_gpu))
+ if (!adreno_is_a610(adreno_gpu) && !adreno_is_a7xx(adreno_gpu))
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
@@ -897,6 +1165,59 @@ static const u32 a690_protect[] = {
A6XX_PROTECT_NORDWR(0x11c00, 0x00000), /*note: infinite range */
};
+static const u32 a730_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x0058),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_RDONLY(0x005fb, 0x009d),
+ A6XX_PROTECT_NORDWR(0x00699, 0x01e9),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ /* 0x008d0-0x008dd are unprotected on purpose for tools like perfetto */
+ A6XX_PROTECT_RDONLY(0x008de, 0x0154),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x00b2),
+ A6XX_PROTECT_NORDWR(0x00a41, 0x01be),
+ A6XX_PROTECT_NORDWR(0x00df0, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e01, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00e07, 0x0008),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x08e80, 0x0280),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e40, 0x0000),
+ A6XX_PROTECT_NORDWR(0x09e64, 0x000d),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x000f),
+ A6XX_PROTECT_NORDWR(0x0ae66, 0x0003),
+ A6XX_PROTECT_NORDWR(0x0ae6f, 0x0003),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0003),
+ A6XX_PROTECT_NORDWR(0x0ec00, 0x0fff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x18400, 0x0053),
+ A6XX_PROTECT_RDONLY(0x18454, 0x0004),
+ A6XX_PROTECT_NORDWR(0x18459, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1a459, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1c459, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+ A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+ A6XX_PROTECT_NORDWR(0x1f860, 0x0000),
+ A6XX_PROTECT_NORDWR(0x1f878, 0x002a),
+ /* CP_PROTECT_REG[44, 46] are left untouched! */
+ 0,
+ 0,
+ 0,
+ A6XX_PROTECT_NORDWR(0x1f8c0, 0x00000),
+};
+
static void a6xx_set_cp_protect(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -918,6 +1239,11 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
count = ARRAY_SIZE(a660_protect);
count_max = 48;
BUILD_BUG_ON(ARRAY_SIZE(a660_protect) > 48);
+ } else if (adreno_is_a730(adreno_gpu) || adreno_is_a740(adreno_gpu)) {
+ regs = a730_protect;
+ count = ARRAY_SIZE(a730_protect);
+ count_max = 48;
+ BUILD_BUG_ON(ARRAY_SIZE(a730_protect) > 48);
} else {
regs = a6xx_protect;
count = ARRAY_SIZE(a6xx_protect);
@@ -984,7 +1310,11 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
if (adreno_is_a640_family(adreno_gpu))
amsbc = 1;
- if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) {
+ if (adreno_is_a650(adreno_gpu) ||
+ adreno_is_a660(adreno_gpu) ||
+ adreno_is_a690(adreno_gpu) ||
+ adreno_is_a730(adreno_gpu) ||
+ adreno_is_a740_family(adreno_gpu)) {
/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
hbb_lo = 3;
amsbc = 1;
@@ -992,13 +1322,6 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
uavflagprd_inv = 2;
}
- if (adreno_is_a690(adreno_gpu)) {
- hbb_lo = 2;
- amsbc = 1;
- rgb565_predicator = 1;
- uavflagprd_inv = 2;
- }
-
if (adreno_is_7c3(adreno_gpu)) {
hbb_lo = 1;
amsbc = 1;
@@ -1017,6 +1340,10 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
uavflagprd_inv << 4 | min_acc_len << 3 |
hbb_lo << 1 | ubwc_mode);
+ if (adreno_is_a7xx(adreno_gpu))
+ gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
+ FIELD_PREP(GENMASK(8, 5), hbb_lo));
+
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, min_acc_len << 23 | hbb_lo << 21);
}
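
Aside (illustrative snippet, not part of the patch): the same highest-bank-bit value is placed at a different bit offset in each of the UBWC-related writes visible in a6xx_set_ubwc_config() above; a tiny standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hbb_lo = 3;	/* value selected above for a650/a660/a690/a730/a740 */

	/* bit 1 in the NC_MODE_CNTL write, bits [8:5] in GRAS_NC_MODE_CNTL
	 * (A7xx only, via FIELD_PREP(GENMASK(8, 5), hbb_lo)), and bit 21 in
	 * UCHE_MODE_CNTL.
	 */
	printf("nc_mode bit 1    : 0x%x\n", hbb_lo << 1);
	printf("gras    bits 8:5 : 0x%x\n", (hbb_lo << 5) & 0x1e0);
	printf("uche    bit 21   : 0x%x\n", hbb_lo << 21);
	return 0;
}
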
@@ -1049,6 +1376,55 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}
+static int a7xx_cp_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+ u32 mask;
+
+ /* Disable concurrent binning before sending CP init */
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, BIT(27));
+
+ OUT_PKT7(ring, CP_ME_INIT, 7);
+
+ /* Use multiple HW contexts */
+ mask = BIT(0);
+
+ /* Enable error detection */
+ mask |= BIT(1);
+
+ /* Set default reset state */
+ mask |= BIT(3);
+
+ /* Disable save/restore of performance counters across preemption */
+ mask |= BIT(6);
+
+ /* Enable the register init list with the spinlock */
+ mask |= BIT(8);
+
+ OUT_RING(ring, mask);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Operation mode mask */
+ OUT_RING(ring, 0x00000002);
+
+ /* *Don't* send a power up reg list for concurrent binning (TODO) */
+ /* Lo address */
+ OUT_RING(ring, 0x00000000);
+ /* Hi address */
+ OUT_RING(ring, 0x00000000);
+ /* BIT(31) set => read the regs from the list */
+ OUT_RING(ring, 0x00000000);
+
+ a6xx_flush(gpu, ring);
+ return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
/*
* Check that the microcode version is new enough to include several key
* security fixes. Return true if the ucode is safe.
@@ -1065,6 +1441,10 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
if (IS_ERR(buf))
return false;
+ /* A7xx is safe! */
+ if (adreno_is_a7xx(adreno_gpu))
+ return true;
+
/*
* Targets up to a640 (a618, a630 and a640) need to check for a
* microcode version that is patched to support the whereami opcode or
@@ -1181,22 +1561,46 @@ static int a6xx_zap_shader_init(struct msm_gpu *gpu)
}
#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
- A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
- A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
- A6XX_RBBM_INT_0_MASK_CP_IB2 | \
- A6XX_RBBM_INT_0_MASK_CP_IB1 | \
- A6XX_RBBM_INT_0_MASK_CP_RB | \
- A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
- A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
- A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
- A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
- A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A6XX_RBBM_INT_0_MASK_CP_IB2 | \
+ A6XX_RBBM_INT_0_MASK_CP_IB1 | \
+ A6XX_RBBM_INT_0_MASK_CP_RB | \
+ A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
+ A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
+
+#define A7XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR | \
+ A6XX_RBBM_INT_0_MASK_CP_SW | \
+ A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPT | \
+ A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS | \
+ A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
+ A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR | \
+ A6XX_RBBM_INT_0_MASK_TSBWRITEERROR)
+
+#define A7XX_APRIV_MASK (A6XX_CP_APRIV_CNTL_ICACHE | \
+ A6XX_CP_APRIV_CNTL_RBFETCH | \
+ A6XX_CP_APRIV_CNTL_RBPRIVLEVEL | \
+ A6XX_CP_APRIV_CNTL_RBRPWB)
+
+#define A7XX_BR_APRIVMASK (A7XX_APRIV_MASK | \
+ A6XX_CP_APRIV_CNTL_CDREAD | \
+ A6XX_CP_APRIV_CNTL_CDWRITE)
static int hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u64 gmem_range_min;
int ret;
if (!adreno_has_gmu_wrapper(adreno_gpu)) {
@@ -1219,6 +1623,10 @@ static int hw_init(struct msm_gpu *gpu)
mb();
}
+ /* Some GPUs are stubborn and take their sweet time to unhalt GBIF! */
+ if (adreno_is_a7xx(adreno_gpu) && a6xx_has_gbif(adreno_gpu))
+ spin_until(!gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK));
+
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
if (adreno_is_a619_holi(adreno_gpu))
@@ -1232,19 +1640,21 @@ static int hw_init(struct msm_gpu *gpu)
gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000);
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
- /* Turn on 64 bit addressing for all blocks */
- gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
- gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+ if (!adreno_is_a7xx(adreno_gpu)) {
+ /* Turn on 64 bit addressing for all blocks */
+ gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+ }
/* enable hardware clockgating */
a6xx_set_hwcg(gpu, true);
@@ -1252,12 +1662,14 @@ static int hw_init(struct msm_gpu *gpu)
/* VBIF/GBIF start*/
if (adreno_is_a610(adreno_gpu) ||
adreno_is_a640_family(adreno_gpu) ||
- adreno_is_a650_family(adreno_gpu)) {
+ adreno_is_a650_family(adreno_gpu) ||
+ adreno_is_a7xx(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
- gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL,
+ adreno_is_a7xx(adreno_gpu) ? 0x2120212 : 0x3);
} else {
gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
}
@@ -1265,24 +1677,39 @@ static int hw_init(struct msm_gpu *gpu)
if (adreno_is_a630(adreno_gpu))
gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+ if (adreno_is_a7xx(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_UCHE_GBIF_GX_CONFIG, 0x10240e0);
+
/* Make all blocks contribute to the GPU BUSY perf counter */
gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
/* Disable L2 bypass in the UCHE */
- gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
- gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
- gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+ if (adreno_is_a7xx(adreno_gpu)) {
+ gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+ } else {
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
+ gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+ }
+
+ if (!(adreno_is_a650_family(adreno_gpu) ||
+ adreno_is_a730(adreno_gpu))) {
+ gmem_range_min = adreno_is_a740_family(adreno_gpu) ? SZ_16M : SZ_1M;
- if (!adreno_is_a650_family(adreno_gpu)) {
/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
- gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, 0x00100000);
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, gmem_range_min);
gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX,
- 0x00100000 + adreno_gpu->info->gmem - 1);
+ gmem_range_min + adreno_gpu->info->gmem - 1);
}
- gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
- gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
+ if (adreno_is_a7xx(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, BIT(23));
+ else {
+ gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
+ gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
+ }
if (adreno_is_a640_family(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
@@ -1290,7 +1717,7 @@ static int hw_init(struct msm_gpu *gpu)
} else if (adreno_is_a610(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
- } else {
+ } else if (!adreno_is_a7xx(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
}
@@ -1302,13 +1729,15 @@ static int hw_init(struct msm_gpu *gpu)
if (adreno_is_a610(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48);
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47);
- } else
+ } else if (!adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
/* Setting the primFifo thresholds default values,
* and vccCacheSkipDis=1 bit (0x200) for A640 and newer
*/
- if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu) || adreno_is_a690(adreno_gpu))
+ if (adreno_is_a690(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00800200);
+ else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
@@ -1318,7 +1747,7 @@ static int hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00018000);
else if (adreno_is_a610(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00080000);
- else
+ else if (!adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
/* Set the AHB default slave response to "ERROR" */
@@ -1327,13 +1756,24 @@ static int hw_init(struct msm_gpu *gpu)
/* Turn on performance counters */
gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
+ if (adreno_is_a7xx(adreno_gpu)) {
+ /* Turn on the IFPC counter (countable 4 on XOCLK4) */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1,
+ FIELD_PREP(GENMASK(7, 0), 0x4));
+ }
+
/* Select CP0 to always count cycles */
gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT);
a6xx_set_ubwc_config(gpu);
/* Enable fault detection */
- if (adreno_is_a619(adreno_gpu))
+ if (adreno_is_a730(adreno_gpu) ||
+ adreno_is_a740_family(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0xcfffff);
+ else if (adreno_is_a690(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x4fffff);
+ else if (adreno_is_a619(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff);
else if (adreno_is_a610(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3ffff);
@@ -1366,22 +1806,43 @@ static int hw_init(struct msm_gpu *gpu)
a6xx_set_cp_protect(gpu);
if (adreno_is_a660_family(adreno_gpu)) {
- gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
+ if (adreno_is_a690(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x00028801);
+ else
+ gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
}
+ if (adreno_is_a690(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x90);
/* Set dualQ + disable afull for A660 GPU */
- if (adreno_is_a660(adreno_gpu))
+ else if (adreno_is_a660(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
+ else if (adreno_is_a7xx(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG,
+ FIELD_PREP(GENMASK(19, 16), 6) |
+ FIELD_PREP(GENMASK(15, 12), 6) |
+ FIELD_PREP(GENMASK(11, 8), 9) |
+ BIT(3) | BIT(2) |
+ FIELD_PREP(GENMASK(1, 0), 2));
/* Enable expanded apriv for targets that support it */
if (gpu->hw_apriv) {
- gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
- (1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
+ if (adreno_is_a7xx(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
+ A7XX_BR_APRIVMASK);
+ gpu_write(gpu, REG_A7XX_CP_BV_APRIV_CNTL,
+ A7XX_APRIV_MASK);
+ gpu_write(gpu, REG_A7XX_CP_LPAC_APRIV_CNTL,
+ A7XX_APRIV_MASK);
+ } else
+ gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
+ BIT(6) | BIT(5) | BIT(3) | BIT(2) | BIT(1));
}
/* Enable interrupts */
- gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
+ gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK,
+ adreno_is_a7xx(adreno_gpu) ? A7XX_INT_MASK : A6XX_INT_MASK);
ret = adreno_hw_init(gpu);
if (ret)
@@ -1408,6 +1869,12 @@ static int hw_init(struct msm_gpu *gpu)
shadowptr(a6xx_gpu, gpu->rb[0]));
}
+ /* ..which means "always" on A7xx, also for BV shadow */
+ if (adreno_is_a7xx(adreno_gpu)) {
+ gpu_write64(gpu, REG_A7XX_CP_BV_RB_RPTR_ADDR,
+ rbmemptr(gpu->rb[0], bv_fence));
+ }
+
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
@@ -1416,7 +1883,7 @@ static int hw_init(struct msm_gpu *gpu)
/* Enable the SQE to start the CP engine */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
- ret = a6xx_cp_init(gpu);
+ ret = adreno_is_a7xx(adreno_gpu) ? a7xx_cp_init(gpu) : a6xx_cp_init(gpu);
if (ret)
goto out;
@@ -1653,7 +2120,7 @@ static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
(val & 0x3ffff), val);
}
- if (status & A6XX_CP_INT_CP_AHB_ERROR)
+ if (status & A6XX_CP_INT_CP_AHB_ERROR && !adreno_is_a7xx(to_adreno_gpu(gpu)))
dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");
if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
@@ -1803,6 +2270,35 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
}
+static void a7xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (IS_ERR(a6xx_gpu->llc_mmio))
+ return;
+
+ if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
+ u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+
+ gpu_scid &= GENMASK(4, 0);
+
+ gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL1,
+ FIELD_PREP(GENMASK(29, 25), gpu_scid) |
+ FIELD_PREP(GENMASK(24, 20), gpu_scid) |
+ FIELD_PREP(GENMASK(19, 15), gpu_scid) |
+ FIELD_PREP(GENMASK(14, 10), gpu_scid) |
+ FIELD_PREP(GENMASK(9, 5), gpu_scid) |
+ FIELD_PREP(GENMASK(4, 0), gpu_scid));
+
+ gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL0,
+ FIELD_PREP(GENMASK(14, 10), gpu_scid) |
+ BIT(8));
+ }
+
+ llcc_slice_activate(a6xx_gpu->htw_llc_slice);
+}
+
static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
{
/* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
@@ -1814,7 +2310,7 @@ static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
}
static void a6xx_llc_slices_init(struct platform_device *pdev,
- struct a6xx_gpu *a6xx_gpu)
+ struct a6xx_gpu *a6xx_gpu, bool is_a7xx)
{
struct device_node *phandle;
@@ -1823,18 +2319,18 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
return;
/*
- * There is a different programming path for targets with an mmu500
- * attached, so detect if that is the case
+ * There is a different programming path for A6xx targets with an
+ * mmu500 attached, so detect if that is the case
*/
phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0);
a6xx_gpu->have_mmu500 = (phandle &&
of_device_is_compatible(phandle, "arm,mmu-500"));
of_node_put(phandle);
- if (a6xx_gpu->have_mmu500)
- a6xx_gpu->llc_mmio = NULL;
- else
+ if (is_a7xx || !a6xx_gpu->have_mmu500)
a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem");
+ else
+ a6xx_gpu->llc_mmio = NULL;
a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
@@ -1920,7 +2416,7 @@ static int a6xx_gmu_pm_resume(struct msm_gpu *gpu)
msm_devfreq_resume(gpu);
- a6xx_llc_activate(a6xx_gpu);
+ adreno_is_a7xx(adreno_gpu) ? a7xx_llc_activate(a6xx_gpu) : a6xx_llc_activate(a6xx_gpu);
return ret;
}
@@ -2307,6 +2803,37 @@ static const struct adreno_gpu_funcs funcs_gmuwrapper = {
.get_timestamp = a6xx_get_timestamp,
};
+static const struct adreno_gpu_funcs funcs_a7xx = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a6xx_hw_init,
+ .ucode_load = a6xx_ucode_load,
+ .pm_suspend = a6xx_gmu_pm_suspend,
+ .pm_resume = a6xx_gmu_pm_resume,
+ .recover = a6xx_recover,
+ .submit = a7xx_submit,
+ .active_ring = a6xx_active_ring,
+ .irq = a6xx_irq,
+ .destroy = a6xx_destroy,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .show = a6xx_show,
+#endif
+ .gpu_busy = a6xx_gpu_busy,
+ .gpu_get_freq = a6xx_gmu_get_freq,
+ .gpu_set_freq = a6xx_gpu_set_freq,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .gpu_state_get = a6xx_gpu_state_get,
+ .gpu_state_put = a6xx_gpu_state_put,
+#endif
+ .create_address_space = a6xx_create_address_space,
+ .create_private_address_space = a6xx_create_private_address_space,
+ .get_rptr = a6xx_get_rptr,
+ .progress = a6xx_progress,
+ },
+ .get_timestamp = a6xx_gmu_get_timestamp,
+};
+
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -2316,6 +2843,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
struct a6xx_gpu *a6xx_gpu;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
+ bool is_a7xx;
int ret;
a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
@@ -2339,7 +2867,11 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu->base.hw_apriv =
!!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV);
- a6xx_llc_slices_init(pdev, a6xx_gpu);
+ /* gpu->info only gets assigned in adreno_gpu_init() */
+ is_a7xx = config->info->family == ADRENO_7XX_GEN1 ||
+ config->info->family == ADRENO_7XX_GEN2;
+
+ a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx);
ret = a6xx_set_supported_hw(&pdev->dev, config->info);
if (ret) {
@@ -2347,7 +2879,9 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
return ERR_PTR(ret);
}
- if (adreno_has_gmu_wrapper(adreno_gpu))
+ if (is_a7xx)
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1);
+ else if (adreno_has_gmu_wrapper(adreno_gpu))
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
else
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
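
Aside (illustrative snippet, not part of the patch; helper name and slice id are ours): the GBIF_SCACHE_CNTL1 value built in a7xx_llc_activate() above is simply the 5-bit LLC slice id repeated across six adjacent fields:

#include <stdint.h>
#include <stdio.h>

/* Replicate a 5-bit slice id into bits [29:25], [24:20], ... [4:0],
 * mirroring the FIELD_PREP() chain in a7xx_llc_activate().
 */
static uint32_t a7xx_scache_cntl1(uint32_t gpu_scid)
{
	uint32_t val = 0;
	int i;

	gpu_scid &= 0x1f;			/* GENMASK(4, 0) */
	for (i = 0; i < 6; i++)
		val |= gpu_scid << (i * 5);	/* six contiguous 5-bit fields */
	return val;
}

int main(void)
{
	printf("SCACHE_CNTL1 = 0x%08x\n", a7xx_scache_cntl1(0x12)); /* illustrative scid */
	return 0;
}
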
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 4e5d650578..91a564a24d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -882,12 +882,13 @@ static void a6xx_snapshot_gmu_hfi_history(struct msm_gpu *gpu,
}
}
+#define A6XX_REGLIST_SIZE 1
#define A6XX_GBIF_REGLIST_SIZE 1
static void a6xx_get_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
struct a6xx_crashdumper *dumper)
{
- int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
+ int i, count = A6XX_REGLIST_SIZE +
ARRAY_SIZE(a6xx_reglist) +
ARRAY_SIZE(a6xx_hlsq_reglist) + A6XX_GBIF_REGLIST_SIZE;
int index = 0;
@@ -901,12 +902,20 @@ static void a6xx_get_registers(struct msm_gpu *gpu,
a6xx_state->nr_registers = count;
- for (i = 0; i < ARRAY_SIZE(a6xx_ahb_reglist); i++)
+ if (adreno_is_a7xx(adreno_gpu))
a6xx_get_ahb_gpu_registers(gpu,
- a6xx_state, &a6xx_ahb_reglist[i],
+ a6xx_state, &a7xx_ahb_reglist,
+ &a6xx_state->registers[index++]);
+ else
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_ahb_reglist,
&a6xx_state->registers[index++]);
- if (a6xx_has_gbif(adreno_gpu))
+ if (adreno_is_a7xx(adreno_gpu))
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a7xx_gbif_reglist,
+ &a6xx_state->registers[index++]);
+ else if (a6xx_has_gbif(adreno_gpu))
a6xx_get_ahb_gpu_registers(gpu,
a6xx_state, &a6xx_gbif_reglist,
&a6xx_state->registers[index++]);
@@ -948,6 +957,18 @@ static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu)
return gpu_read(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2) >> 14;
}
+static u32 a7xx_get_cp_roq_size(struct msm_gpu *gpu)
+{
+ /*
+ * The value at CP_ROQ_THRESHOLDS_2[20:31] is in 4dword units.
+ * That register however is not directly accessible from APSS on A7xx.
+ * Program the SQE_UCODE_DBG_ADDR with offset=0x70d3 and read the value.
+ */
+ gpu_write(gpu, REG_A6XX_CP_SQE_UCODE_DBG_ADDR, 0x70d3);
+
+ return 4 * (gpu_read(gpu, REG_A6XX_CP_SQE_UCODE_DBG_DATA) >> 20);
+}
+
/* Read a block of data from an indexed register pair */
static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
@@ -1019,8 +1040,40 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
/* Restore the size in the hardware */
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
+}
- a6xx_state->nr_indexed_regs = count;
+static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ int i, indexed_count, mempool_count;
+
+ indexed_count = ARRAY_SIZE(a7xx_indexed_reglist);
+ mempool_count = ARRAY_SIZE(a7xx_cp_bv_mempool_indexed);
+
+ a6xx_state->indexed_regs = state_kcalloc(a6xx_state,
+ indexed_count + mempool_count,
+ sizeof(*a6xx_state->indexed_regs));
+ if (!a6xx_state->indexed_regs)
+ return;
+
+ a6xx_state->nr_indexed_regs = indexed_count + mempool_count;
+
+ /* First read the common regs */
+ for (i = 0; i < indexed_count; i++)
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a7xx_indexed_reglist[i],
+ &a6xx_state->indexed_regs[i]);
+
+ gpu_rmw(gpu, REG_A6XX_CP_CHICKEN_DBG, 0, BIT(2));
+ gpu_rmw(gpu, REG_A7XX_CP_BV_CHICKEN_DBG, 0, BIT(2));
+
+ /* Get the contents of the CP_BV mempool */
+ for (i = 0; i < mempool_count; i++)
+ a6xx_get_indexed_regs(gpu, a6xx_state, a7xx_cp_bv_mempool_indexed,
+ &a6xx_state->indexed_regs[indexed_count - 1 + i]);
+
+ gpu_rmw(gpu, REG_A6XX_CP_CHICKEN_DBG, BIT(2), 0);
+ gpu_rmw(gpu, REG_A7XX_CP_BV_CHICKEN_DBG, BIT(2), 0);
+ return;
}
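
Note: the CP_BV mempool dump above is bracketed by setting and clearing bit 2 of the BR and BV CHICKEN_DBG registers. A hedged sketch of that generic "expose debug view, snapshot, restore" bracket, using the gpu_rmw() helper the patch itself relies on:

	/* Illustrative sketch of the "enable debug view, dump, restore" bracket. */
	static void snapshot_with_dbg_bit(struct msm_gpu *gpu, u32 reg, u32 bit,
					  void (*dump)(struct msm_gpu *gpu))
	{
		gpu_rmw(gpu, reg, 0, bit);	/* expose the debug view */
		dump(gpu);			/* take the snapshot */
		gpu_rmw(gpu, reg, bit, 0);	/* restore normal operation */
	}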
struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
@@ -1056,6 +1109,12 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
return &a6xx_state->base;
/* Get the banks of indexed registers */
+ if (adreno_is_a7xx(adreno_gpu)) {
+ a7xx_get_indexed_registers(gpu, a6xx_state);
+ /* Further codeflow is untested on A7xx. */
+ return &a6xx_state->base;
+ }
+
a6xx_get_indexed_registers(gpu, a6xx_state);
/*
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index e788ed72eb..9560fc1b85 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -328,9 +328,8 @@ static const u32 a6xx_gbif_registers[] = {
0x3C00, 0X3C0B, 0X3C40, 0X3C47, 0X3CC0, 0X3CD1, 0xE3A, 0xE3A,
};
-static const struct a6xx_registers a6xx_ahb_reglist[] = {
- REGS(a6xx_ahb_registers, 0, 0),
-};
+static const struct a6xx_registers a6xx_ahb_reglist =
+ REGS(a6xx_ahb_registers, 0, 0);
static const struct a6xx_registers a6xx_vbif_reglist =
REGS(a6xx_vbif_registers, 0, 0);
@@ -338,6 +337,27 @@ static const struct a6xx_registers a6xx_vbif_reglist =
static const struct a6xx_registers a6xx_gbif_reglist =
REGS(a6xx_gbif_registers, 0, 0);
+static const u32 a7xx_ahb_registers[] = {
+ /* RBBM_STATUS */
+ 0x210, 0x210,
+ /* RBBM_STATUS2-3 */
+ 0x212, 0x213,
+};
+
+static const u32 a7xx_gbif_registers[] = {
+ 0x3c00, 0x3c0b,
+ 0x3c40, 0x3c42,
+ 0x3c45, 0x3c47,
+ 0x3c49, 0x3c4a,
+ 0x3cc0, 0x3cd1,
+};
+
+static const struct a6xx_registers a7xx_ahb_reglist =
+ REGS(a7xx_ahb_registers, 0, 0);
+
+static const struct a6xx_registers a7xx_gbif_reglist =
+ REGS(a7xx_gbif_registers, 0, 0);
+
static const u32 a6xx_gmu_gx_registers[] = {
/* GMU GX */
0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
@@ -384,14 +404,17 @@ static const struct a6xx_registers a6xx_gmu_reglist[] = {
};
static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu);
+static u32 a7xx_get_cp_roq_size(struct msm_gpu *gpu);
-static struct a6xx_indexed_registers {
+struct a6xx_indexed_registers {
const char *name;
u32 addr;
u32 data;
u32 count;
u32 (*count_fn)(struct msm_gpu *gpu);
-} a6xx_indexed_reglist[] = {
+};
+
+static struct a6xx_indexed_registers a6xx_indexed_reglist[] = {
{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
@@ -402,11 +425,43 @@ static struct a6xx_indexed_registers {
REG_A6XX_CP_ROQ_DBG_DATA, 0, a6xx_get_cp_roq_size},
};
+static struct a6xx_indexed_registers a7xx_indexed_reglist[] = {
+ { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
+ REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
+ { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
+ REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
+ { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
+ { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
+ REG_A7XX_CP_BV_SQE_STAT_DATA, 0x33, NULL },
+ { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
+ REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x100, NULL },
+ { "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
+ REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x8000, NULL },
+ { "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR,
+ REG_A7XX_CP_SQE_AC_STAT_DATA, 0x33, NULL },
+ { "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
+ REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x100, NULL },
+ { "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
+ REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x8000, NULL },
+ { "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
+ REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x40, NULL },
+ { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ REG_A6XX_CP_ROQ_DBG_DATA, 0, a7xx_get_cp_roq_size },
+};
+
static struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
"CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, NULL,
};
+static struct a6xx_indexed_registers a7xx_cp_bv_mempool_indexed[] = {
+ { "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2100, NULL },
+ { "CP_BV_MEMPOOL", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR,
+ REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2100, NULL },
+};
+
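
Note: each entry in these tables names one ADDR/DATA register pair plus either a fixed dword count or a count_fn callback (as with CP_ROQ above, whose size is probed at runtime). A sketch of how such an entry is typically consumed when building a snapshot, under the assumption that the DATA register auto-increments once ADDR has been programmed, which is what the existing a6xx dump code relies on:

	/* Illustrative only: stream 'count' dwords from an indexed ADDR/DATA pair. */
	static void dump_indexed_entry(struct msm_gpu *gpu,
				       const struct a6xx_indexed_registers *regs,
				       u32 *out)
	{
		u32 i, count = regs->count_fn ? regs->count_fn(gpu) : regs->count;

		gpu_write(gpu, regs->addr, 0);
		for (i = 0; i < count; i++)
			out[i] = gpu_read(gpu, regs->data);
	}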
#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
static const struct a6xx_debugbus_block {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 25b235b49e..cdb3f6e74d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -5,6 +5,8 @@
#include <linux/circ_buf.h>
#include <linux/list.h>
+#include <soc/qcom/cmd-db.h>
+
#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
#include "a6xx_gpu.h"
@@ -506,6 +508,88 @@ static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
msg->cnoc_cmds_data[0][0] = 0x40000000;
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
+
+static void a730_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ msg->bw_level_num = 12;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x7;
+
+ msg->ddr_cmds_addrs[0] = cmd_db_read_addr("SH0");
+ msg->ddr_cmds_addrs[1] = cmd_db_read_addr("MC0");
+ msg->ddr_cmds_addrs[2] = cmd_db_read_addr("ACV");
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+ msg->ddr_cmds_data[1][0] = 0x600002e8;
+ msg->ddr_cmds_data[1][1] = 0x600003d0;
+ msg->ddr_cmds_data[1][2] = 0x60000008;
+ msg->ddr_cmds_data[2][0] = 0x6000068d;
+ msg->ddr_cmds_data[2][1] = 0x6000089a;
+ msg->ddr_cmds_data[2][2] = 0x60000008;
+ msg->ddr_cmds_data[3][0] = 0x600007f2;
+ msg->ddr_cmds_data[3][1] = 0x60000a6e;
+ msg->ddr_cmds_data[3][2] = 0x60000008;
+ msg->ddr_cmds_data[4][0] = 0x600009e5;
+ msg->ddr_cmds_data[4][1] = 0x60000cfd;
+ msg->ddr_cmds_data[4][2] = 0x60000008;
+ msg->ddr_cmds_data[5][0] = 0x60000b29;
+ msg->ddr_cmds_data[5][1] = 0x60000ea6;
+ msg->ddr_cmds_data[5][2] = 0x60000008;
+ msg->ddr_cmds_data[6][0] = 0x60001698;
+ msg->ddr_cmds_data[6][1] = 0x60001da8;
+ msg->ddr_cmds_data[6][2] = 0x60000008;
+ msg->ddr_cmds_data[7][0] = 0x600018d2;
+ msg->ddr_cmds_data[7][1] = 0x60002093;
+ msg->ddr_cmds_data[7][2] = 0x60000008;
+ msg->ddr_cmds_data[8][0] = 0x60001e66;
+ msg->ddr_cmds_data[8][1] = 0x600027e6;
+ msg->ddr_cmds_data[8][2] = 0x60000008;
+ msg->ddr_cmds_data[9][0] = 0x600027c2;
+ msg->ddr_cmds_data[9][1] = 0x6000342f;
+ msg->ddr_cmds_data[9][2] = 0x60000008;
+ msg->ddr_cmds_data[10][0] = 0x60002e71;
+ msg->ddr_cmds_data[10][1] = 0x60003cf5;
+ msg->ddr_cmds_data[10][2] = 0x60000008;
+ msg->ddr_cmds_data[11][0] = 0x600030ae;
+ msg->ddr_cmds_data[11][1] = 0x60003fe5;
+ msg->ddr_cmds_data[11][2] = 0x60000008;
+
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x1;
+
+ msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
+static void a740_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x7;
+
+ msg->ddr_cmds_addrs[0] = cmd_db_read_addr("SH0");
+ msg->ddr_cmds_addrs[1] = cmd_db_read_addr("MC0");
+ msg->ddr_cmds_addrs[2] = cmd_db_read_addr("ACV");
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /* TODO: add a proper dvfs table */
+
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x1;
+
+ msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
@@ -564,6 +648,10 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
a660_build_bw_table(&msg);
else if (adreno_is_a690(adreno_gpu))
a690_build_bw_table(&msg);
+ else if (adreno_is_a730(adreno_gpu))
+ a730_build_bw_table(&msg);
+ else if (adreno_is_a740_family(adreno_gpu))
+ a740_build_bw_table(&msg);
else
a6xx_build_bw_table(&msg);
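
Note: both new bandwidth tables resolve their RPMh bus clock manager addresses at runtime through cmd_db, hence the new <soc/qcom/cmd-db.h> include. cmd_db_read_addr() maps a resource name to its address and returns 0 when the resource is absent; a minimal usage sketch with the error handling the table builders above omit because the names are known to exist on these platforms:

	#include <linux/printk.h>
	#include <soc/qcom/cmd-db.h>

	/* Illustrative: look up the "ACV" resource address; 0 means not present. */
	static u32 lookup_acv_addr(void)
	{
		u32 addr = cmd_db_read_addr("ACV");

		if (!addr)
			pr_warn("ACV not found in cmd-db\n");

		return addr;
	}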
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index b7b527e21d..3ee14646ab 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -454,11 +454,13 @@ static const struct adreno_info gpulist[] = {
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
+ .zapfw = "a660_zap.mbn",
.hwcg = a660_hwcg,
.address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 117, 0 },
+ { 172, 2 }, /* Called speedbin 1 downstream, but let's not break things! */
{ 190, 1 },
),
}, {
@@ -490,6 +492,36 @@ static const struct adreno_info gpulist[] = {
.zapfw = "a690_zap.mdt",
.hwcg = a690_hwcg,
.address_space_size = SZ_16G,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x07030001),
+ .family = ADRENO_7XX_GEN1,
+ .fw = {
+ [ADRENO_FW_SQE] = "a730_sqe.fw",
+ [ADRENO_FW_GMU] = "gmu_gen70000.bin",
+ },
+ .gmem = SZ_2M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .zapfw = "a730_zap.mdt",
+ .hwcg = a730_hwcg,
+ .address_space_size = SZ_16G,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x43050a01), /* "C510v2" */
+ .family = ADRENO_7XX_GEN2,
+ .fw = {
+ [ADRENO_FW_SQE] = "a740_sqe.fw",
+ [ADRENO_FW_GMU] = "gmu_gen70200.bin",
+ },
+ .gmem = 3 * SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .zapfw = "a740_zap.mdt",
+ .hwcg = a740_hwcg,
+ .address_space_size = SZ_16G,
},
};
@@ -751,10 +783,9 @@ static int adreno_probe(struct platform_device *pdev)
return 0;
}
-static int adreno_remove(struct platform_device *pdev)
+static void adreno_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &a3xx_ops);
- return 0;
}
static void adreno_shutdown(struct platform_device *pdev)
@@ -869,7 +900,7 @@ static const struct dev_pm_ops adreno_pm_ops = {
static struct platform_driver adreno_driver = {
.probe = adreno_probe,
- .remove = adreno_remove,
+ .remove_new = adreno_remove,
.shutdown = adreno_shutdown,
.driver = {
.name = "adreno",
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 8090dde032..3fe9fd240c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -323,7 +323,11 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
*value = adreno_gpu->info->gmem;
return 0;
case MSM_PARAM_GMEM_BASE:
- *value = !adreno_is_a650_family(adreno_gpu) ? 0x100000 : 0;
+ if (adreno_is_a650_family(adreno_gpu) ||
+ adreno_is_a740_family(adreno_gpu))
+ *value = 0;
+ else
+ *value = 0x100000;
return 0;
case MSM_PARAM_CHIP_ID:
*value = adreno_gpu->chip_id;
@@ -567,6 +571,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
ring->cur = ring->start;
ring->next = ring->start;
ring->memptrs->rptr = 0;
+ ring->memptrs->bv_fence = ring->fctx->completed_fence;
/* Detect and clean up an impossible fence, ie. if GPU managed
* to scribble something invalid, we don't want that to confuse
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 49f38edf98..80b3f63121 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -46,6 +46,8 @@ enum adreno_family {
ADRENO_6XX_GEN2, /* a640 family */
ADRENO_6XX_GEN3, /* a650 family */
ADRENO_6XX_GEN4, /* a660 family */
+ ADRENO_7XX_GEN1, /* a730 family */
+ ADRENO_7XX_GEN2, /* a740 family */
};
#define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0)
@@ -75,7 +77,7 @@ struct adreno_reglist {
};
extern const struct adreno_reglist a612_hwcg[], a615_hwcg[], a630_hwcg[], a640_hwcg[], a650_hwcg[];
-extern const struct adreno_reglist a660_hwcg[], a690_hwcg[];
+extern const struct adreno_reglist a660_hwcg[], a690_hwcg[], a730_hwcg[], a740_hwcg[];
struct adreno_speedbin {
uint16_t fuse;
@@ -391,7 +393,8 @@ static inline int adreno_is_a650_family(const struct adreno_gpu *gpu)
{
if (WARN_ON_ONCE(!gpu->info))
return false;
- return gpu->info->family >= ADRENO_6XX_GEN3;
+ return gpu->info->family == ADRENO_6XX_GEN3 ||
+ gpu->info->family == ADRENO_6XX_GEN4;
}
static inline int adreno_is_a640_family(const struct adreno_gpu *gpu)
@@ -401,6 +404,31 @@ static inline int adreno_is_a640_family(const struct adreno_gpu *gpu)
return gpu->info->family == ADRENO_6XX_GEN2;
}
+static inline int adreno_is_a730(struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x07030001;
+}
+
+static inline int adreno_is_a740(struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x43050a01;
+}
+
+/* Placeholder to make future diffs smaller */
+static inline int adreno_is_a740_family(struct adreno_gpu *gpu)
+{
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+ return gpu->info->family == ADRENO_7XX_GEN2;
+}
+
+static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
+{
+ /* Update with non-fake (i.e. non-A702) Gen 7 GPUs */
+ return gpu->info->family == ADRENO_7XX_GEN1 ||
+ adreno_is_a740_family(gpu);
+}
+
u64 adreno_private_address_space_size(struct msm_gpu *gpu);
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value, uint32_t *len);
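
Note: the new family and chip helpers follow the existing adreno_is_*() conventions and are intended to be the only place chip IDs and families are compared. A hedged usage sketch (function and branches are illustrative, not taken from this patch):

	/* Illustrative: branch on the new helpers the way existing code does. */
	static void example_tune(struct adreno_gpu *adreno_gpu)
	{
		if (adreno_is_a7xx(adreno_gpu)) {
			/* A7xx-wide setup (a730 and a740 families) */
		} else if (adreno_is_a650_family(adreno_gpu)) {
			/* a650/a660-family setup */
		}
	}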