From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 20:49:45 +0200 Subject: Adding upstream version 6.1.76. Signed-off-by: Daniel Baumann --- drivers/gpu/drm/v3d/Kconfig | 13 + drivers/gpu/drm/v3d/Makefile | 20 + drivers/gpu/drm/v3d/v3d_bo.c | 235 +++++++ drivers/gpu/drm/v3d/v3d_debugfs.c | 252 +++++++ drivers/gpu/drm/v3d/v3d_drv.c | 328 ++++++++++ drivers/gpu/drm/v3d/v3d_drv.h | 420 ++++++++++++ drivers/gpu/drm/v3d/v3d_fence.c | 49 ++ drivers/gpu/drm/v3d/v3d_gem.c | 1129 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/v3d/v3d_irq.c | 289 ++++++++ drivers/gpu/drm/v3d/v3d_mmu.c | 125 ++++ drivers/gpu/drm/v3d/v3d_perfmon.c | 214 ++++++ drivers/gpu/drm/v3d/v3d_regs.h | 499 ++++++++++++++ drivers/gpu/drm/v3d/v3d_sched.c | 448 +++++++++++++ drivers/gpu/drm/v3d/v3d_trace.h | 297 +++++++++ drivers/gpu/drm/v3d/v3d_trace_points.c | 9 + 15 files changed, 4327 insertions(+) create mode 100644 drivers/gpu/drm/v3d/Kconfig create mode 100644 drivers/gpu/drm/v3d/Makefile create mode 100644 drivers/gpu/drm/v3d/v3d_bo.c create mode 100644 drivers/gpu/drm/v3d/v3d_debugfs.c create mode 100644 drivers/gpu/drm/v3d/v3d_drv.c create mode 100644 drivers/gpu/drm/v3d/v3d_drv.h create mode 100644 drivers/gpu/drm/v3d/v3d_fence.c create mode 100644 drivers/gpu/drm/v3d/v3d_gem.c create mode 100644 drivers/gpu/drm/v3d/v3d_irq.c create mode 100644 drivers/gpu/drm/v3d/v3d_mmu.c create mode 100644 drivers/gpu/drm/v3d/v3d_perfmon.c create mode 100644 drivers/gpu/drm/v3d/v3d_regs.h create mode 100644 drivers/gpu/drm/v3d/v3d_sched.c create mode 100644 drivers/gpu/drm/v3d/v3d_trace.h create mode 100644 drivers/gpu/drm/v3d/v3d_trace_points.c (limited to 'drivers/gpu/drm/v3d') diff --git a/drivers/gpu/drm/v3d/Kconfig b/drivers/gpu/drm/v3d/Kconfig new file mode 100644 index 000000000..ce62c5908 --- /dev/null +++ b/drivers/gpu/drm/v3d/Kconfig @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only +config DRM_V3D + tristate "Broadcom V3D 3.x and newer" + depends on ARCH_BCM || ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST + depends on DRM + depends on COMMON_CLK + depends on MMU + select DRM_SCHED + select DRM_GEM_SHMEM_HELPER + help + Choose this option if you have a system that has a Broadcom + V3D 3.x or newer GPUs. SoCs supported include the BCM2711, + BCM7268 and BCM7278. diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile new file mode 100644 index 000000000..e8b314137 --- /dev/null +++ b/drivers/gpu/drm/v3d/Makefile @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Please keep these build lists sorted! + +# core driver code +v3d-y := \ + v3d_bo.o \ + v3d_drv.o \ + v3d_fence.o \ + v3d_gem.o \ + v3d_irq.o \ + v3d_mmu.o \ + v3d_perfmon.o \ + v3d_trace_points.o \ + v3d_sched.o + +v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o + +obj-$(CONFIG_DRM_V3D) += v3d.o + +CFLAGS_v3d_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c new file mode 100644 index 000000000..8b3229a37 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2015-2018 Broadcom */ + +/** + * DOC: V3D GEM BO management support + * + * Compared to VC4 (V3D 2.x), V3D 3.3 introduces an MMU between the + * GPU and the bus, allowing us to use shmem objects for our storage + * instead of CMA. + * + * Physically contiguous objects may still be imported to V3D, but the + * driver doesn't allocate physically contiguous objects on its own. 
+ * Display engines requiring physically contiguous allocations should + * look into Mesa's "renderonly" support (as used by the Mesa pl111 + * driver) for an example of how to integrate with V3D. + * + * Long term, we should support evicting pages from the MMU when under + * memory pressure (thus the v3d_bo_get_pages() refcounting), but + * that's not a high priority since our systems tend to not have swap. + */ + +#include +#include + +#include "v3d_drv.h" +#include "uapi/drm/v3d_drm.h" + +/* Called DRM core on the last userspace/kernel unreference of the + * BO. + */ +void v3d_free_object(struct drm_gem_object *obj) +{ + struct v3d_dev *v3d = to_v3d_dev(obj->dev); + struct v3d_bo *bo = to_v3d_bo(obj); + + v3d_mmu_remove_ptes(bo); + + mutex_lock(&v3d->bo_lock); + v3d->bo_stats.num_allocated--; + v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT; + mutex_unlock(&v3d->bo_lock); + + spin_lock(&v3d->mm_lock); + drm_mm_remove_node(&bo->node); + spin_unlock(&v3d->mm_lock); + + /* GPU execution may have dirtied any pages in the BO. */ + bo->base.pages_mark_dirty_on_put = true; + + drm_gem_shmem_free(&bo->base); +} + +static const struct drm_gem_object_funcs v3d_gem_funcs = { + .free = v3d_free_object, + .print_info = drm_gem_shmem_object_print_info, + .pin = drm_gem_shmem_object_pin, + .unpin = drm_gem_shmem_object_unpin, + .get_sg_table = drm_gem_shmem_object_get_sg_table, + .vmap = drm_gem_shmem_object_vmap, + .vunmap = drm_gem_shmem_object_vunmap, + .mmap = drm_gem_shmem_object_mmap, + .vm_ops = &drm_gem_shmem_vm_ops, +}; + +/* gem_create_object function for allocating a BO struct and doing + * early setup. + */ +struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size) +{ + struct v3d_bo *bo; + struct drm_gem_object *obj; + + if (size == 0) + return ERR_PTR(-EINVAL); + + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (!bo) + return ERR_PTR(-ENOMEM); + obj = &bo->base.base; + + obj->funcs = &v3d_gem_funcs; + bo->base.map_wc = true; + INIT_LIST_HEAD(&bo->unref_head); + + return &bo->base.base; +} + +static int +v3d_bo_create_finish(struct drm_gem_object *obj) +{ + struct v3d_dev *v3d = to_v3d_dev(obj->dev); + struct v3d_bo *bo = to_v3d_bo(obj); + struct sg_table *sgt; + int ret; + + /* So far we pin the BO in the MMU for its lifetime, so use + * shmem's helper for getting a lifetime sgt. + */ + sgt = drm_gem_shmem_get_pages_sgt(&bo->base); + if (IS_ERR(sgt)) + return PTR_ERR(sgt); + + spin_lock(&v3d->mm_lock); + /* Allocate the object's space in the GPU's page tables. + * Inserting PTEs will happen later, but the offset is for the + * lifetime of the BO. + */ + ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node, + obj->size >> PAGE_SHIFT, + GMP_GRANULARITY >> PAGE_SHIFT, 0, 0); + spin_unlock(&v3d->mm_lock); + if (ret) + return ret; + + /* Track stats for /debug/dri/n/bo_stats. 
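+ * These counters (num_allocated, pages_allocated) are protected by
+ * bo_lock and are what the bo_stats debugfs file
+ * (v3d_debugfs_bo_stats()) reports.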
*/ + mutex_lock(&v3d->bo_lock); + v3d->bo_stats.num_allocated++; + v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT; + mutex_unlock(&v3d->bo_lock); + + v3d_mmu_insert_ptes(bo); + + return 0; +} + +struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, + size_t unaligned_size) +{ + struct drm_gem_shmem_object *shmem_obj; + struct v3d_bo *bo; + int ret; + + shmem_obj = drm_gem_shmem_create(dev, unaligned_size); + if (IS_ERR(shmem_obj)) + return ERR_CAST(shmem_obj); + bo = to_v3d_bo(&shmem_obj->base); + + ret = v3d_bo_create_finish(&shmem_obj->base); + if (ret) + goto free_obj; + + return bo; + +free_obj: + drm_gem_shmem_free(shmem_obj); + return ERR_PTR(ret); +} + +struct drm_gem_object * +v3d_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + struct drm_gem_object *obj; + int ret; + + obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt); + if (IS_ERR(obj)) + return obj; + + ret = v3d_bo_create_finish(obj); + if (ret) { + drm_gem_shmem_free(&to_v3d_bo(obj)->base); + return ERR_PTR(ret); + } + + return obj; +} + +int v3d_create_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_v3d_create_bo *args = data; + struct v3d_bo *bo = NULL; + int ret; + + if (args->flags != 0) { + DRM_INFO("unknown create_bo flags: %d\n", args->flags); + return -EINVAL; + } + + bo = v3d_bo_create(dev, file_priv, PAGE_ALIGN(args->size)); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + args->offset = bo->node.start << PAGE_SHIFT; + + ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); + drm_gem_object_put(&bo->base.base); + + return ret; +} + +int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_v3d_mmap_bo *args = data; + struct drm_gem_object *gem_obj; + + if (args->flags != 0) { + DRM_INFO("unknown mmap_bo flags: %d\n", args->flags); + return -EINVAL; + } + + gem_obj = drm_gem_object_lookup(file_priv, args->handle); + if (!gem_obj) { + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); + return -ENOENT; + } + + args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); + drm_gem_object_put(gem_obj); + + return 0; +} + +int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_v3d_get_bo_offset *args = data; + struct drm_gem_object *gem_obj; + struct v3d_bo *bo; + + gem_obj = drm_gem_object_lookup(file_priv, args->handle); + if (!gem_obj) { + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); + return -ENOENT; + } + bo = to_v3d_bo(gem_obj); + + args->offset = bo->node.start << PAGE_SHIFT; + + drm_gem_object_put(gem_obj); + return 0; +} diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c new file mode 100644 index 000000000..efbde124c --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2014-2018 Broadcom */ + +#include +#include +#include +#include +#include + +#include + +#include "v3d_drv.h" +#include "v3d_regs.h" + +#define REGDEF(reg) { reg, #reg } +struct v3d_reg_def { + u32 reg; + const char *name; +}; + +static const struct v3d_reg_def v3d_hub_reg_defs[] = { + REGDEF(V3D_HUB_AXICFG), + REGDEF(V3D_HUB_UIFCFG), + REGDEF(V3D_HUB_IDENT0), + REGDEF(V3D_HUB_IDENT1), + REGDEF(V3D_HUB_IDENT2), + REGDEF(V3D_HUB_IDENT3), + REGDEF(V3D_HUB_INT_STS), + REGDEF(V3D_HUB_INT_MSK_STS), + + REGDEF(V3D_MMU_CTL), + REGDEF(V3D_MMU_VIO_ADDR), + 
REGDEF(V3D_MMU_VIO_ID), + REGDEF(V3D_MMU_DEBUG_INFO), +}; + +static const struct v3d_reg_def v3d_gca_reg_defs[] = { + REGDEF(V3D_GCA_SAFE_SHUTDOWN), + REGDEF(V3D_GCA_SAFE_SHUTDOWN_ACK), +}; + +static const struct v3d_reg_def v3d_core_reg_defs[] = { + REGDEF(V3D_CTL_IDENT0), + REGDEF(V3D_CTL_IDENT1), + REGDEF(V3D_CTL_IDENT2), + REGDEF(V3D_CTL_MISCCFG), + REGDEF(V3D_CTL_INT_STS), + REGDEF(V3D_CTL_INT_MSK_STS), + REGDEF(V3D_CLE_CT0CS), + REGDEF(V3D_CLE_CT0CA), + REGDEF(V3D_CLE_CT0EA), + REGDEF(V3D_CLE_CT1CS), + REGDEF(V3D_CLE_CT1CA), + REGDEF(V3D_CLE_CT1EA), + + REGDEF(V3D_PTB_BPCA), + REGDEF(V3D_PTB_BPCS), + + REGDEF(V3D_GMP_STATUS), + REGDEF(V3D_GMP_CFG), + REGDEF(V3D_GMP_VIO_ADDR), + + REGDEF(V3D_ERR_FDBGO), + REGDEF(V3D_ERR_FDBGB), + REGDEF(V3D_ERR_FDBGS), + REGDEF(V3D_ERR_STAT), +}; + +static const struct v3d_reg_def v3d_csd_reg_defs[] = { + REGDEF(V3D_CSD_STATUS), + REGDEF(V3D_CSD_CURRENT_CFG0), + REGDEF(V3D_CSD_CURRENT_CFG1), + REGDEF(V3D_CSD_CURRENT_CFG2), + REGDEF(V3D_CSD_CURRENT_CFG3), + REGDEF(V3D_CSD_CURRENT_CFG4), + REGDEF(V3D_CSD_CURRENT_CFG5), + REGDEF(V3D_CSD_CURRENT_CFG6), +}; + +static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct v3d_dev *v3d = to_v3d_dev(dev); + int i, core; + + for (i = 0; i < ARRAY_SIZE(v3d_hub_reg_defs); i++) { + seq_printf(m, "%s (0x%04x): 0x%08x\n", + v3d_hub_reg_defs[i].name, v3d_hub_reg_defs[i].reg, + V3D_READ(v3d_hub_reg_defs[i].reg)); + } + + if (v3d->ver < 41) { + for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) { + seq_printf(m, "%s (0x%04x): 0x%08x\n", + v3d_gca_reg_defs[i].name, + v3d_gca_reg_defs[i].reg, + V3D_GCA_READ(v3d_gca_reg_defs[i].reg)); + } + } + + for (core = 0; core < v3d->cores; core++) { + for (i = 0; i < ARRAY_SIZE(v3d_core_reg_defs); i++) { + seq_printf(m, "core %d %s (0x%04x): 0x%08x\n", + core, + v3d_core_reg_defs[i].name, + v3d_core_reg_defs[i].reg, + V3D_CORE_READ(core, + v3d_core_reg_defs[i].reg)); + } + + if (v3d_has_csd(v3d)) { + for (i = 0; i < ARRAY_SIZE(v3d_csd_reg_defs); i++) { + seq_printf(m, "core %d %s (0x%04x): 0x%08x\n", + core, + v3d_csd_reg_defs[i].name, + v3d_csd_reg_defs[i].reg, + V3D_CORE_READ(core, + v3d_csd_reg_defs[i].reg)); + } + } + } + + return 0; +} + +static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct v3d_dev *v3d = to_v3d_dev(dev); + u32 ident0, ident1, ident2, ident3, cores; + int core; + + ident0 = V3D_READ(V3D_HUB_IDENT0); + ident1 = V3D_READ(V3D_HUB_IDENT1); + ident2 = V3D_READ(V3D_HUB_IDENT2); + ident3 = V3D_READ(V3D_HUB_IDENT3); + cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES); + + seq_printf(m, "Revision: %d.%d.%d.%d\n", + V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER), + V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV), + V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPREV), + V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPIDX)); + seq_printf(m, "MMU: %s\n", + str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU)); + seq_printf(m, "TFU: %s\n", + str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU)); + seq_printf(m, "TSY: %s\n", + str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY)); + seq_printf(m, "MSO: %s\n", + str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_MSO)); + seq_printf(m, "L3C: %s (%dkb)\n", + str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_L3C), + V3D_GET_FIELD(ident2, V3D_HUB_IDENT2_L3C_NKB)); + + for (core = 0; core < cores; core++) { + u32 misccfg; + u32 nslc, 
ntmu, qups; + + ident0 = V3D_CORE_READ(core, V3D_CTL_IDENT0); + ident1 = V3D_CORE_READ(core, V3D_CTL_IDENT1); + ident2 = V3D_CORE_READ(core, V3D_CTL_IDENT2); + misccfg = V3D_CORE_READ(core, V3D_CTL_MISCCFG); + + nslc = V3D_GET_FIELD(ident1, V3D_IDENT1_NSLC); + ntmu = V3D_GET_FIELD(ident1, V3D_IDENT1_NTMU); + qups = V3D_GET_FIELD(ident1, V3D_IDENT1_QUPS); + + seq_printf(m, "Core %d:\n", core); + seq_printf(m, " Revision: %d.%d\n", + V3D_GET_FIELD(ident0, V3D_IDENT0_VER), + V3D_GET_FIELD(ident1, V3D_IDENT1_REV)); + seq_printf(m, " Slices: %d\n", nslc); + seq_printf(m, " TMUs: %d\n", nslc * ntmu); + seq_printf(m, " QPUs: %d\n", nslc * qups); + seq_printf(m, " Semaphores: %d\n", + V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM)); + seq_printf(m, " BCG int: %d\n", + (ident2 & V3D_IDENT2_BCG_INT) != 0); + seq_printf(m, " Override TMU: %d\n", + (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0); + } + + return 0; +} + +static int v3d_debugfs_bo_stats(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct v3d_dev *v3d = to_v3d_dev(dev); + + mutex_lock(&v3d->bo_lock); + seq_printf(m, "allocated bos: %d\n", + v3d->bo_stats.num_allocated); + seq_printf(m, "allocated bo size (kb): %ld\n", + (long)v3d->bo_stats.pages_allocated << (PAGE_SHIFT - 10)); + mutex_unlock(&v3d->bo_lock); + + return 0; +} + +static int v3d_measure_clock(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct v3d_dev *v3d = to_v3d_dev(dev); + uint32_t cycles; + int core = 0; + int measure_ms = 1000; + + if (v3d->ver >= 40) { + V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3, + V3D_SET_FIELD(V3D_PCTR_CYCLE_COUNT, + V3D_PCTR_S0)); + V3D_CORE_WRITE(core, V3D_V4_PCTR_0_CLR, 1); + V3D_CORE_WRITE(core, V3D_V4_PCTR_0_EN, 1); + } else { + V3D_CORE_WRITE(core, V3D_V3_PCTR_0_PCTRS0, + V3D_PCTR_CYCLE_COUNT); + V3D_CORE_WRITE(core, V3D_V3_PCTR_0_CLR, 1); + V3D_CORE_WRITE(core, V3D_V3_PCTR_0_EN, + V3D_V3_PCTR_0_EN_ENABLE | + 1); + } + msleep(measure_ms); + cycles = V3D_CORE_READ(core, V3D_PCTR_0_PCTR0); + + seq_printf(m, "cycles: %d (%d.%d Mhz)\n", + cycles, + cycles / (measure_ms * 1000), + (cycles / (measure_ms * 100)) % 10); + + return 0; +} + +static const struct drm_info_list v3d_debugfs_list[] = { + {"v3d_ident", v3d_v3d_debugfs_ident, 0}, + {"v3d_regs", v3d_v3d_debugfs_regs, 0}, + {"measure_clock", v3d_measure_clock, 0}, + {"bo_stats", v3d_debugfs_bo_stats, 0}, +}; + +void +v3d_debugfs_init(struct drm_minor *minor) +{ + drm_debugfs_create_files(v3d_debugfs_list, + ARRAY_SIZE(v3d_debugfs_list), + minor->debugfs_root, minor); +} diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c new file mode 100644 index 000000000..e8c975b81 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -0,0 +1,328 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2014-2018 Broadcom */ + +/** + * DOC: Broadcom V3D Graphics Driver + * + * This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs. + * For V3D 2.x support, see the VC4 driver. + * + * The V3D GPU includes a tiled render (composed of a bin and render + * pipelines), the TFU (texture formatting unit), and the CSD (compute + * shader dispatch). 
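+ *
+ * Each unit is fed through its own scheduler queue (V3D_BIN and
+ * V3D_RENDER for the renderer, V3D_TFU, V3D_CSD, plus V3D_CACHE_CLEAN
+ * for cache cleaning after a job), driven by the SUBMIT_CL, SUBMIT_TFU
+ * and SUBMIT_CSD ioctls in v3d_gem.c.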
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "v3d_drv.h" +#include "v3d_regs.h" + +#define DRIVER_NAME "v3d" +#define DRIVER_DESC "Broadcom V3D graphics" +#define DRIVER_DATE "20180419" +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 0 + +static int v3d_get_param_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct drm_v3d_get_param *args = data; + static const u32 reg_map[] = { + [DRM_V3D_PARAM_V3D_UIFCFG] = V3D_HUB_UIFCFG, + [DRM_V3D_PARAM_V3D_HUB_IDENT1] = V3D_HUB_IDENT1, + [DRM_V3D_PARAM_V3D_HUB_IDENT2] = V3D_HUB_IDENT2, + [DRM_V3D_PARAM_V3D_HUB_IDENT3] = V3D_HUB_IDENT3, + [DRM_V3D_PARAM_V3D_CORE0_IDENT0] = V3D_CTL_IDENT0, + [DRM_V3D_PARAM_V3D_CORE0_IDENT1] = V3D_CTL_IDENT1, + [DRM_V3D_PARAM_V3D_CORE0_IDENT2] = V3D_CTL_IDENT2, + }; + + if (args->pad != 0) + return -EINVAL; + + /* Note that DRM_V3D_PARAM_V3D_CORE0_IDENT0 is 0, so we need + * to explicitly allow it in the "the register in our + * parameter map" check. + */ + if (args->param < ARRAY_SIZE(reg_map) && + (reg_map[args->param] || + args->param == DRM_V3D_PARAM_V3D_CORE0_IDENT0)) { + u32 offset = reg_map[args->param]; + + if (args->value != 0) + return -EINVAL; + + if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 && + args->param <= DRM_V3D_PARAM_V3D_CORE0_IDENT2) { + args->value = V3D_CORE_READ(0, offset); + } else { + args->value = V3D_READ(offset); + } + return 0; + } + + switch (args->param) { + case DRM_V3D_PARAM_SUPPORTS_TFU: + args->value = 1; + return 0; + case DRM_V3D_PARAM_SUPPORTS_CSD: + args->value = v3d_has_csd(v3d); + return 0; + case DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH: + args->value = 1; + return 0; + case DRM_V3D_PARAM_SUPPORTS_PERFMON: + args->value = (v3d->ver >= 40); + return 0; + case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT: + args->value = 1; + return 0; + default: + DRM_DEBUG("Unknown parameter %d\n", args->param); + return -EINVAL; + } +} + +static int +v3d_open(struct drm_device *dev, struct drm_file *file) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct v3d_file_priv *v3d_priv; + struct drm_gpu_scheduler *sched; + int i; + + v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL); + if (!v3d_priv) + return -ENOMEM; + + v3d_priv->v3d = v3d; + + for (i = 0; i < V3D_MAX_QUEUES; i++) { + sched = &v3d->queue[i].sched; + drm_sched_entity_init(&v3d_priv->sched_entity[i], + DRM_SCHED_PRIORITY_NORMAL, &sched, + 1, NULL); + } + + v3d_perfmon_open_file(v3d_priv); + file->driver_priv = v3d_priv; + + return 0; +} + +static void +v3d_postclose(struct drm_device *dev, struct drm_file *file) +{ + struct v3d_file_priv *v3d_priv = file->driver_priv; + enum v3d_queue q; + + for (q = 0; q < V3D_MAX_QUEUES; q++) + drm_sched_entity_destroy(&v3d_priv->sched_entity[q]); + + v3d_perfmon_close_file(v3d_priv); + kfree(v3d_priv); +} + +DEFINE_DRM_GEM_FOPS(v3d_drm_fops); + +/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP + * protection between clients. Note that render nodes would be + * able to submit CLs that could access BOs from clients authenticated + * with the master node. The TFU doesn't use the GMP, so it would + * need to stay DRM_AUTH until we do buffer size/offset validation. 
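+ *
+ * Concretely, SUBMIT_CL, SUBMIT_TFU and SUBMIT_CSD below carry DRM_AUTH
+ * in addition to DRM_RENDER_ALLOW; the remaining ioctls are
+ * DRM_RENDER_ALLOW only.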
+ */ +static const struct drm_ioctl_desc v3d_drm_ioctls[] = { + DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CL, v3d_submit_cl_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), + DRM_IOCTL_DEF_DRV(V3D_WAIT_BO, v3d_wait_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_CREATE_BO, v3d_create_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_MMAP_BO, v3d_mmap_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), + DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CSD, v3d_submit_csd_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), + DRM_IOCTL_DEF_DRV(V3D_PERFMON_CREATE, v3d_perfmon_create_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_PERFMON_DESTROY, v3d_perfmon_destroy_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_VALUES, v3d_perfmon_get_values_ioctl, DRM_RENDER_ALLOW), +}; + +static const struct drm_driver v3d_drm_driver = { + .driver_features = (DRIVER_GEM | + DRIVER_RENDER | + DRIVER_SYNCOBJ), + + .open = v3d_open, + .postclose = v3d_postclose, + +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = v3d_debugfs_init, +#endif + + .gem_create_object = v3d_create_object, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import_sg_table = v3d_prime_import_sg_table, + .gem_prime_mmap = drm_gem_prime_mmap, + + .ioctls = v3d_drm_ioctls, + .num_ioctls = ARRAY_SIZE(v3d_drm_ioctls), + .fops = &v3d_drm_fops, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static const struct of_device_id v3d_of_match[] = { + { .compatible = "brcm,2711-v3d" }, + { .compatible = "brcm,7268-v3d" }, + { .compatible = "brcm,7278-v3d" }, + {}, +}; +MODULE_DEVICE_TABLE(of, v3d_of_match); + +static int +map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name) +{ + *regs = devm_platform_ioremap_resource_byname(v3d_to_pdev(v3d), name); + return PTR_ERR_OR_ZERO(*regs); +} + +static int v3d_platform_drm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct drm_device *drm; + struct v3d_dev *v3d; + int ret; + u32 mmu_debug; + u32 ident1; + u64 mask; + + v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm); + if (IS_ERR(v3d)) + return PTR_ERR(v3d); + + drm = &v3d->drm; + + platform_set_drvdata(pdev, drm); + + ret = map_regs(v3d, &v3d->hub_regs, "hub"); + if (ret) + return ret; + + ret = map_regs(v3d, &v3d->core_regs[0], "core0"); + if (ret) + return ret; + + mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO); + mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)); + ret = dma_set_mask_and_coherent(dev, mask); + if (ret) + return ret; + + v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH); + + ident1 = V3D_READ(V3D_HUB_IDENT1); + v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 + + V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV)); + v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES); + WARN_ON(v3d->cores > 1); /* multicore not yet implemented */ + + v3d->reset = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(v3d->reset)) { + ret = PTR_ERR(v3d->reset); + + if (ret == -EPROBE_DEFER) + return ret; + + v3d->reset = NULL; + ret = map_regs(v3d, &v3d->bridge_regs, "bridge"); + if (ret) { + dev_err(dev, + "Failed to get reset control or bridge regs\n"); + return ret; + } + } + + if 
(v3d->ver < 41) { + ret = map_regs(v3d, &v3d->gca_regs, "gca"); + if (ret) + return ret; + } + + v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (!v3d->mmu_scratch) { + dev_err(dev, "Failed to allocate MMU scratch page\n"); + return -ENOMEM; + } + + ret = v3d_gem_init(drm); + if (ret) + goto dma_free; + + ret = v3d_irq_init(v3d); + if (ret) + goto gem_destroy; + + ret = drm_dev_register(drm, 0); + if (ret) + goto irq_disable; + + return 0; + +irq_disable: + v3d_irq_disable(v3d); +gem_destroy: + v3d_gem_destroy(drm); +dma_free: + dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr); + return ret; +} + +static int v3d_platform_drm_remove(struct platform_device *pdev) +{ + struct drm_device *drm = platform_get_drvdata(pdev); + struct v3d_dev *v3d = to_v3d_dev(drm); + + drm_dev_unregister(drm); + + v3d_gem_destroy(drm); + + dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch, + v3d->mmu_scratch_paddr); + + return 0; +} + +static struct platform_driver v3d_platform_driver = { + .probe = v3d_platform_drm_probe, + .remove = v3d_platform_drm_remove, + .driver = { + .name = "v3d", + .of_match_table = v3d_of_match, + }, +}; + +module_platform_driver(v3d_platform_driver); + +MODULE_ALIAS("platform:v3d-drm"); +MODULE_DESCRIPTION("Broadcom V3D DRM Driver"); +MODULE_AUTHOR("Eric Anholt "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h new file mode 100644 index 000000000..b74b1351b --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -0,0 +1,420 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2015-2018 Broadcom */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "uapi/drm/v3d_drm.h" + +struct clk; +struct platform_device; +struct reset_control; + +#define GMP_GRANULARITY (128 * 1024) + +#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1) + +struct v3d_queue_state { + struct drm_gpu_scheduler sched; + + u64 fence_context; + u64 emit_seqno; +}; + +/* Performance monitor object. The perform lifetime is controlled by userspace + * using perfmon related ioctls. A perfmon can be attached to a submit_cl + * request, and when this is the case, HW perf counters will be activated just + * before the submit_cl is submitted to the GPU and disabled when the job is + * done. This way, only events related to a specific job will be counted. + */ +struct v3d_perfmon { + /* Tracks the number of users of the perfmon, when this counter reaches + * zero the perfmon is destroyed. + */ + refcount_t refcnt; + + /* Protects perfmon stop, as it can be invoked from multiple places. */ + struct mutex lock; + + /* Number of counters activated in this perfmon instance + * (should be less than DRM_V3D_MAX_PERF_COUNTERS). + */ + u8 ncounters; + + /* Events counted by the HW perf counters. */ + u8 counters[DRM_V3D_MAX_PERF_COUNTERS]; + + /* Storage for counter values. Counters are incremented by the + * HW perf counter values every time the perfmon is attached + * to a GPU job. This way, perfmon users don't have to + * retrieve the results after each job if they want to track + * events covering several submissions. Note that counter + * values can't be reset, but you can fake a reset by + * destroying the perfmon and creating a new one. + */ + u64 values[]; +}; + +struct v3d_dev { + struct drm_device drm; + + /* Short representation (e.g. 33, 41) of the V3D tech version + * and revision. 
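+ * Filled in at probe time from V3D_HUB_IDENT1 as TVER * 10 + REV.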
+ */ + int ver; + bool single_irq_line; + + void __iomem *hub_regs; + void __iomem *core_regs[3]; + void __iomem *bridge_regs; + void __iomem *gca_regs; + struct clk *clk; + struct reset_control *reset; + + /* Virtual and DMA addresses of the single shared page table. */ + volatile u32 *pt; + dma_addr_t pt_paddr; + + /* Virtual and DMA addresses of the MMU's scratch page. When + * a read or write is invalid in the MMU, it will be + * redirected here. + */ + void *mmu_scratch; + dma_addr_t mmu_scratch_paddr; + /* virtual address bits from V3D to the MMU. */ + int va_width; + + /* Number of V3D cores. */ + u32 cores; + + /* Allocator managing the address space. All units are in + * number of pages. + */ + struct drm_mm mm; + spinlock_t mm_lock; + + struct work_struct overflow_mem_work; + + struct v3d_bin_job *bin_job; + struct v3d_render_job *render_job; + struct v3d_tfu_job *tfu_job; + struct v3d_csd_job *csd_job; + + struct v3d_queue_state queue[V3D_MAX_QUEUES]; + + /* Spinlock used to synchronize the overflow memory + * management against bin job submission. + */ + spinlock_t job_lock; + + /* Used to track the active perfmon if any. */ + struct v3d_perfmon *active_perfmon; + + /* Protects bo_stats */ + struct mutex bo_lock; + + /* Lock taken when resetting the GPU, to keep multiple + * processes from trying to park the scheduler threads and + * reset at once. + */ + struct mutex reset_lock; + + /* Lock taken when creating and pushing the GPU scheduler + * jobs, to keep the sched-fence seqnos in order. + */ + struct mutex sched_lock; + + /* Lock taken during a cache clean and when initiating an L2 + * flush, to keep L2 flushes from interfering with the + * synchronous L2 cleans. + */ + struct mutex cache_clean_lock; + + struct { + u32 num_allocated; + u32 pages_allocated; + } bo_stats; +}; + +static inline struct v3d_dev * +to_v3d_dev(struct drm_device *dev) +{ + return container_of(dev, struct v3d_dev, drm); +} + +static inline bool +v3d_has_csd(struct v3d_dev *v3d) +{ + return v3d->ver >= 41; +} + +#define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev) + +/* The per-fd struct, which tracks the MMU mappings. 
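+ * It also holds the client's per-queue scheduler entities and its
+ * perfmon idr.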
*/ +struct v3d_file_priv { + struct v3d_dev *v3d; + + struct { + struct idr idr; + struct mutex lock; + } perfmon; + + struct drm_sched_entity sched_entity[V3D_MAX_QUEUES]; +}; + +struct v3d_bo { + struct drm_gem_shmem_object base; + + struct drm_mm_node node; + + /* List entry for the BO's position in + * v3d_render_job->unref_list + */ + struct list_head unref_head; +}; + +static inline struct v3d_bo * +to_v3d_bo(struct drm_gem_object *bo) +{ + return (struct v3d_bo *)bo; +} + +struct v3d_fence { + struct dma_fence base; + struct drm_device *dev; + /* v3d seqno for signaled() test */ + u64 seqno; + enum v3d_queue queue; +}; + +static inline struct v3d_fence * +to_v3d_fence(struct dma_fence *fence) +{ + return (struct v3d_fence *)fence; +} + +#define V3D_READ(offset) readl(v3d->hub_regs + offset) +#define V3D_WRITE(offset, val) writel(val, v3d->hub_regs + offset) + +#define V3D_BRIDGE_READ(offset) readl(v3d->bridge_regs + offset) +#define V3D_BRIDGE_WRITE(offset, val) writel(val, v3d->bridge_regs + offset) + +#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset) +#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset) + +#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset) +#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset) + +struct v3d_job { + struct drm_sched_job base; + + struct kref refcount; + + struct v3d_dev *v3d; + + /* This is the array of BOs that were looked up at the start + * of submission. + */ + struct drm_gem_object **bo; + u32 bo_count; + + /* v3d fence to be signaled by IRQ handler when the job is complete. */ + struct dma_fence *irq_fence; + + /* scheduler fence for when the job is considered complete and + * the BO reservations can be released. + */ + struct dma_fence *done_fence; + + /* Pointer to a performance monitor object if the user requested it, + * NULL otherwise. + */ + struct v3d_perfmon *perfmon; + + /* Callback for the freeing of the job on refcount going to 0. */ + void (*free)(struct kref *ref); +}; + +struct v3d_bin_job { + struct v3d_job base; + + /* GPU virtual addresses of the start/end of the CL job. */ + u32 start, end; + + u32 timedout_ctca, timedout_ctra; + + /* Corresponding render job, for attaching our overflow memory. */ + struct v3d_render_job *render; + + /* Submitted tile memory allocation start/size, tile state. */ + u32 qma, qms, qts; +}; + +struct v3d_render_job { + struct v3d_job base; + + /* GPU virtual addresses of the start/end of the CL job. */ + u32 start, end; + + u32 timedout_ctca, timedout_ctra; + + /* List of overflow BOs used in the job that need to be + * released once the job is complete. + */ + struct list_head unref_list; +}; + +struct v3d_tfu_job { + struct v3d_job base; + + struct drm_v3d_submit_tfu args; +}; + +struct v3d_csd_job { + struct v3d_job base; + + u32 timedout_batches; + + struct drm_v3d_submit_csd args; +}; + +struct v3d_submit_outsync { + struct drm_syncobj *syncobj; +}; + +struct v3d_submit_ext { + u32 flags; + u32 wait_stage; + + u32 in_sync_count; + u64 in_syncs; + + u32 out_sync_count; + struct v3d_submit_outsync *out_syncs; +}; + +/** + * __wait_for - magic wait macro + * + * Macro to help avoid open coding check/wait/timeout patterns. Note that it's + * important that we check the condition again after having timed out, since the + * timeout could be due to preemption or similar and we've never had a chance to + * check the condition before the timeout. 
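+ *
+ * Example, as a sketch mirroring v3d_idle_gca() in v3d_gem.c:
+ *
+ *	if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) &
+ *		      V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) ==
+ *		     V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100))
+ *		DRM_ERROR("Failed to wait for safe GCA shutdown\n");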
+ */ +#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \ + const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \ + long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \ + int ret__; \ + might_sleep(); \ + for (;;) { \ + const bool expired__ = ktime_after(ktime_get_raw(), end__); \ + OP; \ + /* Guarantee COND check prior to timeout */ \ + barrier(); \ + if (COND) { \ + ret__ = 0; \ + break; \ + } \ + if (expired__) { \ + ret__ = -ETIMEDOUT; \ + break; \ + } \ + usleep_range(wait__, wait__ * 2); \ + if (wait__ < (Wmax)) \ + wait__ <<= 1; \ + } \ + ret__; \ +}) + +#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \ + (Wmax)) +#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000) + +static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) +{ + /* nsecs_to_jiffies64() does not guard against overflow */ + if (NSEC_PER_SEC % HZ && + div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ) + return MAX_JIFFY_OFFSET; + + return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); +} + +/* v3d_bo.c */ +struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size); +void v3d_free_object(struct drm_gem_object *gem_obj); +struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, + size_t size); +int v3d_create_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + +/* v3d_debugfs.c */ +void v3d_debugfs_init(struct drm_minor *minor); + +/* v3d_fence.c */ +extern const struct dma_fence_ops v3d_fence_ops; +struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue); + +/* v3d_gem.c */ +int v3d_gem_init(struct drm_device *dev); +void v3d_gem_destroy(struct drm_device *dev); +int v3d_submit_cl_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_submit_csd_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_wait_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +void v3d_job_cleanup(struct v3d_job *job); +void v3d_job_put(struct v3d_job *job); +void v3d_reset(struct v3d_dev *v3d); +void v3d_invalidate_caches(struct v3d_dev *v3d); +void v3d_clean_caches(struct v3d_dev *v3d); + +/* v3d_irq.c */ +int v3d_irq_init(struct v3d_dev *v3d); +void v3d_irq_enable(struct v3d_dev *v3d); +void v3d_irq_disable(struct v3d_dev *v3d); +void v3d_irq_reset(struct v3d_dev *v3d); + +/* v3d_mmu.c */ +int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo, + u32 *offset); +int v3d_mmu_set_page_table(struct v3d_dev *v3d); +void v3d_mmu_insert_ptes(struct v3d_bo *bo); +void v3d_mmu_remove_ptes(struct v3d_bo *bo); + +/* v3d_sched.c */ +int v3d_sched_init(struct v3d_dev *v3d); +void v3d_sched_fini(struct v3d_dev *v3d); + +/* v3d_perfmon.c */ +void v3d_perfmon_get(struct v3d_perfmon *perfmon); +void v3d_perfmon_put(struct v3d_perfmon *perfmon); +void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon); +void v3d_perfmon_stop(struct v3d_dev *v3d, struct v3d_perfmon *perfmon, + bool capture); +struct v3d_perfmon *v3d_perfmon_find(struct v3d_file_priv *v3d_priv, int 
id); +void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv); +void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv); +int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c new file mode 100644 index 000000000..89840ed21 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_fence.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2017-2018 Broadcom */ + +#include "v3d_drv.h" + +struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue) +{ + struct v3d_fence *fence; + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (!fence) + return ERR_PTR(-ENOMEM); + + fence->dev = &v3d->drm; + fence->queue = queue; + fence->seqno = ++v3d->queue[queue].emit_seqno; + dma_fence_init(&fence->base, &v3d_fence_ops, &v3d->job_lock, + v3d->queue[queue].fence_context, fence->seqno); + + return &fence->base; +} + +static const char *v3d_fence_get_driver_name(struct dma_fence *fence) +{ + return "v3d"; +} + +static const char *v3d_fence_get_timeline_name(struct dma_fence *fence) +{ + struct v3d_fence *f = to_v3d_fence(fence); + + switch (f->queue) { + case V3D_BIN: + return "v3d-bin"; + case V3D_RENDER: + return "v3d-render"; + case V3D_TFU: + return "v3d-tfu"; + case V3D_CSD: + return "v3d-csd"; + default: + return NULL; + } +} + +const struct dma_fence_ops v3d_fence_ops = { + .get_driver_name = v3d_fence_get_driver_name, + .get_timeline_name = v3d_fence_get_timeline_name, +}; diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c new file mode 100644 index 000000000..b8980440d --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -0,0 +1,1129 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2014-2018 Broadcom */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "v3d_drv.h" +#include "v3d_regs.h" +#include "v3d_trace.h" + +static void +v3d_init_core(struct v3d_dev *v3d, int core) +{ + /* Set OVRTMUOUT, which means that the texture sampler uniform + * configuration's tmu output type field is used, instead of + * using the hardware default behavior based on the texture + * type. If you want the default behavior, you can still put + * "2" in the indirect texture state's output_type field. + */ + if (v3d->ver < 40) + V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT); + + /* Whenever we flush the L2T cache, we always want to flush + * the whole thing. + */ + V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0); + V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0); +} + +/* Sets invariant state for the HW. 
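+ * Re-applied after every GPU reset via v3d_reset_v3d().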
*/ +static void +v3d_init_hw_state(struct v3d_dev *v3d) +{ + v3d_init_core(v3d, 0); +} + +static void +v3d_idle_axi(struct v3d_dev *v3d, int core) +{ + V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ); + + if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) & + (V3D_GMP_STATUS_RD_COUNT_MASK | + V3D_GMP_STATUS_WR_COUNT_MASK | + V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) { + DRM_ERROR("Failed to wait for safe GMP shutdown\n"); + } +} + +static void +v3d_idle_gca(struct v3d_dev *v3d) +{ + if (v3d->ver >= 41) + return; + + V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN); + + if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) & + V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) == + V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) { + DRM_ERROR("Failed to wait for safe GCA shutdown\n"); + } +} + +static void +v3d_reset_by_bridge(struct v3d_dev *v3d) +{ + int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION); + + if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) { + V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, + V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT); + V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0); + + /* GFXH-1383: The SW_INIT may cause a stray write to address 0 + * of the unit, so reset it to its power-on value here. + */ + V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK); + } else { + WARN_ON_ONCE(V3D_GET_FIELD(version, + V3D_TOP_GR_BRIDGE_MAJOR) != 7); + V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, + V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT); + V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0); + } +} + +static void +v3d_reset_v3d(struct v3d_dev *v3d) +{ + if (v3d->reset) + reset_control_reset(v3d->reset); + else + v3d_reset_by_bridge(v3d); + + v3d_init_hw_state(v3d); +} + +void +v3d_reset(struct v3d_dev *v3d) +{ + struct drm_device *dev = &v3d->drm; + + DRM_DEV_ERROR(dev->dev, "Resetting GPU for hang.\n"); + DRM_DEV_ERROR(dev->dev, "V3D_ERR_STAT: 0x%08x\n", + V3D_CORE_READ(0, V3D_ERR_STAT)); + trace_v3d_reset_begin(dev); + + /* XXX: only needed for safe powerdown, not reset. */ + if (false) + v3d_idle_axi(v3d, 0); + + v3d_idle_gca(v3d); + v3d_reset_v3d(v3d); + + v3d_mmu_set_page_table(v3d); + v3d_irq_reset(v3d); + + v3d_perfmon_stop(v3d, v3d->active_perfmon, false); + + trace_v3d_reset_end(dev); +} + +static void +v3d_flush_l3(struct v3d_dev *v3d) +{ + if (v3d->ver < 41) { + u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL); + + V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL, + gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH); + + if (v3d->ver < 33) { + V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL, + gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH); + } + } +} + +/* Invalidates the (read-only) L2C cache. This was the L2 cache for + * uniforms and instructions on V3D 3.2. + */ +static void +v3d_invalidate_l2c(struct v3d_dev *v3d, int core) +{ + if (v3d->ver > 32) + return; + + V3D_CORE_WRITE(core, V3D_CTL_L2CACTL, + V3D_L2CACTL_L2CCLR | + V3D_L2CACTL_L2CENA); +} + +/* Invalidates texture L2 cachelines */ +static void +v3d_flush_l2t(struct v3d_dev *v3d, int core) +{ + /* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't + * need to wait for completion before dispatching the job -- + * L2T accesses will be stalled until the flush has completed. + * However, we do need to make sure we don't try to trigger a + * new flush while the L2_CLEAN queue is trying to + * synchronously clean after a job. 
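+ * Holding cache_clean_lock across the flush below is what provides
+ * that guarantee: it serializes this flush against the synchronous
+ * clean in v3d_clean_caches().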
+ */ + mutex_lock(&v3d->cache_clean_lock); + V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, + V3D_L2TCACTL_L2TFLS | + V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM)); + mutex_unlock(&v3d->cache_clean_lock); +} + +/* Cleans texture L1 and L2 cachelines (writing back dirty data). + * + * For cleaning, which happens from the CACHE_CLEAN queue after CSD has + * executed, we need to make sure that the clean is done before + * signaling job completion. So, we synchronously wait before + * returning, and we make sure that L2 invalidates don't happen in the + * meantime to confuse our are-we-done checks. + */ +void +v3d_clean_caches(struct v3d_dev *v3d) +{ + struct drm_device *dev = &v3d->drm; + int core = 0; + + trace_v3d_cache_clean_begin(dev); + + V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF); + if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & + V3D_L2TCACTL_TMUWCF), 100)) { + DRM_ERROR("Timeout waiting for TMU write combiner flush\n"); + } + + mutex_lock(&v3d->cache_clean_lock); + V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, + V3D_L2TCACTL_L2TFLS | + V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAN, V3D_L2TCACTL_FLM)); + + if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & + V3D_L2TCACTL_L2TFLS), 100)) { + DRM_ERROR("Timeout waiting for L2T clean\n"); + } + + mutex_unlock(&v3d->cache_clean_lock); + + trace_v3d_cache_clean_end(dev); +} + +/* Invalidates the slice caches. These are read-only caches. */ +static void +v3d_invalidate_slices(struct v3d_dev *v3d, int core) +{ + V3D_CORE_WRITE(core, V3D_CTL_SLCACTL, + V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) | + V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) | + V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) | + V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC)); +} + +void +v3d_invalidate_caches(struct v3d_dev *v3d) +{ + /* Invalidate the caches from the outside in. That way if + * another CL's concurrent use of nearby memory were to pull + * an invalidated cacheline back in, we wouldn't leave stale + * data in the inner cache. + */ + v3d_flush_l3(v3d); + v3d_invalidate_l2c(v3d, 0); + v3d_flush_l2t(v3d, 0); + v3d_invalidate_slices(v3d, 0); +} + +/* Takes the reservation lock on all the BOs being referenced, so that + * at queue submit time we can update the reservations. + * + * We don't lock the RCL the tile alloc/state BOs, or overflow memory + * (all of which are on exec->unref_list). They're entirely private + * to v3d, so we don't attach dma-buf fences to them. + */ +static int +v3d_lock_bo_reservations(struct v3d_job *job, + struct ww_acquire_ctx *acquire_ctx) +{ + int i, ret; + + ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx); + if (ret) + return ret; + + for (i = 0; i < job->bo_count; i++) { + ret = dma_resv_reserve_fences(job->bo[i]->resv, 1); + if (ret) + goto fail; + + ret = drm_sched_job_add_implicit_dependencies(&job->base, + job->bo[i], true); + if (ret) + goto fail; + } + + return 0; + +fail: + drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); + return ret; +} + +/** + * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects + * referenced by the job. + * @dev: DRM device + * @file_priv: DRM file for this fd + * @job: V3D job being set up + * @bo_handles: GEM handles + * @bo_count: Number of GEM handles passed in + * + * The command validator needs to reference BOs by their index within + * the submitted job's BO list. This does the validation of the job's + * BO list and reference counting for the lifetime of the job. 
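+ * Returns 0 on success or a negative error code on failure.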
+ * + * Note that this function doesn't need to unreference the BOs on + * failure, because that will happen at v3d_exec_cleanup() time. + */ +static int +v3d_lookup_bos(struct drm_device *dev, + struct drm_file *file_priv, + struct v3d_job *job, + u64 bo_handles, + u32 bo_count) +{ + u32 *handles; + int ret = 0; + int i; + + job->bo_count = bo_count; + + if (!job->bo_count) { + /* See comment on bo_index for why we have to check + * this. + */ + DRM_DEBUG("Rendering requires BOs\n"); + return -EINVAL; + } + + job->bo = kvmalloc_array(job->bo_count, + sizeof(struct drm_gem_dma_object *), + GFP_KERNEL | __GFP_ZERO); + if (!job->bo) { + DRM_DEBUG("Failed to allocate validated BO pointers\n"); + return -ENOMEM; + } + + handles = kvmalloc_array(job->bo_count, sizeof(u32), GFP_KERNEL); + if (!handles) { + ret = -ENOMEM; + DRM_DEBUG("Failed to allocate incoming GEM handles\n"); + goto fail; + } + + if (copy_from_user(handles, + (void __user *)(uintptr_t)bo_handles, + job->bo_count * sizeof(u32))) { + ret = -EFAULT; + DRM_DEBUG("Failed to copy in GEM handles\n"); + goto fail; + } + + spin_lock(&file_priv->table_lock); + for (i = 0; i < job->bo_count; i++) { + struct drm_gem_object *bo = idr_find(&file_priv->object_idr, + handles[i]); + if (!bo) { + DRM_DEBUG("Failed to look up GEM BO %d: %d\n", + i, handles[i]); + ret = -ENOENT; + spin_unlock(&file_priv->table_lock); + goto fail; + } + drm_gem_object_get(bo); + job->bo[i] = bo; + } + spin_unlock(&file_priv->table_lock); + +fail: + kvfree(handles); + return ret; +} + +static void +v3d_job_free(struct kref *ref) +{ + struct v3d_job *job = container_of(ref, struct v3d_job, refcount); + int i; + + for (i = 0; i < job->bo_count; i++) { + if (job->bo[i]) + drm_gem_object_put(job->bo[i]); + } + kvfree(job->bo); + + dma_fence_put(job->irq_fence); + dma_fence_put(job->done_fence); + + if (job->perfmon) + v3d_perfmon_put(job->perfmon); + + kfree(job); +} + +static void +v3d_render_job_free(struct kref *ref) +{ + struct v3d_render_job *job = container_of(ref, struct v3d_render_job, + base.refcount); + struct v3d_bo *bo, *save; + + list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) { + drm_gem_object_put(&bo->base.base); + } + + v3d_job_free(ref); +} + +void v3d_job_cleanup(struct v3d_job *job) +{ + if (!job) + return; + + drm_sched_job_cleanup(&job->base); + v3d_job_put(job); +} + +void v3d_job_put(struct v3d_job *job) +{ + kref_put(&job->refcount, job->free); +} + +int +v3d_wait_bo_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret; + struct drm_v3d_wait_bo *args = data; + ktime_t start = ktime_get(); + u64 delta_ns; + unsigned long timeout_jiffies = + nsecs_to_jiffies_timeout(args->timeout_ns); + + if (args->pad != 0) + return -EINVAL; + + ret = drm_gem_dma_resv_wait(file_priv, args->handle, + true, timeout_jiffies); + + /* Decrement the user's timeout, in case we got interrupted + * such that the ioctl will be restarted. + */ + delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start)); + if (delta_ns < args->timeout_ns) + args->timeout_ns -= delta_ns; + else + args->timeout_ns = 0; + + /* Asked to wait beyond the jiffie/scheduler precision? 
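+ * If so, return -EAGAIN so that the restarted ioctl can wait again
+ * with the decremented timeout_ns computed above.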
*/ + if (ret == -ETIME && args->timeout_ns) + ret = -EAGAIN; + + return ret; +} + +static int +v3d_job_add_deps(struct drm_file *file_priv, struct v3d_job *job, + u32 in_sync, u32 point) +{ + struct dma_fence *in_fence = NULL; + int ret; + + ret = drm_syncobj_find_fence(file_priv, in_sync, point, 0, &in_fence); + if (ret == -EINVAL) + return ret; + + return drm_sched_job_add_dependency(&job->base, in_fence); +} + +static int +v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, + void **container, size_t size, void (*free)(struct kref *ref), + u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue) +{ + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct v3d_job *job; + bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC); + int ret, i; + + *container = kcalloc(1, size, GFP_KERNEL); + if (!*container) { + DRM_ERROR("Cannot allocate memory for v3d job."); + return -ENOMEM; + } + + job = *container; + job->v3d = v3d; + job->free = free; + + ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue], + v3d_priv); + if (ret) + goto fail; + + if (has_multisync) { + if (se->in_sync_count && se->wait_stage == queue) { + struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs); + + for (i = 0; i < se->in_sync_count; i++) { + struct drm_v3d_sem in; + + if (copy_from_user(&in, handle++, sizeof(in))) { + ret = -EFAULT; + DRM_DEBUG("Failed to copy wait dep handle.\n"); + goto fail_deps; + } + ret = v3d_job_add_deps(file_priv, job, in.handle, 0); + if (ret) + goto fail_deps; + } + } + } else { + ret = v3d_job_add_deps(file_priv, job, in_sync, 0); + if (ret) + goto fail_deps; + } + + kref_init(&job->refcount); + + return 0; + +fail_deps: + drm_sched_job_cleanup(&job->base); +fail: + kfree(*container); + *container = NULL; + + return ret; +} + +static void +v3d_push_job(struct v3d_job *job) +{ + drm_sched_job_arm(&job->base); + + job->done_fence = dma_fence_get(&job->base.s_fence->finished); + + /* put by scheduler job completion */ + kref_get(&job->refcount); + + drm_sched_entity_push_job(&job->base); +} + +static void +v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, + struct v3d_job *job, + struct ww_acquire_ctx *acquire_ctx, + u32 out_sync, + struct v3d_submit_ext *se, + struct dma_fence *done_fence) +{ + struct drm_syncobj *sync_out; + bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC); + int i; + + for (i = 0; i < job->bo_count; i++) { + /* XXX: Use shared fences for read-only objects. 
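+ * For now every BO in the job gets done_fence added with
+ * DMA_RESV_USAGE_WRITE below, even BOs the GPU only reads.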
*/ + dma_resv_add_fence(job->bo[i]->resv, job->done_fence, + DMA_RESV_USAGE_WRITE); + } + + drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); + + /* Update the return sync object for the job */ + /* If it only supports a single signal semaphore*/ + if (!has_multisync) { + sync_out = drm_syncobj_find(file_priv, out_sync); + if (sync_out) { + drm_syncobj_replace_fence(sync_out, done_fence); + drm_syncobj_put(sync_out); + } + return; + } + + /* If multiple semaphores extension is supported */ + if (se->out_sync_count) { + for (i = 0; i < se->out_sync_count; i++) { + drm_syncobj_replace_fence(se->out_syncs[i].syncobj, + done_fence); + drm_syncobj_put(se->out_syncs[i].syncobj); + } + kvfree(se->out_syncs); + } +} + +static void +v3d_put_multisync_post_deps(struct v3d_submit_ext *se) +{ + unsigned int i; + + if (!(se && se->out_sync_count)) + return; + + for (i = 0; i < se->out_sync_count; i++) + drm_syncobj_put(se->out_syncs[i].syncobj); + kvfree(se->out_syncs); +} + +static int +v3d_get_multisync_post_deps(struct drm_file *file_priv, + struct v3d_submit_ext *se, + u32 count, u64 handles) +{ + struct drm_v3d_sem __user *post_deps; + int i, ret; + + if (!count) + return 0; + + se->out_syncs = (struct v3d_submit_outsync *) + kvmalloc_array(count, + sizeof(struct v3d_submit_outsync), + GFP_KERNEL); + if (!se->out_syncs) + return -ENOMEM; + + post_deps = u64_to_user_ptr(handles); + + for (i = 0; i < count; i++) { + struct drm_v3d_sem out; + + if (copy_from_user(&out, post_deps++, sizeof(out))) { + ret = -EFAULT; + DRM_DEBUG("Failed to copy post dep handles\n"); + goto fail; + } + + se->out_syncs[i].syncobj = drm_syncobj_find(file_priv, + out.handle); + if (!se->out_syncs[i].syncobj) { + ret = -EINVAL; + goto fail; + } + } + se->out_sync_count = count; + + return 0; + +fail: + for (i--; i >= 0; i--) + drm_syncobj_put(se->out_syncs[i].syncobj); + kvfree(se->out_syncs); + + return ret; +} + +/* Get data for multiple binary semaphores synchronization. Parse syncobj + * to be signaled when job completes (out_sync). + */ +static int +v3d_get_multisync_submit_deps(struct drm_file *file_priv, + struct drm_v3d_extension __user *ext, + void *data) +{ + struct drm_v3d_multi_sync multisync; + struct v3d_submit_ext *se = data; + int ret; + + if (copy_from_user(&multisync, ext, sizeof(multisync))) + return -EFAULT; + + if (multisync.pad) + return -EINVAL; + + ret = v3d_get_multisync_post_deps(file_priv, data, multisync.out_sync_count, + multisync.out_syncs); + if (ret) + return ret; + + se->in_sync_count = multisync.in_sync_count; + se->in_syncs = multisync.in_syncs; + se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC; + se->wait_stage = multisync.wait_stage; + + return 0; +} + +/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data + * according to the extension id (name). 
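+ * The extensions are chained through drm_v3d_extension.next and walked
+ * until a NULL pointer; only DRM_V3D_EXT_ID_MULTI_SYNC is currently
+ * recognized.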
+ */ +static int +v3d_get_extensions(struct drm_file *file_priv, + u64 ext_handles, + void *data) +{ + struct drm_v3d_extension __user *user_ext; + int ret; + + user_ext = u64_to_user_ptr(ext_handles); + while (user_ext) { + struct drm_v3d_extension ext; + + if (copy_from_user(&ext, user_ext, sizeof(ext))) { + DRM_DEBUG("Failed to copy submit extension\n"); + return -EFAULT; + } + + switch (ext.id) { + case DRM_V3D_EXT_ID_MULTI_SYNC: + ret = v3d_get_multisync_submit_deps(file_priv, user_ext, data); + if (ret) + return ret; + break; + default: + DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id); + return -EINVAL; + } + + user_ext = u64_to_user_ptr(ext.next); + } + + return 0; +} + +/** + * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D. + * @dev: DRM device + * @data: ioctl argument + * @file_priv: DRM file for this fd + * + * This is the main entrypoint for userspace to submit a 3D frame to + * the GPU. Userspace provides the binner command list (if + * applicable), and the kernel sets up the render command list to draw + * to the framebuffer described in the ioctl, using the command lists + * that the 3D engine's binner will produce. + */ +int +v3d_submit_cl_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_submit_cl *args = data; + struct v3d_submit_ext se = {0}; + struct v3d_bin_job *bin = NULL; + struct v3d_render_job *render = NULL; + struct v3d_job *clean_job = NULL; + struct v3d_job *last_job; + struct ww_acquire_ctx acquire_ctx; + int ret = 0; + + trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end); + + if (args->pad) + return -EINVAL; + + if (args->flags && + args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE | + DRM_V3D_SUBMIT_EXTENSION)) { + DRM_INFO("invalid flags: %d\n", args->flags); + return -EINVAL; + } + + if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { + ret = v3d_get_extensions(file_priv, args->extensions, &se); + if (ret) { + DRM_DEBUG("Failed to get extensions.\n"); + return ret; + } + } + + ret = v3d_job_init(v3d, file_priv, (void *)&render, sizeof(*render), + v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER); + if (ret) + goto fail; + + render->start = args->rcl_start; + render->end = args->rcl_end; + INIT_LIST_HEAD(&render->unref_list); + + if (args->bcl_start != args->bcl_end) { + ret = v3d_job_init(v3d, file_priv, (void *)&bin, sizeof(*bin), + v3d_job_free, args->in_sync_bcl, &se, V3D_BIN); + if (ret) + goto fail; + + bin->start = args->bcl_start; + bin->end = args->bcl_end; + bin->qma = args->qma; + bin->qms = args->qms; + bin->qts = args->qts; + bin->render = render; + } + + if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) { + ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job), + v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); + if (ret) + goto fail; + + last_job = clean_job; + } else { + last_job = &render->base; + } + + ret = v3d_lookup_bos(dev, file_priv, last_job, + args->bo_handles, args->bo_handle_count); + if (ret) + goto fail; + + ret = v3d_lock_bo_reservations(last_job, &acquire_ctx); + if (ret) + goto fail; + + if (args->perfmon_id) { + render->base.perfmon = v3d_perfmon_find(v3d_priv, + args->perfmon_id); + + if (!render->base.perfmon) { + ret = -ENOENT; + goto fail_perfmon; + } + } + + mutex_lock(&v3d->sched_lock); + if (bin) { + bin->base.perfmon = render->base.perfmon; + v3d_perfmon_get(bin->base.perfmon); + v3d_push_job(&bin->base); + + ret = 
drm_sched_job_add_dependency(&render->base.base, + dma_fence_get(bin->base.done_fence)); + if (ret) + goto fail_unreserve; + } + + v3d_push_job(&render->base); + + if (clean_job) { + struct dma_fence *render_fence = + dma_fence_get(render->base.done_fence); + ret = drm_sched_job_add_dependency(&clean_job->base, + render_fence); + if (ret) + goto fail_unreserve; + clean_job->perfmon = render->base.perfmon; + v3d_perfmon_get(clean_job->perfmon); + v3d_push_job(clean_job); + } + + mutex_unlock(&v3d->sched_lock); + + v3d_attach_fences_and_unlock_reservation(file_priv, + last_job, + &acquire_ctx, + args->out_sync, + &se, + last_job->done_fence); + + if (bin) + v3d_job_put(&bin->base); + v3d_job_put(&render->base); + if (clean_job) + v3d_job_put(clean_job); + + return 0; + +fail_unreserve: + mutex_unlock(&v3d->sched_lock); +fail_perfmon: + drm_gem_unlock_reservations(last_job->bo, + last_job->bo_count, &acquire_ctx); +fail: + v3d_job_cleanup((void *)bin); + v3d_job_cleanup((void *)render); + v3d_job_cleanup(clean_job); + v3d_put_multisync_post_deps(&se); + + return ret; +} + +/** + * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D. + * @dev: DRM device + * @data: ioctl argument + * @file_priv: DRM file for this fd + * + * Userspace provides the register setup for the TFU, which we don't + * need to validate since the TFU is behind the MMU. + */ +int +v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct drm_v3d_submit_tfu *args = data; + struct v3d_submit_ext se = {0}; + struct v3d_tfu_job *job = NULL; + struct ww_acquire_ctx acquire_ctx; + int ret = 0; + + trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia); + + if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) { + DRM_DEBUG("invalid flags: %d\n", args->flags); + return -EINVAL; + } + + if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { + ret = v3d_get_extensions(file_priv, args->extensions, &se); + if (ret) { + DRM_DEBUG("Failed to get extensions.\n"); + return ret; + } + } + + ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job), + v3d_job_free, args->in_sync, &se, V3D_TFU); + if (ret) + goto fail; + + job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles), + sizeof(*job->base.bo), GFP_KERNEL); + if (!job->base.bo) { + ret = -ENOMEM; + goto fail; + } + + job->args = *args; + + spin_lock(&file_priv->table_lock); + for (job->base.bo_count = 0; + job->base.bo_count < ARRAY_SIZE(args->bo_handles); + job->base.bo_count++) { + struct drm_gem_object *bo; + + if (!args->bo_handles[job->base.bo_count]) + break; + + bo = idr_find(&file_priv->object_idr, + args->bo_handles[job->base.bo_count]); + if (!bo) { + DRM_DEBUG("Failed to look up GEM BO %d: %d\n", + job->base.bo_count, + args->bo_handles[job->base.bo_count]); + ret = -ENOENT; + spin_unlock(&file_priv->table_lock); + goto fail; + } + drm_gem_object_get(bo); + job->base.bo[job->base.bo_count] = bo; + } + spin_unlock(&file_priv->table_lock); + + ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx); + if (ret) + goto fail; + + mutex_lock(&v3d->sched_lock); + v3d_push_job(&job->base); + mutex_unlock(&v3d->sched_lock); + + v3d_attach_fences_and_unlock_reservation(file_priv, + &job->base, &acquire_ctx, + args->out_sync, + &se, + job->base.done_fence); + + v3d_job_put(&job->base); + + return 0; + +fail: + v3d_job_cleanup((void *)job); + v3d_put_multisync_post_deps(&se); + + return ret; +} + +/** + * v3d_submit_csd_ioctl() - Submits a CSD (texture formatting) job to the V3D. 
+ * @dev: DRM device + * @data: ioctl argument + * @file_priv: DRM file for this fd + * + * Userspace provides the register setup for the CSD, which we don't + * need to validate since the CSD is behind the MMU. + */ +int +v3d_submit_csd_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_submit_csd *args = data; + struct v3d_submit_ext se = {0}; + struct v3d_csd_job *job = NULL; + struct v3d_job *clean_job = NULL; + struct ww_acquire_ctx acquire_ctx; + int ret; + + trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]); + + if (args->pad) + return -EINVAL; + + if (!v3d_has_csd(v3d)) { + DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n"); + return -EINVAL; + } + + if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) { + DRM_INFO("invalid flags: %d\n", args->flags); + return -EINVAL; + } + + if (args->flags & DRM_V3D_SUBMIT_EXTENSION) { + ret = v3d_get_extensions(file_priv, args->extensions, &se); + if (ret) { + DRM_DEBUG("Failed to get extensions.\n"); + return ret; + } + } + + ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job), + v3d_job_free, args->in_sync, &se, V3D_CSD); + if (ret) + goto fail; + + ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job), + v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); + if (ret) + goto fail; + + job->args = *args; + + ret = v3d_lookup_bos(dev, file_priv, clean_job, + args->bo_handles, args->bo_handle_count); + if (ret) + goto fail; + + ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx); + if (ret) + goto fail; + + if (args->perfmon_id) { + job->base.perfmon = v3d_perfmon_find(v3d_priv, + args->perfmon_id); + if (!job->base.perfmon) { + ret = -ENOENT; + goto fail_perfmon; + } + } + + mutex_lock(&v3d->sched_lock); + v3d_push_job(&job->base); + + ret = drm_sched_job_add_dependency(&clean_job->base, + dma_fence_get(job->base.done_fence)); + if (ret) + goto fail_unreserve; + + v3d_push_job(clean_job); + mutex_unlock(&v3d->sched_lock); + + v3d_attach_fences_and_unlock_reservation(file_priv, + clean_job, + &acquire_ctx, + args->out_sync, + &se, + clean_job->done_fence); + + v3d_job_put(&job->base); + v3d_job_put(clean_job); + + return 0; + +fail_unreserve: + mutex_unlock(&v3d->sched_lock); +fail_perfmon: + drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count, + &acquire_ctx); +fail: + v3d_job_cleanup((void *)job); + v3d_job_cleanup(clean_job); + v3d_put_multisync_post_deps(&se); + + return ret; +} + +int +v3d_gem_init(struct drm_device *dev) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + u32 pt_size = 4096 * 1024; + int ret, i; + + for (i = 0; i < V3D_MAX_QUEUES; i++) + v3d->queue[i].fence_context = dma_fence_context_alloc(1); + + spin_lock_init(&v3d->mm_lock); + spin_lock_init(&v3d->job_lock); + mutex_init(&v3d->bo_lock); + mutex_init(&v3d->reset_lock); + mutex_init(&v3d->sched_lock); + mutex_init(&v3d->cache_clean_lock); + + /* Note: We don't allocate address 0. Various bits of HW + * treat 0 as special, such as the occlusion query counters + * where 0 means "disabled". + */ + drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1); + + v3d->pt = dma_alloc_wc(v3d->drm.dev, pt_size, + &v3d->pt_paddr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (!v3d->pt) { + drm_mm_takedown(&v3d->mm); + dev_err(v3d->drm.dev, + "Failed to allocate page tables. 
Please ensure you have DMA enabled.\n"); + return -ENOMEM; + } + + v3d_init_hw_state(v3d); + v3d_mmu_set_page_table(v3d); + + ret = v3d_sched_init(v3d); + if (ret) { + drm_mm_takedown(&v3d->mm); + dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt, + v3d->pt_paddr); + } + + return 0; +} + +void +v3d_gem_destroy(struct drm_device *dev) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + + v3d_sched_fini(v3d); + + /* Waiting for jobs to finish would need to be done before + * unregistering V3D. + */ + WARN_ON(v3d->bin_job); + WARN_ON(v3d->render_job); + + drm_mm_takedown(&v3d->mm); + + dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt, + v3d->pt_paddr); +} diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c new file mode 100644 index 000000000..e714d5318 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2014-2018 Broadcom */ + +/** + * DOC: Interrupt management for the V3D engine + * + * When we take a bin, render, TFU done, or CSD done interrupt, we + * need to signal the fence for that job so that the scheduler can + * queue up the next one and unblock any waiters. + * + * When we take the binner out of memory interrupt, we need to + * allocate some new memory and pass it to the binner so that the + * current job can make progress. + */ + +#include + +#include "v3d_drv.h" +#include "v3d_regs.h" +#include "v3d_trace.h" + +#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM | \ + V3D_INT_FLDONE | \ + V3D_INT_FRDONE | \ + V3D_INT_CSDDONE | \ + V3D_INT_GMPV)) + +#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV | \ + V3D_HUB_INT_MMU_PTI | \ + V3D_HUB_INT_MMU_CAP | \ + V3D_HUB_INT_TFUC)) + +static irqreturn_t +v3d_hub_irq(int irq, void *arg); + +static void +v3d_overflow_mem_work(struct work_struct *work) +{ + struct v3d_dev *v3d = + container_of(work, struct v3d_dev, overflow_mem_work); + struct drm_device *dev = &v3d->drm; + struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024); + struct drm_gem_object *obj; + unsigned long irqflags; + + if (IS_ERR(bo)) { + DRM_ERROR("Couldn't allocate binner overflow mem\n"); + return; + } + obj = &bo->base.base; + + /* We lost a race, and our work task came in after the bin job + * completed and exited. This can happen because the HW + * signals OOM before it's fully OOM, so the binner might just + * barely complete. + * + * If we lose the race and our work task comes in after a new + * bin job got scheduled, that's fine. We'll just give them + * some binner pool anyway. + */ + spin_lock_irqsave(&v3d->job_lock, irqflags); + if (!v3d->bin_job) { + spin_unlock_irqrestore(&v3d->job_lock, irqflags); + goto out; + } + + drm_gem_object_get(obj); + list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list); + spin_unlock_irqrestore(&v3d->job_lock, irqflags); + + V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT); + V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size); + +out: + drm_gem_object_put(obj); +} + +static irqreturn_t +v3d_irq(int irq, void *arg) +{ + struct v3d_dev *v3d = arg; + u32 intsts; + irqreturn_t status = IRQ_NONE; + + intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS); + + /* Acknowledge the interrupts we're handling here. */ + V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts); + + if (intsts & V3D_INT_OUTOMEM) { + /* Note that the OOM status is edge signaled, so the + * interrupt won't happen again until the we actually + * add more memory. Also, as of V3D 4.1, FLDONE won't + * be reported until any OOM state has been cleared. 
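+		 * Allocating the overflow BO can sleep (shmem pages, BO
+		 * mutexes), so the work item does it rather than this
+		 * hard IRQ handler.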
+ */ + schedule_work(&v3d->overflow_mem_work); + status = IRQ_HANDLED; + } + + if (intsts & V3D_INT_FLDONE) { + struct v3d_fence *fence = + to_v3d_fence(v3d->bin_job->base.irq_fence); + + trace_v3d_bcl_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); + status = IRQ_HANDLED; + } + + if (intsts & V3D_INT_FRDONE) { + struct v3d_fence *fence = + to_v3d_fence(v3d->render_job->base.irq_fence); + + trace_v3d_rcl_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); + status = IRQ_HANDLED; + } + + if (intsts & V3D_INT_CSDDONE) { + struct v3d_fence *fence = + to_v3d_fence(v3d->csd_job->base.irq_fence); + + trace_v3d_csd_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); + status = IRQ_HANDLED; + } + + /* We shouldn't be triggering these if we have GMP in + * always-allowed mode. + */ + if (intsts & V3D_INT_GMPV) + dev_err(v3d->drm.dev, "GMP violation\n"); + + /* V3D 4.2 wires the hub and core IRQs together, so if we & + * didn't see the common one then check hub for MMU IRQs. + */ + if (v3d->single_irq_line && status == IRQ_NONE) + return v3d_hub_irq(irq, arg); + + return status; +} + +static irqreturn_t +v3d_hub_irq(int irq, void *arg) +{ + struct v3d_dev *v3d = arg; + u32 intsts; + irqreturn_t status = IRQ_NONE; + + intsts = V3D_READ(V3D_HUB_INT_STS); + + /* Acknowledge the interrupts we're handling here. */ + V3D_WRITE(V3D_HUB_INT_CLR, intsts); + + if (intsts & V3D_HUB_INT_TFUC) { + struct v3d_fence *fence = + to_v3d_fence(v3d->tfu_job->base.irq_fence); + + trace_v3d_tfu_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); + status = IRQ_HANDLED; + } + + if (intsts & (V3D_HUB_INT_MMU_WRV | + V3D_HUB_INT_MMU_PTI | + V3D_HUB_INT_MMU_CAP)) { + u32 axi_id = V3D_READ(V3D_MMU_VIO_ID); + u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) << + (v3d->va_width - 32)); + static const char *const v3d41_axi_ids[] = { + "L2T", + "PTB", + "PSE", + "TLB", + "CLE", + "TFU", + "MMU", + "GMP", + }; + const char *client = "?"; + + V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL)); + + if (v3d->ver >= 41) { + axi_id = axi_id >> 5; + if (axi_id < ARRAY_SIZE(v3d41_axi_ids)) + client = v3d41_axi_ids[axi_id]; + } + + dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n", + client, axi_id, (long long)vio_addr, + ((intsts & V3D_HUB_INT_MMU_WRV) ? + ", write violation" : ""), + ((intsts & V3D_HUB_INT_MMU_PTI) ? + ", pte invalid" : ""), + ((intsts & V3D_HUB_INT_MMU_CAP) ? + ", cap exceeded" : "")); + status = IRQ_HANDLED; + } + + return status; +} + +int +v3d_irq_init(struct v3d_dev *v3d) +{ + int irq1, ret, core; + + INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work); + + /* Clear any pending interrupts someone might have left around + * for us. 
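+	 * The firmware or a previously bound driver may have left stale
+	 * status bits behind; clearing them now means the handlers we
+	 * install below only ever see fresh events.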
+ */ + for (core = 0; core < v3d->cores; core++) + V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); + V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS); + + irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1); + if (irq1 == -EPROBE_DEFER) + return irq1; + if (irq1 > 0) { + ret = devm_request_irq(v3d->drm.dev, irq1, + v3d_irq, IRQF_SHARED, + "v3d_core0", v3d); + if (ret) + goto fail; + ret = devm_request_irq(v3d->drm.dev, + platform_get_irq(v3d_to_pdev(v3d), 0), + v3d_hub_irq, IRQF_SHARED, + "v3d_hub", v3d); + if (ret) + goto fail; + } else { + v3d->single_irq_line = true; + + ret = devm_request_irq(v3d->drm.dev, + platform_get_irq(v3d_to_pdev(v3d), 0), + v3d_irq, IRQF_SHARED, + "v3d", v3d); + if (ret) + goto fail; + } + + v3d_irq_enable(v3d); + return 0; + +fail: + if (ret != -EPROBE_DEFER) + dev_err(v3d->drm.dev, "IRQ setup failed: %d\n", ret); + return ret; +} + +void +v3d_irq_enable(struct v3d_dev *v3d) +{ + int core; + + /* Enable our set of interrupts, masking out any others. */ + for (core = 0; core < v3d->cores; core++) { + V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS); + V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS); + } + + V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS); + V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS); +} + +void +v3d_irq_disable(struct v3d_dev *v3d) +{ + int core; + + /* Disable all interrupts. */ + for (core = 0; core < v3d->cores; core++) + V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0); + V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0); + + /* Clear any pending interrupts we might have left. */ + for (core = 0; core < v3d->cores; core++) + V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); + V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS); + + cancel_work_sync(&v3d->overflow_mem_work); +} + +/** Reinitializes interrupt registers when a GPU reset is performed. */ +void v3d_irq_reset(struct v3d_dev *v3d) +{ + v3d_irq_enable(v3d); +} diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c new file mode 100644 index 000000000..5a4535329 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_mmu.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2017-2018 Broadcom */ + +/** + * DOC: Broadcom V3D MMU + * + * The V3D 3.x hardware (compared to VC4) now includes an MMU. It has + * a single level of page tables for the V3D's 4GB address space to + * map to AXI bus addresses, thus it could need up to 4MB of + * physically contiguous memory to store the PTEs. + * + * Because the 4MB of contiguous memory for page tables is precious, + * and switching between them is expensive, we load all BOs into the + * same 4GB address space. + * + * To protect clients from each other, we should use the GMP to + * quickly mask out (at 128kb granularity) what pages are available to + * each client. This is not yet implemented. + */ + +#include "v3d_drv.h" +#include "v3d_regs.h" + +#define V3D_MMU_PAGE_SHIFT 12 + +/* Note: All PTEs for the 1MB superpage must be filled with the + * superpage bit set. + */ +#define V3D_PTE_SUPERPAGE BIT(31) +#define V3D_PTE_WRITEABLE BIT(29) +#define V3D_PTE_VALID BIT(28) + +static int v3d_mmu_flush_all(struct v3d_dev *v3d) +{ + int ret; + + /* Make sure that another flush isn't already running when we + * start this one. 
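+	 * If that wait times out we only log it and press on; the TLB
+	 * clear below is issued either way.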
+ */ + ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & + V3D_MMU_CTL_TLB_CLEARING), 100); + if (ret) + dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n"); + + V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) | + V3D_MMU_CTL_TLB_CLEAR); + + V3D_WRITE(V3D_MMUC_CONTROL, + V3D_MMUC_CONTROL_FLUSH | + V3D_MMUC_CONTROL_ENABLE); + + ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & + V3D_MMU_CTL_TLB_CLEARING), 100); + if (ret) { + dev_err(v3d->drm.dev, "TLB clear wait idle failed\n"); + return ret; + } + + ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) & + V3D_MMUC_CONTROL_FLUSHING), 100); + if (ret) + dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n"); + + return ret; +} + +int v3d_mmu_set_page_table(struct v3d_dev *v3d) +{ + V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT); + V3D_WRITE(V3D_MMU_CTL, + V3D_MMU_CTL_ENABLE | + V3D_MMU_CTL_PT_INVALID_ENABLE | + V3D_MMU_CTL_PT_INVALID_ABORT | + V3D_MMU_CTL_PT_INVALID_INT | + V3D_MMU_CTL_WRITE_VIOLATION_ABORT | + V3D_MMU_CTL_WRITE_VIOLATION_INT | + V3D_MMU_CTL_CAP_EXCEEDED_ABORT | + V3D_MMU_CTL_CAP_EXCEEDED_INT); + V3D_WRITE(V3D_MMU_ILLEGAL_ADDR, + (v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) | + V3D_MMU_ILLEGAL_ADDR_ENABLE); + V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_ENABLE); + + return v3d_mmu_flush_all(v3d); +} + +void v3d_mmu_insert_ptes(struct v3d_bo *bo) +{ + struct drm_gem_shmem_object *shmem_obj = &bo->base; + struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev); + u32 page = bo->node.start; + u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID; + struct sg_dma_page_iter dma_iter; + + for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) { + dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter); + u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT; + u32 pte = page_prot | page_address; + u32 i; + + BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >= + BIT(24)); + for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++) + v3d->pt[page++] = pte + i; + } + + WARN_ON_ONCE(page - bo->node.start != + shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT); + + if (v3d_mmu_flush_all(v3d)) + dev_err(v3d->drm.dev, "MMU flush timeout\n"); +} + +void v3d_mmu_remove_ptes(struct v3d_bo *bo) +{ + struct v3d_dev *v3d = to_v3d_dev(bo->base.base.dev); + u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT; + u32 page; + + for (page = bo->node.start; page < bo->node.start + npages; page++) + v3d->pt[page] = 0; + + if (v3d_mmu_flush_all(v3d)) + dev_err(v3d->drm.dev, "MMU flush timeout\n"); +} diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c new file mode 100644 index 000000000..48aaaa972 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_perfmon.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Raspberry Pi + */ + +#include "v3d_drv.h" +#include "v3d_regs.h" + +#define V3D_PERFMONID_MIN 1 +#define V3D_PERFMONID_MAX U32_MAX + +void v3d_perfmon_get(struct v3d_perfmon *perfmon) +{ + if (perfmon) + refcount_inc(&perfmon->refcnt); +} + +void v3d_perfmon_put(struct v3d_perfmon *perfmon) +{ + if (perfmon && refcount_dec_and_test(&perfmon->refcnt)) + kfree(perfmon); +} + +void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon) +{ + unsigned int i; + u32 mask; + u8 ncounters; + + if (WARN_ON_ONCE(!perfmon || v3d->active_perfmon)) + return; + + ncounters = perfmon->ncounters; + mask = GENMASK(ncounters - 1, 0); + + for (i = 0; i < ncounters; i++) { + u32 source = i / 4; + u32 channel = V3D_SET_FIELD(perfmon->counters[i], V3D_PCTR_S0); + + i++; + channel |= V3D_SET_FIELD(i < 
ncounters ? perfmon->counters[i] : 0, + V3D_PCTR_S1); + i++; + channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0, + V3D_PCTR_S2); + i++; + channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0, + V3D_PCTR_S3); + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_SRC_X(source), channel); + } + + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_CLR, mask); + V3D_CORE_WRITE(0, V3D_PCTR_0_OVERFLOW, mask); + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask); + + v3d->active_perfmon = perfmon; +} + +void v3d_perfmon_stop(struct v3d_dev *v3d, struct v3d_perfmon *perfmon, + bool capture) +{ + unsigned int i; + + if (!perfmon || !v3d->active_perfmon) + return; + + mutex_lock(&perfmon->lock); + if (perfmon != v3d->active_perfmon) { + mutex_unlock(&perfmon->lock); + return; + } + + if (capture) + for (i = 0; i < perfmon->ncounters; i++) + perfmon->values[i] += V3D_CORE_READ(0, V3D_PCTR_0_PCTRX(i)); + + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, 0); + + v3d->active_perfmon = NULL; + mutex_unlock(&perfmon->lock); +} + +struct v3d_perfmon *v3d_perfmon_find(struct v3d_file_priv *v3d_priv, int id) +{ + struct v3d_perfmon *perfmon; + + mutex_lock(&v3d_priv->perfmon.lock); + perfmon = idr_find(&v3d_priv->perfmon.idr, id); + v3d_perfmon_get(perfmon); + mutex_unlock(&v3d_priv->perfmon.lock); + + return perfmon; +} + +void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv) +{ + mutex_init(&v3d_priv->perfmon.lock); + idr_init_base(&v3d_priv->perfmon.idr, 1); +} + +static int v3d_perfmon_idr_del(int id, void *elem, void *data) +{ + struct v3d_perfmon *perfmon = elem; + + v3d_perfmon_put(perfmon); + + return 0; +} + +void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv) +{ + mutex_lock(&v3d_priv->perfmon.lock); + idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL); + idr_destroy(&v3d_priv->perfmon.idr); + mutex_unlock(&v3d_priv->perfmon.lock); +} + +int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_perfmon_create *req = data; + struct v3d_perfmon *perfmon; + unsigned int i; + int ret; + + /* Number of monitored counters cannot exceed HW limits. */ + if (req->ncounters > DRM_V3D_MAX_PERF_COUNTERS || + !req->ncounters) + return -EINVAL; + + /* Make sure all counters are valid. 
*/ + for (i = 0; i < req->ncounters; i++) { + if (req->counters[i] >= V3D_PERFCNT_NUM) + return -EINVAL; + } + + perfmon = kzalloc(struct_size(perfmon, values, req->ncounters), + GFP_KERNEL); + if (!perfmon) + return -ENOMEM; + + for (i = 0; i < req->ncounters; i++) + perfmon->counters[i] = req->counters[i]; + + perfmon->ncounters = req->ncounters; + + refcount_set(&perfmon->refcnt, 1); + mutex_init(&perfmon->lock); + + mutex_lock(&v3d_priv->perfmon.lock); + ret = idr_alloc(&v3d_priv->perfmon.idr, perfmon, V3D_PERFMONID_MIN, + V3D_PERFMONID_MAX, GFP_KERNEL); + mutex_unlock(&v3d_priv->perfmon.lock); + + if (ret < 0) { + kfree(perfmon); + return ret; + } + + req->id = ret; + + return 0; +} + +int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_perfmon_destroy *req = data; + struct v3d_perfmon *perfmon; + + mutex_lock(&v3d_priv->perfmon.lock); + perfmon = idr_remove(&v3d_priv->perfmon.idr, req->id); + mutex_unlock(&v3d_priv->perfmon.lock); + + if (!perfmon) + return -EINVAL; + + v3d_perfmon_put(perfmon); + + return 0; +} + +int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_perfmon_get_values *req = data; + struct v3d_perfmon *perfmon; + int ret = 0; + + if (req->pad != 0) + return -EINVAL; + + mutex_lock(&v3d_priv->perfmon.lock); + perfmon = idr_find(&v3d_priv->perfmon.idr, req->id); + v3d_perfmon_get(perfmon); + mutex_unlock(&v3d_priv->perfmon.lock); + + if (!perfmon) + return -EINVAL; + + v3d_perfmon_stop(v3d, perfmon, true); + + if (copy_to_user(u64_to_user_ptr(req->values_ptr), perfmon->values, + perfmon->ncounters * sizeof(u64))) + ret = -EFAULT; + + v3d_perfmon_put(perfmon); + + return ret; +} diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h new file mode 100644 index 000000000..3663e0d6b --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_regs.h @@ -0,0 +1,499 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2017-2018 Broadcom */ + +#ifndef V3D_REGS_H +#define V3D_REGS_H + +#include + +#define V3D_MASK(high, low) ((u32)GENMASK(high, low)) +/* Using the GNU statement expression extension */ +#define V3D_SET_FIELD(value, field) \ + ({ \ + u32 fieldval = (value) << field##_SHIFT; \ + WARN_ON((fieldval & ~field##_MASK) != 0); \ + fieldval & field##_MASK; \ + }) + +#define V3D_GET_FIELD(word, field) (((word) & field##_MASK) >> \ + field##_SHIFT) + +/* Hub registers for shared hardware between V3D cores. 
*/ + +#define V3D_HUB_AXICFG 0x00000 +# define V3D_HUB_AXICFG_MAX_LEN_MASK V3D_MASK(3, 0) +# define V3D_HUB_AXICFG_MAX_LEN_SHIFT 0 +#define V3D_HUB_UIFCFG 0x00004 +#define V3D_HUB_IDENT0 0x00008 + +#define V3D_HUB_IDENT1 0x0000c +# define V3D_HUB_IDENT1_WITH_MSO BIT(19) +# define V3D_HUB_IDENT1_WITH_TSY BIT(18) +# define V3D_HUB_IDENT1_WITH_TFU BIT(17) +# define V3D_HUB_IDENT1_WITH_L3C BIT(16) +# define V3D_HUB_IDENT1_NHOSTS_MASK V3D_MASK(15, 12) +# define V3D_HUB_IDENT1_NHOSTS_SHIFT 12 +# define V3D_HUB_IDENT1_NCORES_MASK V3D_MASK(11, 8) +# define V3D_HUB_IDENT1_NCORES_SHIFT 8 +# define V3D_HUB_IDENT1_REV_MASK V3D_MASK(7, 4) +# define V3D_HUB_IDENT1_REV_SHIFT 4 +# define V3D_HUB_IDENT1_TVER_MASK V3D_MASK(3, 0) +# define V3D_HUB_IDENT1_TVER_SHIFT 0 + +#define V3D_HUB_IDENT2 0x00010 +# define V3D_HUB_IDENT2_WITH_MMU BIT(8) +# define V3D_HUB_IDENT2_L3C_NKB_MASK V3D_MASK(7, 0) +# define V3D_HUB_IDENT2_L3C_NKB_SHIFT 0 + +#define V3D_HUB_IDENT3 0x00014 +# define V3D_HUB_IDENT3_IPREV_MASK V3D_MASK(15, 8) +# define V3D_HUB_IDENT3_IPREV_SHIFT 8 +# define V3D_HUB_IDENT3_IPIDX_MASK V3D_MASK(7, 0) +# define V3D_HUB_IDENT3_IPIDX_SHIFT 0 + +#define V3D_HUB_INT_STS 0x00050 +#define V3D_HUB_INT_SET 0x00054 +#define V3D_HUB_INT_CLR 0x00058 +#define V3D_HUB_INT_MSK_STS 0x0005c +#define V3D_HUB_INT_MSK_SET 0x00060 +#define V3D_HUB_INT_MSK_CLR 0x00064 +# define V3D_HUB_INT_MMU_WRV BIT(5) +# define V3D_HUB_INT_MMU_PTI BIT(4) +# define V3D_HUB_INT_MMU_CAP BIT(3) +# define V3D_HUB_INT_MSO BIT(2) +# define V3D_HUB_INT_TFUC BIT(1) +# define V3D_HUB_INT_TFUF BIT(0) + +#define V3D_GCA_CACHE_CTRL 0x0000c +# define V3D_GCA_CACHE_CTRL_FLUSH BIT(0) + +#define V3D_GCA_SAFE_SHUTDOWN 0x000b0 +# define V3D_GCA_SAFE_SHUTDOWN_EN BIT(0) + +#define V3D_GCA_SAFE_SHUTDOWN_ACK 0x000b4 +# define V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED 3 + +# define V3D_TOP_GR_BRIDGE_REVISION 0x00000 +# define V3D_TOP_GR_BRIDGE_MAJOR_MASK V3D_MASK(15, 8) +# define V3D_TOP_GR_BRIDGE_MAJOR_SHIFT 8 +# define V3D_TOP_GR_BRIDGE_MINOR_MASK V3D_MASK(7, 0) +# define V3D_TOP_GR_BRIDGE_MINOR_SHIFT 0 + +/* 7268 reset reg */ +# define V3D_TOP_GR_BRIDGE_SW_INIT_0 0x00008 +# define V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT BIT(0) +/* 7278 reset reg */ +# define V3D_TOP_GR_BRIDGE_SW_INIT_1 0x0000c +# define V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT BIT(0) + +#define V3D_TFU_CS 0x00400 +/* Stops current job, empties input fifo. */ +# define V3D_TFU_CS_TFURST BIT(31) +# define V3D_TFU_CS_CVTCT_MASK V3D_MASK(23, 16) +# define V3D_TFU_CS_CVTCT_SHIFT 16 +# define V3D_TFU_CS_NFREE_MASK V3D_MASK(13, 8) +# define V3D_TFU_CS_NFREE_SHIFT 8 +# define V3D_TFU_CS_BUSY BIT(0) + +#define V3D_TFU_SU 0x00404 +/* Interrupt when FINTTHR input slots are free (0 = disabled) */ +# define V3D_TFU_SU_FINTTHR_MASK V3D_MASK(13, 8) +# define V3D_TFU_SU_FINTTHR_SHIFT 8 +/* Skips resetting the CRC at the start of CRC generation. */ +# define V3D_TFU_SU_CRCCHAIN BIT(4) +/* skips writes, computes CRC of the image. miplevels must be 0. */ +# define V3D_TFU_SU_CRC BIT(3) +# define V3D_TFU_SU_THROTTLE_MASK V3D_MASK(1, 0) +# define V3D_TFU_SU_THROTTLE_SHIFT 0 + +#define V3D_TFU_ICFG 0x00408 +/* Interrupt when the conversion is complete. 
*/ +# define V3D_TFU_ICFG_IOC BIT(0) + +/* Input Image Address */ +#define V3D_TFU_IIA 0x0040c +/* Input Chroma Address */ +#define V3D_TFU_ICA 0x00410 +/* Input Image Stride */ +#define V3D_TFU_IIS 0x00414 +/* Input Image U-Plane Address */ +#define V3D_TFU_IUA 0x00418 +/* Output Image Address */ +#define V3D_TFU_IOA 0x0041c +/* Image Output Size */ +#define V3D_TFU_IOS 0x00420 +/* TFU YUV Coefficient 0 */ +#define V3D_TFU_COEF0 0x00424 +/* Use these regs instead of the defaults. */ +# define V3D_TFU_COEF0_USECOEF BIT(31) +/* TFU YUV Coefficient 1 */ +#define V3D_TFU_COEF1 0x00428 +/* TFU YUV Coefficient 2 */ +#define V3D_TFU_COEF2 0x0042c +/* TFU YUV Coefficient 3 */ +#define V3D_TFU_COEF3 0x00430 + +#define V3D_TFU_CRC 0x00434 + +/* Per-MMU registers. */ + +#define V3D_MMUC_CONTROL 0x01000 +# define V3D_MMUC_CONTROL_CLEAR BIT(3) +# define V3D_MMUC_CONTROL_FLUSHING BIT(2) +# define V3D_MMUC_CONTROL_FLUSH BIT(1) +# define V3D_MMUC_CONTROL_ENABLE BIT(0) + +#define V3D_MMU_CTL 0x01200 +# define V3D_MMU_CTL_CAP_EXCEEDED BIT(27) +# define V3D_MMU_CTL_CAP_EXCEEDED_ABORT BIT(26) +# define V3D_MMU_CTL_CAP_EXCEEDED_INT BIT(25) +# define V3D_MMU_CTL_CAP_EXCEEDED_EXCEPTION BIT(24) +# define V3D_MMU_CTL_PT_INVALID BIT(20) +# define V3D_MMU_CTL_PT_INVALID_ABORT BIT(19) +# define V3D_MMU_CTL_PT_INVALID_INT BIT(18) +# define V3D_MMU_CTL_PT_INVALID_EXCEPTION BIT(17) +# define V3D_MMU_CTL_PT_INVALID_ENABLE BIT(16) +# define V3D_MMU_CTL_WRITE_VIOLATION BIT(12) +# define V3D_MMU_CTL_WRITE_VIOLATION_ABORT BIT(11) +# define V3D_MMU_CTL_WRITE_VIOLATION_INT BIT(10) +# define V3D_MMU_CTL_WRITE_VIOLATION_EXCEPTION BIT(9) +# define V3D_MMU_CTL_TLB_CLEARING BIT(7) +# define V3D_MMU_CTL_TLB_STATS_CLEAR BIT(3) +# define V3D_MMU_CTL_TLB_CLEAR BIT(2) +# define V3D_MMU_CTL_TLB_STATS_ENABLE BIT(1) +# define V3D_MMU_CTL_ENABLE BIT(0) + +#define V3D_MMU_PT_PA_BASE 0x01204 +#define V3D_MMU_HIT 0x01208 +#define V3D_MMU_MISSES 0x0120c +#define V3D_MMU_STALLS 0x01210 + +#define V3D_MMU_ADDR_CAP 0x01214 +# define V3D_MMU_ADDR_CAP_ENABLE BIT(31) +# define V3D_MMU_ADDR_CAP_MPAGE_MASK V3D_MASK(11, 0) +# define V3D_MMU_ADDR_CAP_MPAGE_SHIFT 0 + +#define V3D_MMU_SHOOT_DOWN 0x01218 +# define V3D_MMU_SHOOT_DOWN_SHOOTING BIT(29) +# define V3D_MMU_SHOOT_DOWN_SHOOT BIT(28) +# define V3D_MMU_SHOOT_DOWN_PAGE_MASK V3D_MASK(27, 0) +# define V3D_MMU_SHOOT_DOWN_PAGE_SHIFT 0 + +#define V3D_MMU_BYPASS_START 0x0121c +#define V3D_MMU_BYPASS_END 0x01220 + +/* AXI ID of the access that faulted */ +#define V3D_MMU_VIO_ID 0x0122c + +/* Address for illegal PTEs to return */ +#define V3D_MMU_ILLEGAL_ADDR 0x01230 +# define V3D_MMU_ILLEGAL_ADDR_ENABLE BIT(31) + +/* Address that faulted */ +#define V3D_MMU_VIO_ADDR 0x01234 + +#define V3D_MMU_DEBUG_INFO 0x01238 +# define V3D_MMU_PA_WIDTH_MASK V3D_MASK(11, 8) +# define V3D_MMU_PA_WIDTH_SHIFT 8 +# define V3D_MMU_VA_WIDTH_MASK V3D_MASK(7, 4) +# define V3D_MMU_VA_WIDTH_SHIFT 4 +# define V3D_MMU_VERSION_MASK V3D_MASK(3, 0) +# define V3D_MMU_VERSION_SHIFT 0 + +/* Per-V3D-core registers */ + +#define V3D_CTL_IDENT0 0x00000 +# define V3D_IDENT0_VER_MASK V3D_MASK(31, 24) +# define V3D_IDENT0_VER_SHIFT 24 + +#define V3D_CTL_IDENT1 0x00004 +/* Multiples of 1kb */ +# define V3D_IDENT1_VPM_SIZE_MASK V3D_MASK(31, 28) +# define V3D_IDENT1_VPM_SIZE_SHIFT 28 +# define V3D_IDENT1_NSEM_MASK V3D_MASK(23, 16) +# define V3D_IDENT1_NSEM_SHIFT 16 +# define V3D_IDENT1_NTMU_MASK V3D_MASK(15, 12) +# define V3D_IDENT1_NTMU_SHIFT 12 +# define V3D_IDENT1_QUPS_MASK V3D_MASK(11, 8) +# define V3D_IDENT1_QUPS_SHIFT 8 +# define 
V3D_IDENT1_NSLC_MASK V3D_MASK(7, 4) +# define V3D_IDENT1_NSLC_SHIFT 4 +# define V3D_IDENT1_REV_MASK V3D_MASK(3, 0) +# define V3D_IDENT1_REV_SHIFT 0 + +#define V3D_CTL_IDENT2 0x00008 +# define V3D_IDENT2_BCG_INT BIT(28) + +#define V3D_CTL_MISCCFG 0x00018 +# define V3D_CTL_MISCCFG_QRMAXCNT_MASK V3D_MASK(3, 1) +# define V3D_CTL_MISCCFG_QRMAXCNT_SHIFT 1 +# define V3D_MISCCFG_OVRTMUOUT BIT(0) + +#define V3D_CTL_L2CACTL 0x00020 +# define V3D_L2CACTL_L2CCLR BIT(2) +# define V3D_L2CACTL_L2CDIS BIT(1) +# define V3D_L2CACTL_L2CENA BIT(0) + +#define V3D_CTL_SLCACTL 0x00024 +# define V3D_SLCACTL_TVCCS_MASK V3D_MASK(27, 24) +# define V3D_SLCACTL_TVCCS_SHIFT 24 +# define V3D_SLCACTL_TDCCS_MASK V3D_MASK(19, 16) +# define V3D_SLCACTL_TDCCS_SHIFT 16 +# define V3D_SLCACTL_UCC_MASK V3D_MASK(11, 8) +# define V3D_SLCACTL_UCC_SHIFT 8 +# define V3D_SLCACTL_ICC_MASK V3D_MASK(3, 0) +# define V3D_SLCACTL_ICC_SHIFT 0 + +#define V3D_CTL_L2TCACTL 0x00030 +# define V3D_L2TCACTL_TMUWCF BIT(8) +# define V3D_L2TCACTL_L2T_NO_WM BIT(4) +/* Invalidates cache lines. */ +# define V3D_L2TCACTL_FLM_FLUSH 0 +/* Removes cachelines without writing dirty lines back. */ +# define V3D_L2TCACTL_FLM_CLEAR 1 +/* Writes out dirty cachelines and marks them clean, but doesn't invalidate. */ +# define V3D_L2TCACTL_FLM_CLEAN 2 +# define V3D_L2TCACTL_FLM_MASK V3D_MASK(2, 1) +# define V3D_L2TCACTL_FLM_SHIFT 1 +# define V3D_L2TCACTL_L2TFLS BIT(0) +#define V3D_CTL_L2TFLSTA 0x00034 +#define V3D_CTL_L2TFLEND 0x00038 + +#define V3D_CTL_INT_STS 0x00050 +#define V3D_CTL_INT_SET 0x00054 +#define V3D_CTL_INT_CLR 0x00058 +#define V3D_CTL_INT_MSK_STS 0x0005c +#define V3D_CTL_INT_MSK_SET 0x00060 +#define V3D_CTL_INT_MSK_CLR 0x00064 +# define V3D_INT_QPU_MASK V3D_MASK(27, 16) +# define V3D_INT_QPU_SHIFT 16 +# define V3D_INT_CSDDONE BIT(7) +# define V3D_INT_PCTR BIT(6) +# define V3D_INT_GMPV BIT(5) +# define V3D_INT_TRFB BIT(4) +# define V3D_INT_SPILLUSE BIT(3) +# define V3D_INT_OUTOMEM BIT(2) +# define V3D_INT_FLDONE BIT(1) +# define V3D_INT_FRDONE BIT(0) + +#define V3D_CLE_CT0CS 0x00100 +#define V3D_CLE_CT1CS 0x00104 +#define V3D_CLE_CTNCS(n) (V3D_CLE_CT0CS + 4 * n) +#define V3D_CLE_CT0EA 0x00108 +#define V3D_CLE_CT1EA 0x0010c +#define V3D_CLE_CTNEA(n) (V3D_CLE_CT0EA + 4 * n) +#define V3D_CLE_CT0CA 0x00110 +#define V3D_CLE_CT1CA 0x00114 +#define V3D_CLE_CTNCA(n) (V3D_CLE_CT0CA + 4 * n) +#define V3D_CLE_CT0RA 0x00118 +#define V3D_CLE_CT1RA 0x0011c +#define V3D_CLE_CTNRA(n) (V3D_CLE_CT0RA + 4 * n) +#define V3D_CLE_CT0LC 0x00120 +#define V3D_CLE_CT1LC 0x00124 +#define V3D_CLE_CT0PC 0x00128 +#define V3D_CLE_CT1PC 0x0012c +#define V3D_CLE_PCS 0x00130 +#define V3D_CLE_BFC 0x00134 +#define V3D_CLE_RFC 0x00138 +#define V3D_CLE_TFBC 0x0013c +#define V3D_CLE_TFIT 0x00140 +#define V3D_CLE_CT1CFG 0x00144 +#define V3D_CLE_CT1TILECT 0x00148 +#define V3D_CLE_CT1TSKIP 0x0014c +#define V3D_CLE_CT1PTCT 0x00150 +#define V3D_CLE_CT0SYNC 0x00154 +#define V3D_CLE_CT1SYNC 0x00158 +#define V3D_CLE_CT0QTS 0x0015c +# define V3D_CLE_CT0QTS_ENABLE BIT(1) +#define V3D_CLE_CT0QBA 0x00160 +#define V3D_CLE_CT1QBA 0x00164 +#define V3D_CLE_CTNQBA(n) (V3D_CLE_CT0QBA + 4 * n) +#define V3D_CLE_CT0QEA 0x00168 +#define V3D_CLE_CT1QEA 0x0016c +#define V3D_CLE_CTNQEA(n) (V3D_CLE_CT0QEA + 4 * n) +#define V3D_CLE_CT0QMA 0x00170 +#define V3D_CLE_CT0QMS 0x00174 +#define V3D_CLE_CT1QCFG 0x00178 +/* If set without ETPROC, entirely skip tiles with no primitives. */ +# define V3D_CLE_QCFG_ETFILT BIT(7) +/* If set with ETFILT, just write the clear color to tiles with no + * primitives. 
+ */ +# define V3D_CLE_QCFG_ETPROC BIT(6) +# define V3D_CLE_QCFG_ETSFLUSH BIT(1) +# define V3D_CLE_QCFG_MCDIS BIT(0) + +#define V3D_PTB_BPCA 0x00300 +#define V3D_PTB_BPCS 0x00304 +#define V3D_PTB_BPOA 0x00308 +#define V3D_PTB_BPOS 0x0030c + +#define V3D_PTB_BXCF 0x00310 +# define V3D_PTB_BXCF_RWORDERDISA BIT(1) +# define V3D_PTB_BXCF_CLIPDISA BIT(0) + +#define V3D_V3_PCTR_0_EN 0x00674 +#define V3D_V3_PCTR_0_EN_ENABLE BIT(31) +#define V3D_V4_PCTR_0_EN 0x00650 +/* When a bit is set, resets the counter to 0. */ +#define V3D_V3_PCTR_0_CLR 0x00670 +#define V3D_V4_PCTR_0_CLR 0x00654 +#define V3D_PCTR_0_OVERFLOW 0x00658 + +#define V3D_V3_PCTR_0_PCTRS0 0x00684 +#define V3D_V3_PCTR_0_PCTRS15 0x00660 +#define V3D_V3_PCTR_0_PCTRSX(x) (V3D_V3_PCTR_0_PCTRS0 + \ + 4 * (x)) +/* Each src reg muxes four counters each. */ +#define V3D_V4_PCTR_0_SRC_0_3 0x00660 +#define V3D_V4_PCTR_0_SRC_28_31 0x0067c +#define V3D_V4_PCTR_0_SRC_X(x) (V3D_V4_PCTR_0_SRC_0_3 + \ + 4 * (x)) +# define V3D_PCTR_S0_MASK V3D_MASK(6, 0) +# define V3D_PCTR_S0_SHIFT 0 +# define V3D_PCTR_S1_MASK V3D_MASK(14, 8) +# define V3D_PCTR_S1_SHIFT 8 +# define V3D_PCTR_S2_MASK V3D_MASK(22, 16) +# define V3D_PCTR_S2_SHIFT 16 +# define V3D_PCTR_S3_MASK V3D_MASK(30, 24) +# define V3D_PCTR_S3_SHIFT 24 +# define V3D_PCTR_CYCLE_COUNT 32 + +/* Output values of the counters. */ +#define V3D_PCTR_0_PCTR0 0x00680 +#define V3D_PCTR_0_PCTR31 0x006fc +#define V3D_PCTR_0_PCTRX(x) (V3D_PCTR_0_PCTR0 + \ + 4 * (x)) +#define V3D_GMP_STATUS 0x00800 +# define V3D_GMP_STATUS_GMPRST BIT(31) +# define V3D_GMP_STATUS_WR_COUNT_MASK V3D_MASK(30, 24) +# define V3D_GMP_STATUS_WR_COUNT_SHIFT 24 +# define V3D_GMP_STATUS_RD_COUNT_MASK V3D_MASK(22, 16) +# define V3D_GMP_STATUS_RD_COUNT_SHIFT 16 +# define V3D_GMP_STATUS_WR_ACTIVE BIT(5) +# define V3D_GMP_STATUS_RD_ACTIVE BIT(4) +# define V3D_GMP_STATUS_CFG_BUSY BIT(3) +# define V3D_GMP_STATUS_CNTOVF BIT(2) +# define V3D_GMP_STATUS_INVPROT BIT(1) +# define V3D_GMP_STATUS_VIO BIT(0) + +#define V3D_GMP_CFG 0x00804 +# define V3D_GMP_CFG_LBURSTEN BIT(3) +# define V3D_GMP_CFG_PGCRSEN BIT() +# define V3D_GMP_CFG_STOP_REQ BIT(1) +# define V3D_GMP_CFG_PROT_ENABLE BIT(0) + +#define V3D_GMP_VIO_ADDR 0x00808 +#define V3D_GMP_VIO_TYPE 0x0080c +#define V3D_GMP_TABLE_ADDR 0x00810 +#define V3D_GMP_CLEAR_LOAD 0x00814 +#define V3D_GMP_PRESERVE_LOAD 0x00818 +#define V3D_GMP_VALID_LINES 0x00820 + +#define V3D_CSD_STATUS 0x00900 +# define V3D_CSD_STATUS_NUM_COMPLETED_MASK V3D_MASK(11, 4) +# define V3D_CSD_STATUS_NUM_COMPLETED_SHIFT 4 +# define V3D_CSD_STATUS_NUM_ACTIVE_MASK V3D_MASK(3, 2) +# define V3D_CSD_STATUS_NUM_ACTIVE_SHIFT 2 +# define V3D_CSD_STATUS_HAVE_CURRENT_DISPATCH BIT(1) +# define V3D_CSD_STATUS_HAVE_QUEUED_DISPATCH BIT(0) + +#define V3D_CSD_QUEUED_CFG0 0x00904 +# define V3D_CSD_QUEUED_CFG0_NUM_WGS_X_MASK V3D_MASK(31, 16) +# define V3D_CSD_QUEUED_CFG0_NUM_WGS_X_SHIFT 16 +# define V3D_CSD_QUEUED_CFG0_WG_X_OFFSET_MASK V3D_MASK(15, 0) +# define V3D_CSD_QUEUED_CFG0_WG_X_OFFSET_SHIFT 0 + +#define V3D_CSD_QUEUED_CFG1 0x00908 +# define V3D_CSD_QUEUED_CFG1_NUM_WGS_Y_MASK V3D_MASK(31, 16) +# define V3D_CSD_QUEUED_CFG1_NUM_WGS_Y_SHIFT 16 +# define V3D_CSD_QUEUED_CFG1_WG_Y_OFFSET_MASK V3D_MASK(15, 0) +# define V3D_CSD_QUEUED_CFG1_WG_Y_OFFSET_SHIFT 0 + +#define V3D_CSD_QUEUED_CFG2 0x0090c +# define V3D_CSD_QUEUED_CFG2_NUM_WGS_Z_MASK V3D_MASK(31, 16) +# define V3D_CSD_QUEUED_CFG2_NUM_WGS_Z_SHIFT 16 +# define V3D_CSD_QUEUED_CFG2_WG_Z_OFFSET_MASK V3D_MASK(15, 0) +# define V3D_CSD_QUEUED_CFG2_WG_Z_OFFSET_SHIFT 0 + +#define V3D_CSD_QUEUED_CFG3 0x00910 
+# define V3D_CSD_QUEUED_CFG3_OVERLAP_WITH_PREV BIT(26) +# define V3D_CSD_QUEUED_CFG3_MAX_SG_ID_MASK V3D_MASK(25, 20) +# define V3D_CSD_QUEUED_CFG3_MAX_SG_ID_SHIFT 20 +# define V3D_CSD_QUEUED_CFG3_BATCHES_PER_SG_M1_MASK V3D_MASK(19, 12) +# define V3D_CSD_QUEUED_CFG3_BATCHES_PER_SG_M1_SHIFT 12 +# define V3D_CSD_QUEUED_CFG3_WGS_PER_SG_MASK V3D_MASK(11, 8) +# define V3D_CSD_QUEUED_CFG3_WGS_PER_SG_SHIFT 8 +# define V3D_CSD_QUEUED_CFG3_WG_SIZE_MASK V3D_MASK(7, 0) +# define V3D_CSD_QUEUED_CFG3_WG_SIZE_SHIFT 0 + +/* Number of batches, minus 1 */ +#define V3D_CSD_QUEUED_CFG4 0x00914 + +/* Shader address, pnan, singleseg, threading, like a shader record. */ +#define V3D_CSD_QUEUED_CFG5 0x00918 + +/* Uniforms address (4 byte aligned) */ +#define V3D_CSD_QUEUED_CFG6 0x0091c + +#define V3D_CSD_CURRENT_CFG0 0x00920 +#define V3D_CSD_CURRENT_CFG1 0x00924 +#define V3D_CSD_CURRENT_CFG2 0x00928 +#define V3D_CSD_CURRENT_CFG3 0x0092c +#define V3D_CSD_CURRENT_CFG4 0x00930 +#define V3D_CSD_CURRENT_CFG5 0x00934 +#define V3D_CSD_CURRENT_CFG6 0x00938 + +#define V3D_CSD_CURRENT_ID0 0x0093c +# define V3D_CSD_CURRENT_ID0_WG_X_MASK V3D_MASK(31, 16) +# define V3D_CSD_CURRENT_ID0_WG_X_SHIFT 16 +# define V3D_CSD_CURRENT_ID0_WG_IN_SG_MASK V3D_MASK(11, 8) +# define V3D_CSD_CURRENT_ID0_WG_IN_SG_SHIFT 8 +# define V3D_CSD_CURRENT_ID0_L_IDX_MASK V3D_MASK(7, 0) +# define V3D_CSD_CURRENT_ID0_L_IDX_SHIFT 0 + +#define V3D_CSD_CURRENT_ID1 0x00940 +# define V3D_CSD_CURRENT_ID0_WG_Z_MASK V3D_MASK(31, 16) +# define V3D_CSD_CURRENT_ID0_WG_Z_SHIFT 16 +# define V3D_CSD_CURRENT_ID0_WG_Y_MASK V3D_MASK(15, 0) +# define V3D_CSD_CURRENT_ID0_WG_Y_SHIFT 0 + +#define V3D_ERR_FDBGO 0x00f04 +#define V3D_ERR_FDBGB 0x00f08 +#define V3D_ERR_FDBGR 0x00f0c + +#define V3D_ERR_FDBGS 0x00f10 +# define V3D_ERR_FDBGS_INTERPZ_IP_STALL BIT(17) +# define V3D_ERR_FDBGS_DEPTHO_FIFO_IP_STALL BIT(16) +# define V3D_ERR_FDBGS_XYNRM_IP_STALL BIT(14) +# define V3D_ERR_FDBGS_EZREQ_FIFO_OP_VALID BIT(13) +# define V3D_ERR_FDBGS_QXYF_FIFO_OP_VALID BIT(12) +# define V3D_ERR_FDBGS_QXYF_FIFO_OP_LAST BIT(11) +# define V3D_ERR_FDBGS_EZTEST_ANYQVALID BIT(7) +# define V3D_ERR_FDBGS_EZTEST_PASS BIT(6) +# define V3D_ERR_FDBGS_EZTEST_QREADY BIT(5) +# define V3D_ERR_FDBGS_EZTEST_VLF_OKNOVALID BIT(4) +# define V3D_ERR_FDBGS_EZTEST_QSTALL BIT(3) +# define V3D_ERR_FDBGS_EZTEST_IP_VLFSTALL BIT(2) +# define V3D_ERR_FDBGS_EZTEST_IP_PRSTALL BIT(1) +# define V3D_ERR_FDBGS_EZTEST_IP_QSTALL BIT(0) + +#define V3D_ERR_STAT 0x00f20 +# define V3D_ERR_L2CARE BIT(15) +# define V3D_ERR_VCMBE BIT(14) +# define V3D_ERR_VCMRE BIT(13) +# define V3D_ERR_VCDI BIT(12) +# define V3D_ERR_VCDE BIT(11) +# define V3D_ERR_VDWE BIT(10) +# define V3D_ERR_VPMEAS BIT(9) +# define V3D_ERR_VPMEFNA BIT(8) +# define V3D_ERR_VPMEWNA BIT(7) +# define V3D_ERR_VPMERNA BIT(6) +# define V3D_ERR_VPMERR BIT(5) +# define V3D_ERR_VPMEWR BIT(4) +# define V3D_ERR_VPAERRGL BIT(3) +# define V3D_ERR_VPAEBRGL BIT(2) +# define V3D_ERR_VPAERGS BIT(1) +# define V3D_ERR_VPAEABB BIT(0) + +#endif /* V3D_REGS_H */ diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c new file mode 100644 index 000000000..06238e6d7 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2018 Broadcom */ + +/** + * DOC: Broadcom V3D scheduling + * + * The shared DRM GPU scheduler is used to coordinate submitting jobs + * to the hardware. Each DRM fd (roughly a client process) gets its + * own scheduler entity, which will process jobs in order. 
The GPU + * scheduler will round-robin between clients to submit the next job. + * + * For simplicity, and in order to keep latency low for interactive + * jobs when bulk background jobs are queued up, we submit a new job + * to the HW only when it has completed the last one, instead of + * filling up the CT[01]Q FIFOs with jobs. Similarly, we use + * drm_sched_job_add_dependency() to manage the dependency between bin and + * render, instead of having the clients submit jobs using the HW's + * semaphores to interlock between them. + */ + +#include + +#include "v3d_drv.h" +#include "v3d_regs.h" +#include "v3d_trace.h" + +static struct v3d_job * +to_v3d_job(struct drm_sched_job *sched_job) +{ + return container_of(sched_job, struct v3d_job, base); +} + +static struct v3d_bin_job * +to_bin_job(struct drm_sched_job *sched_job) +{ + return container_of(sched_job, struct v3d_bin_job, base.base); +} + +static struct v3d_render_job * +to_render_job(struct drm_sched_job *sched_job) +{ + return container_of(sched_job, struct v3d_render_job, base.base); +} + +static struct v3d_tfu_job * +to_tfu_job(struct drm_sched_job *sched_job) +{ + return container_of(sched_job, struct v3d_tfu_job, base.base); +} + +static struct v3d_csd_job * +to_csd_job(struct drm_sched_job *sched_job) +{ + return container_of(sched_job, struct v3d_csd_job, base.base); +} + +static void +v3d_sched_job_free(struct drm_sched_job *sched_job) +{ + struct v3d_job *job = to_v3d_job(sched_job); + + v3d_job_cleanup(job); +} + +static void +v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job) +{ + if (job->perfmon != v3d->active_perfmon) + v3d_perfmon_stop(v3d, v3d->active_perfmon, true); + + if (job->perfmon && v3d->active_perfmon != job->perfmon) + v3d_perfmon_start(v3d, job->perfmon); +} + +static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job) +{ + struct v3d_bin_job *job = to_bin_job(sched_job); + struct v3d_dev *v3d = job->base.v3d; + struct drm_device *dev = &v3d->drm; + struct dma_fence *fence; + unsigned long irqflags; + + if (unlikely(job->base.base.s_fence->finished.error)) + return NULL; + + /* Lock required around bin_job update vs + * v3d_overflow_mem_work(). + */ + spin_lock_irqsave(&v3d->job_lock, irqflags); + v3d->bin_job = job; + /* Clear out the overflow allocation, so we don't + * reuse the overflow attached to a previous job. + */ + V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0); + spin_unlock_irqrestore(&v3d->job_lock, irqflags); + + v3d_invalidate_caches(v3d); + + fence = v3d_fence_create(v3d, V3D_BIN); + if (IS_ERR(fence)) + return NULL; + + if (job->base.irq_fence) + dma_fence_put(job->base.irq_fence); + job->base.irq_fence = dma_fence_get(fence); + + trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno, + job->start, job->end); + + v3d_switch_perfmon(v3d, &job->base); + + /* Set the current and end address of the control list. + * Writing the end register is what starts the job. 
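+	 * The QMA/QMS and QTS writes below hand the binner its tile
+	 * allocation memory and tile state buffer, when userspace
+	 * supplied them.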
+ */ + if (job->qma) { + V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma); + V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms); + } + if (job->qts) { + V3D_CORE_WRITE(0, V3D_CLE_CT0QTS, + V3D_CLE_CT0QTS_ENABLE | + job->qts); + } + V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start); + V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end); + + return fence; +} + +static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job) +{ + struct v3d_render_job *job = to_render_job(sched_job); + struct v3d_dev *v3d = job->base.v3d; + struct drm_device *dev = &v3d->drm; + struct dma_fence *fence; + + if (unlikely(job->base.base.s_fence->finished.error)) + return NULL; + + v3d->render_job = job; + + /* Can we avoid this flush? We need to be careful of + * scheduling, though -- imagine job0 rendering to texture and + * job1 reading, and them being executed as bin0, bin1, + * render0, render1, so that render1's flush at bin time + * wasn't enough. + */ + v3d_invalidate_caches(v3d); + + fence = v3d_fence_create(v3d, V3D_RENDER); + if (IS_ERR(fence)) + return NULL; + + if (job->base.irq_fence) + dma_fence_put(job->base.irq_fence); + job->base.irq_fence = dma_fence_get(fence); + + trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno, + job->start, job->end); + + v3d_switch_perfmon(v3d, &job->base); + + /* XXX: Set the QCFG */ + + /* Set the current and end address of the control list. + * Writing the end register is what starts the job. + */ + V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start); + V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end); + + return fence; +} + +static struct dma_fence * +v3d_tfu_job_run(struct drm_sched_job *sched_job) +{ + struct v3d_tfu_job *job = to_tfu_job(sched_job); + struct v3d_dev *v3d = job->base.v3d; + struct drm_device *dev = &v3d->drm; + struct dma_fence *fence; + + fence = v3d_fence_create(v3d, V3D_TFU); + if (IS_ERR(fence)) + return NULL; + + v3d->tfu_job = job; + if (job->base.irq_fence) + dma_fence_put(job->base.irq_fence); + job->base.irq_fence = dma_fence_get(fence); + + trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno); + + V3D_WRITE(V3D_TFU_IIA, job->args.iia); + V3D_WRITE(V3D_TFU_IIS, job->args.iis); + V3D_WRITE(V3D_TFU_ICA, job->args.ica); + V3D_WRITE(V3D_TFU_IUA, job->args.iua); + V3D_WRITE(V3D_TFU_IOA, job->args.ioa); + V3D_WRITE(V3D_TFU_IOS, job->args.ios); + V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]); + if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) { + V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]); + V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]); + V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]); + } + /* ICFG kicks off the job. */ + V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC); + + return fence; +} + +static struct dma_fence * +v3d_csd_job_run(struct drm_sched_job *sched_job) +{ + struct v3d_csd_job *job = to_csd_job(sched_job); + struct v3d_dev *v3d = job->base.v3d; + struct drm_device *dev = &v3d->drm; + struct dma_fence *fence; + int i; + + v3d->csd_job = job; + + v3d_invalidate_caches(v3d); + + fence = v3d_fence_create(v3d, V3D_CSD); + if (IS_ERR(fence)) + return NULL; + + if (job->base.irq_fence) + dma_fence_put(job->base.irq_fence); + job->base.irq_fence = dma_fence_get(fence); + + trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno); + + v3d_switch_perfmon(v3d, &job->base); + + for (i = 1; i <= 6; i++) + V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]); + /* CFG0 write kicks off the job. 
*/ + V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]); + + return fence; +} + +static struct dma_fence * +v3d_cache_clean_job_run(struct drm_sched_job *sched_job) +{ + struct v3d_job *job = to_v3d_job(sched_job); + struct v3d_dev *v3d = job->v3d; + + v3d_clean_caches(v3d); + + return NULL; +} + +static enum drm_gpu_sched_stat +v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) +{ + enum v3d_queue q; + + mutex_lock(&v3d->reset_lock); + + /* block scheduler */ + for (q = 0; q < V3D_MAX_QUEUES; q++) + drm_sched_stop(&v3d->queue[q].sched, sched_job); + + if (sched_job) + drm_sched_increase_karma(sched_job); + + /* get the GPU back into the init state */ + v3d_reset(v3d); + + for (q = 0; q < V3D_MAX_QUEUES; q++) + drm_sched_resubmit_jobs(&v3d->queue[q].sched); + + /* Unblock schedulers and restart their jobs. */ + for (q = 0; q < V3D_MAX_QUEUES; q++) { + drm_sched_start(&v3d->queue[q].sched, true); + } + + mutex_unlock(&v3d->reset_lock); + + return DRM_GPU_SCHED_STAT_NOMINAL; +} + +/* If the current address or return address have changed, then the GPU + * has probably made progress and we should delay the reset. This + * could fail if the GPU got in an infinite loop in the CL, but that + * is pretty unlikely outside of an i-g-t testcase. + */ +static enum drm_gpu_sched_stat +v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, + u32 *timedout_ctca, u32 *timedout_ctra) +{ + struct v3d_job *job = to_v3d_job(sched_job); + struct v3d_dev *v3d = job->v3d; + u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q)); + u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q)); + + if (*timedout_ctca != ctca || *timedout_ctra != ctra) { + *timedout_ctca = ctca; + *timedout_ctra = ctra; + return DRM_GPU_SCHED_STAT_NOMINAL; + } + + return v3d_gpu_reset_for_timeout(v3d, sched_job); +} + +static enum drm_gpu_sched_stat +v3d_bin_job_timedout(struct drm_sched_job *sched_job) +{ + struct v3d_bin_job *job = to_bin_job(sched_job); + + return v3d_cl_job_timedout(sched_job, V3D_BIN, + &job->timedout_ctca, &job->timedout_ctra); +} + +static enum drm_gpu_sched_stat +v3d_render_job_timedout(struct drm_sched_job *sched_job) +{ + struct v3d_render_job *job = to_render_job(sched_job); + + return v3d_cl_job_timedout(sched_job, V3D_RENDER, + &job->timedout_ctca, &job->timedout_ctra); +} + +static enum drm_gpu_sched_stat +v3d_generic_job_timedout(struct drm_sched_job *sched_job) +{ + struct v3d_job *job = to_v3d_job(sched_job); + + return v3d_gpu_reset_for_timeout(job->v3d, sched_job); +} + +static enum drm_gpu_sched_stat +v3d_csd_job_timedout(struct drm_sched_job *sched_job) +{ + struct v3d_csd_job *job = to_csd_job(sched_job); + struct v3d_dev *v3d = job->base.v3d; + u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4); + + /* If we've made progress, skip reset and let the timer get + * rearmed. 
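+	 * V3D_CSD_CURRENT_CFG4 (the running dispatch's batch counter) is
+	 * the progress indicator here: only once it stops changing
+	 * between consecutive timeouts do we fall through to a full GPU
+	 * reset.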
+ */ + if (job->timedout_batches != batches) { + job->timedout_batches = batches; + return DRM_GPU_SCHED_STAT_NOMINAL; + } + + return v3d_gpu_reset_for_timeout(v3d, sched_job); +} + +static const struct drm_sched_backend_ops v3d_bin_sched_ops = { + .run_job = v3d_bin_job_run, + .timedout_job = v3d_bin_job_timedout, + .free_job = v3d_sched_job_free, +}; + +static const struct drm_sched_backend_ops v3d_render_sched_ops = { + .run_job = v3d_render_job_run, + .timedout_job = v3d_render_job_timedout, + .free_job = v3d_sched_job_free, +}; + +static const struct drm_sched_backend_ops v3d_tfu_sched_ops = { + .run_job = v3d_tfu_job_run, + .timedout_job = v3d_generic_job_timedout, + .free_job = v3d_sched_job_free, +}; + +static const struct drm_sched_backend_ops v3d_csd_sched_ops = { + .run_job = v3d_csd_job_run, + .timedout_job = v3d_csd_job_timedout, + .free_job = v3d_sched_job_free +}; + +static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = { + .run_job = v3d_cache_clean_job_run, + .timedout_job = v3d_generic_job_timedout, + .free_job = v3d_sched_job_free +}; + +int +v3d_sched_init(struct v3d_dev *v3d) +{ + int hw_jobs_limit = 1; + int job_hang_limit = 0; + int hang_limit_ms = 500; + int ret; + + ret = drm_sched_init(&v3d->queue[V3D_BIN].sched, + &v3d_bin_sched_ops, + hw_jobs_limit, job_hang_limit, + msecs_to_jiffies(hang_limit_ms), NULL, + NULL, "v3d_bin", v3d->drm.dev); + if (ret) + return ret; + + ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched, + &v3d_render_sched_ops, + hw_jobs_limit, job_hang_limit, + msecs_to_jiffies(hang_limit_ms), NULL, + NULL, "v3d_render", v3d->drm.dev); + if (ret) + goto fail; + + ret = drm_sched_init(&v3d->queue[V3D_TFU].sched, + &v3d_tfu_sched_ops, + hw_jobs_limit, job_hang_limit, + msecs_to_jiffies(hang_limit_ms), NULL, + NULL, "v3d_tfu", v3d->drm.dev); + if (ret) + goto fail; + + if (v3d_has_csd(v3d)) { + ret = drm_sched_init(&v3d->queue[V3D_CSD].sched, + &v3d_csd_sched_ops, + hw_jobs_limit, job_hang_limit, + msecs_to_jiffies(hang_limit_ms), NULL, + NULL, "v3d_csd", v3d->drm.dev); + if (ret) + goto fail; + + ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched, + &v3d_cache_clean_sched_ops, + hw_jobs_limit, job_hang_limit, + msecs_to_jiffies(hang_limit_ms), NULL, + NULL, "v3d_cache_clean", v3d->drm.dev); + if (ret) + goto fail; + } + + return 0; + +fail: + v3d_sched_fini(v3d); + return ret; +} + +void +v3d_sched_fini(struct v3d_dev *v3d) +{ + enum v3d_queue q; + + for (q = 0; q < V3D_MAX_QUEUES; q++) { + if (v3d->queue[q].sched.ready) + drm_sched_fini(&v3d->queue[q].sched); + } +} diff --git a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h new file mode 100644 index 000000000..7aa8dc356 --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_trace.h @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2015-2018 Broadcom */ + +#if !defined(_V3D_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _V3D_TRACE_H_ + +#include +#include +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM v3d +#define TRACE_INCLUDE_FILE v3d_trace + +TRACE_EVENT(v3d_submit_cl_ioctl, + TP_PROTO(struct drm_device *dev, u32 ct1qba, u32 ct1qea), + TP_ARGS(dev, ct1qba, ct1qea), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, ct1qba) + __field(u32, ct1qea) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->ct1qba = ct1qba; + __entry->ct1qea = ct1qea; + ), + + TP_printk("dev=%u, RCL 0x%08x..0x%08x", + __entry->dev, + __entry->ct1qba, + __entry->ct1qea) +); + +TRACE_EVENT(v3d_submit_cl, + TP_PROTO(struct 
drm_device *dev, bool is_render, + uint64_t seqno, + u32 ctnqba, u32 ctnqea), + TP_ARGS(dev, is_render, seqno, ctnqba, ctnqea), + + TP_STRUCT__entry( + __field(u32, dev) + __field(bool, is_render) + __field(u64, seqno) + __field(u32, ctnqba) + __field(u32, ctnqea) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->is_render = is_render; + __entry->seqno = seqno; + __entry->ctnqba = ctnqba; + __entry->ctnqea = ctnqea; + ), + + TP_printk("dev=%u, %s, seqno=%llu, 0x%08x..0x%08x", + __entry->dev, + __entry->is_render ? "RCL" : "BCL", + __entry->seqno, + __entry->ctnqba, + __entry->ctnqea) +); + +TRACE_EVENT(v3d_bcl_irq, + TP_PROTO(struct drm_device *dev, + uint64_t seqno), + TP_ARGS(dev, seqno), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u64, seqno) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->seqno = seqno; + ), + + TP_printk("dev=%u, seqno=%llu", + __entry->dev, + __entry->seqno) +); + +TRACE_EVENT(v3d_rcl_irq, + TP_PROTO(struct drm_device *dev, + uint64_t seqno), + TP_ARGS(dev, seqno), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u64, seqno) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->seqno = seqno; + ), + + TP_printk("dev=%u, seqno=%llu", + __entry->dev, + __entry->seqno) +); + +TRACE_EVENT(v3d_tfu_irq, + TP_PROTO(struct drm_device *dev, + uint64_t seqno), + TP_ARGS(dev, seqno), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u64, seqno) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->seqno = seqno; + ), + + TP_printk("dev=%u, seqno=%llu", + __entry->dev, + __entry->seqno) +); + +TRACE_EVENT(v3d_csd_irq, + TP_PROTO(struct drm_device *dev, + uint64_t seqno), + TP_ARGS(dev, seqno), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u64, seqno) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->seqno = seqno; + ), + + TP_printk("dev=%u, seqno=%llu", + __entry->dev, + __entry->seqno) +); + +TRACE_EVENT(v3d_submit_tfu_ioctl, + TP_PROTO(struct drm_device *dev, u32 iia), + TP_ARGS(dev, iia), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, iia) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->iia = iia; + ), + + TP_printk("dev=%u, IIA 0x%08x", + __entry->dev, + __entry->iia) +); + +TRACE_EVENT(v3d_submit_tfu, + TP_PROTO(struct drm_device *dev, + uint64_t seqno), + TP_ARGS(dev, seqno), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u64, seqno) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->seqno = seqno; + ), + + TP_printk("dev=%u, seqno=%llu", + __entry->dev, + __entry->seqno) +); + +TRACE_EVENT(v3d_submit_csd_ioctl, + TP_PROTO(struct drm_device *dev, u32 cfg5, u32 cfg6), + TP_ARGS(dev, cfg5, cfg6), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, cfg5) + __field(u32, cfg6) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->cfg5 = cfg5; + __entry->cfg6 = cfg6; + ), + + TP_printk("dev=%u, CFG5 0x%08x, CFG6 0x%08x", + __entry->dev, + __entry->cfg5, + __entry->cfg6) +); + +TRACE_EVENT(v3d_submit_csd, + TP_PROTO(struct drm_device *dev, + uint64_t seqno), + TP_ARGS(dev, seqno), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u64, seqno) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->seqno = seqno; + ), + + TP_printk("dev=%u, seqno=%llu", + __entry->dev, + __entry->seqno) +); + +TRACE_EVENT(v3d_cache_clean_begin, + TP_PROTO(struct drm_device *dev), + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(u32, dev) + ), + + 
TP_fast_assign( + __entry->dev = dev->primary->index; + ), + + TP_printk("dev=%u", + __entry->dev) +); + +TRACE_EVENT(v3d_cache_clean_end, + TP_PROTO(struct drm_device *dev), + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(u32, dev) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + ), + + TP_printk("dev=%u", + __entry->dev) +); + +TRACE_EVENT(v3d_reset_begin, + TP_PROTO(struct drm_device *dev), + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(u32, dev) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + ), + + TP_printk("dev=%u", + __entry->dev) +); + +TRACE_EVENT(v3d_reset_end, + TP_PROTO(struct drm_device *dev), + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(u32, dev) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + ), + + TP_printk("dev=%u", + __entry->dev) +); + +#endif /* _V3D_TRACE_H_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include diff --git a/drivers/gpu/drm/v3d/v3d_trace_points.c b/drivers/gpu/drm/v3d/v3d_trace_points.c new file mode 100644 index 000000000..482922d7c --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_trace_points.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2015 Broadcom */ + +#include "v3d_drv.h" + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "v3d_trace.h" +#endif -- cgit v1.2.3
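For reference, a minimal userspace sketch of the DRM_V3D_EXT_ID_MULTI_SYNC extension that v3d_get_extensions() parses earlier in this patch. The sketch is not part of the patch: it assumes libdrm's drmIoctl() wrapper, that the installed uapi header is reachable as <drm/v3d_drm.h> (the include path varies between setups), that the embedded extension header in struct drm_v3d_multi_sync is named 'base', and that a syncobj handle was created beforehand; the other struct and field names mirror the kernel code above.

/* Hypothetical helper: submit a CL job and have the kernel signal one
 * extra syncobj when it completes, via the multi-sync extension.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/v3d_drm.h>	/* uapi header; include path may vary */

static int submit_cl_with_multisync(int fd, struct drm_v3d_submit_cl *args,
				    uint32_t out_syncobj)
{
	struct drm_v3d_sem out_sem;
	struct drm_v3d_multi_sync ms;

	memset(&out_sem, 0, sizeof(out_sem));
	out_sem.handle = out_syncobj;		/* syncobj to signal on completion */

	memset(&ms, 0, sizeof(ms));
	ms.base.id = DRM_V3D_EXT_ID_MULTI_SYNC;	/* base.next == 0 ends the chain */
	ms.out_syncs = (uintptr_t)&out_sem;
	ms.out_sync_count = 1;
	/* in_syncs/wait_stage left at zero: nothing extra to wait on */

	args->flags |= DRM_V3D_SUBMIT_EXTENSION;
	args->extensions = (uintptr_t)&ms;

	return drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, args);
}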