author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:27 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:27 +0000
commit     34996e42f82bfd60bc2c191e5cae3c6ab233ec6c (patch)
tree       62db60558cbf089714b48daeabca82bf2b20b20e /drivers/gpu/drm/nouveau/nouveau_bo.c
parent     Adding debian version 6.8.12-1. (diff)
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c  103
1 file changed, 42 insertions(+), 61 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 5d8ee17295..186add400e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -241,28 +241,28 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
}
nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
- if (!nouveau_cli_uvmm(cli) || internal) {
- /* for BO noVM allocs, don't assign kinds */
- if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
- nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
- if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
- kfree(nvbo);
- return ERR_PTR(-EINVAL);
- }
- nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
- } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- nvbo->kind = (tile_flags & 0x00007f00) >> 8;
- nvbo->comp = (tile_flags & 0x00030000) >> 16;
- if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
- kfree(nvbo);
- return ERR_PTR(-EINVAL);
- }
- } else {
- nvbo->zeta = (tile_flags & 0x00000007);
+ if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+ nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ kfree(nvbo);
+ return ERR_PTR(-EINVAL);
}
- nvbo->mode = tile_mode;
+ nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+ } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+ nvbo->comp = (tile_flags & 0x00030000) >> 16;
+ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ kfree(nvbo);
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ nvbo->zeta = (tile_flags & 0x00000007);
+ }
+ nvbo->mode = tile_mode;
+
+ if (!nouveau_cli_uvmm(cli) || internal) {
/* Determine the desirable target GPU page size for the buffer. */
for (i = 0; i < vmm->page_nr; i++) {
/* Because we cannot currently allow VMM maps to fail
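The hunk above moves the tile_flags decode out of the uvmm condition: kind on Fermi and newer, kind/comp on Tesla, zeta on earlier chips, plus the nvbo->mode assignment, now run for every client, where before they were skipped for uvmm (VM_BIND) clients unless the allocation was internal. Only the GPU page-size selection stays conditional. A minimal, self-contained sketch of the decode, with the masks taken from the hunk (the gen enum is a hypothetical stand-in for cli->device.info.family, and the nvif_mmu_kind_valid() check is omitted):

    #include <stdint.h>

    /* Hypothetical illustration of the tile_flags layout decoded in the
     * hunk above; not the upstream function. */
    enum gen { GEN_PRE_TESLA, GEN_TESLA, GEN_FERMI };

    struct decoded {
        uint8_t kind;   /* tiling/compression "kind" */
        uint8_t comp;   /* compression selector (Tesla only here;
                         * Fermi+ derives comp from the mmu kind table) */
        uint8_t zeta;   /* pre-Tesla Z-buffer format */
    };

    static void decode_tile_flags(uint32_t tile_flags, enum gen gen,
                                  struct decoded *d)
    {
        if (gen >= GEN_FERMI) {
            d->kind = (tile_flags & 0x0000ff00) >> 8;  /* 8-bit kind */
        } else if (gen >= GEN_TESLA) {
            d->kind = (tile_flags & 0x00007f00) >> 8;  /* 7-bit kind */
            d->comp = (tile_flags & 0x00030000) >> 16;
        } else {
            d->zeta = tile_flags & 0x00000007;
        }
    }

This also appears to be why the next hunk can drop the blanket rejection of tile flags in VM mode: the decode, including kind validation, now runs for uvmm clients too.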
@@ -304,12 +304,6 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
}
nvbo->page = vmm->page[pi].shift;
} else {
- /* reject other tile flags when in VM mode. */
- if (tile_mode)
- return ERR_PTR(-EINVAL);
- if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
- return ERR_PTR(-EINVAL);
-
/* Determine the desirable target GPU page size for the buffer. */
for (i = 0; i < vmm->page_nr; i++) {
/* Because we cannot currently allow VMM maps to fail
@@ -405,27 +399,6 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
}
static void
-set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain)
-{
- *n = 0;
-
- if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
- pl[*n].mem_type = TTM_PL_VRAM;
- pl[*n].flags = 0;
- (*n)++;
- }
- if (domain & NOUVEAU_GEM_DOMAIN_GART) {
- pl[*n].mem_type = TTM_PL_TT;
- pl[*n].flags = 0;
- (*n)++;
- }
- if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
- pl[*n].mem_type = TTM_PL_SYSTEM;
- pl[(*n)++].flags = 0;
- }
-}
-
-static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
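The set_placement_list() helper deleted above is not lost; its body reappears, extended, in nouveau_bo_placement_set() two hunks below. The backdrop is evidently the TTM rework in this kernel range that dropped the separate busy_placement array from struct ttm_placement in favour of per-place flags. The type and flag names in this sketch are the real TTM ones; the values are illustrative only:

    #include <drm/ttm/ttm_placement.h>

    /* One list instead of placement + busy_placement: a place that may
     * only be used under memory pressure (eviction) is tagged with
     * TTM_PL_FLAG_FALLBACK rather than listed in a second array. */
    static const struct ttm_place example_places[] = {
        { .mem_type = TTM_PL_VRAM, .flags = 0 },                    /* preferred */
        { .mem_type = TTM_PL_TT,   .flags = TTM_PL_FLAG_FALLBACK }, /* busy only */
    };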
@@ -452,10 +425,6 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
nvbo->placements[i].fpfn = fpfn;
nvbo->placements[i].lpfn = lpfn;
}
- for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
- nvbo->busy_placements[i].fpfn = fpfn;
- nvbo->busy_placements[i].lpfn = lpfn;
- }
}
}
@@ -463,15 +432,32 @@ void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
uint32_t busy)
{
- struct ttm_placement *pl = &nvbo->placement;
+ unsigned int *n = &nvbo->placement.num_placement;
+ struct ttm_place *pl = nvbo->placements;
- pl->placement = nvbo->placements;
- set_placement_list(nvbo->placements, &pl->num_placement, domain);
+ domain |= busy;
- pl->busy_placement = nvbo->busy_placements;
- set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
- domain | busy);
+ *n = 0;
+ if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
+ pl[*n].mem_type = TTM_PL_VRAM;
+ pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_VRAM ?
+ TTM_PL_FLAG_FALLBACK : 0;
+ (*n)++;
+ }
+ if (domain & NOUVEAU_GEM_DOMAIN_GART) {
+ pl[*n].mem_type = TTM_PL_TT;
+ pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_GART ?
+ TTM_PL_FLAG_FALLBACK : 0;
+ (*n)++;
+ }
+ if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
+ pl[*n].mem_type = TTM_PL_SYSTEM;
+ pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_CPU ?
+ TTM_PL_FLAG_FALLBACK : 0;
+ (*n)++;
+ }
+ nvbo->placement.placement = nvbo->placements;
set_placement_range(nvbo, domain);
}
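A worked example of the rewritten nouveau_bo_placement_set() above, with domains chosen for illustration: the function first ORs the busy domains into the requested set, then emits one ttm_place per domain, flagging any domain present in the busy mask as a fallback.

    /* Resulting list for a hypothetical call
     * nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM,
     *                                NOUVEAU_GEM_DOMAIN_GART):
     *
     *   placements[0] = { .mem_type = TTM_PL_VRAM, .flags = 0 }
     *   placements[1] = { .mem_type = TTM_PL_TT,
     *                     .flags    = TTM_PL_FLAG_FALLBACK }
     *
     * TTM tries VRAM first and migrates the buffer to GART only under
     * memory pressure, matching what the old placement/busy_placement
     * pair expressed with two arrays. */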
@@ -1316,11 +1302,6 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->placements[i].lpfn = mappable;
}
- for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
- nvbo->busy_placements[i].fpfn = 0;
- nvbo->busy_placements[i].lpfn = mappable;
- }
-
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
}
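The same single-list simplification reaches the CPU-fault path above: with busy_placements gone, only one array needs clamping to the BAR-mappable range. Condensed, the surviving logic reads roughly as follows (mappable being the CPU-visible VRAM size in pages; reconstructed from the context lines, so treat it as a sketch):

    /* Restrict every remaining placement to CPU-mappable VRAM, then
     * rebuild the placement list for VRAM only. */
    for (i = 0; i < nvbo->placement.num_placement; ++i) {
        nvbo->placements[i].fpfn = 0;
        nvbo->placements[i].lpfn = mappable;
    }
    nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);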