From 8b0a8165cdad0f4133837d753649ef4682e42c3b Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 7 Aug 2024 15:11:40 +0200
Subject: Merging upstream version 6.9.7.

Signed-off-by: Daniel Baumann
---
 drivers/gpu/drm/xe/xe_bo.c | 123 ++++++++++++++++++++++++++++++++++++---------
 1 file changed, 100 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index eb2c44a328..9c0837b6fd 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -46,22 +46,26 @@ static const struct ttm_place sys_placement_flags = {
 static struct ttm_placement sys_placement = {
 	.num_placement = 1,
 	.placement = &sys_placement_flags,
-	.num_busy_placement = 1,
-	.busy_placement = &sys_placement_flags,
 };
 
-static const struct ttm_place tt_placement_flags = {
-	.fpfn = 0,
-	.lpfn = 0,
-	.mem_type = XE_PL_TT,
-	.flags = 0,
+static const struct ttm_place tt_placement_flags[] = {
+	{
+		.fpfn = 0,
+		.lpfn = 0,
+		.mem_type = XE_PL_TT,
+		.flags = TTM_PL_FLAG_DESIRED,
+	},
+	{
+		.fpfn = 0,
+		.lpfn = 0,
+		.mem_type = XE_PL_SYSTEM,
+		.flags = TTM_PL_FLAG_FALLBACK,
+	}
 };
 
 static struct ttm_placement tt_placement = {
-	.num_placement = 1,
-	.placement = &tt_placement_flags,
-	.num_busy_placement = 1,
-	.busy_placement = &sys_placement_flags,
+	.num_placement = 2,
+	.placement = tt_placement_flags,
 };
 
 bool mem_type_is_vram(u32 mem_type)
@@ -216,8 +220,6 @@ static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
 	bo->placement = (struct ttm_placement) {
 		.num_placement = c,
 		.placement = bo->placements,
-		.num_busy_placement = c,
-		.busy_placement = bo->placements,
 	};
 
 	return 0;
@@ -237,7 +239,6 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
 	/* Don't handle scatter gather BOs */
 	if (tbo->type == ttm_bo_type_sg) {
 		placement->num_placement = 0;
-		placement->num_busy_placement = 0;
 		return;
 	}
 
@@ -572,6 +573,8 @@ static int xe_bo_move_notify(struct xe_bo *bo,
 {
 	struct ttm_buffer_object *ttm_bo = &bo->ttm;
 	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+	struct ttm_resource *old_mem = ttm_bo->resource;
+	u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
 	int ret;
 
 	/*
@@ -591,6 +594,18 @@
 	if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
 		dma_buf_move_notify(ttm_bo->base.dma_buf);
 
+	/*
+	 * TTM has already nuked the mmap for us (see ttm_bo_unmap_virtual),
+	 * so if we moved from VRAM make sure to unlink this from the userfault
+	 * tracking.
+	 */
+	if (mem_type_is_vram(old_mem_type)) {
+		mutex_lock(&xe->mem_access.vram_userfault.lock);
+		if (!list_empty(&bo->vram_userfault_link))
+			list_del_init(&bo->vram_userfault_link);
+		mutex_unlock(&xe->mem_access.vram_userfault.lock);
+	}
+
 	return 0;
 }
 
@@ -1012,7 +1027,7 @@ static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
 	}
 }
 
-struct ttm_device_funcs xe_ttm_funcs = {
+const struct ttm_device_funcs xe_ttm_funcs = {
 	.ttm_tt_create = xe_ttm_tt_create,
 	.ttm_tt_populate = xe_ttm_tt_populate,
 	.ttm_tt_unpopulate = xe_ttm_tt_unpopulate,
@@ -1048,6 +1063,11 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
 	if (bo->vm && xe_bo_is_user(bo))
 		xe_vm_put(bo->vm);
 
+	mutex_lock(&xe->mem_access.vram_userfault.lock);
+	if (!list_empty(&bo->vram_userfault_link))
+		list_del(&bo->vram_userfault_link);
+	mutex_unlock(&xe->mem_access.vram_userfault.lock);
+
 	kfree(bo);
 }
 
@@ -1088,16 +1108,20 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
 	struct drm_device *ddev = tbo->base.dev;
+	struct xe_device *xe = to_xe_device(ddev);
+	struct xe_bo *bo = ttm_to_xe_bo(tbo);
+	bool needs_rpm = bo->flags & XE_BO_CREATE_VRAM_MASK;
 	vm_fault_t ret;
 	int idx;
 
+	if (needs_rpm)
+		xe_device_mem_access_get(xe);
+
 	ret = ttm_bo_vm_reserve(tbo, vmf);
 	if (ret)
-		return ret;
+		goto out;
 
 	if (drm_dev_enter(ddev, &idx)) {
-		struct xe_bo *bo = ttm_to_xe_bo(tbo);
-
 		trace_xe_bo_cpu_fault(bo);
 
 		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
@@ -1106,10 +1130,24 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
 	} else {
 		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
 	}
+
 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
-		return ret;
+		goto out;
+	/*
+	 * ttm_bo_vm_reserve() already has dma_resv_lock.
+	 */
+	if (ret == VM_FAULT_NOPAGE && mem_type_is_vram(tbo->resource->mem_type)) {
+		mutex_lock(&xe->mem_access.vram_userfault.lock);
+		if (list_empty(&bo->vram_userfault_link))
+			list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list);
+		mutex_unlock(&xe->mem_access.vram_userfault.lock);
+	}
 
 	dma_resv_unlock(tbo->base.resv);
+out:
+	if (needs_rpm)
+		xe_device_mem_access_put(xe);
+
 	return ret;
 }
 
@@ -1220,6 +1258,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
 #ifdef CONFIG_PROC_FS
 	INIT_LIST_HEAD(&bo->client_link);
 #endif
+	INIT_LIST_HEAD(&bo->vram_userfault_link);
 
 	drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
 
@@ -1319,8 +1358,6 @@ static int __xe_bo_fixed_placement(struct xe_device *xe,
 	bo->placement = (struct ttm_placement) {
 		.num_placement = 1,
 		.placement = place,
-		.num_busy_placement = 1,
-		.busy_placement = place,
 	};
 
 	return 0;
@@ -1534,6 +1571,38 @@ struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_til
 	return bo;
 }
 
+/**
+ * xe_managed_bo_reinit_in_vram
+ * @xe: xe device
+ * @tile: Tile where the new buffer will be created
+ * @src: Managed buffer object allocated in system memory
+ *
+ * Replace a managed src buffer object allocated in system memory with a new
+ * one allocated in VRAM, copying the data between them.
+ * The buffer object in VRAM will not have the same GGTT address; the caller
+ * is responsible for making sure that any old references to it are updated.
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
+{
+	struct xe_bo *bo;
+
+	xe_assert(xe, IS_DGFX(xe));
+	xe_assert(xe, !(*src)->vmap.is_iomem);
+
+	bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, (*src)->size,
+					    XE_BO_CREATE_VRAM_IF_DGFX(tile) |
+					    XE_BO_CREATE_GGTT_BIT);
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);
+
+	drmm_release_action(&xe->drm, __xe_bo_unpin_map_no_vm, *src);
+	*src = bo;
+
+	return 0;
+}
+
 /*
  * XXX: This is in the VM bind data path, likely should calculate this once and
  * store, with a recalculation if the BO is moved.
@@ -2078,9 +2147,7 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
 
 	xe_place_from_ttm_type(mem_type, &requested);
 	placement.num_placement = 1;
-	placement.num_busy_placement = 1;
 	placement.placement = &requested;
-	placement.busy_placement = &requested;
 
 	/*
 	 * Stolen needs to be handled like below VRAM handling if we ever need
@@ -2230,6 +2297,16 @@ int xe_bo_dumb_create(struct drm_file *file_priv,
 	return err;
 }
 
+void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo)
+{
+	struct ttm_buffer_object *tbo = &bo->ttm;
+	struct ttm_device *bdev = tbo->bdev;
+
+	drm_vma_node_unmap(&tbo->base.vma_node, bdev->dev_mapping);
+
+	list_del_init(&bo->vram_userfault_link);
+}
+
 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
 #include "tests/xe_bo.c"
 #endif
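A note on the placement changes above: TTM dropped the separate busy_placement
list (num_busy_placement/busy_placement), so a driver now describes preferred
and fallback placements in a single ttm_place array, tagged with
TTM_PL_FLAG_DESIRED and TTM_PL_FLAG_FALLBACK, exactly as tt_placement_flags
does in this patch. A minimal sketch of the pattern using only generic TTM
types (example_places and example_placement are illustrative names, not part
of the patch):

#include <linux/kernel.h>
#include <drm/ttm/ttm_placement.h>

/* Preferred placement first: TTM tries DESIRED entries under normal
 * conditions and falls back to FALLBACK entries (here, system memory)
 * under memory pressure, replacing the old busy_placement list.
 */
static const struct ttm_place example_places[] = {
	{
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_DESIRED,
	},
	{
		.mem_type = TTM_PL_SYSTEM,
		.flags = TTM_PL_FLAG_FALLBACK,
	},
};

static struct ttm_placement example_placement = {
	.num_placement = ARRAY_SIZE(example_places),
	.placement = example_places,
};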
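The vram_userfault list maintained by the fault, move, and destroy paths above
exists so that runtime suspend can revoke the CPU mappings of VRAM objects
that were faulted in; the next access then faults again and retakes a
runtime-PM reference in xe_gem_fault(). A sketch of such a suspend-side
walker, assuming it is called from the xe runtime-PM path with the device
otherwise quiesced (example_zap_vram_mmaps is an illustrative name; the real
call site is not in this patch):

/* Unmap every VRAM BO with a live CPU mapping. list_for_each_entry_safe
 * is required because xe_bo_runtime_pm_release_mmap_offset() unlinks the
 * entry via list_del_init().
 */
static void example_zap_vram_mmaps(struct xe_device *xe)
{
	struct xe_bo *bo, *next;

	mutex_lock(&xe->mem_access.vram_userfault.lock);
	list_for_each_entry_safe(bo, next, &xe->mem_access.vram_userfault.list,
				 vram_userfault_link)
		xe_bo_runtime_pm_release_mmap_offset(bo);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);
}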
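And a usage sketch for xe_managed_bo_reinit_in_vram(), following its
kernel-doc contract. The caller below is hypothetical; the point is the
ownership transfer: on success *bo points at the VRAM copy, the old object is
released through its managed-release action, and any cached GGTT address is
stale:

/* Hypothetical caller: promote a managed BO that was bootstrapped in
 * system memory to VRAM once the device is fully initialized.
 */
static int example_promote_to_vram(struct xe_device *xe, struct xe_tile *tile,
				   struct xe_bo **bo)
{
	int err;

	err = xe_managed_bo_reinit_in_vram(xe, tile, bo);
	if (err)
		return err;

	/* *bo is now the VRAM copy; re-read its GGTT offset before reusing
	 * any previously cached value.
	 */
	return 0;
}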