Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c  191
1 file changed, 90 insertions(+), 101 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 9af1d0943..c50a0dc9c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -198,6 +198,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
addr[i] >> PAGE_SHIFT, page_to_pfn(page));
}
+
return 0;
}
@@ -349,6 +350,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
INIT_LIST_HEAD(&prange->child_list);
atomic_set(&prange->invalid, 0);
prange->validate_timestamp = 0;
+ prange->vram_pages = 0;
mutex_init(&prange->migrate_mutex);
mutex_init(&prange->lock);
@@ -395,6 +397,8 @@ static void svm_range_bo_release(struct kref *kref)
prange->start, prange->last);
mutex_lock(&prange->lock);
prange->svm_bo = NULL;
+ /* prange should not hold vram pages now */
+ WARN_ONCE(prange->actual_loc, "prange should not hold vram pages");
mutex_unlock(&prange->lock);
spin_lock(&svm_bo->list_lock);
@@ -873,14 +877,29 @@ static void svm_range_debug_dump(struct svm_range_list *svms)
static void *
svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
- uint64_t offset)
+ uint64_t offset, uint64_t *vram_pages)
{
+ unsigned char *src = (unsigned char *)psrc + offset;
unsigned char *dst;
+ uint64_t i;
dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
if (!dst)
return NULL;
- memcpy(dst, (unsigned char *)psrc + offset, num_elements * size);
+
+ if (!vram_pages) {
+ memcpy(dst, src, num_elements * size);
+ return (void *)dst;
+ }
+
+ *vram_pages = 0;
+ for (i = 0; i < num_elements; i++) {
+ dma_addr_t *temp;
+ temp = (dma_addr_t *)dst + i;
+ *temp = *((dma_addr_t *)src + i);
+ if (*temp & SVM_RANGE_VRAM_DOMAIN)
+ (*vram_pages)++;
+ }
return (void *)dst;
}
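
The new vram_pages counting in svm_range_copy_array() piggybacks on the copy loop: each DMA address carries a VRAM flag bit (SVM_RANGE_VRAM_DOMAIN), so counting flagged entries while copying yields the number of VRAM-resident pages at no extra cost. A minimal user-space sketch of that idea, with VRAM_FLAG standing in for the kernel flag and all other names invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define VRAM_FLAG 0x1ULL        /* stand-in for SVM_RANGE_VRAM_DOMAIN */

/* Copy an array of page addresses; when vram_pages is given, count the
 * entries that carry the VRAM flag while copying, the way the reworked
 * svm_range_copy_array() does for dma_addr_t arrays.
 */
static uint64_t *copy_and_count(const uint64_t *src, size_t n,
                                uint64_t *vram_pages)
{
        uint64_t *dst = malloc(n * sizeof(*dst));
        size_t i;

        if (!dst)
                return NULL;

        if (!vram_pages) {
                memcpy(dst, src, n * sizeof(*dst));
                return dst;
        }

        *vram_pages = 0;
        for (i = 0; i < n; i++) {
                dst[i] = src[i];
                if (dst[i] & VRAM_FLAG)
                        (*vram_pages)++;
        }
        return dst;
}

int main(void)
{
        uint64_t src[] = { 0x1000, 0x2001, 0x3001, 0x4000 };   /* two flagged */
        uint64_t vram = 0;
        uint64_t *dst = copy_and_count(src, 4, &vram);

        printf("vram pages: %llu\n", (unsigned long long)vram); /* prints 2 */
        free(dst);
        return 0;
}
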
@@ -894,7 +913,7 @@ svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
if (!src->dma_addr[i])
continue;
dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
- sizeof(*src->dma_addr[i]), src->npages, 0);
+ sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
if (!dst->dma_addr[i])
return -ENOMEM;
}
@@ -905,7 +924,7 @@ svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
static int
svm_range_split_array(void *ppnew, void *ppold, size_t size,
uint64_t old_start, uint64_t old_n,
- uint64_t new_start, uint64_t new_n)
+ uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
{
unsigned char *new, *old, *pold;
uint64_t d;
@@ -917,11 +936,12 @@ svm_range_split_array(void *ppnew, void *ppold, size_t size,
return 0;
d = (new_start - old_start) * size;
- new = svm_range_copy_array(pold, size, new_n, d);
+ /* get dma addr array for new range and calculate its vram page number */
+ new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
if (!new)
return -ENOMEM;
d = (new_start == old_start) ? new_n * size : 0;
- old = svm_range_copy_array(pold, size, old_n, d);
+ old = svm_range_copy_array(pold, size, old_n, d, NULL);
if (!old) {
kvfree(new);
return -ENOMEM;
@@ -943,10 +963,13 @@ svm_range_split_pages(struct svm_range *new, struct svm_range *old,
for (i = 0; i < MAX_GPU_INSTANCE; i++) {
r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
sizeof(*old->dma_addr[i]), old->start,
- npages, new->start, new->npages);
+ npages, new->start, new->npages,
+ old->actual_loc ? &new->vram_pages : NULL);
if (r)
return r;
}
+ if (old->actual_loc)
+ old->vram_pages -= new->vram_pages;
return 0;
}
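
With the count returned through the new out-parameter, svm_range_split_pages() only has to move that many pages from the parent's vram_pages to the child's, so the per-range totals stay consistent across a split. A small user-space sketch of the bookkeeping, using a hypothetical struct rather than the kernel's svm_range:

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t vram_pages; uint32_t actual_loc; };

/* Move the pages counted for the new child out of the parent's total,
 * so parent + child still add up to the pre-split vram_pages value.
 */
static void account_split(struct range *old, struct range *new, uint64_t new_vram)
{
        if (!old->actual_loc)           /* nothing resident in VRAM */
                return;
        new->vram_pages = new_vram;
        old->vram_pages -= new_vram;
}

int main(void)
{
        struct range old = { .vram_pages = 100, .actual_loc = 1 };
        struct range new = { 0 };

        account_split(&old, &new, 30);
        printf("old %llu new %llu\n",   /* old 70 new 30 */
               (unsigned long long)old.vram_pages,
               (unsigned long long)new.vram_pages);
        return 0;
}
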
@@ -1092,7 +1115,7 @@ static int
svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
struct list_head *insert_list, struct list_head *remap_list)
{
- struct svm_range *tail;
+ struct svm_range *tail = NULL;
int r = svm_range_split(prange, prange->start, new_last, &tail);
if (!r) {
@@ -1107,7 +1130,7 @@ static int
svm_range_split_head(struct svm_range *prange, uint64_t new_start,
struct list_head *insert_list, struct list_head *remap_list)
{
- struct svm_range *head;
+ struct svm_range *head = NULL;
int r = svm_range_split(prange, new_start, prange->last, &head);
if (!r) {
@@ -1130,66 +1153,6 @@ svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
list_add_tail(&pchild->child_list, &prange->child_list);
}
-/**
- * svm_range_split_by_granularity - collect ranges within granularity boundary
- *
- * @p: the process with svms list
- * @mm: mm structure
- * @addr: the vm fault address in pages, to split the prange
- * @parent: parent range if prange is from child list
- * @prange: prange to split
- *
- * Trims @prange to be a single aligned block of prange->granularity if
- * possible. The head and tail are added to the child_list in @parent.
- *
- * Context: caller must hold mmap_read_lock and prange->lock
- *
- * Return:
- * 0 - OK, otherwise error code
- */
-int
-svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
- unsigned long addr, struct svm_range *parent,
- struct svm_range *prange)
-{
- struct svm_range *head, *tail;
- unsigned long start, last, size;
- int r;
-
- /* Align splited range start and size to granularity size, then a single
- * PTE will be used for whole range, this reduces the number of PTE
- * updated and the L1 TLB space used for translation.
- */
- size = 1UL << prange->granularity;
- start = ALIGN_DOWN(addr, size);
- last = ALIGN(addr + 1, size) - 1;
-
- pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
- prange->svms, prange->start, prange->last, start, last, size);
-
- if (start > prange->start) {
- r = svm_range_split(prange, start, prange->last, &head);
- if (r)
- return r;
- svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
- }
-
- if (last < prange->last) {
- r = svm_range_split(prange, prange->start, last, &tail);
- if (r)
- return r;
- svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
- }
-
- /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
- if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
- prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
- pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
- prange, prange->start, prange->last,
- SVM_OP_ADD_RANGE_AND_MAP);
- }
- return 0;
-}
static bool
svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
{
@@ -1524,7 +1487,7 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
uint32_t gpuidx;
int r;
- drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0);
+ drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0, 0);
drm_exec_until_all_locked(&ctx->exec) {
for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
@@ -1609,6 +1572,7 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
* 5. Release page table (and SVM BO) reservation
*/
static int svm_range_validate_and_map(struct mm_struct *mm,
+ unsigned long map_start, unsigned long map_last,
struct svm_range *prange, int32_t gpuidx,
bool intr, bool wait, bool flush_tlb)
{
@@ -1689,10 +1653,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
}
}
- start = prange->start << PAGE_SHIFT;
- end = (prange->last + 1) << PAGE_SHIFT;
+ start = map_start << PAGE_SHIFT;
+ end = (map_last + 1) << PAGE_SHIFT;
for (addr = start; !r && addr < end; ) {
struct hmm_range *hmm_range;
+ unsigned long map_start_vma;
+ unsigned long map_last_vma;
struct vm_area_struct *vma;
unsigned long next = 0;
unsigned long offset;
@@ -1720,7 +1686,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
}
if (!r) {
- offset = (addr - start) >> PAGE_SHIFT;
+ offset = (addr >> PAGE_SHIFT) - prange->start;
r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
hmm_range->hmm_pfns);
if (r)
@@ -1738,9 +1704,16 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
r = -EAGAIN;
}
- if (!r)
- r = svm_range_map_to_gpus(prange, offset, npages, readonly,
- ctx->bitmap, wait, flush_tlb);
+ if (!r) {
+ map_start_vma = max(map_start, prange->start + offset);
+ map_last_vma = min(map_last, prange->start + offset + npages - 1);
+ if (map_start_vma <= map_last_vma) {
+ offset = map_start_vma - prange->start;
+ npages = map_last_vma - map_start_vma + 1;
+ r = svm_range_map_to_gpus(prange, offset, npages, readonly,
+ ctx->bitmap, wait, flush_tlb);
+ }
+ }
if (!r && next == end)
prange->mapped_to_gpu = true;
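
svm_range_validate_and_map() still walks VMA-sized chunks over the whole prange for validation, but it now maps to the GPUs only the part of each chunk that overlaps the caller's [map_start, map_last] window. A user-space sketch of the clamping, with illustrative values and names that are not the kernel's:

#include <stdio.h>

int main(void)
{
        unsigned long range_start = 0x100;
        unsigned long offset = 0x40, npages = 0x40;        /* validated chunk */
        unsigned long map_start = 0x120, map_last = 0x15f; /* requested window */
        unsigned long chunk_start = range_start + offset;
        unsigned long chunk_last = chunk_start + npages - 1;
        unsigned long first = map_start > chunk_start ? map_start : chunk_start;
        unsigned long last = map_last < chunk_last ? map_last : chunk_last;

        if (first <= last)
                printf("map offset 0x%lx npages 0x%lx\n",
                       first - range_start, last - first + 1); /* 0x40, 0x20 */
        else
                printf("validated chunk outside the window, skip mapping\n");
        return 0;
}
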
@@ -1833,8 +1806,8 @@ static void svm_range_restore_work(struct work_struct *work)
*/
mutex_lock(&prange->migrate_mutex);
- r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
- false, true, false);
+ r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+ MAX_GPU_INSTANCE, false, true, false);
if (r)
pr_debug("failed %d to map 0x%lx to gpus\n", r,
prange->start);
@@ -1871,7 +1844,7 @@ out_reschedule:
/* If validation failed, reschedule another attempt */
if (evicted_ranges) {
pr_debug("reschedule to restore svm range\n");
- schedule_delayed_work(&svms->restore_work,
+ queue_delayed_work(system_freezable_wq, &svms->restore_work,
msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
kfd_smi_event_queue_restore_rescheduled(mm);
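
Moving restore_work from schedule_delayed_work() (which targets system_wq) to queue_delayed_work() on system_freezable_wq keeps the restore from running while the system is freezing for suspend. A minimal, illustrative kernel-module sketch of the same pattern; it is not the amdkfd code, just the workqueue API usage:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work demo_work;

static void demo_fn(struct work_struct *work)
{
        pr_info("restore-style work ran after the delay\n");
}

static int __init demo_init(void)
{
        INIT_DELAYED_WORK(&demo_work, demo_fn);
        /* freezable wq: the work will not run while tasks are frozen */
        queue_delayed_work(system_freezable_wq, &demo_work,
                           msecs_to_jiffies(2000));
        return 0;
}

static void __exit demo_exit(void)
{
        cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
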
@@ -1947,7 +1920,7 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
pr_debug("failed to quiesce KFD\n");
pr_debug("schedule to restore svm %p ranges\n", svms);
- schedule_delayed_work(&svms->restore_work,
+ queue_delayed_work(system_freezable_wq, &svms->restore_work,
msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
} else {
unsigned long s, l;
@@ -2002,6 +1975,7 @@ static struct svm_range *svm_range_clone(struct svm_range *old)
new->actual_loc = old->actual_loc;
new->granularity = old->granularity;
new->mapped_to_gpu = old->mapped_to_gpu;
+ new->vram_pages = old->vram_pages;
bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
@@ -2911,6 +2885,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
uint32_t vmid, uint32_t node_id,
uint64_t addr, bool write_fault)
{
+ unsigned long start, last, size;
struct mm_struct *mm = NULL;
struct svm_range_list *svms;
struct svm_range *prange;
@@ -3046,40 +3021,44 @@ retry_write_locked:
kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
write_fault, timestamp);
- if (prange->actual_loc != best_loc) {
+ /* Align migration range start and size to granularity size */
+ size = 1UL << prange->granularity;
+ start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
+ last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
+ if (prange->actual_loc != 0 || best_loc != 0) {
migration = true;
+
if (best_loc) {
- r = svm_migrate_to_vram(prange, best_loc, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
+ r = svm_migrate_to_vram(prange, best_loc, start, last,
+ mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
if (r) {
pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
r, addr);
/* Fallback to system memory if migration to
* VRAM failed
*/
- if (prange->actual_loc)
- r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
- NULL);
+ if (prange->actual_loc && prange->actual_loc != best_loc)
+ r = svm_migrate_vram_to_ram(prange, mm, start, last,
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
else
r = 0;
}
} else {
- r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
- NULL);
+ r = svm_migrate_vram_to_ram(prange, mm, start, last,
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
}
if (r) {
pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
- r, svms, prange->start, prange->last);
+ r, svms, start, last);
goto out_unlock_range;
}
}
- r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
+ r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
+ false, false);
if (r)
pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
- r, svms, prange->start, prange->last);
+ r, svms, start, last);
kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
migration);
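
Instead of splitting the range at the fault address (the removed svm_range_split_by_granularity()), the fault handler now derives the migration/mapping window by aligning the faulting page down to the range's granularity block and clamping it to the prange bounds. A user-space sketch with made-up numbers:

#include <stdio.h>

int main(void)
{
        unsigned long addr = 0x1234;            /* faulting page number */
        unsigned long range_start = 0x1000, range_last = 0x1fff;
        unsigned int granularity = 9;           /* 512-page (2 MiB) blocks */
        unsigned long size = 1UL << granularity;
        unsigned long start = addr & ~(size - 1);       /* ALIGN_DOWN */
        unsigned long last = start + size - 1;          /* block end */

        if (start < range_start)
                start = range_start;
        if (last > range_last)
                last = range_last;

        printf("migrate/map [0x%lx 0x%lx]\n", start, last); /* [0x1200 0x13ff] */
        return 0;
}
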
@@ -3425,18 +3404,24 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
*migrated = false;
best_loc = svm_range_best_prefetch_location(prange);
- if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
- best_loc == prange->actual_loc)
+ /* when best_loc is a gpu node and the same as prange->actual_loc,
+ * we still need to do the migration, because prange->actual_loc != 0
+ * does not mean all pages in prange are in vram. hmm migrate will
+ * pick up the right pages during migration.
+ */
+ if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) ||
+ (best_loc == 0 && prange->actual_loc == 0))
return 0;
if (!best_loc) {
- r = svm_migrate_vram_to_ram(prange, mm,
+ r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
*migrated = !r;
return r;
}
- r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
+ r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last,
+ mm, KFD_MIGRATE_TRIGGER_PREFETCH);
*migrated = !r;
return r;
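
The prefetch path no longer bails out when best_loc equals actual_loc, since a nonzero actual_loc only means some pages of the range are in VRAM. A small user-space model of the new decision, where LOCATION_UNDEFINED stands in for KFD_IOCTL_SVM_LOCATION_UNDEFINED:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LOCATION_UNDEFINED 0xffffffffu  /* stand-in value, illustrative */

/* Skip migration only when no prefetch location is set, or when both the
 * prefetch location and the current location are system memory (0).
 */
static bool need_migration(uint32_t best_loc, uint32_t actual_loc)
{
        if (best_loc == LOCATION_UNDEFINED)
                return false;
        if (best_loc == 0 && actual_loc == 0)
                return false;
        return true;
}

int main(void)
{
        printf("%d %d %d\n",
               need_migration(0, 0),                    /* 0: both sysmem */
               need_migration(1, 1),                    /* 1: may still hold sysmem pages */
               need_migration(LOCATION_UNDEFINED, 1));  /* 0: no preferred location */
        return 0;
}
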
@@ -3490,7 +3475,11 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
do {
+ /* migrate all vram pages in this prange to sys ram;
+ * after that, prange->actual_loc should be zero
+ */
r = svm_migrate_vram_to_ram(prange, mm,
+ prange->start, prange->last,
KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
} while (!r && prange->actual_loc && --retries);
@@ -3614,8 +3603,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
- r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
- true, true, flush_tlb);
+ r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+ MAX_GPU_INSTANCE, true, true, flush_tlb);
if (r)
pr_debug("failed %d to map svm range\n", r);
@@ -3629,8 +3618,8 @@ out_unlock_range:
pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
prange, prange->start, prange->last);
mutex_lock(&prange->migrate_mutex);
- r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
- true, true, prange->mapped_to_gpu);
+ r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
+ MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu);
if (r)
pr_debug("failed %d on remap svm range\n", r);
mutex_unlock(&prange->migrate_mutex);