author      Daniel Baumann <daniel.baumann@progress-linux.org>   2024-08-07 13:11:22 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>   2024-08-07 13:11:22 +0000
commit      b20732900e4636a467c0183a47f7396700f5f743 (patch)
tree        42f079ff82e701ebcb76829974b4caca3e5b6798 /drivers/accel/habanalabs/common/mmu
parent      Adding upstream version 6.8.12. (diff)
Adding upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/accel/habanalabs/common/mmu')
-rw-r--r--   drivers/accel/habanalabs/common/mmu/Makefile      |   2
-rw-r--r--   drivers/accel/habanalabs/common/mmu/mmu.c         | 223
-rw-r--r--   drivers/accel/habanalabs/common/mmu/mmu_v1.c      | 354
-rw-r--r--   drivers/accel/habanalabs/common/mmu/mmu_v2.c      | 338
-rw-r--r--   drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c   |  24
5 files changed, 617 insertions(+), 324 deletions(-)
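
The substance of the kernel change below is a refactor: the device-resident (DR) page-table helpers that lived as static functions in mmu_v1.c move into common/mmu/mmu.c as shared hl_mmu_dr_* functions, a new mmu_v2.c reuses them for Gaudi2, and hl_mmu_if_set_funcs() stops assuming Gaudi2 page tables are always host resident. The following is a condensed, standalone sketch of that new dispatch pattern only; the struct and helper names here are invented stand-ins for illustration, not the kernel's real types, and only the branching logic mirrors the diff.

#include <stdbool.h>
#include <stdio.h>

struct mmu_funcs { const char *name; };

enum { MMU_DR_PGT, MMU_HR_PGT, MMU_NUM_PGT };

struct toy_device {
    bool pmmu_host_resident;                 /* stands in for prop->pmmu.host_resident */
    struct mmu_funcs mmu_func[MMU_NUM_PGT];
};

static void set_v2_funcs(struct mmu_funcs *f)    { f->name = "mmu_v2 (device-resident)"; }
static void set_v2_hr_funcs(struct mmu_funcs *f) { f->name = "mmu_v2_hr (host-resident)"; }

/* Mirrors the new branching in hl_mmu_if_set_funcs() for the Gaudi2 case. */
static void toy_if_set_funcs(struct toy_device *dev)
{
    /* DR page-table functions are now installed unconditionally... */
    set_v2_funcs(&dev->mmu_func[MMU_DR_PGT]);

    /* ...HR functions only when the PMMU keeps its page tables in host memory. */
    if (dev->pmmu_host_resident)
        set_v2_hr_funcs(&dev->mmu_func[MMU_HR_PGT]);
}

int main(void)
{
    struct toy_device dev = { .pmmu_host_resident = true };

    toy_if_set_funcs(&dev);
    printf("DR: %s\n", dev.mmu_func[MMU_DR_PGT].name);
    if (dev.pmmu_host_resident)
        printf("HR: %s\n", dev.mmu_func[MMU_HR_PGT].name);
    return 0;
}
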
diff --git a/drivers/accel/habanalabs/common/mmu/Makefile b/drivers/accel/habanalabs/common/mmu/Makefile
index 1806c524e0..f4b815bf4f 100644
--- a/drivers/accel/habanalabs/common/mmu/Makefile
+++ b/drivers/accel/habanalabs/common/mmu/Makefile
@@ -1,3 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
 HL_COMMON_MMU_FILES := common/mmu/mmu.o common/mmu/mmu_v1.o \
-        common/mmu/mmu_v2_hr.o
+        common/mmu/mmu_v2.o common/mmu/mmu_v2_hr.o
diff --git a/drivers/accel/habanalabs/common/mmu/mmu.c b/drivers/accel/habanalabs/common/mmu/mmu.c
index b654302a68..d3eaab9084 100644
--- a/drivers/accel/habanalabs/common/mmu/mmu.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu.c
@@ -585,6 +585,8 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
 
 int hl_mmu_if_set_funcs(struct hl_device *hdev)
 {
+    struct asic_fixed_properties *prop = &hdev->asic_prop;
+
     if (hdev->mmu_disable)
         return 0;
 
@@ -597,8 +599,9 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
     case ASIC_GAUDI2:
     case ASIC_GAUDI2B:
     case ASIC_GAUDI2C:
-        /* MMUs in Gaudi2 are always host resident */
-        hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
+        hl_mmu_v2_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
+        if (prop->pmmu.host_resident)
+            hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
         break;
     default:
         dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
@@ -1209,3 +1212,219 @@ int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_
 
     return 0;
 }
+
+struct pgt_info *hl_mmu_dr_get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
+{
+    struct pgt_info *pgt_info = NULL;
+
+    hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
+                (unsigned long) hop_addr)
+        if (hop_addr == pgt_info->shadow_addr)
+            break;
+
+    return pgt_info;
+}
+
+void hl_mmu_dr_free_hop(struct hl_ctx *ctx, u64 hop_addr)
+{
+    struct pgt_info *pgt_info = hl_mmu_dr_get_pgt_info(ctx, hop_addr);
+
+    hl_mmu_dr_free_pgt_node(ctx, pgt_info);
+}
+
+void hl_mmu_dr_free_pgt_node(struct hl_ctx *ctx, struct pgt_info *pgt_info)
+{
+    struct hl_device *hdev = ctx->hdev;
+
+    gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
+            hdev->asic_prop.dmmu.hop_table_size);
+    hash_del(&pgt_info->node);
+    kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
+    kfree(pgt_info);
+}
+
+u64 hl_mmu_dr_get_phys_hop0_addr(struct hl_ctx *ctx)
+{
+    return ctx->hdev->asic_prop.mmu_pgt_addr +
+            (ctx->asid * ctx->hdev->asic_prop.dmmu.hop_table_size);
+}
+
+u64 hl_mmu_dr_get_hop0_addr(struct hl_ctx *ctx)
+{
+    return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
+            (ctx->asid * ctx->hdev->asic_prop.dmmu.hop_table_size);
+}
+
+u64 hl_mmu_dr_get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
+{
+    u64 page_mask = ctx->hdev->asic_prop.dmmu.hop_table_size - 1;
+    u64 shadow_hop_addr = shadow_addr & (~page_mask);
+    u64 pte_offset = shadow_addr & page_mask;
+    u64 phys_hop_addr;
+
+    if (shadow_hop_addr != hl_mmu_dr_get_hop0_addr(ctx))
+        phys_hop_addr = hl_mmu_dr_get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
+    else
+        phys_hop_addr = hl_mmu_dr_get_phys_hop0_addr(ctx);
+
+    return phys_hop_addr + pte_offset;
+}
+
+void hl_mmu_dr_write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
+{
+    u64 phys_val = hl_mmu_dr_get_phys_addr(ctx, val);
+
+    ctx->hdev->asic_funcs->write_pte(ctx->hdev, hl_mmu_dr_get_phys_addr(ctx, shadow_pte_addr),
+            phys_val);
+
+    *(u64 *) (uintptr_t) shadow_pte_addr = val;
+}
+
+void hl_mmu_dr_write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
+{
+    ctx->hdev->asic_funcs->write_pte(ctx->hdev,
+            hl_mmu_dr_get_phys_addr(ctx, shadow_pte_addr),
+            val);
+    *(u64 *) (uintptr_t) shadow_pte_addr = val;
+}
+
+void hl_mmu_dr_clear_pte(struct hl_ctx *ctx, u64 pte_addr)
+{
+    hl_mmu_dr_write_final_pte(ctx, pte_addr, 0);
+}
+
+void hl_mmu_dr_get_pte(struct hl_ctx *ctx, u64 hop_addr)
+{
+    hl_mmu_dr_get_pgt_info(ctx, hop_addr)->num_of_ptes++;
+}
+
+int hl_mmu_dr_put_pte(struct hl_ctx *ctx, u64 hop_addr)
+{
+    struct pgt_info *pgt_info = hl_mmu_dr_get_pgt_info(ctx, hop_addr);
+    int num_of_ptes_left;
+
+    pgt_info->num_of_ptes--;
+
+    /*
+     * Need to save the number of ptes left because hl_mmu_free_hop might free
+     * the pgt_info
+     */
+    num_of_ptes_left = pgt_info->num_of_ptes;
+    if (!num_of_ptes_left)
+        hl_mmu_dr_free_pgt_node(ctx, pgt_info);
+
+    return num_of_ptes_left;
+}
+
+u64 hl_mmu_dr_alloc_hop(struct hl_ctx *ctx)
+{
+    struct hl_device *hdev = ctx->hdev;
+    struct asic_fixed_properties *prop = &hdev->asic_prop;
+    struct pgt_info *pgt_info;
+    u64 phys_addr, shadow_addr;
+
+    pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
+    if (!pgt_info)
+        return ULLONG_MAX;
+
+    phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
+            prop->dmmu.hop_table_size);
+    if (!phys_addr) {
+        dev_err(hdev->dev, "failed to allocate page\n");
+        goto pool_add_err;
+    }
+
+    shadow_addr = (u64) (uintptr_t) kzalloc(prop->dmmu.hop_table_size,
+            GFP_KERNEL);
+    if (!shadow_addr)
+        goto shadow_err;
+
+    pgt_info->phys_addr = phys_addr;
+    pgt_info->shadow_addr = shadow_addr;
+    pgt_info->ctx = ctx;
+    pgt_info->num_of_ptes = 0;
+    hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);
+
+    return shadow_addr;
+
+shadow_err:
+    gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool,
+            phys_addr, prop->dmmu.hop_table_size);
+pool_add_err:
+    kfree(pgt_info);
+
+    return ULLONG_MAX;
+}
+
+u64 hl_mmu_dr_get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte, bool *is_new_hop)
+{
+    u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
+
+    if (hop_addr == ULLONG_MAX) {
+        hop_addr = hl_mmu_dr_alloc_hop(ctx);
+        *is_new_hop = (hop_addr != ULLONG_MAX);
+    }
+
+    return hop_addr;
+}
+
+void hl_mmu_dr_flush(struct hl_ctx *ctx)
+{
+    /* flush all writes from all cores to reach PCI */
+    mb();
+    ctx->hdev->asic_funcs->read_pte(ctx->hdev, hl_mmu_dr_get_phys_hop0_addr(ctx));
+}
+
+int hl_mmu_dr_init(struct hl_device *hdev)
+{
+    struct asic_fixed_properties *prop = &hdev->asic_prop;
+    int rc;
+
+    hdev->mmu_priv.dr.mmu_pgt_pool =
+            gen_pool_create(__ffs(prop->dmmu.hop_table_size), -1);
+
+    if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
+        dev_err(hdev->dev, "Failed to create page gen pool\n");
+        return -ENOMEM;
+    }
+
+    rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
+            prop->dmmu.hop0_tables_total_size,
+            prop->dmmu.pgt_size - prop->dmmu.hop0_tables_total_size,
+            -1);
+    if (rc) {
+        dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
+        goto err_pool_add;
+    }
+
+    hdev->mmu_priv.dr.mmu_shadow_hop0 = kvcalloc(prop->max_asid,
+            prop->dmmu.hop_table_size, GFP_KERNEL);
+    if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
+        rc = -ENOMEM;
+        goto err_pool_add;
+    }
+
+    /* MMU H/W init will be done in device hw_init() */
+
+    return 0;
+
+err_pool_add:
+    gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+
+    return rc;
+}
+
+void hl_mmu_dr_fini(struct hl_device *hdev)
+{
+    /* MMU H/W fini was already done in device hw_fini() */
+
+    if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0))
+        return;
+
+    kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
+    gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+
+    /* Make sure that if we arrive here again without init was
+     * called we won't cause kernel panic. This can happen for
+     * example if we fail during hard reset code at certain points
+     */
+    hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
+}
diff --git a/drivers/accel/habanalabs/common/mmu/mmu_v1.c b/drivers/accel/habanalabs/common/mmu/mmu_v1.c
index d925dc4dd0..845d16aaa6 100644
--- a/drivers/accel/habanalabs/common/mmu/mmu_v1.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu_v1.c
@@ -12,166 +12,6 @@
 
 #define MMU_V1_MAX_HOPS (MMU_HOP4 + 1)
 
-static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
-
-static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
-{
-    struct pgt_info *pgt_info = NULL;
-
-    hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
-                (unsigned long) hop_addr)
-        if (hop_addr == pgt_info->shadow_addr)
-            break;
-
-    return pgt_info;
-}
-
-static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
-{
-    struct hl_device *hdev = ctx->hdev;
-
-    gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
-            hdev->asic_prop.mmu_hop_table_size);
-    hash_del(&pgt_info->node);
-    kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
-    kfree(pgt_info);
-}
-
-static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
-{
-    struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
-
-    _free_hop(ctx, pgt_info);
-}
-
-static u64 alloc_hop(struct hl_ctx *ctx)
-{
-    struct hl_device *hdev = ctx->hdev;
-    struct asic_fixed_properties *prop = &hdev->asic_prop;
-    struct pgt_info *pgt_info;
-    u64 phys_addr, shadow_addr;
-
-    pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
-    if (!pgt_info)
-        return ULLONG_MAX;
-
-    phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
-            prop->mmu_hop_table_size);
-    if (!phys_addr) {
-        dev_err(hdev->dev, "failed to allocate page\n");
-        goto pool_add_err;
-    }
-
-    shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
-            GFP_KERNEL);
-    if (!shadow_addr)
-        goto shadow_err;
-
-    pgt_info->phys_addr = phys_addr;
-    pgt_info->shadow_addr = shadow_addr;
-    pgt_info->ctx = ctx;
-    pgt_info->num_of_ptes = 0;
-    hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);
-
-    return shadow_addr;
-
-shadow_err:
-    gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, phys_addr,
-            prop->mmu_hop_table_size);
-pool_add_err:
-    kfree(pgt_info);
-
-    return ULLONG_MAX;
-}
-
-static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
-{
-    return ctx->hdev->asic_prop.mmu_pgt_addr +
-            (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
-}
-
-static inline u64 get_hop0_addr(struct hl_ctx *ctx)
-{
-    return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
-            (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
-}
-
-static void flush(struct hl_ctx *ctx)
-{
-    /* flush all writes from all cores to reach PCI */
-    mb();
-    ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
-}
-
-/* transform the value to physical address when writing to H/W */
-static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
-{
-    /*
-     * The value to write is actually the address of the next shadow hop +
-     * flags at the 12 LSBs.
-     * Hence in order to get the value to write to the physical PTE, we
-     * clear the 12 LSBs and translate the shadow hop to its associated
-     * physical hop, and add back the original 12 LSBs.
-     */
-    u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
-            (val & FLAGS_MASK);
-
-    ctx->hdev->asic_funcs->write_pte(ctx->hdev,
-            get_phys_addr(ctx, shadow_pte_addr),
-            phys_val);
-
-    *(u64 *) (uintptr_t) shadow_pte_addr = val;
-}
-
-/* do not transform the value to physical address when writing to H/W */
-static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
-                    u64 val)
-{
-    ctx->hdev->asic_funcs->write_pte(ctx->hdev,
-            get_phys_addr(ctx, shadow_pte_addr),
-            val);
-    *(u64 *) (uintptr_t) shadow_pte_addr = val;
-}
-
-/* clear the last and present bits */
-static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
-{
-    /* no need to transform the value to physical address */
-    write_final_pte(ctx, pte_addr, 0);
-}
-
-static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
-{
-    get_pgt_info(ctx, hop_addr)->num_of_ptes++;
-}
-
-/*
- * put_pte - decrement the num of ptes and free the hop if possible
- *
- * @ctx: pointer to the context structure
- * @hop_addr: addr of the hop
- *
- * This function returns the number of ptes left on this hop. If the number is
- * 0, it means the pte was freed.
- */
-static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
-{
-    struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
-    int num_of_ptes_left;
-
-    pgt_info->num_of_ptes--;
-
-    /*
-     * Need to save the number of ptes left because free_hop might free
-     * the pgt_info
-     */
-    num_of_ptes_left = pgt_info->num_of_ptes;
-    if (!num_of_ptes_left)
-        _free_hop(ctx, pgt_info);
-
-    return num_of_ptes_left;
-}
-
 static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
                     u64 *hop_addr_arr, u64 virt_addr, enum mmu_hop_num hop_idx)
 {
@@ -183,35 +23,6 @@ static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, struct hl_mmu_properties
     ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
 }
 
-static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
-                    bool *is_new_hop)
-{
-    u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
-
-    if (hop_addr == ULLONG_MAX) {
-        hop_addr = alloc_hop(ctx);
-        *is_new_hop = (hop_addr != ULLONG_MAX);
-    }
-
-    return hop_addr;
-}
-
-/* translates shadow address inside hop to a physical address */
-static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
-{
-    u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
-    u64 shadow_hop_addr = shadow_addr & ~page_mask;
-    u64 pte_offset = shadow_addr & page_mask;
-    u64 phys_hop_addr;
-
-    if (shadow_hop_addr != get_hop0_addr(ctx))
-        phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
-    else
-        phys_hop_addr = get_phys_hop0_addr(ctx);
-
-    return phys_hop_addr + pte_offset;
-}
-
 static int dram_default_mapping_init(struct hl_ctx *ctx)
 {
     struct hl_device *hdev = ctx->hdev;
@@ -232,13 +43,13 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
     /* add hop1 and hop2 */
     total_hops = num_of_hop3 + 2;
 
-    ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
+    ctx->dram_default_hops = kcalloc(total_hops, HL_PTE_SIZE, GFP_KERNEL);
     if (!ctx->dram_default_hops)
         return -ENOMEM;
 
-    hop0_addr = get_hop0_addr(ctx);
+    hop0_addr = hl_mmu_dr_get_hop0_addr(ctx);
 
-    hop1_addr = alloc_hop(ctx);
+    hop1_addr = hl_mmu_dr_alloc_hop(ctx);
     if (hop1_addr == ULLONG_MAX) {
         dev_err(hdev->dev, "failed to alloc hop 1\n");
         rc = -ENOMEM;
@@ -247,7 +58,7 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
 
     ctx->dram_default_hops[total_hops - 1] = hop1_addr;
 
-    hop2_addr = alloc_hop(ctx);
+    hop2_addr = hl_mmu_dr_alloc_hop(ctx);
     if (hop2_addr == ULLONG_MAX) {
         dev_err(hdev->dev, "failed to alloc hop 2\n");
         rc = -ENOMEM;
@@ -257,7 +68,7 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
     ctx->dram_default_hops[total_hops - 2] = hop2_addr;
 
     for (i = 0 ; i < num_of_hop3 ; i++) {
-        ctx->dram_default_hops[i] = alloc_hop(ctx);
+        ctx->dram_default_hops[i] = hl_mmu_dr_alloc_hop(ctx);
         if (ctx->dram_default_hops[i] == ULLONG_MAX) {
             dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i);
             rc = -ENOMEM;
@@ -268,18 +79,18 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
 
     /* need only pte 0 in hops 0 and 1 */
     pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
-    write_pte(ctx, hop0_addr, pte_val);
+    hl_mmu_dr_write_pte(ctx, hop0_addr, pte_val);
 
     pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
-    write_pte(ctx, hop1_addr, pte_val);
-    get_pte(ctx, hop1_addr);
+    hl_mmu_dr_write_pte(ctx, hop1_addr, pte_val);
+    hl_mmu_dr_get_pte(ctx, hop1_addr);
 
     hop2_pte_addr = hop2_addr;
     for (i = 0 ; i < num_of_hop3 ; i++) {
         pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
                 PAGE_PRESENT_MASK;
-        write_pte(ctx, hop2_pte_addr, pte_val);
-        get_pte(ctx, hop2_addr);
+        hl_mmu_dr_write_pte(ctx, hop2_pte_addr, pte_val);
+        hl_mmu_dr_get_pte(ctx, hop2_addr);
         hop2_pte_addr += HL_PTE_SIZE;
     }
 
@@ -289,23 +100,23 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
     for (i = 0 ; i < num_of_hop3 ; i++) {
         hop3_pte_addr = ctx->dram_default_hops[i];
         for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
-            write_final_pte(ctx, hop3_pte_addr, pte_val);
-            get_pte(ctx, ctx->dram_default_hops[i]);
+            hl_mmu_dr_write_final_pte(ctx, hop3_pte_addr, pte_val);
+            hl_mmu_dr_get_pte(ctx, ctx->dram_default_hops[i]);
             hop3_pte_addr += HL_PTE_SIZE;
         }
     }
 
-    flush(ctx);
+    hl_mmu_dr_flush(ctx);
 
     return 0;
 
hop3_err:
     for (i = 0 ; i < hop3_allocated ; i++)
-        free_hop(ctx, ctx->dram_default_hops[i]);
+        hl_mmu_dr_free_hop(ctx, ctx->dram_default_hops[i]);
 
-    free_hop(ctx, hop2_addr);
+    hl_mmu_dr_free_hop(ctx, hop2_addr);
hop2_err:
-    free_hop(ctx, hop1_addr);
+    hl_mmu_dr_free_hop(ctx, hop1_addr);
hop1_err:
     kfree(ctx->dram_default_hops);
 
@@ -329,7 +140,7 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx)
     do_div(num_of_hop3, prop->dram_page_size);
     do_div(num_of_hop3, HOP_PTE_ENTRIES_512);
 
-    hop0_addr = get_hop0_addr(ctx);
+    hop0_addr = hl_mmu_dr_get_hop0_addr(ctx);
     /* add hop1 and hop2 */
     total_hops = num_of_hop3 + 2;
     hop1_addr = ctx->dram_default_hops[total_hops - 1];
@@ -338,101 +149,26 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx)
     for (i = 0 ; i < num_of_hop3 ; i++) {
         hop3_pte_addr = ctx->dram_default_hops[i];
         for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
-            clear_pte(ctx, hop3_pte_addr);
-            put_pte(ctx, ctx->dram_default_hops[i]);
+            hl_mmu_dr_clear_pte(ctx, hop3_pte_addr);
+            hl_mmu_dr_put_pte(ctx, ctx->dram_default_hops[i]);
             hop3_pte_addr += HL_PTE_SIZE;
         }
     }
 
     hop2_pte_addr = hop2_addr;
     for (i = 0 ; i < num_of_hop3 ; i++) {
-        clear_pte(ctx, hop2_pte_addr);
-        put_pte(ctx, hop2_addr);
+        hl_mmu_dr_clear_pte(ctx, hop2_pte_addr);
+        hl_mmu_dr_put_pte(ctx, hop2_addr);
         hop2_pte_addr += HL_PTE_SIZE;
     }
 
-    clear_pte(ctx, hop1_addr);
-    put_pte(ctx, hop1_addr);
-    clear_pte(ctx, hop0_addr);
+    hl_mmu_dr_clear_pte(ctx, hop1_addr);
+    hl_mmu_dr_put_pte(ctx, hop1_addr);
+    hl_mmu_dr_clear_pte(ctx, hop0_addr);
 
     kfree(ctx->dram_default_hops);
 
-    flush(ctx);
-}
-
-/**
- * hl_mmu_v1_init() - initialize the MMU module.
- * @hdev: habanalabs device structure.
- *
- * This function does the following:
- * - Create a pool of pages for pgt_infos.
- * - Create a shadow table for pgt
- *
- * Return: 0 for success, non-zero for failure.
- */
-static int hl_mmu_v1_init(struct hl_device *hdev)
-{
-    struct asic_fixed_properties *prop = &hdev->asic_prop;
-    int rc;
-
-    hdev->mmu_priv.dr.mmu_pgt_pool =
-            gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
-
-    if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
-        dev_err(hdev->dev, "Failed to create page gen pool\n");
-        return -ENOMEM;
-    }
-
-    rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
-            prop->mmu_hop0_tables_total_size,
-            prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
-            -1);
-    if (rc) {
-        dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
-        goto err_pool_add;
-    }
-
-    hdev->mmu_priv.dr.mmu_shadow_hop0 = kvcalloc(prop->max_asid, prop->mmu_hop_table_size,
-            GFP_KERNEL);
-    if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
-        rc = -ENOMEM;
-        goto err_pool_add;
-    }
-
-    /* MMU H/W init will be done in device hw_init() */
-
-    return 0;
-
-err_pool_add:
-    gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
-
-    return rc;
-}
-
-/**
- * hl_mmu_v1_fini() - release the MMU module.
- * @hdev: habanalabs device structure.
- *
- * This function does the following:
- * - Disable MMU in H/W.
- * - Free the pgt_infos pool.
- *
- * All contexts should be freed before calling this function.
- */
-static void hl_mmu_v1_fini(struct hl_device *hdev)
-{
-    /* MMU H/W fini was already done in device hw_fini() */
-
-    if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
-        kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
-        gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
-
-        /* Make sure that if we arrive here again without init was
-         * called we won't cause kernel panic. This can happen for
-         * example if we fail during hard reset code at certain points
-         */
-        hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
-    }
+    hl_mmu_dr_flush(ctx);
 }
 
 /**
@@ -476,7 +212,7 @@ static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx)
         dev_err_ratelimited(hdev->dev,
             "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
             pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
-        _free_hop(ctx, pgt_info);
+        hl_mmu_dr_free_pgt_node(ctx, pgt_info);
     }
 }
 
@@ -495,7 +231,7 @@ static int hl_mmu_v1_unmap(struct hl_ctx *ctx,
 
     for (hop_idx = MMU_HOP0; hop_idx < MMU_HOP4; hop_idx++) {
         if (hop_idx == MMU_HOP0) {
-            hop_addr[hop_idx] = get_hop0_addr(ctx);
+            hop_addr[hop_idx] = hl_mmu_dr_get_hop0_addr(ctx);
         } else {
             hop_addr[hop_idx] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
             if (hop_addr[hop_idx] == ULLONG_MAX)
@@ -546,30 +282,30 @@ static int hl_mmu_v1_unmap(struct hl_ctx *ctx,
         }
 
         hop_idx = MMU_HOP3;
-        write_final_pte(ctx, hop_pte_addr[hop_idx], default_pte);
-        put_pte(ctx, hop_addr[hop_idx]);
+        hl_mmu_dr_write_final_pte(ctx, hop_pte_addr[hop_idx], default_pte);
+        hl_mmu_dr_put_pte(ctx, hop_addr[hop_idx]);
     } else {
         if (!(curr_pte & PAGE_PRESENT_MASK))
             goto not_mapped;
 
         if (hop_addr[MMU_HOP4])
-            clear_pte(ctx, hop_pte_addr[MMU_HOP4]);
+            hl_mmu_dr_clear_pte(ctx, hop_pte_addr[MMU_HOP4]);
         else
-            clear_pte(ctx, hop_pte_addr[MMU_HOP3]);
+            hl_mmu_dr_clear_pte(ctx, hop_pte_addr[MMU_HOP3]);
 
-        if (hop_addr[MMU_HOP4] && !put_pte(ctx, hop_addr[MMU_HOP4]))
+        if (hop_addr[MMU_HOP4] && !hl_mmu_dr_put_pte(ctx, hop_addr[MMU_HOP4]))
             clear_hop3 = true;
 
         if (!clear_hop3)
             goto mapped;
 
         for (hop_idx = MMU_HOP3; hop_idx >= 0; hop_idx--) {
-            clear_pte(ctx, hop_pte_addr[hop_idx]);
+            hl_mmu_dr_clear_pte(ctx, hop_pte_addr[hop_idx]);
 
             if (hop_idx == MMU_HOP0)
                 break;
 
-            if (put_pte(ctx, hop_addr[hop_idx]))
+            if (hl_mmu_dr_put_pte(ctx, hop_addr[hop_idx]))
                 goto mapped;
         }
     }
@@ -616,10 +352,10 @@ static int hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
 
     for (hop_idx = MMU_HOP0; hop_idx < num_hops; hop_idx++) {
         if (hop_idx == MMU_HOP0) {
-            hop_addr[hop_idx] = get_hop0_addr(ctx);
+            hop_addr[hop_idx] = hl_mmu_dr_get_hop0_addr(ctx);
         } else {
             hop_addr[hop_idx] =
-                get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[hop_idx]);
+                hl_mmu_dr_get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[hop_idx]);
             if (hop_addr[hop_idx] == ULLONG_MAX)
                 goto err;
         }
@@ -666,27 +402,27 @@ static int hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
     curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
             | PAGE_PRESENT_MASK;
 
-    write_final_pte(ctx, hop_pte_addr[num_hops - 1], curr_pte);
+    hl_mmu_dr_write_final_pte(ctx, hop_pte_addr[num_hops - 1], curr_pte);
 
     for (hop_idx = MMU_HOP1; hop_idx < num_hops; hop_idx++) {
         prev_hop = hop_idx - 1;
 
         if (hop_new[hop_idx]) {
             curr_pte = (hop_addr[hop_idx] & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
-            write_pte(ctx, hop_pte_addr[prev_hop], curr_pte);
+            hl_mmu_dr_write_pte(ctx, hop_pte_addr[prev_hop], curr_pte);
             if (hop_idx != MMU_HOP1)
-                get_pte(ctx, hop_addr[prev_hop]);
+                hl_mmu_dr_get_pte(ctx, hop_addr[prev_hop]);
         }
     }
 
-    get_pte(ctx, hop_addr[num_hops - 1]);
+    hl_mmu_dr_get_pte(ctx, hop_addr[num_hops - 1]);
 
     return 0;
 
err:
     for (hop_idx = num_hops; hop_idx > MMU_HOP0; hop_idx--) {
         if (hop_new[hop_idx])
-            free_hop(ctx, hop_addr[hop_idx]);
+            hl_mmu_dr_free_hop(ctx, hop_addr[hop_idx]);
     }
 
     return rc;
@@ -752,7 +488,7 @@ static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
     if (is_huge)
         used_hops--;
 
-    hops->hop_info[0].hop_addr = get_phys_hop0_addr(ctx);
+    hops->hop_info[0].hop_addr = hl_mmu_dr_get_phys_hop0_addr(ctx);
     hops->hop_info[0].hop_pte_addr =
             hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
                     hops->hop_info[0].hop_addr, virt_addr);
@@ -801,13 +537,13 @@ static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
  */
 void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
 {
-    mmu->init = hl_mmu_v1_init;
-    mmu->fini = hl_mmu_v1_fini;
+    mmu->init = hl_mmu_dr_init;
+    mmu->fini = hl_mmu_dr_fini;
     mmu->ctx_init = hl_mmu_v1_ctx_init;
     mmu->ctx_fini = hl_mmu_v1_ctx_fini;
     mmu->map = hl_mmu_v1_map;
     mmu->unmap = hl_mmu_v1_unmap;
-    mmu->flush = flush;
+    mmu->flush = hl_mmu_dr_flush;
     mmu->swap_out = hl_mmu_v1_swap_out;
     mmu->swap_in = hl_mmu_v1_swap_in;
     mmu->get_tlb_info = hl_mmu_v1_get_tlb_info;
diff --git a/drivers/accel/habanalabs/common/mmu/mmu_v2.c b/drivers/accel/habanalabs/common/mmu/mmu_v2.c
new file mode 100644
index 0000000000..4bc0268fff
--- /dev/null
+++ b/drivers/accel/habanalabs/common/mmu/mmu_v2.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "../habanalabs.h"
+#include "../../include/hw_ip/mmu/mmu_general.h"
+#include "../../include/hw_ip/mmu/mmu_v2_0.h"
+
+#include <linux/slab.h>
+
+/**
+ * hl_mmu_v2_ctx_init() - initialize a context for using the MMU module.
+ * @ctx: pointer to the context structure to initialize.
+ *
+ * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
+ * page tables hops related to this context.
+ * Return: 0 on success, non-zero otherwise.
+ */
+static int hl_mmu_v2_ctx_init(struct hl_ctx *ctx)
+{
+    hash_init(ctx->mmu_shadow_hash);
+
+    return 0;
+}
+
+/*
+ * hl_mmu_v2_ctx_fini - disable a ctx from using the mmu module
+ *
+ * @ctx: pointer to the context structure
+ *
+ * This function does the following:
+ * - Free any pgts which were not freed yet
+ * - Free the mutex
+ * - Free DRAM default page mapping hops
+ */
+static void hl_mmu_v2_ctx_fini(struct hl_ctx *ctx)
+{
+    struct hl_device *hdev = ctx->hdev;
+    struct pgt_info *pgt_info;
+    struct hlist_node *tmp;
+    int i;
+
+    if (!hash_empty(ctx->mmu_shadow_hash))
+        dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
+            ctx->asid);
+
+    hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
+        dev_err_ratelimited(hdev->dev,
+            "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
+            pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
+        hl_mmu_dr_free_pgt_node(ctx, pgt_info);
+    }
+}
+
+static int hl_mmu_v2_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr)
+{
+    u64 hop_addr[MMU_ARCH_6_HOPS] = { 0 }, hop_pte_addr[MMU_ARCH_6_HOPS] = { 0 }, curr_pte,
+        scrambled_virt_addr;
+    struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
+    struct hl_device *hdev = ctx->hdev;
+    struct hl_mmu_properties *mmu_prop;
+    bool is_huge = false;
+    int i, hop_last;
+
+    /* device resident in V2 are allowed only for HMMU */
+    if (!is_dram_addr)
+        return -EINVAL;
+
+    mmu_prop = &prop->dmmu;
+
+    hop_last = mmu_prop->num_hops - 1;
+
+    scrambled_virt_addr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
+
+    hop_addr[0] = hl_mmu_dr_get_hop0_addr(ctx);
+    hop_pte_addr[0] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
+                        hop_addr[0], scrambled_virt_addr);
+    if (hop_pte_addr[0] == U64_MAX)
+        return -EFAULT;
+
+    curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[0];
+
+    for (i = 1 ; i < mmu_prop->num_hops ; i++) {
+        hop_addr[i] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
+        if (hop_addr[i] == ULLONG_MAX)
+            goto not_mapped;
+
+        hop_pte_addr[i] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
+                        hop_addr[i], scrambled_virt_addr);
+        if (hop_pte_addr[i] == U64_MAX)
+            return -EFAULT;
+
+        curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[i];
+
+        if ((i <= hop_last) && (curr_pte & mmu_prop->last_mask)) {
+            hop_last = i;
+            is_huge = true;
+            break;
+        }
+    }
+
+    if (is_dram_addr && !is_huge) {
+        dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n");
+        return -EFAULT;
+    }
+
+    if (!(curr_pte & PAGE_PRESENT_MASK))
+        goto not_mapped;
+
+    for (i = hop_last ; i > 0 ; i--) {
+        hl_mmu_dr_clear_pte(ctx, hop_pte_addr[i]);
+        if (hl_mmu_dr_put_pte(ctx, hop_addr[i]))
+            goto mapped;
+    }
+    hl_mmu_dr_clear_pte(ctx, hop_pte_addr[0]);
+
+mapped:
+    return 0;
+
+not_mapped:
+    dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
+        virt_addr);
+
+    return -EINVAL;
+}
+
+static int hl_mmu_v2_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
+            u32 page_size, bool is_dram_addr)
+{
+    u64 hop_addr[MMU_ARCH_6_HOPS] = { 0 }, hop_pte_addr[MMU_ARCH_6_HOPS] = { 0 },
+        curr_pte = 0, scrambled_virt_addr, scrambled_phys_addr;
+    struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
+    bool hop_new[MMU_ARCH_6_HOPS] = { false };
+    struct hl_device *hdev = ctx->hdev;
+    struct hl_mmu_properties *mmu_prop;
+    int rc, i, hop_last;
+
+    /* device resident in V2 are allowed only for HMMU */
+    if (!is_dram_addr)
+        return -EINVAL;
+
+    mmu_prop = &prop->dmmu;
+
+    hop_last = mmu_prop->num_hops - 1;
+
+    scrambled_virt_addr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
+    scrambled_phys_addr = hdev->asic_funcs->scramble_addr(hdev, phys_addr);
+
+    /* First hop is preallocated therefore it is treated differently */
+    hop_addr[0] = hl_mmu_dr_get_hop0_addr(ctx);
+    hop_pte_addr[0] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
+                        hop_addr[0], scrambled_virt_addr);
+    curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[0];
+
+    /* Handle hop1 to hop_last */
+    for (i = 1 ; i <= hop_last ; i++) {
+        hop_addr[i] = hl_mmu_dr_get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[i]);
+        if (hop_addr[i] == ULLONG_MAX) {
+            rc = -ENOMEM;
+            goto err;
+        }
+
+        hop_pte_addr[i] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
+                        hop_addr[i], scrambled_virt_addr);
+        if (hop_pte_addr[i] == U64_MAX) {
+            rc = -EINVAL;
+            goto err;
+        }
+
+        if (!hop_pte_addr[i]) {
+            rc = -EINVAL;
+            goto err;
+        }
+
+        curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[i];
+    }
+
+    if (curr_pte & PAGE_PRESENT_MASK) {
+        dev_err(hdev->dev,
+            "mapping already exists for virt_addr 0x%llx\n",
+            virt_addr);
+
+        for (i = 0 ; i <= hop_last ; i++)
+            dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n",
+                i, *(u64 *) (uintptr_t) hop_pte_addr[i],
+                hop_pte_addr[i]);
+
+        rc = -EINVAL;
+        goto err;
+    }
+
+    curr_pte = (scrambled_phys_addr & HOP_PHYS_ADDR_MASK)
+            | mmu_prop->last_mask | PAGE_PRESENT_MASK;
+
+    /* Write the PTEs */
+    hl_mmu_dr_write_final_pte(ctx, hop_pte_addr[hop_last], curr_pte);
+
+    /* for each new hop, add its address to the table of previous-hop */
+    for (i = 1 ; i <= hop_last ; i++) {
+        if (hop_new[i]) {
+            curr_pte = (hop_addr[i] & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+            hl_mmu_dr_write_pte(ctx, hop_pte_addr[i - 1], curr_pte);
+
+            if (i - 1)
+                hl_mmu_dr_get_pte(ctx, hop_addr[i - 1]);
+        }
+    }
+    hl_mmu_dr_get_pte(ctx, hop_addr[hop_last]);
+
+    return 0;
+
+err:
+    for (i = 1 ; i <= hop_last ; i++)
+        if (hop_new[i] && (hop_addr[i] != U64_MAX))
+            hl_mmu_dr_free_hop(ctx, hop_addr[i]);
+
+    return rc;
+}
+
+/*
+ * hl_mmu_v2_swap_out - marks all mapping of the given ctx as swapped out
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+static void hl_mmu_v2_swap_out(struct hl_ctx *ctx)
+{
+
+}
+
+/*
+ * hl_mmu_v2_swap_in - marks all mapping of the given ctx as swapped in
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+static void hl_mmu_v2_swap_in(struct hl_ctx *ctx)
+{
+
+}
+
+static int hl_mmu_v2_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops)
+{
+    struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
+    struct hl_device *hdev = ctx->hdev;
+    struct hl_mmu_properties *mmu_prop;
+    bool is_dram_addr;
+    int i;
+
+    is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+                        prop->dmmu.start_addr,
+                        prop->dmmu.end_addr);
+
+    /* device resident in V2 are allowed only for HMMU */
+    if (!is_dram_addr)
+        return -EINVAL;
+
+    mmu_prop = &prop->dmmu;
+    hops->range_type = HL_VA_RANGE_TYPE_DRAM;
+
+    hops->scrambled_vaddr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
+
+    hops->hop_info[0].hop_addr = hl_mmu_dr_get_phys_hop0_addr(ctx);
+    hops->hop_info[0].hop_pte_addr = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
+                        hops->hop_info[0].hop_addr,
+                        hops->scrambled_vaddr);
+    if (hops->hop_info[0].hop_pte_addr == U64_MAX)
+        return -EFAULT;
+
+    hops->hop_info[0].hop_pte_val = hdev->asic_funcs->read_pte(hdev,
+                        hops->hop_info[0].hop_pte_addr);
+    if (hops->hop_info[0].hop_pte_val == U64_MAX)
+        return -EFAULT;
+
+    for (i = 1 ; i < mmu_prop->num_hops ; i++) {
+        hops->hop_info[i].hop_addr =
+            hl_mmu_get_next_hop_addr(ctx, hops->hop_info[i - 1].hop_pte_val);
+        if (hops->hop_info[i].hop_addr == ULLONG_MAX)
+            return -EFAULT;
+
+        hops->hop_info[i].hop_pte_addr =
+            hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
+                        hops->hop_info[i].hop_addr,
+                        hops->scrambled_vaddr);
+        if (hops->hop_info[i].hop_pte_addr == U64_MAX)
+            return -EFAULT;
+
+        hops->hop_info[i].hop_pte_val =
+            hdev->asic_funcs->read_pte(hdev,
+                        hops->hop_info[i].hop_pte_addr);
+
+        if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
+            return -EFAULT;
+
+        if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask)
+            break;
+    }
+
+    /* if passed over all hops then no last hop was found */
+    if (i == mmu_prop->num_hops)
+        return -EFAULT;
+
+    if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
+        return -EFAULT;
+
+    if (hops->scrambled_vaddr != virt_addr)
+        hops->unscrambled_paddr = hdev->asic_funcs->descramble_addr
+                (hdev, hops->hop_info[i].hop_pte_val);
+    else
+        hops->unscrambled_paddr = hops->hop_info[i].hop_pte_val;
+
+    hops->used_hops = i + 1;
+
+    return 0;
+}
+
+/*
+ * hl_mmu_v2_prepare - prepare mmu_if for working with mmu v2
+ *
+ * @hdev: pointer to the device structure
+ * @mmu_if: pointer to the mmu interface structure
+ */
+void hl_mmu_v2_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
+{
+    mmu->init = hl_mmu_dr_init;
+    mmu->fini = hl_mmu_dr_fini;
+    mmu->ctx_init = hl_mmu_v2_ctx_init;
+    mmu->ctx_fini = hl_mmu_v2_ctx_fini;
+    mmu->map = hl_mmu_v2_map;
+    mmu->unmap = hl_mmu_v2_unmap;
+    mmu->flush = hl_mmu_dr_flush;
+    mmu->swap_out = hl_mmu_v2_swap_out;
+    mmu->swap_in = hl_mmu_v2_swap_in;
+    mmu->get_tlb_info = hl_mmu_v2_get_tlb_info;
+}
diff --git a/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c b/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
index afe7ef964f..31507b2a43 100644
--- a/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
@@ -47,7 +47,7 @@ static inline int hl_mmu_v2_hr_init(struct hl_device *hdev)
 {
     struct asic_fixed_properties *prop = &hdev->asic_prop;
 
-    return hl_mmu_hr_init(hdev, &hdev->mmu_priv.hr, prop->mmu_hop_table_size,
+    return hl_mmu_hr_init(hdev, &hdev->mmu_priv.hr, prop->pmmu.hop_table_size,
                 prop->mmu_pgt_size);
 }
 
@@ -65,7 +65,7 @@ static inline void hl_mmu_v2_hr_fini(struct hl_device *hdev)
 {
     struct asic_fixed_properties *prop = &hdev->asic_prop;
 
-    hl_mmu_hr_fini(hdev, &hdev->mmu_priv.hr, prop->mmu_hop_table_size);
+    hl_mmu_hr_fini(hdev, &hdev->mmu_priv.hr, prop->pmmu.hop_table_size);
 }
 
@@ -108,7 +108,7 @@ static void hl_mmu_v2_hr_ctx_fini(struct hl_ctx *ctx)
             "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
             pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
         hl_mmu_hr_free_hop_remove_pgt(pgt_info, &ctx->hdev->mmu_priv.hr,
-                ctx->hdev->asic_prop.mmu_hop_table_size);
+                ctx->hdev->asic_prop.pmmu.hop_table_size);
     }
 }
 
@@ -150,7 +150,7 @@ static int _hl_mmu_v2_hr_unmap(struct hl_ctx *ctx,
 
         curr_pte = *(u64 *) (uintptr_t) hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
                     hop_pte_phys_addr[i],
-                    ctx->hdev->asic_prop.mmu_hop_table_size);
+                    ctx->hdev->asic_prop.pmmu.hop_table_size);
 
         if ((i < hop_last) && (curr_pte & mmu_prop->last_mask)) {
             hop_last = i;
@@ -169,14 +169,14 @@ static int _hl_mmu_v2_hr_unmap(struct hl_ctx *ctx,
 
     for (i = hop_last ; i > 0 ; i--) {
         hl_mmu_hr_clear_pte(ctx, hops_pgt_info[i], hop_pte_phys_addr[i],
-                    ctx->hdev->asic_prop.mmu_hop_table_size);
+                    ctx->hdev->asic_prop.pmmu.hop_table_size);
 
         if (hl_mmu_hr_put_pte(ctx, hops_pgt_info[i], &ctx->hdev->mmu_priv.hr,
-                    ctx->hdev->asic_prop.mmu_hop_table_size))
+                    ctx->hdev->asic_prop.pmmu.hop_table_size))
             goto mapped;
     }
     hl_mmu_hr_clear_pte(ctx, hops_pgt_info[0], hop_pte_phys_addr[0],
-                ctx->hdev->asic_prop.mmu_hop_table_size);
+                ctx->hdev->asic_prop.pmmu.hop_table_size);
 
mapped:
     return 0;
@@ -255,7 +255,7 @@ static int _hl_mmu_v2_hr_map(struct hl_ctx *ctx,
                     scrambled_virt_addr);
         curr_pte = *(u64 *) (uintptr_t) hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
                     hop_pte_phys_addr[i],
-                    ctx->hdev->asic_prop.mmu_hop_table_size);
+                    ctx->hdev->asic_prop.pmmu.hop_table_size);
     }
 
     if (curr_pte & PAGE_PRESENT_MASK) {
@@ -268,7 +268,7 @@ static int _hl_mmu_v2_hr_map(struct hl_ctx *ctx,
             *(u64 *) (uintptr_t) hl_mmu_hr_pte_phys_to_virt(ctx,
                     hops_pgt_info[i], hop_pte_phys_addr[i],
-                    ctx->hdev->asic_prop.mmu_hop_table_size),
+                    ctx->hdev->asic_prop.pmmu.hop_table_size),
             hop_pte_phys_addr[i]);
         rc = -EINVAL;
         goto err;
@@ -279,7 +279,7 @@ static int _hl_mmu_v2_hr_map(struct hl_ctx *ctx,
 
     /* Write the PTEs */
     hl_mmu_hr_write_pte(ctx, hops_pgt_info[hop_last], hop_pte_phys_addr[hop_last], curr_pte,
-                ctx->hdev->asic_prop.mmu_hop_table_size);
+                ctx->hdev->asic_prop.pmmu.hop_table_size);
 
     /* for each new hop, add its address to the table of previous-hop */
     for (i = 1 ; i <= hop_last ; i++) {
@@ -287,7 +287,7 @@ static int _hl_mmu_v2_hr_map(struct hl_ctx *ctx,
         if (hop_new[i]) {
             curr_pte = (hops_pgt_info[i]->phys_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
             hl_mmu_hr_write_pte(ctx, hops_pgt_info[i - 1], hop_pte_phys_addr[i - 1],
-                    curr_pte, ctx->hdev->asic_prop.mmu_hop_table_size);
+                    curr_pte, ctx->hdev->asic_prop.pmmu.hop_table_size);
             if (i - 1)
                 hl_mmu_hr_get_pte(ctx, &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs,
                         hops_pgt_info[i - 1]->phys_addr);
@@ -303,7 +303,7 @@ err:
     for (i = 1 ; i <= hop_last ; i++)
         if (hop_new[i] && hops_pgt_info[i])
             hl_mmu_hr_free_hop_remove_pgt(hops_pgt_info[i], &ctx->hdev->mmu_priv.hr,
-                    ctx->hdev->asic_prop.mmu_hop_table_size);
+                    ctx->hdev->asic_prop.pmmu.hop_table_size);
 
     return rc;
 }
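
For orientation, the hl_mmu_dr_* helpers that the diff centralizes form a small reference-counted hop-table allocator: hl_mmu_dr_alloc_hop() hands out a hop page, hl_mmu_dr_get_pte() counts each PTE that points through it, and hl_mmu_dr_put_pte() frees the hop once its count drops to zero. The standalone model below is a hedged sketch of that lifecycle only — plain C with invented names, not the driver code, which additionally mirrors each hop in a shadow page and backs it with a gen_pool device allocation.

#include <stdio.h>
#include <stdlib.h>

struct hop {
    int num_of_ptes;    /* live PTEs pointing through this hop */
};

static struct hop *alloc_hop(void)
{
    return calloc(1, sizeof(struct hop));   /* starts with zero PTEs */
}

static void get_pte(struct hop *h)
{
    h->num_of_ptes++;
}

/* Returns the number of PTEs left; frees the hop when the count hits zero. */
static int put_pte(struct hop *h)
{
    int left = --h->num_of_ptes;    /* save before a possible free */

    if (!left)
        free(h);
    return left;
}

int main(void)
{
    struct hop *h = alloc_hop();

    get_pte(h);
    get_pte(h);
    printf("left after put: %d\n", put_pte(h)); /* 1 */
    printf("left after put: %d\n", put_pte(h)); /* 0, hop freed */
    return 0;
}
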