author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /arch/openrisc/kernel/dma.c
parent     Initial commit. (diff)
Adding upstream version 6.1.76. (upstream/6.1.76, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/openrisc/kernel/dma.c')
-rw-r--r--  arch/openrisc/kernel/dma.c  125
1 file changed, 125 insertions(+), 0 deletions(-)
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
new file mode 100644
index 000000000..b3edbb33b
--- /dev/null
+++ b/arch/openrisc/kernel/dma.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * DMA mapping callbacks...
+ */
+
+#include <linux/dma-map-ops.h>
+#include <linux/pagewalk.h>
+
+#include <asm/cpuinfo.h>
+#include <asm/spr_defs.h>
+#include <asm/tlbflush.h>
+
+static int
+page_set_nocache(pte_t *pte, unsigned long addr,
+		 unsigned long next, struct mm_walk *walk)
+{
+	unsigned long cl;
+	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
+
+	pte_val(*pte) |= _PAGE_CI;
+
+	/*
+	 * Flush the page out of the TLB so that the new page flags get
+	 * picked up next time there's an access
+	 */
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+	/* Flush page out of dcache */
+	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
+		mtspr(SPR_DCBFR, cl);
+
+	return 0;
+}
+
+static const struct mm_walk_ops set_nocache_walk_ops = {
+	.pte_entry		= page_set_nocache,
+};
+
+static int
+page_clear_nocache(pte_t *pte, unsigned long addr,
+		   unsigned long next, struct mm_walk *walk)
+{
+	pte_val(*pte) &= ~_PAGE_CI;
+
+	/*
+	 * Flush the page out of the TLB so that the new page flags get
+	 * picked up next time there's an access
+	 */
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+	return 0;
+}
+
+static const struct mm_walk_ops clear_nocache_walk_ops = {
+	.pte_entry		= page_clear_nocache,
+};
+
+void *arch_dma_set_uncached(void *cpu_addr, size_t size)
+{
+	unsigned long va = (unsigned long)cpu_addr;
+	int error;
+
+	/*
+	 * We need to iterate through the pages, clearing the dcache for
+	 * them and setting the cache-inhibit bit.
+	 */
+	mmap_write_lock(&init_mm);
+	error = walk_page_range_novma(&init_mm, va, va + size,
+			&set_nocache_walk_ops, NULL, NULL);
+	mmap_write_unlock(&init_mm);
+
+	if (error)
+		return ERR_PTR(error);
+	return cpu_addr;
+}
+
+void arch_dma_clear_uncached(void *cpu_addr, size_t size)
+{
+	unsigned long va = (unsigned long)cpu_addr;
+
+	mmap_write_lock(&init_mm);
+	/* walk_page_range shouldn't be able to fail here */
+	WARN_ON(walk_page_range_novma(&init_mm, va, va + size,
+			&clear_nocache_walk_ops, NULL, NULL));
+	mmap_write_unlock(&init_mm);
+}
+
+void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
+		enum dma_data_direction dir)
+{
+	unsigned long cl;
+	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
+
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		/* Flush the dcache for the requested range */
+		for (cl = addr; cl < addr + size;
+		     cl += cpuinfo->dcache_block_size)
+			mtspr(SPR_DCBFR, cl);
+		break;
+	case DMA_FROM_DEVICE:
+		/* Invalidate the dcache for the requested range */
+		for (cl = addr; cl < addr + size;
+		     cl += cpuinfo->dcache_block_size)
+			mtspr(SPR_DCBIR, cl);
+		break;
+	default:
+		/*
+		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
+		 * flush nor invalidate the cache here as the area will need
+		 * to be manually synced anyway.
+		 */
+		break;
+	}
+}
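
Note: the callbacks in this file are not called by drivers directly. OpenRISC selects ARCH_HAS_DMA_SET_UNCACHED, so the generic dma-direct allocator in kernel/dma/ invokes arch_dma_set_uncached() when a driver asks for a coherent buffer, and arch_dma_clear_uncached() when it is freed. A minimal sketch of that consumer side follows; the alloc_desc_ring()/free_desc_ring() helpers are invented for illustration and are not part of this patch, and dev is assumed to come from a real probe routine.

#include <linux/dma-mapping.h>

/*
 * Hypothetical consumer of the coherent path.  On OpenRISC the pointer
 * returned by dma_alloc_coherent() has been run through
 * arch_dma_set_uncached() above: the dcache lines were flushed
 * (SPR_DCBFR) and the PTEs now carry _PAGE_CI, so CPU accesses bypass
 * the cache.  The device itself uses *dma_handle.
 */
static void *alloc_desc_ring(struct device *dev, dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, PAGE_SIZE, dma_handle, GFP_KERNEL);
}

static void free_desc_ring(struct device *dev, void *ring, dma_addr_t dma_handle)
{
	/* Ends up in arch_dma_clear_uncached(), clearing _PAGE_CI again */
	dma_free_coherent(dev, PAGE_SIZE, ring, dma_handle);
}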
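
The streaming path is wired up the same way (via ARCH_HAS_SYNC_DMA_FOR_DEVICE): dma_map_single() lands in arch_sync_dma_for_device() above, which walks the buffer one dcache block at a time, flushing (SPR_DCBFR) for DMA_TO_DEVICE and invalidating (SPR_DCBIR) for DMA_FROM_DEVICE. Another hedged sketch, with send_buffer() invented for illustration:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/*
 * Hypothetical streaming-DMA user.  The DMA_TO_DEVICE mapping below is
 * what triggers the SPR_DCBFR flush loop in arch_sync_dma_for_device().
 */
static int send_buffer(struct device *dev, const void *data, size_t len)
{
	dma_addr_t dma;
	void *buf = kmemdup(data, len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		kfree(buf);
		return -ENOMEM;
	}

	/* ... hand "dma" to the device and wait for completion ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	kfree(buf);
	return 0;
}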