Diffstat
-rw-r--r--	arch/mips/mm/dma-noncoherent.c	145
1 file changed, 145 insertions(+), 0 deletions(-)
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
new file mode 100644
index 000000000..38d3d9143
--- /dev/null
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
+ * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
+ * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
+ */
+#include <linux/dma-direct.h>
+#include <linux/dma-map-ops.h>
+#include <linux/highmem.h>
+
+#include <asm/cache.h>
+#include <asm/cpu-type.h>
+#include <asm/dma-coherence.h>
+#include <asm/io.h>
+
+/*
+ * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
+ * fill random cachelines with stale data at any time, requiring an extra
+ * flush post-DMA.
+ *
+ * Warning on the terminology - Linux calls an uncached area coherent; MIPS
+ * terminology calls memory areas with hardware maintained coherency coherent.
+ *
+ * Note that the R14000 and R16000 should also be checked for in this
+ * condition. However, this function is only called on non-I/O-coherent
+ * systems and only the R10000 and R12000 are used in such systems, the
+ * SGI IP28 Indigo² resp. SGI IP32 aka O2.
+ */
+static inline bool cpu_needs_post_dma_flush(void)
+{
+	switch (boot_cpu_type()) {
+	case CPU_R10000:
+	case CPU_R12000:
+	case CPU_BMIPS5000:
+	case CPU_LOONGSON2EF:
+		return true;
+	default:
+		/*
+		 * Presence of MAARs suggests that the CPU supports
+		 * speculatively prefetching data, and therefore requires
+		 * the post-DMA flush/invalidate.
+		 */
+		return cpu_has_maar;
+	}
+}
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+	dma_cache_wback_inv((unsigned long)page_address(page), size);
+}
+
+void *arch_dma_set_uncached(void *addr, size_t size)
+{
+	return (void *)(__pa(addr) + UNCAC_BASE);
+}
+
+static inline void dma_sync_virt_for_device(void *addr, size_t size,
+		enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		dma_cache_wback((unsigned long)addr, size);
+		break;
+	case DMA_FROM_DEVICE:
+		dma_cache_inv((unsigned long)addr, size);
+		break;
+	case DMA_BIDIRECTIONAL:
+		dma_cache_wback_inv((unsigned long)addr, size);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
+		enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		break;
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		dma_cache_inv((unsigned long)addr, size);
+		break;
+	default:
+		BUG();
+	}
+}
+
+/*
+ * A single sg entry may refer to multiple physically contiguous pages. But
+ * we still need to process highmem pages individually. If highmem is not
+ * configured then the bulk of this loop gets optimized out.
+ */
+static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir, bool for_device)
+{
+	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
+	unsigned long offset = paddr & ~PAGE_MASK;
+	size_t left = size;
+
+	do {
+		size_t len = left;
+		void *addr;
+
+		if (PageHighMem(page)) {
+			if (offset + len > PAGE_SIZE)
+				len = PAGE_SIZE - offset;
+		}
+
+		addr = kmap_atomic(page);
+		if (for_device)
+			dma_sync_virt_for_device(addr + offset, len, dir);
+		else
+			dma_sync_virt_for_cpu(addr + offset, len, dir);
+		kunmap_atomic(addr);
+
+		offset = 0;
+		page++;
+		left -= len;
+	} while (left);
+}
+
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
+{
+	dma_sync_phys(paddr, size, dir, true);
+}
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
+{
+	if (cpu_needs_post_dma_flush())
+		dma_sync_phys(paddr, size, dir, false);
+}
+#endif
+
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+		const struct iommu_ops *iommu, bool coherent)
+{
+	dev->dma_coherent = coherent;
+}
+#endif
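For context on how these hooks are reached: on a non-coherent MIPS platform the generic dma-direct code calls arch_sync_dma_for_device() when a buffer is mapped for streaming DMA, and arch_sync_dma_for_cpu() when it is unmapped or synced back to the CPU. A minimal sketch of a hypothetical consumer follows; it is not part of this commit, and example_rx(), dev, buf and len are made-up names used only for illustration:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* Writes back/invalidates caches via arch_sync_dma_for_device(). */
	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the device with 'dma' and wait for completion ... */

	/*
	 * On CPUs where cpu_needs_post_dma_flush() is true, this ends up
	 * in arch_sync_dma_for_cpu(), invalidating any cachelines the CPU
	 * speculatively filled while the DMA was in flight.
	 */
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
	return 0;
}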
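Likewise for the coherent-allocation path: dma_alloc_coherent() flushes the freshly allocated pages through arch_dma_prep_coherent() and, where the architecture provides arch_dma_set_uncached(), hands back the uncached alias (__pa(addr) + UNCAC_BASE above), so no per-transfer sync calls are needed on that mapping. A sketch under the same caveat, with example_ring_alloc() a made-up name:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static void *example_ring_alloc(struct device *dev, size_t size,
				dma_addr_t *handle)
{
	/*
	 * The returned CPU pointer is uncached on this platform; both CPU
	 * and device see the same data without dma_sync_*() calls.
	 */
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}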