author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744
tree      a94efe259b9009378be6d90eb30d2b019d95c194 /arch/c6x/mm
parent    Initial commit.
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/c6x/mm')
-rw-r--r--  arch/c6x/mm/Makefile          6
-rw-r--r--  arch/c6x/mm/dma-coherent.c  173
-rw-r--r--  arch/c6x/mm/init.c           65
3 files changed, 244 insertions, 0 deletions
diff --git a/arch/c6x/mm/Makefile b/arch/c6x/mm/Makefile
new file mode 100644
index 000000000..19d05e972
--- /dev/null
+++ b/arch/c6x/mm/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the linux c6x-specific parts of the memory manager.
+#
+
+obj-y := init.o dma-coherent.o
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
new file mode 100644
index 000000000..03df07a83
--- /dev/null
+++ b/arch/c6x/mm/dma-coherent.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com>
+ *
+ * DMA uncached mapping support.
+ *
+ * Using code pulled from ARM
+ * Copyright (C) 2000-2004 Russell King
+ */
+#include <linux/slab.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dma-map-ops.h>
+#include <linux/memblock.h>
+
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+#include <asm/setup.h>
+
+/*
+ * DMA coherent memory management, can be redefined using the memdma=
+ * kernel command line
+ */
+
+/* none by default */
+static phys_addr_t dma_base;
+static u32 dma_size;
+static u32 dma_pages;
+
+static unsigned long *dma_bitmap;
+
+/* bitmap lock */
+static DEFINE_SPINLOCK(dma_lock);
+
+/*
+ * Return a DMA coherent and contiguous memory chunk from the DMA memory
+ */
+static inline u32 __alloc_dma_pages(int order)
+{
+	unsigned long flags;
+	u32 pos;
+
+	spin_lock_irqsave(&dma_lock, flags);
+	pos = bitmap_find_free_region(dma_bitmap, dma_pages, order);
+	spin_unlock_irqrestore(&dma_lock, flags);
+
+	return dma_base + (pos << PAGE_SHIFT);
+}
+
+static void __free_dma_pages(u32 addr, int order)
+{
+	unsigned long flags;
+	u32 pos = (addr - dma_base) >> PAGE_SHIFT;
+
+	if (addr < dma_base || (pos + (1 << order)) >= dma_pages) {
+		printk(KERN_ERR "%s: freeing outside range.\n", __func__);
+		BUG();
+	}
+
+	spin_lock_irqsave(&dma_lock, flags);
+	bitmap_release_region(dma_bitmap, pos, order);
+	spin_unlock_irqrestore(&dma_lock, flags);
+}
+
+/*
+ * Allocate DMA coherent memory space and return both the kernel
+ * virtual and DMA address for that space.
+ */
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+		gfp_t gfp, unsigned long attrs)
+{
+	void *ret;
+	u32 paddr;
+	int order;
+
+	if (!dma_size || !size)
+		return NULL;
+
+	order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);
+
+	paddr = __alloc_dma_pages(order);
+
+	if (handle)
+		*handle = paddr;
+
+	if (!paddr)
+		return NULL;
+
+	ret = phys_to_virt(paddr);
+	memset(ret, 0, 1 << order);
+	return ret;
+}
+
+/*
+ * Free DMA coherent memory as defined by the above mapping.
+ */
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, unsigned long attrs)
+{
+	int order;
+
+	if (!dma_size || !size)
+		return;
+
+	order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);
+
+	__free_dma_pages(virt_to_phys(vaddr), order);
+}
+
+/*
+ * Initialise the coherent DMA memory allocator using the given uncached region.
+ */
+void __init coherent_mem_init(phys_addr_t start, u32 size)
+{
+	if (!size)
+		return;
+
+	printk(KERN_INFO
+	       "Coherent memory (DMA) region start=0x%x size=0x%x\n",
+	       start, size);
+
+	dma_base = start;
+	dma_size = size;
+
+	/* allocate bitmap */
+	dma_pages = dma_size >> PAGE_SHIFT;
+	if (dma_size & (PAGE_SIZE - 1))
+		++dma_pages;
+
+	dma_bitmap = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
+				    sizeof(long));
+	if (!dma_bitmap)
+		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+		      __func__, BITS_TO_LONGS(dma_pages) * sizeof(long),
+		      sizeof(long));
+}
+
+static void c6x_dma_sync(phys_addr_t paddr, size_t size,
+			 enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+
+	switch (dir) {
+	case DMA_FROM_DEVICE:
+		L2_cache_block_invalidate(paddr, paddr + size);
+		break;
+	case DMA_TO_DEVICE:
+		L2_cache_block_writeback(paddr, paddr + size);
+		break;
+	case DMA_BIDIRECTIONAL:
+		L2_cache_block_writeback_invalidate(paddr, paddr + size);
+		break;
+	default:
+		break;
+	}
+}
+
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
+{
+	return c6x_dma_sync(paddr, size, dir);
+}
+
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
+{
+	return c6x_dma_sync(paddr, size, dir);
+}
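
Drivers do not call arch_dma_alloc()/arch_dma_free() or the arch_sync_dma_*() hooks above directly; they reach them through the generic kernel DMA API. A minimal usage sketch, assuming a hypothetical driver that already holds a valid struct device pointer (names such as my_dev, example_setup and example_teardown are illustrative, not part of this commit):

#include <linux/dma-mapping.h>

static void *cpu_buf;
static dma_addr_t dma_handle;

/* Obtain one page of uncached DMA memory; on a non-cache-coherent c6x
 * system this is expected to end up in arch_dma_alloc() above, carving
 * pages out of the memdma= region. */
static int example_setup(struct device *my_dev)
{
	cpu_buf = dma_alloc_coherent(my_dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu_buf)
		return -ENOMEM;
	return 0;
}

/* Return the buffer to the bitmap allocator via arch_dma_free(). */
static void example_teardown(struct device *my_dev)
{
	dma_free_coherent(my_dev, PAGE_SIZE, cpu_buf, dma_handle);
}

Streaming mappings (dma_map_single() and friends) take the other path: the DMA core calls arch_sync_dma_for_device()/arch_sync_dma_for_cpu() above, which write back or invalidate the L2 cache around each transfer.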
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
new file mode 100644
index 000000000..a97e51a3e
--- /dev/null
+++ b/arch/c6x/mm/init.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Port on Texas Instruments TMS320C6x architecture
+ *
+ * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
+ */
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/module.h>
+#include <linux/memblock.h>
+#ifdef CONFIG_BLK_DEV_RAM
+#include <linux/blkdev.h>
+#endif
+#include <linux/initrd.h>
+
+#include <asm/sections.h>
+#include <linux/uaccess.h>
+
+/*
+ * ZERO_PAGE is a special page that is used for zero-initialized
+ * data and COW.
+ */
+unsigned long empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
+/*
+ * paging_init() continues the virtual memory environment setup which
+ * was begun by the code in arch/head.S.
+ * The parameters are pointers to where to stick the starting and ending
+ * addresses of available kernel virtual memory.
+ */
+void __init paging_init(void)
+{
+	struct pglist_data *pgdat = NODE_DATA(0);
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
+
+	empty_zero_page = (unsigned long) memblock_alloc(PAGE_SIZE,
+							 PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
+
+	/*
+	 * Set up user data space
+	 */
+	set_fs(KERNEL_DS);
+
+	/*
+	 * Define zones
+	 */
+	max_zone_pfn[ZONE_NORMAL] = memory_end >> PAGE_SHIFT;
+
+	free_area_init(max_zone_pfn);
+}
+
+void __init mem_init(void)
+{
+	high_memory = (void *)(memory_end & PAGE_MASK);
+
+	/* this will put all memory onto the freelists */
+	memblock_free_all();
+
+	mem_init_print_info(NULL);
+}
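
The dma_base/dma_size pair consumed by coherent_mem_init() in dma-coherent.c above is taken from the memdma= kernel command line option mentioned in that file; the actual parser lives elsewhere under arch/c6x and is not part of this diff. Purely as an illustration, and assuming a memdma=size[@start] syntax, an early_param() handler could look like the following (all names and the example values are hypothetical):

#include <linux/init.h>
#include <linux/kernel.h>

static phys_addr_t memdma_start;	/* hypothetical names, for illustration */
static u32 memdma_size;

/* Parse a "memdma=8M@0xC2000000" style option with memparse(). */
static int __init early_memdma(char *p)
{
	if (!p || !*p)
		return 0;

	memdma_size = memparse(p, &p);			/* size, e.g. "8M" */
	if (*p == '@')
		memdma_start = memparse(p + 1, &p);	/* optional base address */

	return 0;
}
early_param("memdma", early_memdma);

/* During arch setup the parsed values would then be handed over:
 *	coherent_mem_init(memdma_start, memdma_size);
 */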