author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6
tree      b2d64bc10158fdd5497876388cd68142ca374ed3
parent    Initial commit.
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/arc/mm/mmap.c')
-rw-r--r-- arch/arc/mm/mmap.c | 96
1 file changed, 96 insertions(+), 0 deletions(-)
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
new file mode 100644
index 0000000000..fce5fa2b4f
--- /dev/null
+++ b/arch/arc/mm/mmap.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARC700 mmap
+ *
+ * (started from arm version - for VIPT alias handling)
+ *
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched/mm.h>
+
+#include <asm/cacheflush.h>
+
+#define COLOUR_ALIGN(addr, pgoff) \
+ ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
+ (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
+/*
+ * Ensure that shared mappings are correctly aligned to
+ * avoid aliasing issues with VIPT caches.
+ * We need to ensure that a specific page of an object is
+ * always mapped at the same offset modulo SHMLBA, so that
+ * every mapping of it lands in the same cache colour.
+ */
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int do_align = 0;
+ int aliasing = cache_is_vipt_aliasing();
+ struct vm_unmapped_area_info info;
+
+ /*
+ * We only need to do colour alignment if D cache aliases.
+ */
+ if (aliasing)
+ do_align = filp || (flags & MAP_SHARED);
+
+ /*
+ * We enforce the MAP_FIXED case, rejecting aliasing shared mappings.
+ */
+ if (flags & MAP_FIXED) {
+ if (aliasing && flags & MAP_SHARED &&
+ (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+ return -EINVAL;
+ return addr;
+ }
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+ return vm_unmapped_area(&info);
+}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_U_NONE,
+ [VM_READ] = PAGE_U_R,
+ [VM_WRITE] = PAGE_U_R,
+ [VM_WRITE | VM_READ] = PAGE_U_R,
+ [VM_EXEC] = PAGE_U_X_R,
+ [VM_EXEC | VM_READ] = PAGE_U_X_R,
+ [VM_EXEC | VM_WRITE] = PAGE_U_X_R,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_R,
+ [VM_SHARED] = PAGE_U_NONE,
+ [VM_SHARED | VM_READ] = PAGE_U_R,
+ [VM_SHARED | VM_WRITE] = PAGE_U_W_R,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_U_W_R,
+ [VM_SHARED | VM_EXEC] = PAGE_U_X_R,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_U_X_R,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_U_X_W_R,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_W_R
+};
+DECLARE_VM_GET_PAGE_PROT
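
For readers tracing the alignment logic: the COLOUR_ALIGN() macro rounds the
requested address up to an SHMLBA boundary and then adds the cache colour that
pgoff dictates, so every mapping of a given object page shares a colour. The
standalone sketch below exercises the same arithmetic; the SHMLBA and
PAGE_SHIFT values are illustrative assumptions (the real ones come from the
ARC cache geometry and kernel config), not values taken from this patch.

#include <stdio.h>

#define PAGE_SHIFT 12           /* assumed: 4 KiB pages */
#define SHMLBA 0x4000UL         /* assumed: 4 page colours */

/* Same arithmetic as COLOUR_ALIGN() in the patch above. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
        /* Round addr up to an SHMLBA boundary... */
        unsigned long base = (addr + SHMLBA - 1) & ~(SHMLBA - 1);
        /* ...then add the colour selected by pgoff. */
        return base + ((pgoff << PAGE_SHIFT) & (SHMLBA - 1));
}

int main(void)
{
        /* pgoff 3 selects colour 3: a hint of 0x10000 becomes 0x13000. */
        printf("0x%lx\n", colour_align(0x10000UL, 3));
        return 0;
}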
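
The protection_map[] table feeds vm_get_page_prot(), which
DECLARE_VM_GET_PAGE_PROT defines. In this kernel series the generic definition
is roughly the lookup below (paraphrased from include/linux/mm.h; treat it as
a sketch rather than the verbatim macro expansion):

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
        /* Index the table with the four permission-relevant VM_* bits. */
        return protection_map[vm_flags &
                              (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}

Note that the private writable entries (e.g. [VM_WRITE]) resolve to the
read-only PAGE_U_R: per the usual copy-on-write scheme, the write bit is only
set on individual PTEs once a fault makes the page genuinely writable.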