 mm/msync.c | 114 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 114 insertions(+), 0 deletions(-)
diff --git a/mm/msync.c b/mm/msync.c
new file mode 100644
index 000000000..ac4c9bfea
--- /dev/null
+++ b/mm/msync.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/mm/msync.c
+ *
+ * Copyright (C) 1994-1999 Linus Torvalds
+ */
+
+/*
+ * The msync() system call.
+ */
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/syscalls.h>
+#include <linux/sched.h>
+
+/*
+ * MS_SYNC syncs the entire file - including mappings.
+ *
+ * MS_ASYNC does not start I/O (it used to, up to 2.5.67).
+ * Nor does it mark the relevant pages dirty (it used to, up to 2.6.17).
+ * Now it doesn't do anything, since dirty pages are properly tracked.
+ *
+ * The application may now run fsync() to write out the dirty pages and
+ * wait on the writeout, checking the result. Or the application may run
+ * fadvise(FADV_DONTNEED) against the fd to start async writeout
+ * immediately. So by _not_ starting I/O in MS_ASYNC we provide complete
+ * flexibility to applications.
+ */
+SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
+{
+ unsigned long end;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int unmapped_error = 0;
+ int error = -EINVAL;
+
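+ /* Strip any tag bits (e.g. the arm64 tagged address ABI) from the address. */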
+ start = untagged_addr(start);
+
+ if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
+ goto out;
+ if (offset_in_page(start))
+ goto out;
+ if ((flags & MS_ASYNC) && (flags & MS_SYNC))
+ goto out;
+ error = -ENOMEM;
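+ /* Round len up to a whole number of pages; a wrapped end is rejected below. */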
+ len = (len + ~PAGE_MASK) & PAGE_MASK;
+ end = start + len;
+ if (end < start)
+ goto out;
+ error = 0;
+ if (end == start)
+ goto out;
+ /*
+ * If the interval [start,end) covers some unmapped address ranges,
+ * just ignore them, but return -ENOMEM at the end. Besides, if the
+ * flag is MS_ASYNC (w/o MS_INVALIDATE) the result would be -ENOMEM
+ * anyway and there is nothing left to do, so return immediately.
+ */
+ mmap_read_lock(mm);
+ vma = find_vma(mm, start);
+ for (;;) {
+ struct file *file;
+ loff_t fstart, fend;
+
+ /* Still start < end. */
+ error = -ENOMEM;
+ if (!vma)
+ goto out_unlock;
+ /* Here start < vma->vm_end. */
+ if (start < vma->vm_start) {
+ if (flags == MS_ASYNC)
+ goto out_unlock;
+ start = vma->vm_start;
+ if (start >= end)
+ goto out_unlock;
+ unmapped_error = -ENOMEM;
+ }
+ /* Here vma->vm_start <= start < vma->vm_end. */
+ if ((flags & MS_INVALIDATE) &&
+ (vma->vm_flags & VM_LOCKED)) {
+ error = -EBUSY;
+ goto out_unlock;
+ }
+ file = vma->vm_file;
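+ /* Translate the virtual range into byte offsets within the backing file. */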
+ fstart = (start - vma->vm_start) +
+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ fend = fstart + (min(end, vma->vm_end) - start) - 1;
+ start = vma->vm_end;
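+ /* For MS_SYNC on a shared file mapping, sync the range with the mmap lock dropped. */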
+ if ((flags & MS_SYNC) && file &&
+ (vma->vm_flags & VM_SHARED)) {
+ get_file(file);
+ mmap_read_unlock(mm);
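+ /* The final argument (1) requests datasync semantics for this range. */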
+ error = vfs_fsync_range(file, fstart, fend, 1);
+ fput(file);
+ if (error || start >= end)
+ goto out;
+ mmap_read_lock(mm);
+ vma = find_vma(mm, start);
+ } else {
+ if (start >= end) {
+ error = 0;
+ goto out_unlock;
+ }
+ vma = find_vma(mm, vma->vm_end);
+ }
+ }
+out_unlock:
+ mmap_read_unlock(mm);
+out:
+ return error ? : unmapped_error;
+}
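
A minimal userspace sketch of the pattern the comment above describes: dirty
a shared file mapping, note that MS_ASYNC is a no-op on modern kernels, then
either fsync() to write out and wait, or posix_fadvise(POSIX_FADV_DONTNEED)
to start async writeout. The file name and length are illustrative
assumptions, not part of this commit; the calls themselves are standard
Linux/POSIX APIs.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char *path = "msync-demo.bin";	/* assumed demo file */
	size_t len = 4096;			/* one page, for simplicity */

	int fd = open(path, O_RDWR | O_CREAT, 0644);
	if (fd < 0 || ftruncate(fd, len) < 0) {
		perror("open/ftruncate");
		return 1;
	}

	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memcpy(p, "hello", 5);	/* the store itself marks the pages dirty */

	/* As the comment in mm/msync.c explains, this neither starts I/O
	 * nor dirties pages on kernels after 2.6.17. */
	if (msync(p, len, MS_ASYNC) < 0)
		perror("msync");

	/* Option 1: write out the dirty pages, wait, and check the result. */
	if (fsync(fd) < 0)
		perror("fsync");

	/* Option 2: start async writeout immediately. */
	posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);

	munmap(p, len);
	close(fd);
	return 0;
}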