author	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-27 10:05:51 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-27 10:05:51 +0000
commit	5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree	a94efe259b9009378be6d90eb30d2b019d95c194 /fs/quota
parent	Initial commit. (diff)
Adding upstream version 5.10.209. (upstream/5.10.209)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'fs/quota')
-rw-r--r--	fs/quota/Kconfig	72
-rw-r--r--	fs/quota/Makefile	7
-rw-r--r--	fs/quota/compat.h	34
-rw-r--r--	fs/quota/dquot.c	3092
-rw-r--r--	fs/quota/kqid.c	133
-rw-r--r--	fs/quota/netlink.c	103
-rw-r--r--	fs/quota/quota.c	954
-rw-r--r--	fs/quota/quota_tree.c	787
-rw-r--r--	fs/quota/quota_tree.h	26
-rw-r--r--	fs/quota/quota_v1.c	242
-rw-r--r--	fs/quota/quota_v2.c	437
-rw-r--r--	fs/quota/quotaio_v1.h	36
-rw-r--r--	fs/quota/quotaio_v2.h	76
13 files changed, 5999 insertions, 0 deletions
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
new file mode 100644
index 000000000..b59cd172b
--- /dev/null
+++ b/fs/quota/Kconfig
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Quota configuration
+#
+
+config QUOTA
+ bool "Quota support"
+ select QUOTACTL
+ select SRCU
+ help
+ If you say Y here, you will be able to set per user limits for disk
+ usage (also called disk quotas). Currently, it works for the
+ ext2, ext3, ext4, jfs, ocfs2 and reiserfs file systems.
+ Note that gfs2 and xfs use their own quota system.
+ Ext3, ext4 and reiserfs also support journaled quotas for which
+ you don't need to run quotacheck(8) after an unclean shutdown.
+ For further details, read the Quota mini-HOWTO, available from
+ <https://www.tldp.org/docs.html#howto>, or the documentation provided
+ with the quota tools. Quota support is probably only useful for
+ multi-user systems. If unsure, say N.
+
+config QUOTA_NETLINK_INTERFACE
+ bool "Report quota messages through netlink interface"
+ depends on QUOTACTL && NET
+ help
+ If you say Y here, quota warnings (about exceeding softlimit, reaching
+ hardlimit, etc.) will be reported through netlink interface. If unsure,
+ say Y.
+
+config PRINT_QUOTA_WARNING
+ bool "Print quota warnings to console (OBSOLETE)"
+ depends on QUOTA
+ default y
+ help
+ If you say Y here, quota warnings (about exceeding softlimit, reaching
+ hardlimit, etc.) will be printed to the process' controlling terminal.
+ Note that this behavior is currently deprecated and may go away in
+ future. Please use notification via netlink socket instead.
+
+config QUOTA_DEBUG
+ bool "Additional quota sanity checks"
+ depends on QUOTA
+ default n
+ help
+ If you say Y here, quota subsystem will perform some additional
+ sanity checks of quota internal structures. If unsure, say N.
+
+# Generic support for tree structured quota files. Selected when needed.
+config QUOTA_TREE
+ tristate
+
+config QFMT_V1
+ tristate "Old quota format support"
+ depends on QUOTA
+ help
+ This quota format was (is) used by kernels earlier than 2.4.22. If
+ you have quota working and you don't want to convert to new quota
+ format say Y here.
+
+config QFMT_V2
+ tristate "Quota format vfsv0 and vfsv1 support"
+ depends on QUOTA
+ select QUOTA_TREE
+ help
+ This config option enables kernel support for vfsv0 and vfsv1 quota
+ formats. Both these formats support 32-bit UIDs/GIDs and vfsv1 format
+ also supports 64-bit inode and block quota limits. If you need this
+ functionality say Y here.
+
+config QUOTACTL
+ bool
+ default n
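
These options become CONFIG_* preprocessor macros and Makefile variables (the Makefile that follows keys its object list off them). A minimal sketch, not part of this patch, of the gating pattern dquot.c uses further down for CONFIG_QUOTA_DEBUG (the helper name is hypothetical):

/* Illustrative only: extra sanity checking compiled in when
 * CONFIG_QUOTA_DEBUG is set, compiled out otherwise. */
#include <linux/quota.h>

#ifdef CONFIG_QUOTA_DEBUG
static inline void example_assert_unlisted(struct dquot *dquot)
{
	BUG_ON(!list_empty(&dquot->dq_free));
}
#else
static inline void example_assert_unlisted(struct dquot *dquot)
{
}
#endif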
diff --git a/fs/quota/Makefile b/fs/quota/Makefile
new file mode 100644
index 000000000..9160639da
--- /dev/null
+++ b/fs/quota/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QUOTA) += dquot.o
+obj-$(CONFIG_QFMT_V1) += quota_v1.o
+obj-$(CONFIG_QFMT_V2) += quota_v2.o
+obj-$(CONFIG_QUOTA_TREE) += quota_tree.o
+obj-$(CONFIG_QUOTACTL) += quota.o kqid.o
+obj-$(CONFIG_QUOTA_NETLINK_INTERFACE) += netlink.o
diff --git a/fs/quota/compat.h b/fs/quota/compat.h
new file mode 100644
index 000000000..ef7d1e12d
--- /dev/null
+++ b/fs/quota/compat.h
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compat.h>
+
+struct compat_if_dqblk {
+ compat_u64 dqb_bhardlimit;
+ compat_u64 dqb_bsoftlimit;
+ compat_u64 dqb_curspace;
+ compat_u64 dqb_ihardlimit;
+ compat_u64 dqb_isoftlimit;
+ compat_u64 dqb_curinodes;
+ compat_u64 dqb_btime;
+ compat_u64 dqb_itime;
+ compat_uint_t dqb_valid;
+};
+
+struct compat_fs_qfilestat {
+ compat_u64 dqb_bhardlimit;
+ compat_u64 qfs_nblks;
+ compat_uint_t qfs_nextents;
+};
+
+struct compat_fs_quota_stat {
+ __s8 qs_version;
+ __u16 qs_flags;
+ __s8 qs_pad;
+ struct compat_fs_qfilestat qs_uquota;
+ struct compat_fs_qfilestat qs_gquota;
+ compat_uint_t qs_incoredqs;
+ compat_int_t qs_btimelimit;
+ compat_int_t qs_itimelimit;
+ compat_int_t qs_rtbtimelimit;
+ __u16 qs_bwarnlimit;
+ __u16 qs_iwarnlimit;
+};
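
These layouts exist because compat_u64 is only 4-byte aligned on some 32-bit ABIs (e.g. i386), so the structures seen by 32-bit userspace are laid out differently from the native ones. A minimal sketch, not part of this patch, of the field-by-field copy a compat quotactl path has to do (the helper name is an assumption; the native field names follow <uapi/linux/quota.h>):

/* Illustrative only: copy a compat layout into the native struct if_dqblk
 * so the rest of the quotactl code can work with a single layout. */
static void example_compat_to_native(struct if_dqblk *dst,
				     const struct compat_if_dqblk *src)
{
	dst->dqb_bhardlimit = src->dqb_bhardlimit;
	dst->dqb_bsoftlimit = src->dqb_bsoftlimit;
	dst->dqb_curspace   = src->dqb_curspace;
	dst->dqb_ihardlimit = src->dqb_ihardlimit;
	dst->dqb_isoftlimit = src->dqb_isoftlimit;
	dst->dqb_curinodes  = src->dqb_curinodes;
	dst->dqb_btime      = src->dqb_btime;
	dst->dqb_itime      = src->dqb_itime;
	dst->dqb_valid      = src->dqb_valid;
}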
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
new file mode 100644
index 000000000..4bb4b4b79
--- /dev/null
+++ b/fs/quota/dquot.c
@@ -0,0 +1,3092 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implementation of the diskquota system for the LINUX operating system. QUOTA
+ * is implemented using the BSD system call interface as the means of
+ * communication with the user level. This file contains the generic routines
+ * called by the different filesystems on allocation of an inode or block.
+ * These routines take care of the administration needed to have a consistent
+ * diskquota tracking system. The ideas of both user and group quotas are based
+ * on the Melbourne quota system as used on BSD derived systems. The internal
+ * implementation is based on one of the several variants of the LINUX
+ * inode-subsystem with added complexity of the diskquota system.
+ *
+ * Author: Marco van Wieringen <mvw@planets.elm.net>
+ *
+ * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
+ *
+ * Revised list management to avoid races
+ * -- Bill Hawes, <whawes@star.net>, 9/98
+ *
+ * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
+ * As the consequence the locking was moved from dquot_decr_...(),
+ * dquot_incr_...() to calling functions.
+ * invalidate_dquots() now writes modified dquots.
+ * Serialized quota_off() and quota_on() for mount point.
+ * Fixed a few bugs in grow_dquots().
+ * Fixed deadlock in write_dquot() - we no longer account quotas on
+ * quota files
+ * remove_dquot_ref() moved to inode.c - it now traverses through inodes
+ * add_dquot_ref() restarts after blocking
+ * Added check for bogus uid and fixed check for group in quotactl.
+ * Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
+ *
+ * Used struct list_head instead of own list struct
+ * Invalidation of referenced dquots is no longer possible
+ * Improved free_dquots list management
+ * Quota and i_blocks are now updated in one place to avoid races
+ * Warnings are now delayed so we won't block in critical section
+ * Write updated not to require dquot lock
+ * Jan Kara, <jack@suse.cz>, 9/2000
+ *
+ * Added dynamic quota structure allocation
+ * Jan Kara <jack@suse.cz> 12/2000
+ *
+ * Rewritten quota interface. Implemented new quota format and
+ * formats registering.
+ * Jan Kara, <jack@suse.cz>, 2001,2002
+ *
+ * New SMP locking.
+ * Jan Kara, <jack@suse.cz>, 10/2002
+ *
+ * Added journalled quota support, fix lock inversion problems
+ * Jan Kara, <jack@suse.cz>, 2003,2004
+ *
+ * (C) Copyright 1994 - 1997 Marco van Wieringen
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/mm.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/stat.h>
+#include <linux/tty.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/security.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/kmod.h>
+#include <linux/namei.h>
+#include <linux/capability.h>
+#include <linux/quotaops.h>
+#include <linux/blkdev.h>
+#include <linux/sched/mm.h>
+#include "../internal.h" /* ugh */
+
+#include <linux/uaccess.h>
+
+/*
+ * There are five quota SMP locks:
+ * * dq_list_lock protects all lists with quotas and quota formats.
+ * * dquot->dq_dqb_lock protects data from dq_dqb
+ * * inode->i_lock protects inode->i_blocks, i_bytes and also guards
+ * consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that
+ * dquot_transfer() can stabilize amount it transfers
+ * * dq_data_lock protects mem_dqinfo structures and modifications of dquot
+ * pointers in the inode
+ * * dq_state_lock protects modifications of quota state (on quotaon and
+ * quotaoff) and readers who care about latest values take it as well.
+ *
+ * The spinlock ordering is hence:
+ * dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock,
+ * dq_list_lock > dq_state_lock
+ *
+ * Note that some things (e.g. sb pointer, type, id) don't change during
+ * the life of the dquot structure and so needn't be protected by a lock
+ *
+ * Operations accessing dquots via inode pointers are protected by dquot_srcu.
+ * Reading such a pointer needs srcu_read_lock(&dquot_srcu), and
+ * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
+ * inode and before dropping dquot references to avoid use of dquots after
+ * they are freed. dq_data_lock is used to serialize the pointer setting and
+ * clearing operations.
+ * Special care needs to be taken about S_NOQUOTA inode flag (marking that
+ * inode is a quota file). Functions adding pointers from inode to dquots have
+ * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
+ * have to do all pointer modifications before dropping dq_data_lock. This makes
+ * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
+ * then drops all pointers to dquots from an inode.
+ *
+ * Each dquot has its dq_lock mutex. Dquot is locked when it is being read to
+ * memory (or space for it is being allocated) on the first dqget(), when it is
+ * being written out, and when it is being released on the last dqput(). The
+ * allocation and release operations are serialized by the dq_lock and by
+ * checking the use count in dquot_release().
+ *
+ * Lock ordering (including related VFS locks) is the following:
+ * s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
+ */
+
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
+EXPORT_SYMBOL(dq_data_lock);
+DEFINE_STATIC_SRCU(dquot_srcu);
+
+static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);
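
A minimal sketch, not part of this patch, of a reader that follows the rules in the comment above: inode-to-dquot pointers are dereferenced only inside dquot_srcu, and dq_dqb is read only under dq_dqb_lock. The function name is hypothetical; ->get_dquots() is the superblock accessor used later in this file.

/* Illustrative only: read the space currently charged to an inode's
 * dquot of the given type, respecting the locking rules above. */
static qsize_t example_read_curspace(struct inode *inode, int type)
{
	struct dquot **dquots;
	struct dquot *dquot;
	qsize_t space = 0;
	int idx;

	idx = srcu_read_lock(&dquot_srcu);
	dquots = inode->i_sb->s_op->get_dquots(inode);
	dquot = dquots[type];
	if (dquot) {
		spin_lock(&dquot->dq_dqb_lock);	/* protects dq_dqb */
		space = dquot->dq_dqb.dqb_curspace;
		spin_unlock(&dquot->dq_dqb_lock);
	}
	srcu_read_unlock(&dquot_srcu, idx);
	return space;
}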
+
+void __quota_error(struct super_block *sb, const char *func,
+ const char *fmt, ...)
+{
+ if (printk_ratelimit()) {
+ va_list args;
+ struct va_format vaf;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
+ sb->s_id, func, &vaf);
+
+ va_end(args);
+ }
+}
+EXPORT_SYMBOL(__quota_error);
+
+#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
+static char *quotatypes[] = INITQFNAMES;
+#endif
+static struct quota_format_type *quota_formats; /* List of registered formats */
+static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
+
+/* SLAB cache for dquot structures */
+static struct kmem_cache *dquot_cachep;
+
+int register_quota_format(struct quota_format_type *fmt)
+{
+ spin_lock(&dq_list_lock);
+ fmt->qf_next = quota_formats;
+ quota_formats = fmt;
+ spin_unlock(&dq_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(register_quota_format);
+
+void unregister_quota_format(struct quota_format_type *fmt)
+{
+ struct quota_format_type **actqf;
+
+ spin_lock(&dq_list_lock);
+ for (actqf = &quota_formats; *actqf && *actqf != fmt;
+ actqf = &(*actqf)->qf_next)
+ ;
+ if (*actqf)
+ *actqf = (*actqf)->qf_next;
+ spin_unlock(&dq_list_lock);
+}
+EXPORT_SYMBOL(unregister_quota_format);
+
+static struct quota_format_type *find_quota_format(int id)
+{
+ struct quota_format_type *actqf;
+
+ spin_lock(&dq_list_lock);
+ for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
+ actqf = actqf->qf_next)
+ ;
+ if (!actqf || !try_module_get(actqf->qf_owner)) {
+ int qm;
+
+ spin_unlock(&dq_list_lock);
+
+ for (qm = 0; module_names[qm].qm_fmt_id &&
+ module_names[qm].qm_fmt_id != id; qm++)
+ ;
+ if (!module_names[qm].qm_fmt_id ||
+ request_module(module_names[qm].qm_mod_name))
+ return NULL;
+
+ spin_lock(&dq_list_lock);
+ for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
+ actqf = actqf->qf_next)
+ ;
+ if (actqf && !try_module_get(actqf->qf_owner))
+ actqf = NULL;
+ }
+ spin_unlock(&dq_list_lock);
+ return actqf;
+}
+
+static void put_quota_format(struct quota_format_type *fmt)
+{
+ module_put(fmt->qf_owner);
+}
+
+/*
+ * Dquot List Management:
+ * The quota code uses five lists for dquot management: the inuse_list,
+ * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
+ * A single dquot structure may be on some of those lists, depending on
+ * its current state.
+ *
+ * All dquots are placed at the end of inuse_list when first created, and this
+ * list is used for the invalidate operation, which must look at every dquot.
+ *
+ * When the last reference of a dquot is dropped, the dquot is added to
+ * releasing_dquots. We then queue a work item which will call
+ * synchronize_srcu() and after that perform the final cleanup of all the
+ * dquots on the list. Each cleaned up dquot is moved to free_dquots list.
+ * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot
+ * struct.
+ *
+ * Unused and cleaned up dquots are in the free_dquots list and this list is
+ * searched whenever we need an available dquot. Dquots are removed from the
+ * list as soon as they are used again and dqstats.free_dquots gives the number
+ * of dquots on the list. When dquot is invalidated it's completely released
+ * from memory.
+ *
+ * Dirty dquots are added to the dqi_dirty_list of quota_info when marked
+ * dirty, and this list is searched when writing dirty dquots back to the
+ * quota file. Note that some filesystems do dirty dquot tracking on their
+ * own (e.g. in a journal) and thus don't use dqi_dirty_list.
+ *
+ * Dquots with a specific identity (device, type and id) are placed on
+ * one of the dquot_hash[] hash chains. This provides an efficient search
+ * mechanism to locate a specific dquot.
+ */
+
+static LIST_HEAD(inuse_list);
+static LIST_HEAD(free_dquots);
+static LIST_HEAD(releasing_dquots);
+static unsigned int dq_hash_bits, dq_hash_mask;
+static struct hlist_head *dquot_hash;
+
+struct dqstats dqstats;
+EXPORT_SYMBOL(dqstats);
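
The list populations described above are tracked in dqstats through per-CPU counters (DQST_ALLOC_DQUOTS for inuse_list, DQST_FREE_DQUOTS for free_dquots). A minimal sketch, not part of this patch, of sampling them (the function is hypothetical):

/* Illustrative only: sum the per-CPU counters to get current totals. */
static void example_dump_dqstats(void)
{
	pr_info("dquots allocated: %lld, on free list: %lld\n",
		percpu_counter_sum(&dqstats.counter[DQST_ALLOC_DQUOTS]),
		percpu_counter_sum(&dqstats.counter[DQST_FREE_DQUOTS]));
}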
+
+static qsize_t inode_get_rsv_space(struct inode *inode);
+static qsize_t __inode_get_rsv_space(struct inode *inode);
+static int __dquot_initialize(struct inode *inode, int type);
+
+static void quota_release_workfn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);
+
+static inline unsigned int
+hashfn(const struct super_block *sb, struct kqid qid)
+{
+ unsigned int id = from_kqid(&init_user_ns, qid);
+ int type = qid.type;
+ unsigned long tmp;
+
+ tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
+ return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
+}
+
+/*
+ * Following list functions expect dq_list_lock to be held
+ */
+static inline void insert_dquot_hash(struct dquot *dquot)
+{
+ struct hlist_head *head;
+ head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
+ hlist_add_head(&dquot->dq_hash, head);
+}
+
+static inline void remove_dquot_hash(struct dquot *dquot)
+{
+ hlist_del_init(&dquot->dq_hash);
+}
+
+static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
+ struct kqid qid)
+{
+ struct hlist_node *node;
+ struct dquot *dquot;
+
+ hlist_for_each (node, dquot_hash+hashent) {
+ dquot = hlist_entry(node, struct dquot, dq_hash);
+ if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
+ return dquot;
+ }
+ return NULL;
+}
+
+/* Add a dquot to the tail of the free list */
+static inline void put_dquot_last(struct dquot *dquot)
+{
+ list_add_tail(&dquot->dq_free, &free_dquots);
+ dqstats_inc(DQST_FREE_DQUOTS);
+}
+
+static inline void put_releasing_dquots(struct dquot *dquot)
+{
+ list_add_tail(&dquot->dq_free, &releasing_dquots);
+ set_bit(DQ_RELEASING_B, &dquot->dq_flags);
+}
+
+static inline void remove_free_dquot(struct dquot *dquot)
+{
+ if (list_empty(&dquot->dq_free))
+ return;
+ list_del_init(&dquot->dq_free);
+ if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
+ dqstats_dec(DQST_FREE_DQUOTS);
+ else
+ clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
+}
+
+static inline void put_inuse(struct dquot *dquot)
+{
+ /* We add to the back of the inuse list so we don't have to restart
+ * a traversal of this list when we block */
+ list_add_tail(&dquot->dq_inuse, &inuse_list);
+ dqstats_inc(DQST_ALLOC_DQUOTS);
+}
+
+static inline void remove_inuse(struct dquot *dquot)
+{
+ dqstats_dec(DQST_ALLOC_DQUOTS);
+ list_del(&dquot->dq_inuse);
+}
+/*
+ * End of list functions needing dq_list_lock
+ */
+
+static void wait_on_dquot(struct dquot *dquot)
+{
+ mutex_lock(&dquot->dq_lock);
+ mutex_unlock(&dquot->dq_lock);
+}
+
+static inline int dquot_active(struct dquot *dquot)
+{
+ return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+}
+
+static inline int dquot_dirty(struct dquot *dquot)
+{
+ return test_bit(DQ_MOD_B, &dquot->dq_flags);
+}
+
+static inline int mark_dquot_dirty(struct dquot *dquot)
+{
+ return dquot->dq_sb->dq_op->mark_dirty(dquot);
+}
+
+/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
+int dquot_mark_dquot_dirty(struct dquot *dquot)
+{
+ int ret = 1;
+
+ if (!dquot_active(dquot))
+ return 0;
+
+ if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
+ return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);
+
+ /* If quota is dirty already, we don't have to acquire dq_list_lock */
+ if (dquot_dirty(dquot))
+ return 1;
+
+ spin_lock(&dq_list_lock);
+ if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
+ list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
+ info[dquot->dq_id.type].dqi_dirty_list);
+ ret = 0;
+ }
+ spin_unlock(&dq_list_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dquot_mark_dquot_dirty);
+
+/* Dirtify all the dquots - this can block when journalling */
+static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
+{
+ int ret, err, cnt;
+
+ ret = err = 0;
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (dquot[cnt])
+ /* Even in case of error we have to continue */
+ ret = mark_dquot_dirty(dquot[cnt]);
+ if (!err)
+ err = ret;
+ }
+ return err;
+}
+
+static inline void dqput_all(struct dquot **dquot)
+{
+ unsigned int cnt;
+
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ dqput(dquot[cnt]);
+}
+
+static inline int clear_dquot_dirty(struct dquot *dquot)
+{
+ if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
+ return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);
+
+ spin_lock(&dq_list_lock);
+ if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
+ spin_unlock(&dq_list_lock);
+ return 0;
+ }
+ list_del_init(&dquot->dq_dirty);
+ spin_unlock(&dq_list_lock);
+ return 1;
+}
+
+void mark_info_dirty(struct super_block *sb, int type)
+{
+ spin_lock(&dq_data_lock);
+ sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
+ spin_unlock(&dq_data_lock);
+}
+EXPORT_SYMBOL(mark_info_dirty);
+
+/*
+ * Read dquot from disk and alloc space for it
+ */
+
+int dquot_acquire(struct dquot *dquot)
+{
+ int ret = 0, ret2 = 0;
+ unsigned int memalloc;
+ struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+
+ mutex_lock(&dquot->dq_lock);
+ memalloc = memalloc_nofs_save();
+ if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
+ ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
+ if (ret < 0)
+ goto out_iolock;
+ }
+ /* Make sure flags update is visible after dquot has been filled */
+ smp_mb__before_atomic();
+ set_bit(DQ_READ_B, &dquot->dq_flags);
+ /* Instantiate dquot if needed */
+ if (!dquot_active(dquot) && !dquot->dq_off) {
+ ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
+ /* Write the info if needed */
+ if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
+ ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
+ dquot->dq_sb, dquot->dq_id.type);
+ }
+ if (ret < 0)
+ goto out_iolock;
+ if (ret2 < 0) {
+ ret = ret2;
+ goto out_iolock;
+ }
+ }
+ /*
+ * Make sure flags update is visible after on-disk struct has been
+ * allocated. Paired with smp_rmb() in dqget().
+ */
+ smp_mb__before_atomic();
+ set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+out_iolock:
+ memalloc_nofs_restore(memalloc);
+ mutex_unlock(&dquot->dq_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dquot_acquire);
+
+/*
+ * Write dquot to disk
+ */
+int dquot_commit(struct dquot *dquot)
+{
+ int ret = 0;
+ unsigned int memalloc;
+ struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+
+ mutex_lock(&dquot->dq_lock);
+ memalloc = memalloc_nofs_save();
+ if (!clear_dquot_dirty(dquot))
+ goto out_lock;
+ /* A dquot can be inactive only if there was an error during read/init,
+ * so we had better not write it */
+ if (dquot_active(dquot))
+ ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
+ else
+ ret = -EIO;
+out_lock:
+ memalloc_nofs_restore(memalloc);
+ mutex_unlock(&dquot->dq_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dquot_commit);
+
+/*
+ * Release dquot
+ */
+int dquot_release(struct dquot *dquot)
+{
+ int ret = 0, ret2 = 0;
+ unsigned int memalloc;
+ struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+
+ mutex_lock(&dquot->dq_lock);
+ memalloc = memalloc_nofs_save();
+ /* Check whether we are not racing with some other dqget() */
+ if (dquot_is_busy(dquot))
+ goto out_dqlock;
+ if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
+ ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
+ /* Write the info */
+ if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
+ ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
+ dquot->dq_sb, dquot->dq_id.type);
+ }
+ if (ret >= 0)
+ ret = ret2;
+ }
+ clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+out_dqlock:
+ memalloc_nofs_restore(memalloc);
+ mutex_unlock(&dquot->dq_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dquot_release);
+
+void dquot_destroy(struct dquot *dquot)
+{
+ kmem_cache_free(dquot_cachep, dquot);
+}
+EXPORT_SYMBOL(dquot_destroy);
+
+static inline void do_destroy_dquot(struct dquot *dquot)
+{
+ dquot->dq_sb->dq_op->destroy_dquot(dquot);
+}
+
+/* Invalidate all dquots on the list. Note that this function is called after
+ * quota is disabled and pointers from inodes removed so there cannot be new
+ * quota users. There can still be some users of quotas due to inodes being
+ * just deleted or pruned by prune_icache() (those are not attached to any
+ * list) or parallel quotactl call. We have to wait for such users.
+ */
+static void invalidate_dquots(struct super_block *sb, int type)
+{
+ struct dquot *dquot, *tmp;
+
+restart:
+ flush_delayed_work(&quota_release_work);
+
+ spin_lock(&dq_list_lock);
+ list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
+ if (dquot->dq_sb != sb)
+ continue;
+ if (dquot->dq_id.type != type)
+ continue;
+ /* Wait for dquot users */
+ if (atomic_read(&dquot->dq_count)) {
+ atomic_inc(&dquot->dq_count);
+ spin_unlock(&dq_list_lock);
+ /*
+ * Once dqput() wakes us up, we know it's time to free
+ * the dquot.
+ * IMPORTANT: we rely on the fact that there is always
+ * at most one process waiting for dquot to free.
+ * Otherwise dq_count would be > 1 and we would never
+ * wake up.
+ */
+ wait_event(dquot_ref_wq,
+ atomic_read(&dquot->dq_count) == 1);
+ dqput(dquot);
+ /* At this moment the dquot need not exist (it could have been
+ * reclaimed by prune_dqcache()). Hence we must
+ * restart. */
+ goto restart;
+ }
+ /*
+ * The last user already dropped its reference but dquot didn't
+ * get fully cleaned up yet. Restart the scan which flushes the
+ * work cleaning up released dquots.
+ */
+ if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
+ spin_unlock(&dq_list_lock);
+ goto restart;
+ }
+ /*
+ * Quota now has no users and it has been written on last
+ * dqput()
+ */
+ remove_dquot_hash(dquot);
+ remove_free_dquot(dquot);
+ remove_inuse(dquot);
+ do_destroy_dquot(dquot);
+ }
+ spin_unlock(&dq_list_lock);
+}
+
+/* Call callback for every active dquot on given filesystem */
+int dquot_scan_active(struct super_block *sb,
+ int (*fn)(struct dquot *dquot, unsigned long priv),
+ unsigned long priv)
+{
+ struct dquot *dquot, *old_dquot = NULL;
+ int ret = 0;
+
+ WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
+
+ spin_lock(&dq_list_lock);
+ list_for_each_entry(dquot, &inuse_list, dq_inuse) {
+ if (!dquot_active(dquot))
+ continue;
+ if (dquot->dq_sb != sb)
+ continue;
+ /* Now we have active dquot so we can just increase use count */
+ atomic_inc(&dquot->dq_count);
+ spin_unlock(&dq_list_lock);
+ dqput(old_dquot);
+ old_dquot = dquot;
+ /*
+ * ->release_dquot() can be racing with us. Our reference
+ * protects us from new calls to it so just wait for any
+ * outstanding call and recheck the DQ_ACTIVE_B after that.
+ */
+ wait_on_dquot(dquot);
+ if (dquot_active(dquot)) {
+ ret = fn(dquot, priv);
+ if (ret < 0)
+ goto out;
+ }
+ spin_lock(&dq_list_lock);
+ /* We are safe to continue now because our dquot could not
+ * be moved out of the inuse list while we hold the reference */
+ }
+ spin_unlock(&dq_list_lock);
+out:
+ dqput(old_dquot);
+ return ret;
+}
+EXPORT_SYMBOL(dquot_scan_active);
+
+static inline int dquot_write_dquot(struct dquot *dquot)
+{
+ int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
+ if (ret < 0) {
+ quota_error(dquot->dq_sb, "Can't write quota structure "
+ "(error %d). Quota may get out of sync!", ret);
+ /* Clear dirty bit anyway to avoid infinite loop. */
+ clear_dquot_dirty(dquot);
+ }
+ return ret;
+}
+
+/* Write all dquot structures to quota files */
+int dquot_writeback_dquots(struct super_block *sb, int type)
+{
+ struct list_head dirty;
+ struct dquot *dquot;
+ struct quota_info *dqopt = sb_dqopt(sb);
+ int cnt;
+ int err, ret = 0;
+
+ WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
+
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (type != -1 && cnt != type)
+ continue;
+ if (!sb_has_quota_active(sb, cnt))
+ continue;
+ spin_lock(&dq_list_lock);
+ /* Move list away to avoid livelock. */
+ list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
+ while (!list_empty(&dirty)) {
+ dquot = list_first_entry(&dirty, struct dquot,
+ dq_dirty);
+
+ WARN_ON(!dquot_active(dquot));
+ /* If the dquot is releasing we should not touch it */
+ if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
+ spin_unlock(&dq_list_lock);
+ flush_delayed_work(&quota_release_work);
+ spin_lock(&dq_list_lock);
+ continue;
+ }
+
+ /* Now we have an active dquot to which someone holds
+ * a reference, so we can safely just increase the
+ * use count */
+ dqgrab(dquot);
+ spin_unlock(&dq_list_lock);
+ err = dquot_write_dquot(dquot);
+ if (err && !ret)
+ ret = err;
+ dqput(dquot);
+ spin_lock(&dq_list_lock);
+ }
+ spin_unlock(&dq_list_lock);
+ }
+
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
+ && info_dirty(&dqopt->info[cnt]))
+ sb->dq_op->write_info(sb, cnt);
+ dqstats_inc(DQST_SYNCS);
+
+ return ret;
+}
+EXPORT_SYMBOL(dquot_writeback_dquots);
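
Filesystems typically call dquot_writeback_dquots() from their ->sync_fs implementation so that dirty dquots reach the quota files together with the rest of the metadata. A minimal sketch, not part of this patch (the function is hypothetical):

/* Illustrative only: flush dirty dquots of all types (-1), then the
 * filesystem's own metadata. */
static int example_sync_fs(struct super_block *sb, int wait)
{
	int err = dquot_writeback_dquots(sb, -1);

	if (err < 0)
		return err;
	/* ... write back filesystem-private metadata here ... */
	return 0;
}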
+
+/* Write all dquot structures to disk and make them visible from userspace */
+int dquot_quota_sync(struct super_block *sb, int type)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+ int cnt;
+ int ret;
+
+ ret = dquot_writeback_dquots(sb, type);
+ if (ret)
+ return ret;
+ if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
+ return 0;
+
+ /* This is neither very clever nor fast, but currently I don't know of
+ * any other simple way of getting quota data to disk, and we must get
+ * it there for it to be visible to userspace... */
+ if (sb->s_op->sync_fs) {
+ ret = sb->s_op->sync_fs(sb, 1);
+ if (ret)
+ return ret;
+ }
+ ret = sync_blockdev(sb->s_bdev);
+ if (ret)
+ return ret;
+
+ /*
+ * Now when everything is written we can discard the pagecache so
+ * that userspace sees the changes.
+ */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (type != -1 && cnt != type)
+ continue;
+ if (!sb_has_quota_active(sb, cnt))
+ continue;
+ inode_lock(dqopt->files[cnt]);
+ truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
+ inode_unlock(dqopt->files[cnt]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dquot_quota_sync);
+
+static unsigned long
+dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct dquot *dquot;
+ unsigned long freed = 0;
+
+ spin_lock(&dq_list_lock);
+ while (!list_empty(&free_dquots) && sc->nr_to_scan) {
+ dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
+ remove_dquot_hash(dquot);
+ remove_free_dquot(dquot);
+ remove_inuse(dquot);
+ do_destroy_dquot(dquot);
+ sc->nr_to_scan--;
+ freed++;
+ }
+ spin_unlock(&dq_list_lock);
+ return freed;
+}
+
+static unsigned long
+dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ return vfs_pressure_ratio(
+ percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
+}
+
+static struct shrinker dqcache_shrinker = {
+ .count_objects = dqcache_shrink_count,
+ .scan_objects = dqcache_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+/*
+ * Safely release dquot and put reference to dquot.
+ */
+static void quota_release_workfn(struct work_struct *work)
+{
+ struct dquot *dquot;
+ struct list_head rls_head;
+
+ spin_lock(&dq_list_lock);
+ /* Exchange the list head to avoid livelock. */
+ list_replace_init(&releasing_dquots, &rls_head);
+ spin_unlock(&dq_list_lock);
+ synchronize_srcu(&dquot_srcu);
+
+restart:
+ spin_lock(&dq_list_lock);
+ while (!list_empty(&rls_head)) {
+ dquot = list_first_entry(&rls_head, struct dquot, dq_free);
+ WARN_ON_ONCE(atomic_read(&dquot->dq_count));
+ /*
+ * Note that DQ_RELEASING_B protects us from racing with
+ * invalidate_dquots() calls so we are safe to work with the
+ * dquot even after we drop dq_list_lock.
+ */
+ if (dquot_dirty(dquot)) {
+ spin_unlock(&dq_list_lock);
+ /* Commit dquot before releasing */
+ dquot_write_dquot(dquot);
+ goto restart;
+ }
+ if (dquot_active(dquot)) {
+ spin_unlock(&dq_list_lock);
+ dquot->dq_sb->dq_op->release_dquot(dquot);
+ goto restart;
+ }
+ /* Dquot is inactive and clean, now move it to free list */
+ remove_free_dquot(dquot);
+ put_dquot_last(dquot);
+ }
+ spin_unlock(&dq_list_lock);
+}
+
+/*
+ * Put reference to dquot
+ */
+void dqput(struct dquot *dquot)
+{
+ if (!dquot)
+ return;
+#ifdef CONFIG_QUOTA_DEBUG
+ if (!atomic_read(&dquot->dq_count)) {
+ quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
+ quotatypes[dquot->dq_id.type],
+ from_kqid(&init_user_ns, dquot->dq_id));
+ BUG();
+ }
+#endif
+ dqstats_inc(DQST_DROPS);
+
+ spin_lock(&dq_list_lock);
+ if (atomic_read(&dquot->dq_count) > 1) {
+ /* We have more than one user... nothing to do */
+ atomic_dec(&dquot->dq_count);
+ /* Releasing dquot during quotaoff phase? */
+ if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
+ atomic_read(&dquot->dq_count) == 1)
+ wake_up(&dquot_ref_wq);
+ spin_unlock(&dq_list_lock);
+ return;
+ }
+
+ /* Need to release dquot? */
+#ifdef CONFIG_QUOTA_DEBUG
+ /* sanity check */
+ BUG_ON(!list_empty(&dquot->dq_free));
+#endif
+ put_releasing_dquots(dquot);
+ atomic_dec(&dquot->dq_count);
+ spin_unlock(&dq_list_lock);
+ queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
+}
+EXPORT_SYMBOL(dqput);
+
+struct dquot *dquot_alloc(struct super_block *sb, int type)
+{
+ return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
+}
+EXPORT_SYMBOL(dquot_alloc);
+
+static struct dquot *get_empty_dquot(struct super_block *sb, int type)
+{
+ struct dquot *dquot;
+
+ dquot = sb->dq_op->alloc_dquot(sb, type);
+ if(!dquot)
+ return NULL;
+
+ mutex_init(&dquot->dq_lock);
+ INIT_LIST_HEAD(&dquot->dq_free);
+ INIT_LIST_HEAD(&dquot->dq_inuse);
+ INIT_HLIST_NODE(&dquot->dq_hash);
+ INIT_LIST_HEAD(&dquot->dq_dirty);
+ dquot->dq_sb = sb;
+ dquot->dq_id = make_kqid_invalid(type);
+ atomic_set(&dquot->dq_count, 1);
+ spin_lock_init(&dquot->dq_dqb_lock);
+
+ return dquot;
+}
+
+/*
+ * Get reference to dquot
+ *
+ * Locking is slightly tricky here. We are guarded from parallel quotaoff()
+ * destroying our dquot by:
+ * a) checking for quota flags under dq_list_lock and
+ * b) getting a reference to dquot before we release dq_list_lock
+ */
+struct dquot *dqget(struct super_block *sb, struct kqid qid)
+{
+ unsigned int hashent = hashfn(sb, qid);
+ struct dquot *dquot, *empty = NULL;
+
+ if (!qid_has_mapping(sb->s_user_ns, qid))
+ return ERR_PTR(-EINVAL);
+
+ if (!sb_has_quota_active(sb, qid.type))
+ return ERR_PTR(-ESRCH);
+we_slept:
+ spin_lock(&dq_list_lock);
+ spin_lock(&dq_state_lock);
+ if (!sb_has_quota_active(sb, qid.type)) {
+ spin_unlock(&dq_state_lock);
+ spin_unlock(&dq_list_lock);
+ dquot = ERR_PTR(-ESRCH);
+ goto out;
+ }
+ spin_unlock(&dq_state_lock);
+
+ dquot = find_dquot(hashent, sb, qid);
+ if (!dquot) {
+ if (!empty) {
+ spin_unlock(&dq_list_lock);
+ empty = get_empty_dquot(sb, qid.type);
+ if (!empty)
+ schedule(); /* Try to wait for a moment... */
+ goto we_slept;
+ }
+ dquot = empty;
+ empty = NULL;
+ dquot->dq_id = qid;
+ /* all dquots go on the inuse_list */
+ put_inuse(dquot);
+ /* hash it first so it can be found */
+ insert_dquot_hash(dquot);
+ spin_unlock(&dq_list_lock);
+ dqstats_inc(DQST_LOOKUPS);
+ } else {
+ if (!atomic_read(&dquot->dq_count))
+ remove_free_dquot(dquot);
+ atomic_inc(&dquot->dq_count);
+ spin_unlock(&dq_list_lock);
+ dqstats_inc(DQST_CACHE_HITS);
+ dqstats_inc(DQST_LOOKUPS);
+ }
+ /* Wait for dq_lock - after this we know that either dquot_release() is
+ * already finished or it will be canceled due to dq_count > 0 test */
+ wait_on_dquot(dquot);
+ /* Read the dquot / allocate space in quota file */
+ if (!dquot_active(dquot)) {
+ int err;
+
+ err = sb->dq_op->acquire_dquot(dquot);
+ if (err < 0) {
+ dqput(dquot);
+ dquot = ERR_PTR(err);
+ goto out;
+ }
+ }
+ /*
+ * Make sure following reads see filled structure - paired with
+ * smp_mb__before_atomic() in dquot_acquire().
+ */
+ smp_rmb();
+#ifdef CONFIG_QUOTA_DEBUG
+ BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
+#endif
+out:
+ if (empty)
+ do_destroy_dquot(empty);
+
+ return dquot;
+}
+EXPORT_SYMBOL(dqget);
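
dqget() returns an ERR_PTR() on failure and otherwise a referenced dquot that must eventually be dropped with dqput(). A minimal sketch, not part of this patch, of the pairing (the surrounding function is hypothetical):

/* Illustrative only: look up a user's dquot and read its block hard limit. */
static qsize_t example_user_bhardlimit(struct super_block *sb, kuid_t uid)
{
	struct dquot *dquot;
	qsize_t limit;

	dquot = dqget(sb, make_kqid_uid(uid));
	if (IS_ERR(dquot))
		return 0;	/* quotas off, unmapped id, I/O error, ... */

	spin_lock(&dquot->dq_dqb_lock);
	limit = dquot->dq_dqb.dqb_bhardlimit;
	spin_unlock(&dquot->dq_dqb_lock);

	dqput(dquot);		/* drop the reference taken by dqget() */
	return limit;
}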
+
+static inline struct dquot **i_dquot(struct inode *inode)
+{
+ return inode->i_sb->s_op->get_dquots(inode);
+}
+
+static int dqinit_needed(struct inode *inode, int type)
+{
+ struct dquot * const *dquots;
+ int cnt;
+
+ if (IS_NOQUOTA(inode))
+ return 0;
+
+ dquots = i_dquot(inode);
+ if (type != -1)
+ return !dquots[type];
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (!dquots[cnt])
+ return 1;
+ return 0;
+}
+
+/* This routine is guarded by s_umount semaphore */
+static int add_dquot_ref(struct super_block *sb, int type)
+{
+ struct inode *inode, *old_inode = NULL;
+#ifdef CONFIG_QUOTA_DEBUG
+ int reserved = 0;
+#endif
+ int err = 0;
+
+ spin_lock(&sb->s_inode_list_lock);
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ spin_lock(&inode->i_lock);
+ if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
+ !atomic_read(&inode->i_writecount) ||
+ !dqinit_needed(inode, type)) {
+ spin_unlock(&inode->i_lock);
+ continue;
+ }
+ __iget(inode);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&sb->s_inode_list_lock);
+
+#ifdef CONFIG_QUOTA_DEBUG
+ if (unlikely(inode_get_rsv_space(inode) > 0))
+ reserved = 1;
+#endif
+ iput(old_inode);
+ err = __dquot_initialize(inode, type);
+ if (err) {
+ iput(inode);
+ goto out;
+ }
+
+ /*
+ * We hold a reference to 'inode' so it couldn't have been
+ * removed from s_inodes list while we dropped the
+ * s_inode_list_lock. We cannot iput the inode now as we can be
+ * holding the last reference and we cannot iput it under
+ * s_inode_list_lock. So we keep the reference and iput it
+ * later.
+ */
+ old_inode = inode;
+ cond_resched();
+ spin_lock(&sb->s_inode_list_lock);
+ }
+ spin_unlock(&sb->s_inode_list_lock);
+ iput(old_inode);
+out:
+#ifdef CONFIG_QUOTA_DEBUG
+ if (reserved) {
+ quota_error(sb, "Writes happened before quota was turned on "
+ "thus quota information is probably inconsistent. "
+ "Please run quotacheck(8)");
+ }
+#endif
+ return err;
+}
+
+/*
+ * Remove references to dquots from inode and add dquot to list for freeing
+ * if we have the last reference to dquot
+ */
+static void remove_inode_dquot_ref(struct inode *inode, int type,
+ struct list_head *tofree_head)
+{
+ struct dquot **dquots = i_dquot(inode);
+ struct dquot *dquot = dquots[type];
+
+ if (!dquot)
+ return;
+
+ dquots[type] = NULL;
+ if (list_empty(&dquot->dq_free)) {
+ /*
+ * The inode still has reference to dquot so it can't be in the
+ * free list
+ */
+ spin_lock(&dq_list_lock);
+ list_add(&dquot->dq_free, tofree_head);
+ spin_unlock(&dq_list_lock);
+ } else {
+ /*
+ * Dquot is already in a list to put so we won't drop the last
+ * reference here.
+ */
+ dqput(dquot);
+ }
+}
+
+/*
+ * Free list of dquots
+ * Dquots are removed from inodes and no new references can be got so we are
+ * the only ones holding reference
+ */
+static void put_dquot_list(struct list_head *tofree_head)
+{
+ struct list_head *act_head;
+ struct dquot *dquot;
+
+ act_head = tofree_head->next;
+ while (act_head != tofree_head) {
+ dquot = list_entry(act_head, struct dquot, dq_free);
+ act_head = act_head->next;
+ /* Remove dquot from the list so we won't have problems... */
+ list_del_init(&dquot->dq_free);
+ dqput(dquot);
+ }
+}
+
+static void remove_dquot_ref(struct super_block *sb, int type,
+ struct list_head *tofree_head)
+{
+ struct inode *inode;
+#ifdef CONFIG_QUOTA_DEBUG
+ int reserved = 0;
+#endif
+
+ spin_lock(&sb->s_inode_list_lock);
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ /*
+ * We also have to scan I_NEW inodes because they can already
+ * have quota pointer initialized. Luckily, we need to touch
+ * only quota pointers and these have separate locking
+ * (dq_data_lock).
+ */
+ spin_lock(&dq_data_lock);
+ if (!IS_NOQUOTA(inode)) {
+#ifdef CONFIG_QUOTA_DEBUG
+ if (unlikely(inode_get_rsv_space(inode) > 0))
+ reserved = 1;
+#endif
+ remove_inode_dquot_ref(inode, type, tofree_head);
+ }
+ spin_unlock(&dq_data_lock);
+ }
+ spin_unlock(&sb->s_inode_list_lock);
+#ifdef CONFIG_QUOTA_DEBUG
+ if (reserved) {
+ printk(KERN_WARNING "VFS (%s): Writes happened after quota"
+ " was disabled thus quota information is probably "
+ "inconsistent. Please run quotacheck(8).\n", sb->s_id);
+ }
+#endif
+}
+
+/* Gather all references from inodes and drop them */
+static void drop_dquot_ref(struct super_block *sb, int type)
+{
+ LIST_HEAD(tofree_head);
+
+ if (sb->dq_op) {
+ remove_dquot_ref(sb, type, &tofree_head);
+ synchronize_srcu(&dquot_srcu);
+ put_dquot_list(&tofree_head);
+ }
+}
+
+static inline
+void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
+{
+ if (dquot->dq_dqb.dqb_rsvspace >= number)
+ dquot->dq_dqb.dqb_rsvspace -= number;
+ else {
+ WARN_ON_ONCE(1);
+ dquot->dq_dqb.dqb_rsvspace = 0;
+ }
+ if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
+ dquot->dq_dqb.dqb_bsoftlimit)
+ dquot->dq_dqb.dqb_btime = (time64_t) 0;
+ clear_bit(DQ_BLKS_B, &dquot->dq_flags);
+}
+
+static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
+{
+ if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
+ dquot->dq_dqb.dqb_curinodes >= number)
+ dquot->dq_dqb.dqb_curinodes -= number;
+ else
+ dquot->dq_dqb.dqb_curinodes = 0;
+ if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
+ dquot->dq_dqb.dqb_itime = (time64_t) 0;
+ clear_bit(DQ_INODES_B, &dquot->dq_flags);
+}
+
+static void dquot_decr_space(struct dquot *dquot, qsize_t number)
+{
+ if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
+ dquot->dq_dqb.dqb_curspace >= number)
+ dquot->dq_dqb.dqb_curspace -= number;
+ else
+ dquot->dq_dqb.dqb_curspace = 0;
+ if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
+ dquot->dq_dqb.dqb_bsoftlimit)
+ dquot->dq_dqb.dqb_btime = (time64_t) 0;
+ clear_bit(DQ_BLKS_B, &dquot->dq_flags);
+}
+
+struct dquot_warn {
+ struct super_block *w_sb;
+ struct kqid w_dq_id;
+ short w_type;
+};
+
+static int warning_issued(struct dquot *dquot, const int warntype)
+{
+ int flag = (warntype == QUOTA_NL_BHARDWARN ||
+ warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
+ ((warntype == QUOTA_NL_IHARDWARN ||
+ warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
+
+ if (!flag)
+ return 0;
+ return test_and_set_bit(flag, &dquot->dq_flags);
+}
+
+#ifdef CONFIG_PRINT_QUOTA_WARNING
+static int flag_print_warnings = 1;
+
+static int need_print_warning(struct dquot_warn *warn)
+{
+ if (!flag_print_warnings)
+ return 0;
+
+ switch (warn->w_dq_id.type) {
+ case USRQUOTA:
+ return uid_eq(current_fsuid(), warn->w_dq_id.uid);
+ case GRPQUOTA:
+ return in_group_p(warn->w_dq_id.gid);
+ case PRJQUOTA:
+ return 1;
+ }
+ return 0;
+}
+
+/* Print warning to user which exceeded quota */
+static void print_warning(struct dquot_warn *warn)
+{
+ char *msg = NULL;
+ struct tty_struct *tty;
+ int warntype = warn->w_type;
+
+ if (warntype == QUOTA_NL_IHARDBELOW ||
+ warntype == QUOTA_NL_ISOFTBELOW ||
+ warntype == QUOTA_NL_BHARDBELOW ||
+ warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
+ return;
+
+ tty = get_current_tty();
+ if (!tty)
+ return;
+ tty_write_message(tty, warn->w_sb->s_id);
+ if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
+ tty_write_message(tty, ": warning, ");
+ else
+ tty_write_message(tty, ": write failed, ");
+ tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
+ switch (warntype) {
+ case QUOTA_NL_IHARDWARN:
+ msg = " file limit reached.\r\n";
+ break;
+ case QUOTA_NL_ISOFTLONGWARN:
+ msg = " file quota exceeded too long.\r\n";
+ break;
+ case QUOTA_NL_ISOFTWARN:
+ msg = " file quota exceeded.\r\n";
+ break;
+ case QUOTA_NL_BHARDWARN:
+ msg = " block limit reached.\r\n";
+ break;
+ case QUOTA_NL_BSOFTLONGWARN:
+ msg = " block quota exceeded too long.\r\n";
+ break;
+ case QUOTA_NL_BSOFTWARN:
+ msg = " block quota exceeded.\r\n";
+ break;
+ }
+ tty_write_message(tty, msg);
+ tty_kref_put(tty);
+}
+#endif
+
+static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
+ int warntype)
+{
+ if (warning_issued(dquot, warntype))
+ return;
+ warn->w_type = warntype;
+ warn->w_sb = dquot->dq_sb;
+ warn->w_dq_id = dquot->dq_id;
+}
+
+/*
+ * Write warnings to the console and send warning messages over netlink.
+ *
+ * Note that this function can call into tty and networking code.
+ */
+static void flush_warnings(struct dquot_warn *warn)
+{
+ int i;
+
+ for (i = 0; i < MAXQUOTAS; i++) {
+ if (warn[i].w_type == QUOTA_NL_NOWARN)
+ continue;
+#ifdef CONFIG_PRINT_QUOTA_WARNING
+ print_warning(&warn[i]);
+#endif
+ quota_send_warning(warn[i].w_dq_id,
+ warn[i].w_sb->s_dev, warn[i].w_type);
+ }
+}
+
+static int ignore_hardlimit(struct dquot *dquot)
+{
+ struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
+
+ return capable(CAP_SYS_RESOURCE) &&
+ (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
+ !(info->dqi_flags & DQF_ROOT_SQUASH));
+}
+
+static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
+ struct dquot_warn *warn)
+{
+ qsize_t newinodes;
+ int ret = 0;
+
+ spin_lock(&dquot->dq_dqb_lock);
+ newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
+ if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
+ test_bit(DQ_FAKE_B, &dquot->dq_flags))
+ goto add;
+
+ if (dquot->dq_dqb.dqb_ihardlimit &&
+ newinodes > dquot->dq_dqb.dqb_ihardlimit &&
+ !ignore_hardlimit(dquot)) {
+ prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
+ ret = -EDQUOT;
+ goto out;
+ }
+
+ if (dquot->dq_dqb.dqb_isoftlimit &&
+ newinodes > dquot->dq_dqb.dqb_isoftlimit &&
+ dquot->dq_dqb.dqb_itime &&
+ ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
+ !ignore_hardlimit(dquot)) {
+ prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
+ ret = -EDQUOT;
+ goto out;
+ }
+
+ if (dquot->dq_dqb.dqb_isoftlimit &&
+ newinodes > dquot->dq_dqb.dqb_isoftlimit &&
+ dquot->dq_dqb.dqb_itime == 0) {
+ prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
+ dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
+ sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
+ }
+add:
+ dquot->dq_dqb.dqb_curinodes = newinodes;
+
+out:
+ spin_unlock(&dquot->dq_dqb_lock);
+ return ret;
+}
+
+static int dquot_add_space(struct dquot *dquot, qsize_t space,
+ qsize_t rsv_space, unsigned int flags,
+ struct dquot_warn *warn)
+{
+ qsize_t tspace;
+ struct super_block *sb = dquot->dq_sb;
+ int ret = 0;
+
+ spin_lock(&dquot->dq_dqb_lock);
+ if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
+ test_bit(DQ_FAKE_B, &dquot->dq_flags))
+ goto finish;
+
+ tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
+ + space + rsv_space;
+
+ if (dquot->dq_dqb.dqb_bhardlimit &&
+ tspace > dquot->dq_dqb.dqb_bhardlimit &&
+ !ignore_hardlimit(dquot)) {
+ if (flags & DQUOT_SPACE_WARN)
+ prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
+ ret = -EDQUOT;
+ goto finish;
+ }
+
+ if (dquot->dq_dqb.dqb_bsoftlimit &&
+ tspace > dquot->dq_dqb.dqb_bsoftlimit &&
+ dquot->dq_dqb.dqb_btime &&
+ ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
+ !ignore_hardlimit(dquot)) {
+ if (flags & DQUOT_SPACE_WARN)
+ prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
+ ret = -EDQUOT;
+ goto finish;
+ }
+
+ if (dquot->dq_dqb.dqb_bsoftlimit &&
+ tspace > dquot->dq_dqb.dqb_bsoftlimit &&
+ dquot->dq_dqb.dqb_btime == 0) {
+ if (flags & DQUOT_SPACE_WARN) {
+ prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
+ dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
+ sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
+ } else {
+ /*
+ * We don't allow preallocation to exceed the softlimit, so exceeding
+ * it will always be reported
+ */
+ ret = -EDQUOT;
+ goto finish;
+ }
+ }
+finish:
+ /*
+ * We have to be careful and go through warning generation & grace time
+ * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
+ * only here...
+ */
+ if (flags & DQUOT_SPACE_NOFAIL)
+ ret = 0;
+ if (!ret) {
+ dquot->dq_dqb.dqb_rsvspace += rsv_space;
+ dquot->dq_dqb.dqb_curspace += space;
+ }
+ spin_unlock(&dquot->dq_dqb_lock);
+ return ret;
+}
+
+static int info_idq_free(struct dquot *dquot, qsize_t inodes)
+{
+ qsize_t newinodes;
+
+ if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
+ dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
+ !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
+ return QUOTA_NL_NOWARN;
+
+ newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
+ if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
+ return QUOTA_NL_ISOFTBELOW;
+ if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
+ newinodes < dquot->dq_dqb.dqb_ihardlimit)
+ return QUOTA_NL_IHARDBELOW;
+ return QUOTA_NL_NOWARN;
+}
+
+static int info_bdq_free(struct dquot *dquot, qsize_t space)
+{
+ qsize_t tspace;
+
+ tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;
+
+ if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
+ tspace <= dquot->dq_dqb.dqb_bsoftlimit)
+ return QUOTA_NL_NOWARN;
+
+ if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
+ return QUOTA_NL_BSOFTBELOW;
+ if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
+ tspace - space < dquot->dq_dqb.dqb_bhardlimit)
+ return QUOTA_NL_BHARDBELOW;
+ return QUOTA_NL_NOWARN;
+}
+
+static int inode_quota_active(const struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+
+ if (IS_NOQUOTA(inode))
+ return 0;
+ return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
+}
+
+/*
+ * Initialize quota pointers in inode
+ *
+ * It is better to call this function outside of any transaction as it
+ * might need a lot of space in journal for dquot structure allocation.
+ */
+static int __dquot_initialize(struct inode *inode, int type)
+{
+ int cnt, init_needed = 0;
+ struct dquot **dquots, *got[MAXQUOTAS] = {};
+ struct super_block *sb = inode->i_sb;
+ qsize_t rsv;
+ int ret = 0;
+
+ if (!inode_quota_active(inode))
+ return 0;
+
+ dquots = i_dquot(inode);
+
+ /* First get references to structures we might need. */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ struct kqid qid;
+ kprojid_t projid;
+ int rc;
+ struct dquot *dquot;
+
+ if (type != -1 && cnt != type)
+ continue;
+ /*
+ * The i_dquot should have been initialized in most cases,
+ * we check it without locking here to avoid unnecessary
+ * dqget()/dqput() calls.
+ */
+ if (dquots[cnt])
+ continue;
+
+ if (!sb_has_quota_active(sb, cnt))
+ continue;
+
+ init_needed = 1;
+
+ switch (cnt) {
+ case USRQUOTA:
+ qid = make_kqid_uid(inode->i_uid);
+ break;
+ case GRPQUOTA:
+ qid = make_kqid_gid(inode->i_gid);
+ break;
+ case PRJQUOTA:
+ rc = inode->i_sb->dq_op->get_projid(inode, &projid);
+ if (rc)
+ continue;
+ qid = make_kqid_projid(projid);
+ break;
+ }
+ dquot = dqget(sb, qid);
+ if (IS_ERR(dquot)) {
+ /* We raced with somebody turning quotas off... */
+ if (PTR_ERR(dquot) != -ESRCH) {
+ ret = PTR_ERR(dquot);
+ goto out_put;
+ }
+ dquot = NULL;
+ }
+ got[cnt] = dquot;
+ }
+
+ /* All required i_dquot pointers have been initialized */
+ if (!init_needed)
+ return 0;
+
+ spin_lock(&dq_data_lock);
+ if (IS_NOQUOTA(inode))
+ goto out_lock;
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (type != -1 && cnt != type)
+ continue;
+ /* Avoid races with quotaoff() */
+ if (!sb_has_quota_active(sb, cnt))
+ continue;
+ /* We could race with quotaon or dqget() could have failed */
+ if (!got[cnt])
+ continue;
+ if (!dquots[cnt]) {
+ dquots[cnt] = got[cnt];
+ got[cnt] = NULL;
+ /*
+ * Make quota reservation system happy if someone
+ * did a write before quota was turned on
+ */
+ rsv = inode_get_rsv_space(inode);
+ if (unlikely(rsv)) {
+ spin_lock(&inode->i_lock);
+ /* Get reservation again under proper lock */
+ rsv = __inode_get_rsv_space(inode);
+ spin_lock(&dquots[cnt]->dq_dqb_lock);
+ dquots[cnt]->dq_dqb.dqb_rsvspace += rsv;
+ spin_unlock(&dquots[cnt]->dq_dqb_lock);
+ spin_unlock(&inode->i_lock);
+ }
+ }
+ }
+out_lock:
+ spin_unlock(&dq_data_lock);
+out_put:
+ /* Drop unused references */
+ dqput_all(got);
+
+ return ret;
+}
+
+int dquot_initialize(struct inode *inode)
+{
+ return __dquot_initialize(inode, -1);
+}
+EXPORT_SYMBOL(dquot_initialize);
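
As the comment above __dquot_initialize() notes, this is best called before starting a transaction, since acquiring a dquot may need to allocate space in the quota file. A minimal sketch, not part of this patch, of the usual calling convention in a filesystem operation (the function is hypothetical):

/* Illustrative only: attach quota pointers up front so that dquot
 * allocation never nests inside the journal transaction. */
static int example_prepare_inode_op(struct inode *inode)
{
	int err;

	err = dquot_initialize(inode);
	if (err)
		return err;

	/* ... start transaction, then call dquot_alloc_space() etc. ... */
	return 0;
}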
+
+bool dquot_initialize_needed(struct inode *inode)
+{
+ struct dquot **dquots;
+ int i;
+
+ if (!inode_quota_active(inode))
+ return false;
+
+ dquots = i_dquot(inode);
+ for (i = 0; i < MAXQUOTAS; i++)
+ if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(dquot_initialize_needed);
+
+/*
+ * Release all quotas referenced by inode.
+ *
+ * This function should only be called on inode free or when converting
+ * a file to a quota file; there are no other users of the i_dquot in
+ * either case, so we needn't call synchronize_srcu() after
+ * clearing i_dquot.
+ */
+static void __dquot_drop(struct inode *inode)
+{
+ int cnt;
+ struct dquot **dquots = i_dquot(inode);
+ struct dquot *put[MAXQUOTAS];
+
+ spin_lock(&dq_data_lock);
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ put[cnt] = dquots[cnt];
+ dquots[cnt] = NULL;
+ }
+ spin_unlock(&dq_data_lock);
+ dqput_all(put);
+}
+
+void dquot_drop(struct inode *inode)
+{
+ struct dquot * const *dquots;
+ int cnt;
+
+ if (IS_NOQUOTA(inode))
+ return;
+
+ /*
+ * Test before calling to rule out calls from proc and such
+ * where we are not allowed to block. Note that this is
+ * actually a reliable test even without the lock - the caller
+ * must assure that nobody can come after the DQUOT_DROP and
+ * add quota pointers back anyway.
+ */
+ dquots = i_dquot(inode);
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (dquots[cnt])
+ break;
+ }
+
+ if (cnt < MAXQUOTAS)
+ __dquot_drop(inode);
+}
+EXPORT_SYMBOL(dquot_drop);
+
+/*
+ * inode_reserved_space is managed internally by quota, and protected by
+ * i_lock similar to i_blocks+i_bytes.
+ */
+static qsize_t *inode_reserved_space(struct inode * inode)
+{
+ /* A filesystem must explicitly define its own method in order to use
+ * the quota reservation interface */
+ BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
+ return inode->i_sb->dq_op->get_reserved_space(inode);
+}
+
+static qsize_t __inode_get_rsv_space(struct inode *inode)
+{
+ if (!inode->i_sb->dq_op->get_reserved_space)
+ return 0;
+ return *inode_reserved_space(inode);
+}
+
+static qsize_t inode_get_rsv_space(struct inode *inode)
+{
+ qsize_t ret;
+
+ if (!inode->i_sb->dq_op->get_reserved_space)
+ return 0;
+ spin_lock(&inode->i_lock);
+ ret = __inode_get_rsv_space(inode);
+ spin_unlock(&inode->i_lock);
+ return ret;
+}
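
A minimal sketch, not part of this patch, of how a filesystem could satisfy the ->get_reserved_space hook that inode_reserved_space() dereferences; the example inode structure and its i_reserved_quota field are assumptions, not taken from any real filesystem. The field must only be modified under inode->i_lock, as the comment above requires.

/* Hypothetical per-filesystem inode with a reserved-quota counter. */
struct example_inode_info {
	qsize_t		i_reserved_quota;	/* protected by vfs_inode.i_lock */
	struct inode	vfs_inode;
};

/* Hooked up via the filesystem's dquot_operations ->get_reserved_space. */
static qsize_t *example_get_reserved_space(struct inode *inode)
{
	return &container_of(inode, struct example_inode_info,
			     vfs_inode)->i_reserved_quota;
}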
+
+/*
+ * This function updates the i_blocks+i_bytes fields and quota information
+ * (together with appropriate checks).
+ *
+ * NOTE: We absolutely rely on the fact that caller dirties the inode
+ * (usually helpers in quotaops.h care about this) and holds a handle for
+ * the current transaction so that dquot write and inode write go into the
+ * same transaction.
+ */
+
+/*
+ * This operation can block, but only after everything is updated
+ */
+int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
+{
+ int cnt, ret = 0, index;
+ struct dquot_warn warn[MAXQUOTAS];
+ int reserve = flags & DQUOT_SPACE_RESERVE;
+ struct dquot **dquots;
+
+ if (!inode_quota_active(inode)) {
+ if (reserve) {
+ spin_lock(&inode->i_lock);
+ *inode_reserved_space(inode) += number;
+ spin_unlock(&inode->i_lock);
+ } else {
+ inode_add_bytes(inode, number);
+ }
+ goto out;
+ }
+
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ warn[cnt].w_type = QUOTA_NL_NOWARN;
+
+ dquots = i_dquot(inode);
+ index = srcu_read_lock(&dquot_srcu);
+ spin_lock(&inode->i_lock);
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (!dquots[cnt])
+ continue;
+ if (reserve) {
+ ret = dquot_add_space(dquots[cnt], 0, number, flags,
+ &warn[cnt]);
+ } else {
+ ret = dquot_add_space(dquots[cnt], number, 0, flags,
+ &warn[cnt]);
+ }
+ if (ret) {
+ /* Back out changes we already did */
+ for (cnt--; cnt >= 0; cnt--) {
+ if (!dquots[cnt])
+ continue;
+ spin_lock(&dquots[cnt]->dq_dqb_lock);
+ if (reserve)
+ dquot_free_reserved_space(dquots[cnt],
+ number);
+ else
+ dquot_decr_space(dquots[cnt], number);
+ spin_unlock(&dquots[cnt]->dq_dqb_lock);
+ }
+ spin_unlock(&inode->i_lock);
+ goto out_flush_warn;
+ }
+ }
+ if (reserve)
+ *inode_reserved_space(inode) += number;
+ else
+ __inode_add_bytes(inode, number);
+ spin_unlock(&inode->i_lock);
+
+ if (reserve)
+ goto out_flush_warn;
+ mark_all_dquot_dirty(dquots);
+out_flush_warn:
+ srcu_read_unlock(&dquot_srcu, index);
+ flush_warnings(warn);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(__dquot_alloc_space);
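
Filesystems normally reach __dquot_alloc_space() through the wrappers in <linux/quotaops.h> such as dquot_alloc_block(). A minimal sketch, not part of this patch, of charging quota for an allocation and rolling the charge back when the allocation itself fails (example_fs_allocate_blocks() is hypothetical):

int example_fs_allocate_blocks(struct inode *inode, qsize_t nr); /* hypothetical */

/* Illustrative only: charge first, allocate second, undo the charge on
 * failure. The caller holds a transaction and dirties the inode, as the
 * comment above requires. */
static int example_alloc_blocks(struct inode *inode, qsize_t nr)
{
	int err;

	err = dquot_alloc_block(inode, nr);	/* updates i_blocks and dquots */
	if (err)
		return err;			/* typically -EDQUOT */

	err = example_fs_allocate_blocks(inode, nr);
	if (err)
		dquot_free_block(inode, nr);	/* roll the charge back */
	return err;
}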
+
+/*
+ * This operation can block, but only after everything is updated
+ */
+int dquot_alloc_inode(struct inode *inode)
+{
+ int cnt, ret = 0, index;
+ struct dquot_warn warn[MAXQUOTAS];
+ struct dquot * const *dquots;
+
+ if (!inode_quota_active(inode))
+ return 0;
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ warn[cnt].w_type = QUOTA_NL_NOWARN;
+
+ dquots = i_dquot(inode);
+ index = srcu_read_lock(&dquot_srcu);
+ spin_lock(&inode->i_lock);
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (!dquots[cnt])
+ continue;
+ ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]);
+ if (ret) {
+ for (cnt--; cnt >= 0; cnt--) {
+ if (!dquots[cnt])
+ continue;
+ /* Back out changes we already did */
+ spin_lock(&dquots[cnt]->dq_dqb_lock);
+ dquot_decr_inodes(dquots[cnt], 1);
+ spin_unlock(&dquots[cnt]->dq_dqb_lock);
+ }
+ goto warn_put_all;
+ }
+ }
+
+warn_put_all:
+ spin_unlock(&inode->i_lock);
+ if (ret == 0)
+ mark_all_dquot_dirty(dquots);
+ srcu_read_unlock(&dquot_srcu, index);
+ flush_warnings(warn);
+ return ret;
+}
+EXPORT_SYMBOL(dquot_alloc_inode);
+
+/*
+ * Convert in-memory reserved quotas to real consumed quotas
+ */
+int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
+{
+ struct dquot **dquots;
+ int cnt, index;
+
+ if (!inode_quota_active(inode)) {
+ spin_lock(&inode->i_lock);
+ *inode_reserved_space(inode) -= number;
+ __inode_add_bytes(inode, number);
+ spin_unlock(&inode->i_lock);
+ return 0;
+ }
+
+ dquots = i_dquot(inode);
+ index = srcu_read_lock(&dquot_srcu);
+ spin_lock(&inode->i_lock);
+ /* Claim reserved quotas to allocated quotas */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (dquots[cnt]) {
+ struct dquot *dquot = dquots[cnt];
+
+ spin_lock(&dquot->dq_dqb_lock);
+ if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
+ number = dquot->dq_dqb.dqb_rsvspace;
+ dquot->dq_dqb.dqb_curspace += number;
+ dquot->dq_dqb.dqb_rsvspace -= number;
+ spin_unlock(&dquot->dq_dqb_lock);
+ }
+ }
+ /* Update inode bytes */
+ *inode_reserved_space(inode) -= number;
+ __inode_add_bytes(inode, number);
+ spin_unlock(&inode->i_lock);
+ mark_all_dquot_dirty(dquots);
+ srcu_read_unlock(&dquot_srcu, index);
+ return 0;
+}
+EXPORT_SYMBOL(dquot_claim_space_nodirty);
+
+/*
+ * Convert allocated space back to in-memory reserved quotas
+ */
+void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
+{
+ struct dquot **dquots;
+ int cnt, index;
+
+ if (!inode_quota_active(inode)) {
+ spin_lock(&inode->i_lock);
+ *inode_reserved_space(inode) += number;
+ __inode_sub_bytes(inode, number);
+ spin_unlock(&inode->i_lock);
+ return;
+ }
+
+ dquots = i_dquot(inode);
+ index = srcu_read_lock(&dquot_srcu);
+ spin_lock(&inode->i_lock);
+ /* Claim reserved quotas to allocated quotas */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (dquots[cnt]) {
+ struct dquot *dquot = dquots[cnt];
+
+ spin_lock(&dquot->dq_dqb_lock);
+ if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
+ number = dquot->dq_dqb.dqb_curspace;
+ dquot->dq_dqb.dqb_rsvspace += number;
+ dquot->dq_dqb.dqb_curspace -= number;
+ spin_unlock(&dquot->dq_dqb_lock);
+ }
+ }
+ /* Update inode bytes */
+ *inode_reserved_space(inode) += number;
+ __inode_sub_bytes(inode, number);
+ spin_unlock(&inode->i_lock);
+ mark_all_dquot_dirty(dquots);
+ srcu_read_unlock(&dquot_srcu, index);
+ return;
+}
+EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
+
+/*
+ * This operation can block, but only after everything is updated
+ */
+void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
+{
+ unsigned int cnt;
+ struct dquot_warn warn[MAXQUOTAS];
+ struct dquot **dquots;
+ int reserve = flags & DQUOT_SPACE_RESERVE, index;
+
+ if (!inode_quota_active(inode)) {
+ if (reserve) {
+ spin_lock(&inode->i_lock);
+ *inode_reserved_space(inode) -= number;
+ spin_unlock(&inode->i_lock);
+ } else {
+ inode_sub_bytes(inode, number);
+ }
+ return;
+ }
+
+ dquots = i_dquot(inode);
+ index = srcu_read_lock(&dquot_srcu);
+ spin_lock(&inode->i_lock);
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ int wtype;
+
+ warn[cnt].w_type = QUOTA_NL_NOWARN;
+ if (!dquots[cnt])
+ continue;
+ spin_lock(&dquots[cnt]->dq_dqb_lock);
+ wtype = info_bdq_free(dquots[cnt], number);
+ if (wtype != QUOTA_NL_NOWARN)
+ prepare_warning(&warn[cnt], dquots[cnt], wtype);
+ if (reserve)
+ dquot_free_reserved_space(dquots[cnt], number);
+ else
+ dquot_decr_space(dquots[cnt], number);
+ spin_unlock(&dquots[cnt]->dq_dqb_lock);
+ }
+ if (reserve)
+ *inode_reserved_space(inode) -= number;
+ else
+ __inode_sub_bytes(inode, number);
+ spin_unlock(&inode->i_lock);
+
+ if (reserve)
+ goto out_unlock;
+ mark_all_dquot_dirty(dquots);
+out_unlock:
+ srcu_read_unlock(&dquot_srcu, index);
+ flush_warnings(warn);
+}
+EXPORT_SYMBOL(__dquot_free_space);
+
+/*
+ * This operation can block, but only after everything is updated
+ */
+void dquot_free_inode(struct inode *inode)
+{
+ unsigned int cnt;
+ struct dquot_warn warn[MAXQUOTAS];
+ struct dquot * const *dquots;
+ int index;
+
+ if (!inode_quota_active(inode))
+ return;
+
+ dquots = i_dquot(inode);
+ index = srcu_read_lock(&dquot_srcu);
+ spin_lock(&inode->i_lock);
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ int wtype;
+
+ warn[cnt].w_type = QUOTA_NL_NOWARN;
+ if (!dquots[cnt])
+ continue;
+ spin_lock(&dquots[cnt]->dq_dqb_lock);
+ wtype = info_idq_free(dquots[cnt], 1);
+ if (wtype != QUOTA_NL_NOWARN)
+ prepare_warning(&warn[cnt], dquots[cnt], wtype);
+ dquot_decr_inodes(dquots[cnt], 1);
+ spin_unlock(&dquots[cnt]->dq_dqb_lock);
+ }
+ spin_unlock(&inode->i_lock);
+ mark_all_dquot_dirty(dquots);
+ srcu_read_unlock(&dquot_srcu, index);
+ flush_warnings(warn);
+}
+EXPORT_SYMBOL(dquot_free_inode);
+
+/*
+ * Transfer the number of inodes and blocks from one diskquota to another.
+ * On success, dquot references in transfer_to are consumed and references
+ * to original dquots that need to be released are placed there. On failure,
+ * references are kept untouched.
+ *
+ * This operation can block, but only after everything is updated
+ * A transaction must be started when entering this function.
+ *
+ * We are holding references on transfer_from & transfer_to, so there is no
+ * need to protect them with srcu_read_lock().
+ */
+int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
+{
+ qsize_t cur_space;
+ qsize_t rsv_space = 0;
+ qsize_t inode_usage = 1;
+ struct dquot *transfer_from[MAXQUOTAS] = {};
+ int cnt, ret = 0;
+ char is_valid[MAXQUOTAS] = {};
+ struct dquot_warn warn_to[MAXQUOTAS];
+ struct dquot_warn warn_from_inodes[MAXQUOTAS];
+ struct dquot_warn warn_from_space[MAXQUOTAS];
+
+ if (IS_NOQUOTA(inode))
+ return 0;
+
+ if (inode->i_sb->dq_op->get_inode_usage) {
+ ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
+ if (ret)
+ return ret;
+ }
+
+ /* Initialize the arrays */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ warn_to[cnt].w_type = QUOTA_NL_NOWARN;
+ warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
+ warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
+ }
+
+ spin_lock(&dq_data_lock);
+ spin_lock(&inode->i_lock);
+ if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&dq_data_lock);
+ return 0;
+ }
+ cur_space = __inode_get_bytes(inode);
+ rsv_space = __inode_get_rsv_space(inode);
+ /*
+ * Build the transfer_from list, check limits, and update usage in
+ * the target structures.
+ */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ /*
+ * Skip changes for same uid or gid or for turned off quota-type.
+ */
+ if (!transfer_to[cnt])
+ continue;
+ /* Avoid races with quotaoff() */
+ if (!sb_has_quota_active(inode->i_sb, cnt))
+ continue;
+ is_valid[cnt] = 1;
+ transfer_from[cnt] = i_dquot(inode)[cnt];
+ ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
+ &warn_to[cnt]);
+ if (ret)
+ goto over_quota;
+ ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space,
+ DQUOT_SPACE_WARN, &warn_to[cnt]);
+ if (ret) {
+ spin_lock(&transfer_to[cnt]->dq_dqb_lock);
+ dquot_decr_inodes(transfer_to[cnt], inode_usage);
+ spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
+ goto over_quota;
+ }
+ }
+
+ /* Decrease usage for source structures and update quota pointers */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (!is_valid[cnt])
+ continue;
+ /* Due to IO error we might not have transfer_from[] structure */
+ if (transfer_from[cnt]) {
+ int wtype;
+
+ spin_lock(&transfer_from[cnt]->dq_dqb_lock);
+ wtype = info_idq_free(transfer_from[cnt], inode_usage);
+ if (wtype != QUOTA_NL_NOWARN)
+ prepare_warning(&warn_from_inodes[cnt],
+ transfer_from[cnt], wtype);
+ wtype = info_bdq_free(transfer_from[cnt],
+ cur_space + rsv_space);
+ if (wtype != QUOTA_NL_NOWARN)
+ prepare_warning(&warn_from_space[cnt],
+ transfer_from[cnt], wtype);
+ dquot_decr_inodes(transfer_from[cnt], inode_usage);
+ dquot_decr_space(transfer_from[cnt], cur_space);
+ dquot_free_reserved_space(transfer_from[cnt],
+ rsv_space);
+ spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
+ }
+ i_dquot(inode)[cnt] = transfer_to[cnt];
+ }
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&dq_data_lock);
+
+ mark_all_dquot_dirty(transfer_from);
+ mark_all_dquot_dirty(transfer_to);
+ flush_warnings(warn_to);
+ flush_warnings(warn_from_inodes);
+ flush_warnings(warn_from_space);
+ /* Pass back references to put */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (is_valid[cnt])
+ transfer_to[cnt] = transfer_from[cnt];
+ return 0;
+over_quota:
+ /* Back out changes we already did */
+ for (cnt--; cnt >= 0; cnt--) {
+ if (!is_valid[cnt])
+ continue;
+ spin_lock(&transfer_to[cnt]->dq_dqb_lock);
+ dquot_decr_inodes(transfer_to[cnt], inode_usage);
+ dquot_decr_space(transfer_to[cnt], cur_space);
+ dquot_free_reserved_space(transfer_to[cnt], rsv_space);
+ spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
+ }
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&dq_data_lock);
+ flush_warnings(warn_to);
+ return ret;
+}
+EXPORT_SYMBOL(__dquot_transfer);
+
+/* Wrapper for transferring ownership of an inode for uid/gid only
+ * Called from FSXXX_setattr()
+ */
+int dquot_transfer(struct inode *inode, struct iattr *iattr)
+{
+ struct dquot *transfer_to[MAXQUOTAS] = {};
+ struct dquot *dquot;
+ struct super_block *sb = inode->i_sb;
+ int ret;
+
+ if (!inode_quota_active(inode))
+ return 0;
+
+ if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)){
+ dquot = dqget(sb, make_kqid_uid(iattr->ia_uid));
+ if (IS_ERR(dquot)) {
+ if (PTR_ERR(dquot) != -ESRCH) {
+ ret = PTR_ERR(dquot);
+ goto out_put;
+ }
+ dquot = NULL;
+ }
+ transfer_to[USRQUOTA] = dquot;
+ }
+ if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)){
+ dquot = dqget(sb, make_kqid_gid(iattr->ia_gid));
+ if (IS_ERR(dquot)) {
+ if (PTR_ERR(dquot) != -ESRCH) {
+ ret = PTR_ERR(dquot);
+ goto out_put;
+ }
+ dquot = NULL;
+ }
+ transfer_to[GRPQUOTA] = dquot;
+ }
+ ret = __dquot_transfer(inode, transfer_to);
+out_put:
+ dqput_all(transfer_to);
+ return ret;
+}
+EXPORT_SYMBOL(dquot_transfer);
+
+/*
+ * Write info of quota file to disk
+ */
+int dquot_commit_info(struct super_block *sb, int type)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+
+ return dqopt->ops[type]->write_file_info(sb, type);
+}
+EXPORT_SYMBOL(dquot_commit_info);
+
+int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+
+ if (!sb_has_quota_active(sb, qid->type))
+ return -ESRCH;
+ if (!dqopt->ops[qid->type]->get_next_id)
+ return -ENOSYS;
+ return dqopt->ops[qid->type]->get_next_id(sb, qid);
+}
+EXPORT_SYMBOL(dquot_get_next_id);
+
+/*
+ * Definitions of diskquota operations.
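+ * A filesystem using this generic implementation typically points its
+ * sb->dq_op at this table (and, when its quota files are hidden system
+ * files, sb->s_qcop at dquot_quotactl_sysfile_ops defined further below).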
+ */
+const struct dquot_operations dquot_operations = {
+ .write_dquot = dquot_commit,
+ .acquire_dquot = dquot_acquire,
+ .release_dquot = dquot_release,
+ .mark_dirty = dquot_mark_dquot_dirty,
+ .write_info = dquot_commit_info,
+ .alloc_dquot = dquot_alloc,
+ .destroy_dquot = dquot_destroy,
+ .get_next_id = dquot_get_next_id,
+};
+EXPORT_SYMBOL(dquot_operations);
+
+/*
+ * Generic helper for ->open on filesystems supporting disk quotas.
+ */
+int dquot_file_open(struct inode *inode, struct file *file)
+{
+ int error;
+
+ error = generic_file_open(inode, file);
+ if (!error && (file->f_mode & FMODE_WRITE))
+ error = dquot_initialize(inode);
+ return error;
+}
+EXPORT_SYMBOL(dquot_file_open);
+
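+/*
+ * Release the quota file inode of the given type: clear S_NOQUOTA again on
+ * external (non-hidden) quota files and drop our inode reference.
+ */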
+static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+ struct inode *inode = dqopt->files[type];
+
+ if (!inode)
+ return;
+ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
+ inode_lock(inode);
+ inode->i_flags &= ~S_NOQUOTA;
+ inode_unlock(inode);
+ }
+ dqopt->files[type] = NULL;
+ iput(inode);
+}
+
+/*
+ * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
+ */
+int dquot_disable(struct super_block *sb, int type, unsigned int flags)
+{
+ int cnt;
+ struct quota_info *dqopt = sb_dqopt(sb);
+
+ /* s_umount should be held in exclusive mode */
+ if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
+ up_read(&sb->s_umount);
+
+ /* Cannot turn off usage accounting without turning off limits, or
+ * suspend quotas and simultaneously turn quotas off. */
+ if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
+ || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
+ DQUOT_USAGE_ENABLED)))
+ return -EINVAL;
+
+ /*
+ * Skip everything if there's nothing to do. We have to do this because
+ * sometimes we are called when fill_super() failed and calling
+ * sync_fs() in such cases does no good.
+ */
+ if (!sb_any_quota_loaded(sb))
+ return 0;
+
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (type != -1 && cnt != type)
+ continue;
+ if (!sb_has_quota_loaded(sb, cnt))
+ continue;
+
+ if (flags & DQUOT_SUSPENDED) {
+ spin_lock(&dq_state_lock);
+ dqopt->flags |=
+ dquot_state_flag(DQUOT_SUSPENDED, cnt);
+ spin_unlock(&dq_state_lock);
+ } else {
+ spin_lock(&dq_state_lock);
+ dqopt->flags &= ~dquot_state_flag(flags, cnt);
+ /* Turning off suspended quotas? */
+ if (!sb_has_quota_loaded(sb, cnt) &&
+ sb_has_quota_suspended(sb, cnt)) {
+ dqopt->flags &= ~dquot_state_flag(
+ DQUOT_SUSPENDED, cnt);
+ spin_unlock(&dq_state_lock);
+ vfs_cleanup_quota_inode(sb, cnt);
+ continue;
+ }
+ spin_unlock(&dq_state_lock);
+ }
+
+ /* We still have to keep quota loaded? */
+ if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
+ continue;
+
+ /* Note: these are blocking operations */
+ drop_dquot_ref(sb, cnt);
+ invalidate_dquots(sb, cnt);
+ /*
+ * Now all dquots should be invalidated, all writes done so we
+ * should be only users of the info. No locks needed.
+ */
+ if (info_dirty(&dqopt->info[cnt]))
+ sb->dq_op->write_info(sb, cnt);
+ if (dqopt->ops[cnt]->free_file_info)
+ dqopt->ops[cnt]->free_file_info(sb, cnt);
+ put_quota_format(dqopt->info[cnt].dqi_format);
+ dqopt->info[cnt].dqi_flags = 0;
+ dqopt->info[cnt].dqi_igrace = 0;
+ dqopt->info[cnt].dqi_bgrace = 0;
+ dqopt->ops[cnt] = NULL;
+ }
+
+ /* Skip syncing and setting flags if quota files are hidden */
+ if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
+ goto put_inodes;
+
+ /* Sync the superblock so that buffers with quota data are written to
+ * disk (and so userspace sees correct data afterwards). */
+ if (sb->s_op->sync_fs)
+ sb->s_op->sync_fs(sb, 1);
+ sync_blockdev(sb->s_bdev);
+ /* Now the quota files are just ordinary files and we can set the
+ * inode flags back. Moreover we discard the pagecache so that
+ * userspace sees the writes we did bypassing the pagecache. We
+ * must also discard the blockdev buffers so that we see the
+ * changes done by userspace on the next quotaon() */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) {
+ inode_lock(dqopt->files[cnt]);
+ truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
+ inode_unlock(dqopt->files[cnt]);
+ }
+ if (sb->s_bdev)
+ invalidate_bdev(sb->s_bdev);
+put_inodes:
+ /* We are done when suspending quotas */
+ if (flags & DQUOT_SUSPENDED)
+ return 0;
+
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (!sb_has_quota_loaded(sb, cnt))
+ vfs_cleanup_quota_inode(sb, cnt);
+ return 0;
+}
+EXPORT_SYMBOL(dquot_disable);
+
+int dquot_quota_off(struct super_block *sb, int type)
+{
+ return dquot_disable(sb, type,
+ DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
+}
+EXPORT_SYMBOL(dquot_quota_off);
+
+/*
+ * Turn quotas on for a device
+ */
+
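+/*
+ * Sanity check the prospective quota file and grab a reference to its inode.
+ * For external quota files we set S_NOQUOTA (and drop any existing dquot
+ * references) so that no quota accounting is done on the quota file itself.
+ */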
+static int vfs_setup_quota_inode(struct inode *inode, int type)
+{
+ struct super_block *sb = inode->i_sb;
+ struct quota_info *dqopt = sb_dqopt(sb);
+
+ if (is_bad_inode(inode))
+ return -EUCLEAN;
+ if (!S_ISREG(inode->i_mode))
+ return -EACCES;
+ if (IS_RDONLY(inode))
+ return -EROFS;
+ if (sb_has_quota_loaded(sb, type))
+ return -EBUSY;
+
+ /*
+ * Quota files should never be encrypted. They should be thought of as
+ * filesystem metadata, not user data. New-style internal quota files
+ * cannot be encrypted by users anyway, but old-style external quota
+ * files could potentially be incorrectly created in an encrypted
+ * directory, hence this explicit check. Some reasons why encrypted
+ * quota files don't work include: (1) some filesystems that support
+ * encryption don't handle it in their quota_read and quota_write, and
+ * (2) cleaning up encrypted quota files at unmount would need special
+ * consideration, as quota files are cleaned up later than user files.
+ */
+ if (IS_ENCRYPTED(inode))
+ return -EINVAL;
+
+ dqopt->files[type] = igrab(inode);
+ if (!dqopt->files[type])
+ return -EIO;
+ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
+ /* We don't want quota and atime on quota files (deadlocks
+ * possible) Also nobody should write to the file - we use
+ * special IO operations which ignore the immutable bit. */
+ inode_lock(inode);
+ inode->i_flags |= S_NOQUOTA;
+ inode_unlock(inode);
+ /*
+ * When S_NOQUOTA is set, remove dquot references as no more
+ * references can be added
+ */
+ __dquot_drop(inode);
+ }
+ return 0;
+}
+
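+/*
+ * Load quota information of the given format for one quota type and enable
+ * the state bits passed in 'flags'. Usage accounting must be among the
+ * requested flags; resuming suspended quotas is handled by dquot_resume().
+ */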
+int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
+ unsigned int flags)
+{
+ struct quota_format_type *fmt = find_quota_format(format_id);
+ struct quota_info *dqopt = sb_dqopt(sb);
+ int error;
+
+ /* Just unsuspend quotas? */
+ BUG_ON(flags & DQUOT_SUSPENDED);
+ /* s_umount should be held in exclusive mode */
+ if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
+ up_read(&sb->s_umount);
+
+ if (!fmt)
+ return -ESRCH;
+ if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
+ (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
+ error = -EINVAL;
+ goto out_fmt;
+ }
+ /* Filesystems outside of init_user_ns not yet supported */
+ if (sb->s_user_ns != &init_user_ns) {
+ error = -EINVAL;
+ goto out_fmt;
+ }
+ /* Usage always has to be set... */
+ if (!(flags & DQUOT_USAGE_ENABLED)) {
+ error = -EINVAL;
+ goto out_fmt;
+ }
+ if (sb_has_quota_loaded(sb, type)) {
+ error = -EBUSY;
+ goto out_fmt;
+ }
+
+ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
+ /* As we bypass the pagecache we must now flush all the
+ * dirty data and invalidate caches so that kernel sees
+ * changes from userspace. It is not enough to just flush
+ * the quota file since if blocksize < pagesize, invalidation
+ * of the cache could fail because of other unrelated dirty
+ * data */
+ sync_filesystem(sb);
+ invalidate_bdev(sb->s_bdev);
+ }
+
+ error = -EINVAL;
+ if (!fmt->qf_ops->check_quota_file(sb, type))
+ goto out_fmt;
+
+ dqopt->ops[type] = fmt->qf_ops;
+ dqopt->info[type].dqi_format = fmt;
+ dqopt->info[type].dqi_fmt_id = format_id;
+ INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
+ error = dqopt->ops[type]->read_file_info(sb, type);
+ if (error < 0)
+ goto out_fmt;
+ if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
+ spin_lock(&dq_data_lock);
+ dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
+ spin_unlock(&dq_data_lock);
+ }
+ spin_lock(&dq_state_lock);
+ dqopt->flags |= dquot_state_flag(flags, type);
+ spin_unlock(&dq_state_lock);
+
+ error = add_dquot_ref(sb, type);
+ if (error)
+ dquot_disable(sb, type,
+ DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
+
+ return error;
+out_fmt:
+ put_quota_format(fmt);
+
+ return error;
+}
+EXPORT_SYMBOL(dquot_load_quota_sb);
+
+/*
+ * More powerful function for turning on quotas on a given quota inode,
+ * allowing setting of individual quota flags
+ */
+int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
+ unsigned int flags)
+{
+ int err;
+
+ err = vfs_setup_quota_inode(inode, type);
+ if (err < 0)
+ return err;
+ err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
+ if (err < 0)
+ vfs_cleanup_quota_inode(inode->i_sb, type);
+ return err;
+}
+EXPORT_SYMBOL(dquot_load_quota_inode);
+
+/* Reenable quotas on remount RW */
+int dquot_resume(struct super_block *sb, int type)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+ int ret = 0, cnt;
+ unsigned int flags;
+
+ /* s_umount should be held in exclusive mode */
+ if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
+ up_read(&sb->s_umount);
+
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (type != -1 && cnt != type)
+ continue;
+ if (!sb_has_quota_suspended(sb, cnt))
+ continue;
+
+ spin_lock(&dq_state_lock);
+ flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
+ DQUOT_LIMITS_ENABLED,
+ cnt);
+ dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
+ spin_unlock(&dq_state_lock);
+
+ flags = dquot_generic_flag(flags, cnt);
+ ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id,
+ flags);
+ if (ret < 0)
+ vfs_cleanup_quota_inode(sb, cnt);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(dquot_resume);
+
+int dquot_quota_on(struct super_block *sb, int type, int format_id,
+ const struct path *path)
+{
+ int error = security_quota_on(path->dentry);
+ if (error)
+ return error;
+ /* Quota file not on the same filesystem? */
+ if (path->dentry->d_sb != sb)
+ error = -EXDEV;
+ else
+ error = dquot_load_quota_inode(d_inode(path->dentry), type,
+ format_id, DQUOT_USAGE_ENABLED |
+ DQUOT_LIMITS_ENABLED);
+ return error;
+}
+EXPORT_SYMBOL(dquot_quota_on);
+
+/*
+ * This function is used when a filesystem needs to initialize quotas
+ * at mount time.
+ */
+int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
+ int format_id, int type)
+{
+ struct dentry *dentry;
+ int error;
+
+ dentry = lookup_positive_unlocked(qf_name, sb->s_root, strlen(qf_name));
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ error = security_quota_on(dentry);
+ if (!error)
+ error = dquot_load_quota_inode(d_inode(dentry), type, format_id,
+ DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
+
+ dput(dentry);
+ return error;
+}
+EXPORT_SYMBOL(dquot_quota_on_mount);
+
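+/*
+ * Enable limit enforcement for the quota types selected by the XFS-style
+ * flags. Accounting itself must already be enabled - it cannot be switched
+ * on through this interface while the filesystem is mounted.
+ */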
+static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
+{
+ int ret;
+ int type;
+ struct quota_info *dqopt = sb_dqopt(sb);
+
+ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
+ return -ENOSYS;
+ /* Accounting cannot be turned on while fs is mounted */
+ flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
+ if (!flags)
+ return -EINVAL;
+ for (type = 0; type < MAXQUOTAS; type++) {
+ if (!(flags & qtype_enforce_flag(type)))
+ continue;
+ /* Can't enforce without accounting */
+ if (!sb_has_quota_usage_enabled(sb, type)) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+ if (sb_has_quota_limits_enabled(sb, type)) {
+ ret = -EBUSY;
+ goto out_err;
+ }
+ spin_lock(&dq_state_lock);
+ dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
+ spin_unlock(&dq_state_lock);
+ }
+ return 0;
+out_err:
+ /* Backout enforcement enablement we already did */
+ for (type--; type >= 0; type--) {
+ if (flags & qtype_enforce_flag(type))
+ dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
+ }
+ /* Error code translation for better compatibility with XFS */
+ if (ret == -EBUSY)
+ ret = -EEXIST;
+ return ret;
+}
+
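+/*
+ * Counterpart of dquot_quota_enable(): turn off limit enforcement for the
+ * selected quota types. Turning off accounting via quotactl is not supported.
+ */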
+static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
+{
+ int ret;
+ int type;
+ struct quota_info *dqopt = sb_dqopt(sb);
+
+ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
+ return -ENOSYS;
+ /*
+ * We don't support turning off accounting via quotactl. In principle
+ * quota infrastructure can do this but filesystems don't expect
+ * userspace to be able to do it.
+ */
+ if (flags &
+ (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
+ return -EOPNOTSUPP;
+
+ /* Filter out limits not enabled */
+ for (type = 0; type < MAXQUOTAS; type++)
+ if (!sb_has_quota_limits_enabled(sb, type))
+ flags &= ~qtype_enforce_flag(type);
+ /* Nothing left? */
+ if (!flags)
+ return -EEXIST;
+ for (type = 0; type < MAXQUOTAS; type++) {
+ if (flags & qtype_enforce_flag(type)) {
+ ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
+ if (ret < 0)
+ goto out_err;
+ }
+ }
+ return 0;
+out_err:
+ /* Backout enforcement disabling we already did */
+ for (type--; type >= 0; type--) {
+ if (flags & qtype_enforce_flag(type)) {
+ spin_lock(&dq_state_lock);
+ dqopt->flags |=
+ dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
+ spin_unlock(&dq_state_lock);
+ }
+ }
+ return ret;
+}
+
+/* Generic routine for getting common part of quota structure */
+static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
+{
+ struct mem_dqblk *dm = &dquot->dq_dqb;
+
+ memset(di, 0, sizeof(*di));
+ spin_lock(&dquot->dq_dqb_lock);
+ di->d_spc_hardlimit = dm->dqb_bhardlimit;
+ di->d_spc_softlimit = dm->dqb_bsoftlimit;
+ di->d_ino_hardlimit = dm->dqb_ihardlimit;
+ di->d_ino_softlimit = dm->dqb_isoftlimit;
+ di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
+ di->d_ino_count = dm->dqb_curinodes;
+ di->d_spc_timer = dm->dqb_btime;
+ di->d_ino_timer = dm->dqb_itime;
+ spin_unlock(&dquot->dq_dqb_lock);
+}
+
+int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
+ struct qc_dqblk *di)
+{
+ struct dquot *dquot;
+
+ dquot = dqget(sb, qid);
+ if (IS_ERR(dquot))
+ return PTR_ERR(dquot);
+ do_get_dqblk(dquot, di);
+ dqput(dquot);
+
+ return 0;
+}
+EXPORT_SYMBOL(dquot_get_dqblk);
+
+int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
+ struct qc_dqblk *di)
+{
+ struct dquot *dquot;
+ int err;
+
+ if (!sb->dq_op->get_next_id)
+ return -ENOSYS;
+ err = sb->dq_op->get_next_id(sb, qid);
+ if (err < 0)
+ return err;
+ dquot = dqget(sb, *qid);
+ if (IS_ERR(dquot))
+ return PTR_ERR(dquot);
+ do_get_dqblk(dquot, di);
+ dqput(dquot);
+
+ return 0;
+}
+EXPORT_SYMBOL(dquot_get_next_dqblk);
+
+#define VFS_QC_MASK \
+ (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
+ QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
+ QC_SPC_TIMER | QC_INO_TIMER)
+
+/* Generic routine for setting common part of quota structure */
+static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
+{
+ struct mem_dqblk *dm = &dquot->dq_dqb;
+ int check_blim = 0, check_ilim = 0;
+ struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
+
+ if (di->d_fieldmask & ~VFS_QC_MASK)
+ return -EINVAL;
+
+ if (((di->d_fieldmask & QC_SPC_SOFT) &&
+ di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
+ ((di->d_fieldmask & QC_SPC_HARD) &&
+ di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
+ ((di->d_fieldmask & QC_INO_SOFT) &&
+ (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
+ ((di->d_fieldmask & QC_INO_HARD) &&
+ (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
+ return -ERANGE;
+
+ spin_lock(&dquot->dq_dqb_lock);
+ if (di->d_fieldmask & QC_SPACE) {
+ dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
+ check_blim = 1;
+ set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
+ }
+
+ if (di->d_fieldmask & QC_SPC_SOFT)
+ dm->dqb_bsoftlimit = di->d_spc_softlimit;
+ if (di->d_fieldmask & QC_SPC_HARD)
+ dm->dqb_bhardlimit = di->d_spc_hardlimit;
+ if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
+ check_blim = 1;
+ set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
+ }
+
+ if (di->d_fieldmask & QC_INO_COUNT) {
+ dm->dqb_curinodes = di->d_ino_count;
+ check_ilim = 1;
+ set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
+ }
+
+ if (di->d_fieldmask & QC_INO_SOFT)
+ dm->dqb_isoftlimit = di->d_ino_softlimit;
+ if (di->d_fieldmask & QC_INO_HARD)
+ dm->dqb_ihardlimit = di->d_ino_hardlimit;
+ if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
+ check_ilim = 1;
+ set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
+ }
+
+ if (di->d_fieldmask & QC_SPC_TIMER) {
+ dm->dqb_btime = di->d_spc_timer;
+ check_blim = 1;
+ set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
+ }
+
+ if (di->d_fieldmask & QC_INO_TIMER) {
+ dm->dqb_itime = di->d_ino_timer;
+ check_ilim = 1;
+ set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
+ }
+
+ if (check_blim) {
+ if (!dm->dqb_bsoftlimit ||
+ dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) {
+ dm->dqb_btime = 0;
+ clear_bit(DQ_BLKS_B, &dquot->dq_flags);
+ } else if (!(di->d_fieldmask & QC_SPC_TIMER))
+ /* Set grace only if user hasn't provided his own... */
+ dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
+ }
+ if (check_ilim) {
+ if (!dm->dqb_isoftlimit ||
+ dm->dqb_curinodes <= dm->dqb_isoftlimit) {
+ dm->dqb_itime = 0;
+ clear_bit(DQ_INODES_B, &dquot->dq_flags);
+ } else if (!(di->d_fieldmask & QC_INO_TIMER))
+ /* Set grace only if user hasn't provided his own... */
+ dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
+ }
+ if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
+ dm->dqb_isoftlimit)
+ clear_bit(DQ_FAKE_B, &dquot->dq_flags);
+ else
+ set_bit(DQ_FAKE_B, &dquot->dq_flags);
+ spin_unlock(&dquot->dq_dqb_lock);
+ mark_dquot_dirty(dquot);
+
+ return 0;
+}
+
+int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
+ struct qc_dqblk *di)
+{
+ struct dquot *dquot;
+ int rc;
+
+ dquot = dqget(sb, qid);
+ if (IS_ERR(dquot)) {
+ rc = PTR_ERR(dquot);
+ goto out;
+ }
+ rc = do_set_dqblk(dquot, di);
+ dqput(dquot);
+out:
+ return rc;
+}
+EXPORT_SYMBOL(dquot_set_dqblk);
+
+/* Generic routine for getting common part of quota file information */
+int dquot_get_state(struct super_block *sb, struct qc_state *state)
+{
+ struct mem_dqinfo *mi;
+ struct qc_type_state *tstate;
+ struct quota_info *dqopt = sb_dqopt(sb);
+ int type;
+
+ memset(state, 0, sizeof(*state));
+ for (type = 0; type < MAXQUOTAS; type++) {
+ if (!sb_has_quota_active(sb, type))
+ continue;
+ tstate = state->s_state + type;
+ mi = sb_dqopt(sb)->info + type;
+ tstate->flags = QCI_ACCT_ENABLED;
+ spin_lock(&dq_data_lock);
+ if (mi->dqi_flags & DQF_SYS_FILE)
+ tstate->flags |= QCI_SYSFILE;
+ if (mi->dqi_flags & DQF_ROOT_SQUASH)
+ tstate->flags |= QCI_ROOT_SQUASH;
+ if (sb_has_quota_limits_enabled(sb, type))
+ tstate->flags |= QCI_LIMITS_ENFORCED;
+ tstate->spc_timelimit = mi->dqi_bgrace;
+ tstate->ino_timelimit = mi->dqi_igrace;
+ if (dqopt->files[type]) {
+ tstate->ino = dqopt->files[type]->i_ino;
+ tstate->blocks = dqopt->files[type]->i_blocks;
+ }
+ tstate->nextents = 1; /* We don't know... */
+ spin_unlock(&dq_data_lock);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(dquot_get_state);
+
+/* Generic routine for setting common part of quota file information */
+int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
+{
+ struct mem_dqinfo *mi;
+ int err = 0;
+
+ if ((ii->i_fieldmask & QC_WARNS_MASK) ||
+ (ii->i_fieldmask & QC_RT_SPC_TIMER))
+ return -EINVAL;
+ if (!sb_has_quota_active(sb, type))
+ return -ESRCH;
+ mi = sb_dqopt(sb)->info + type;
+ if (ii->i_fieldmask & QC_FLAGS) {
+ if ((ii->i_flags & QCI_ROOT_SQUASH &&
+ mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
+ return -EINVAL;
+ }
+ spin_lock(&dq_data_lock);
+ if (ii->i_fieldmask & QC_SPC_TIMER)
+ mi->dqi_bgrace = ii->i_spc_timelimit;
+ if (ii->i_fieldmask & QC_INO_TIMER)
+ mi->dqi_igrace = ii->i_ino_timelimit;
+ if (ii->i_fieldmask & QC_FLAGS) {
+ if (ii->i_flags & QCI_ROOT_SQUASH)
+ mi->dqi_flags |= DQF_ROOT_SQUASH;
+ else
+ mi->dqi_flags &= ~DQF_ROOT_SQUASH;
+ }
+ spin_unlock(&dq_data_lock);
+ mark_info_dirty(sb, type);
+ /* Force write to disk */
+ sb->dq_op->write_info(sb, type);
+ return err;
+}
+EXPORT_SYMBOL(dquot_set_dqinfo);
+
+const struct quotactl_ops dquot_quotactl_sysfile_ops = {
+ .quota_enable = dquot_quota_enable,
+ .quota_disable = dquot_quota_disable,
+ .quota_sync = dquot_quota_sync,
+ .get_state = dquot_get_state,
+ .set_info = dquot_set_dqinfo,
+ .get_dqblk = dquot_get_dqblk,
+ .get_nextdqblk = dquot_get_next_dqblk,
+ .set_dqblk = dquot_set_dqblk
+};
+EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
+
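+/*
+ * Sysctl handler for the quota statistics: fold the percpu counter of the
+ * requested statistic into dqstats.stat[] and let the generic unsigned long
+ * handler report it.
+ */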
+static int do_proc_dqstats(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ unsigned int type = (unsigned long *)table->data - dqstats.stat;
+ s64 value = percpu_counter_sum(&dqstats.counter[type]);
+
+ /* Filter negative values for non-monotonic counters */
+ if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
+ type == DQST_FREE_DQUOTS))
+ value = 0;
+
+ /* Update global table */
+ dqstats.stat[type] = value;
+ return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+}
+
+static struct ctl_table fs_dqstats_table[] = {
+ {
+ .procname = "lookups",
+ .data = &dqstats.stat[DQST_LOOKUPS],
+ .maxlen = sizeof(unsigned long),
+ .mode = 0444,
+ .proc_handler = do_proc_dqstats,
+ },
+ {
+ .procname = "drops",
+ .data = &dqstats.stat[DQST_DROPS],
+ .maxlen = sizeof(unsigned long),
+ .mode = 0444,
+ .proc_handler = do_proc_dqstats,
+ },
+ {
+ .procname = "reads",
+ .data = &dqstats.stat[DQST_READS],
+ .maxlen = sizeof(unsigned long),
+ .mode = 0444,
+ .proc_handler = do_proc_dqstats,
+ },
+ {
+ .procname = "writes",
+ .data = &dqstats.stat[DQST_WRITES],
+ .maxlen = sizeof(unsigned long),
+ .mode = 0444,
+ .proc_handler = do_proc_dqstats,
+ },
+ {
+ .procname = "cache_hits",
+ .data = &dqstats.stat[DQST_CACHE_HITS],
+ .maxlen = sizeof(unsigned long),
+ .mode = 0444,
+ .proc_handler = do_proc_dqstats,
+ },
+ {
+ .procname = "allocated_dquots",
+ .data = &dqstats.stat[DQST_ALLOC_DQUOTS],
+ .maxlen = sizeof(unsigned long),
+ .mode = 0444,
+ .proc_handler = do_proc_dqstats,
+ },
+ {
+ .procname = "free_dquots",
+ .data = &dqstats.stat[DQST_FREE_DQUOTS],
+ .maxlen = sizeof(unsigned long),
+ .mode = 0444,
+ .proc_handler = do_proc_dqstats,
+ },
+ {
+ .procname = "syncs",
+ .data = &dqstats.stat[DQST_SYNCS],
+ .maxlen = sizeof(unsigned long),
+ .mode = 0444,
+ .proc_handler = do_proc_dqstats,
+ },
+#ifdef CONFIG_PRINT_QUOTA_WARNING
+ {
+ .procname = "warnings",
+ .data = &flag_print_warnings,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
+ { },
+};
+
+static struct ctl_table fs_table[] = {
+ {
+ .procname = "quota",
+ .mode = 0555,
+ .child = fs_dqstats_table,
+ },
+ { },
+};
+
+static struct ctl_table sys_table[] = {
+ {
+ .procname = "fs",
+ .mode = 0555,
+ .child = fs_table,
+ },
+ { },
+};
+
+static int __init dquot_init(void)
+{
+ int i, ret;
+ unsigned long nr_hash, order;
+
+ printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
+
+ register_sysctl_table(sys_table);
+
+ dquot_cachep = kmem_cache_create("dquot",
+ sizeof(struct dquot), sizeof(unsigned long) * 4,
+ (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
+ SLAB_MEM_SPREAD|SLAB_PANIC),
+ NULL);
+
+ order = 0;
+ dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
+ if (!dquot_hash)
+ panic("Cannot create dquot hash table");
+
+ for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
+ ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
+ if (ret)
+ panic("Cannot create dquot stat counters");
+ }
+
+ /* Find power-of-two hlist_heads which can fit into allocation */
+ nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
+ dq_hash_bits = ilog2(nr_hash);
+
+ nr_hash = 1UL << dq_hash_bits;
+ dq_hash_mask = nr_hash - 1;
+ for (i = 0; i < nr_hash; i++)
+ INIT_HLIST_HEAD(dquot_hash + i);
+
+ pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
+ " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
+
+ if (register_shrinker(&dqcache_shrinker))
+ panic("Cannot register dquot shrinker");
+
+ return 0;
+}
+fs_initcall(dquot_init);
diff --git a/fs/quota/kqid.c b/fs/quota/kqid.c
new file mode 100644
index 000000000..f814fa90a
--- /dev/null
+++ b/fs/quota/kqid.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/fs.h>
+#include <linux/quota.h>
+#include <linux/export.h>
+
+/**
+ * qid_eq - Test to see if two kqid values are the same
+ * @left: A qid value
+ * @right: Another qid value
+ *
+ * Return true if the two qid values are equal and false otherwise.
+ */
+bool qid_eq(struct kqid left, struct kqid right)
+{
+ if (left.type != right.type)
+ return false;
+ switch(left.type) {
+ case USRQUOTA:
+ return uid_eq(left.uid, right.uid);
+ case GRPQUOTA:
+ return gid_eq(left.gid, right.gid);
+ case PRJQUOTA:
+ return projid_eq(left.projid, right.projid);
+ default:
+ BUG();
+ }
+}
+EXPORT_SYMBOL(qid_eq);
+
+/**
+ * qid_lt - Test to see if one qid value is less than another
+ * @left: The possibly lesser qid value
+ * @right: The possibly greater qid value
+ *
+ * Return true if left is less than right and false otherwise.
+ */
+bool qid_lt(struct kqid left, struct kqid right)
+{
+ if (left.type < right.type)
+ return true;
+ if (left.type > right.type)
+ return false;
+ switch (left.type) {
+ case USRQUOTA:
+ return uid_lt(left.uid, right.uid);
+ case GRPQUOTA:
+ return gid_lt(left.gid, right.gid);
+ case PRJQUOTA:
+ return projid_lt(left.projid, right.projid);
+ default:
+ BUG();
+ }
+}
+EXPORT_SYMBOL(qid_lt);
+
+/**
+ * from_kqid - Create a qid from a kqid user-namespace pair.
+ * @targ: The user namespace we want a qid in.
+ * @kqid: The kernel internal quota identifier to start with.
+ *
+ * Map @kqid into the user-namespace specified by @targ and
+ * return the resulting qid.
+ *
+ * There is always a mapping into the initial user_namespace.
+ *
+ * If @kqid has no mapping in @targ (qid_t)-1 is returned.
+ */
+qid_t from_kqid(struct user_namespace *targ, struct kqid kqid)
+{
+ switch (kqid.type) {
+ case USRQUOTA:
+ return from_kuid(targ, kqid.uid);
+ case GRPQUOTA:
+ return from_kgid(targ, kqid.gid);
+ case PRJQUOTA:
+ return from_kprojid(targ, kqid.projid);
+ default:
+ BUG();
+ }
+}
+EXPORT_SYMBOL(from_kqid);
+
+/**
+ * from_kqid_munged - Create a qid from a kqid user-namespace pair.
+ * @targ: The user namespace we want a qid in.
+ * @kqid: The kernel internal quota identifier to start with.
+ *
+ * Map @kqid into the user-namespace specified by @targ and
+ * return the resulting qid.
+ *
+ * There is always a mapping into the initial user_namespace.
+ *
+ * Unlike from_kqid, from_kqid_munged never fails and always
+ * returns a valid qid. This makes from_kqid_munged
+ * appropriate for use in places where failing to provide
+ * a qid_t is not a good option.
+ *
+ * If @kqid has no mapping in @targ the kqid.type specific
+ * overflow identifier is returned.
+ */
+qid_t from_kqid_munged(struct user_namespace *targ, struct kqid kqid)
+{
+ switch (kqid.type) {
+ case USRQUOTA:
+ return from_kuid_munged(targ, kqid.uid);
+ case GRPQUOTA:
+ return from_kgid_munged(targ, kqid.gid);
+ case PRJQUOTA:
+ return from_kprojid_munged(targ, kqid.projid);
+ default:
+ BUG();
+ }
+}
+EXPORT_SYMBOL(from_kqid_munged);
+
+/**
+ * qid_valid - Report if a valid value is stored in a kqid.
+ * @qid: The kernel internal quota identifier to test.
+ */
+bool qid_valid(struct kqid qid)
+{
+ switch (qid.type) {
+ case USRQUOTA:
+ return uid_valid(qid.uid);
+ case GRPQUOTA:
+ return gid_valid(qid.gid);
+ case PRJQUOTA:
+ return projid_valid(qid.projid);
+ default:
+ BUG();
+ }
+}
+EXPORT_SYMBOL(qid_valid);
diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
new file mode 100644
index 000000000..95acdae39
--- /dev/null
+++ b/fs/quota/netlink.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/cred.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/quotaops.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+static const struct genl_multicast_group quota_mcgrps[] = {
+ { .name = "events", },
+};
+
+/* Netlink family structure for quota */
+static struct genl_family quota_genl_family __ro_after_init = {
+ .module = THIS_MODULE,
+ .hdrsize = 0,
+ .name = "VFS_DQUOT",
+ .version = 1,
+ .maxattr = QUOTA_NL_A_MAX,
+ .mcgrps = quota_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(quota_mcgrps),
+};
+
+/**
+ * quota_send_warning - Send warning to userspace about exceeded quota
+ * @qid: The kernel internal quota identifier.
+ * @dev: The device on which the fs is mounted (sb->s_dev)
+ * @warntype: The type of the warning: QUOTA_NL_...
+ *
+ * This can be used by filesystems (including those which don't use
+ * dquot) to send a message to userspace relating to quota limits.
+ *
+ */
+
+void quota_send_warning(struct kqid qid, dev_t dev,
+ const char warntype)
+{
+ static atomic_t seq;
+ struct sk_buff *skb;
+ void *msg_head;
+ int ret;
+ int msg_size = 4 * nla_total_size(sizeof(u32)) +
+ 2 * nla_total_size_64bit(sizeof(u64));
+
+ /* We have to allocate using GFP_NOFS as we are called from a
+ * filesystem performing write and thus further recursion into
+ * the fs to free some data could cause deadlocks. */
+ skb = genlmsg_new(msg_size, GFP_NOFS);
+ if (!skb) {
+ printk(KERN_ERR
+ "VFS: Not enough memory to send quota warning.\n");
+ return;
+ }
+ msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
+ &quota_genl_family, 0, QUOTA_NL_C_WARNING);
+ if (!msg_head) {
+ printk(KERN_ERR
+ "VFS: Cannot store netlink header in quota warning.\n");
+ goto err_out;
+ }
+ ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type);
+ if (ret)
+ goto attr_err_out;
+ ret = nla_put_u64_64bit(skb, QUOTA_NL_A_EXCESS_ID,
+ from_kqid_munged(&init_user_ns, qid),
+ QUOTA_NL_A_PAD);
+ if (ret)
+ goto attr_err_out;
+ ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
+ if (ret)
+ goto attr_err_out;
+ ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
+ if (ret)
+ goto attr_err_out;
+ ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
+ if (ret)
+ goto attr_err_out;
+ ret = nla_put_u64_64bit(skb, QUOTA_NL_A_CAUSED_ID,
+ from_kuid_munged(&init_user_ns, current_uid()),
+ QUOTA_NL_A_PAD);
+ if (ret)
+ goto attr_err_out;
+ genlmsg_end(skb, msg_head);
+
+ genlmsg_multicast(&quota_genl_family, skb, 0, 0, GFP_NOFS);
+ return;
+attr_err_out:
+ printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
+err_out:
+ kfree_skb(skb);
+}
+EXPORT_SYMBOL(quota_send_warning);
+
+static int __init quota_init(void)
+{
+ if (genl_register_family(&quota_genl_family) != 0)
+ printk(KERN_ERR
+ "VFS: Failed to create quota netlink interface.\n");
+ return 0;
+};
+fs_initcall(quota_init);
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
new file mode 100644
index 000000000..9af95c7a0
--- /dev/null
+++ b/fs/quota/quota.c
@@ -0,0 +1,954 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Quota code necessary even when VFS quota support is not compiled
+ * into the kernel. The interesting stuff is over in dquot.c; here
+ * we have symbols for initial quotactl(2) handling, the sysctl(2)
+ * variables, etc. - things needed even when quota support is disabled.
+ */
+
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <asm/current.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <linux/capability.h>
+#include <linux/quotaops.h>
+#include <linux/types.h>
+#include <linux/writeback.h>
+#include <linux/nospec.h>
+#include "compat.h"
+
+static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
+ qid_t id)
+{
+ switch (cmd) {
+ /* these commands do not require any special privilegues */
+ case Q_GETFMT:
+ case Q_SYNC:
+ case Q_GETINFO:
+ case Q_XGETQSTAT:
+ case Q_XGETQSTATV:
+ case Q_XQUOTASYNC:
+ break;
+ /* allow to query information for dquots we "own" */
+ case Q_GETQUOTA:
+ case Q_XGETQUOTA:
+ if ((type == USRQUOTA && uid_eq(current_euid(), make_kuid(current_user_ns(), id))) ||
+ (type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), id))))
+ break;
+ fallthrough;
+ default:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ }
+
+ return security_quotactl(cmd, type, id, sb);
+}
+
+static void quota_sync_one(struct super_block *sb, void *arg)
+{
+ int type = *(int *)arg;
+
+ if (sb->s_qcop && sb->s_qcop->quota_sync &&
+ (sb->s_quota_types & (1 << type)))
+ sb->s_qcop->quota_sync(sb, type);
+}
+
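+/* Sync quota files of the given type on all filesystems that support it */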
+static int quota_sync_all(int type)
+{
+ int ret;
+
+ ret = security_quotactl(Q_SYNC, type, 0, NULL);
+ if (!ret)
+ iterate_supers(quota_sync_one, &type);
+ return ret;
+}
+
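+/* Map a quota type to the corresponding XFS-style enforcement flag */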
+unsigned int qtype_enforce_flag(int type)
+{
+ switch (type) {
+ case USRQUOTA:
+ return FS_QUOTA_UDQ_ENFD;
+ case GRPQUOTA:
+ return FS_QUOTA_GDQ_ENFD;
+ case PRJQUOTA:
+ return FS_QUOTA_PDQ_ENFD;
+ }
+ return 0;
+}
+
+static int quota_quotaon(struct super_block *sb, int type, qid_t id,
+ const struct path *path)
+{
+ if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
+ return -ENOSYS;
+ if (sb->s_qcop->quota_enable)
+ return sb->s_qcop->quota_enable(sb, qtype_enforce_flag(type));
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ return sb->s_qcop->quota_on(sb, type, id, path);
+}
+
+static int quota_quotaoff(struct super_block *sb, int type)
+{
+ if (!sb->s_qcop->quota_off && !sb->s_qcop->quota_disable)
+ return -ENOSYS;
+ if (sb->s_qcop->quota_disable)
+ return sb->s_qcop->quota_disable(sb, qtype_enforce_flag(type));
+ return sb->s_qcop->quota_off(sb, type);
+}
+
+static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
+{
+ __u32 fmt;
+
+ if (!sb_has_quota_active(sb, type))
+ return -ESRCH;
+ fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
+ if (copy_to_user(addr, &fmt, sizeof(fmt)))
+ return -EFAULT;
+ return 0;
+}
+
+static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
+{
+ struct qc_state state;
+ struct qc_type_state *tstate;
+ struct if_dqinfo uinfo;
+ int ret;
+
+ if (!sb->s_qcop->get_state)
+ return -ENOSYS;
+ ret = sb->s_qcop->get_state(sb, &state);
+ if (ret)
+ return ret;
+ tstate = state.s_state + type;
+ if (!(tstate->flags & QCI_ACCT_ENABLED))
+ return -ESRCH;
+ memset(&uinfo, 0, sizeof(uinfo));
+ uinfo.dqi_bgrace = tstate->spc_timelimit;
+ uinfo.dqi_igrace = tstate->ino_timelimit;
+ if (tstate->flags & QCI_SYSFILE)
+ uinfo.dqi_flags |= DQF_SYS_FILE;
+ if (tstate->flags & QCI_ROOT_SQUASH)
+ uinfo.dqi_flags |= DQF_ROOT_SQUASH;
+ uinfo.dqi_valid = IIF_ALL;
+ if (copy_to_user(addr, &uinfo, sizeof(uinfo)))
+ return -EFAULT;
+ return 0;
+}
+
+static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
+{
+ struct if_dqinfo info;
+ struct qc_info qinfo;
+
+ if (copy_from_user(&info, addr, sizeof(info)))
+ return -EFAULT;
+ if (!sb->s_qcop->set_info)
+ return -ENOSYS;
+ if (info.dqi_valid & ~(IIF_FLAGS | IIF_BGRACE | IIF_IGRACE))
+ return -EINVAL;
+ memset(&qinfo, 0, sizeof(qinfo));
+ if (info.dqi_valid & IIF_FLAGS) {
+ if (info.dqi_flags & ~DQF_SETINFO_MASK)
+ return -EINVAL;
+ if (info.dqi_flags & DQF_ROOT_SQUASH)
+ qinfo.i_flags |= QCI_ROOT_SQUASH;
+ qinfo.i_fieldmask |= QC_FLAGS;
+ }
+ if (info.dqi_valid & IIF_BGRACE) {
+ qinfo.i_spc_timelimit = info.dqi_bgrace;
+ qinfo.i_fieldmask |= QC_SPC_TIMER;
+ }
+ if (info.dqi_valid & IIF_IGRACE) {
+ qinfo.i_ino_timelimit = info.dqi_igrace;
+ qinfo.i_fieldmask |= QC_INO_TIMER;
+ }
+ return sb->s_qcop->set_info(sb, type, &qinfo);
+}
+
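+/*
+ * Helpers converting between quota blocks (QIF_DQBLKSIZE bytes each) and
+ * bytes, as used by the if_dqblk interface below.
+ */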
+static inline qsize_t qbtos(qsize_t blocks)
+{
+ return blocks << QIF_DQBLKSIZE_BITS;
+}
+
+static inline qsize_t stoqb(qsize_t space)
+{
+ return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+}
+
+static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
+{
+ memset(dst, 0, sizeof(*dst));
+ dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
+ dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
+ dst->dqb_curspace = src->d_space;
+ dst->dqb_ihardlimit = src->d_ino_hardlimit;
+ dst->dqb_isoftlimit = src->d_ino_softlimit;
+ dst->dqb_curinodes = src->d_ino_count;
+ dst->dqb_btime = src->d_spc_timer;
+ dst->dqb_itime = src->d_ino_timer;
+ dst->dqb_valid = QIF_ALL;
+}
+
+static int quota_getquota(struct super_block *sb, int type, qid_t id,
+ void __user *addr)
+{
+ struct kqid qid;
+ struct qc_dqblk fdq;
+ struct if_dqblk idq;
+ int ret;
+
+ if (!sb->s_qcop->get_dqblk)
+ return -ENOSYS;
+ qid = make_kqid(current_user_ns(), type, id);
+ if (!qid_has_mapping(sb->s_user_ns, qid))
+ return -EINVAL;
+ ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
+ if (ret)
+ return ret;
+ copy_to_if_dqblk(&idq, &fdq);
+
+ if (compat_need_64bit_alignment_fixup()) {
+ struct compat_if_dqblk __user *compat_dqblk = addr;
+
+ if (copy_to_user(compat_dqblk, &idq, sizeof(*compat_dqblk)))
+ return -EFAULT;
+ if (put_user(idq.dqb_valid, &compat_dqblk->dqb_valid))
+ return -EFAULT;
+ } else {
+ if (copy_to_user(addr, &idq, sizeof(idq)))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/*
+ * Return quota for the next active quota >= this id, if any exists;
+ * otherwise return -ENOENT via ->get_nextdqblk
+ */
+static int quota_getnextquota(struct super_block *sb, int type, qid_t id,
+ void __user *addr)
+{
+ struct kqid qid;
+ struct qc_dqblk fdq;
+ struct if_nextdqblk idq;
+ int ret;
+
+ if (!sb->s_qcop->get_nextdqblk)
+ return -ENOSYS;
+ qid = make_kqid(current_user_ns(), type, id);
+ if (!qid_has_mapping(sb->s_user_ns, qid))
+ return -EINVAL;
+ ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq);
+ if (ret)
+ return ret;
+ /* struct if_nextdqblk is a superset of struct if_dqblk */
+ copy_to_if_dqblk((struct if_dqblk *)&idq, &fdq);
+ idq.dqb_id = from_kqid(current_user_ns(), qid);
+ if (copy_to_user(addr, &idq, sizeof(idq)))
+ return -EFAULT;
+ return 0;
+}
+
+static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
+{
+ dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
+ dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
+ dst->d_space = src->dqb_curspace;
+ dst->d_ino_hardlimit = src->dqb_ihardlimit;
+ dst->d_ino_softlimit = src->dqb_isoftlimit;
+ dst->d_ino_count = src->dqb_curinodes;
+ dst->d_spc_timer = src->dqb_btime;
+ dst->d_ino_timer = src->dqb_itime;
+
+ dst->d_fieldmask = 0;
+ if (src->dqb_valid & QIF_BLIMITS)
+ dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
+ if (src->dqb_valid & QIF_SPACE)
+ dst->d_fieldmask |= QC_SPACE;
+ if (src->dqb_valid & QIF_ILIMITS)
+ dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
+ if (src->dqb_valid & QIF_INODES)
+ dst->d_fieldmask |= QC_INO_COUNT;
+ if (src->dqb_valid & QIF_BTIME)
+ dst->d_fieldmask |= QC_SPC_TIMER;
+ if (src->dqb_valid & QIF_ITIME)
+ dst->d_fieldmask |= QC_INO_TIMER;
+}
+
+static int quota_setquota(struct super_block *sb, int type, qid_t id,
+ void __user *addr)
+{
+ struct qc_dqblk fdq;
+ struct if_dqblk idq;
+ struct kqid qid;
+
+ if (compat_need_64bit_alignment_fixup()) {
+ struct compat_if_dqblk __user *compat_dqblk = addr;
+
+ if (copy_from_user(&idq, compat_dqblk, sizeof(*compat_dqblk)) ||
+ get_user(idq.dqb_valid, &compat_dqblk->dqb_valid))
+ return -EFAULT;
+ } else {
+ if (copy_from_user(&idq, addr, sizeof(idq)))
+ return -EFAULT;
+ }
+ if (!sb->s_qcop->set_dqblk)
+ return -ENOSYS;
+ qid = make_kqid(current_user_ns(), type, id);
+ if (!qid_has_mapping(sb->s_user_ns, qid))
+ return -EINVAL;
+ copy_from_if_dqblk(&fdq, &idq);
+ return sb->s_qcop->set_dqblk(sb, qid, &fdq);
+}
+
+static int quota_enable(struct super_block *sb, void __user *addr)
+{
+ __u32 flags;
+
+ if (copy_from_user(&flags, addr, sizeof(flags)))
+ return -EFAULT;
+ if (!sb->s_qcop->quota_enable)
+ return -ENOSYS;
+ return sb->s_qcop->quota_enable(sb, flags);
+}
+
+static int quota_disable(struct super_block *sb, void __user *addr)
+{
+ __u32 flags;
+
+ if (copy_from_user(&flags, addr, sizeof(flags)))
+ return -EFAULT;
+ if (!sb->s_qcop->quota_disable)
+ return -ENOSYS;
+ return sb->s_qcop->quota_disable(sb, flags);
+}
+
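+/* Translate generic qc_state flags into XFS-style FS_QUOTA_* flags */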
+static int quota_state_to_flags(struct qc_state *state)
+{
+ int flags = 0;
+
+ if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED)
+ flags |= FS_QUOTA_UDQ_ACCT;
+ if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED)
+ flags |= FS_QUOTA_UDQ_ENFD;
+ if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)
+ flags |= FS_QUOTA_GDQ_ACCT;
+ if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED)
+ flags |= FS_QUOTA_GDQ_ENFD;
+ if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED)
+ flags |= FS_QUOTA_PDQ_ACCT;
+ if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED)
+ flags |= FS_QUOTA_PDQ_ENFD;
+ return flags;
+}
+
+static int quota_getstate(struct super_block *sb, int type,
+ struct fs_quota_stat *fqs)
+{
+ struct qc_state state;
+ int ret;
+
+ memset(&state, 0, sizeof (struct qc_state));
+ ret = sb->s_qcop->get_state(sb, &state);
+ if (ret < 0)
+ return ret;
+
+ memset(fqs, 0, sizeof(*fqs));
+ fqs->qs_version = FS_QSTAT_VERSION;
+ fqs->qs_flags = quota_state_to_flags(&state);
+ /* No quota enabled? */
+ if (!fqs->qs_flags)
+ return -ENOSYS;
+ fqs->qs_incoredqs = state.s_incoredqs;
+
+ fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
+ fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
+ fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
+ fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
+ fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;
+
+ /* Inodes may be allocated even if inactive; copy out if present */
+ if (state.s_state[USRQUOTA].ino) {
+ fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
+ fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
+ fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
+ }
+ if (state.s_state[GRPQUOTA].ino) {
+ fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
+ fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
+ fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
+ }
+ if (state.s_state[PRJQUOTA].ino) {
+ /*
+ * Q_XGETQSTAT doesn't have room for both group and project
+ * quotas. So, allow the project quota values to be copied out
+ * only if there is no group quota information available.
+ */
+ if (!(state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED)) {
+ fqs->qs_gquota.qfs_ino = state.s_state[PRJQUOTA].ino;
+ fqs->qs_gquota.qfs_nblks =
+ state.s_state[PRJQUOTA].blocks;
+ fqs->qs_gquota.qfs_nextents =
+ state.s_state[PRJQUOTA].nextents;
+ }
+ }
+ return 0;
+}
+
+static int compat_copy_fs_qfilestat(struct compat_fs_qfilestat __user *to,
+ struct fs_qfilestat *from)
+{
+ if (copy_to_user(to, from, sizeof(*to)) ||
+ put_user(from->qfs_nextents, &to->qfs_nextents))
+ return -EFAULT;
+ return 0;
+}
+
+static int compat_copy_fs_quota_stat(struct compat_fs_quota_stat __user *to,
+ struct fs_quota_stat *from)
+{
+ if (put_user(from->qs_version, &to->qs_version) ||
+ put_user(from->qs_flags, &to->qs_flags) ||
+ put_user(from->qs_pad, &to->qs_pad) ||
+ compat_copy_fs_qfilestat(&to->qs_uquota, &from->qs_uquota) ||
+ compat_copy_fs_qfilestat(&to->qs_gquota, &from->qs_gquota) ||
+ put_user(from->qs_incoredqs, &to->qs_incoredqs) ||
+ put_user(from->qs_btimelimit, &to->qs_btimelimit) ||
+ put_user(from->qs_itimelimit, &to->qs_itimelimit) ||
+ put_user(from->qs_rtbtimelimit, &to->qs_rtbtimelimit) ||
+ put_user(from->qs_bwarnlimit, &to->qs_bwarnlimit) ||
+ put_user(from->qs_iwarnlimit, &to->qs_iwarnlimit))
+ return -EFAULT;
+ return 0;
+}
+
+static int quota_getxstate(struct super_block *sb, int type, void __user *addr)
+{
+ struct fs_quota_stat fqs;
+ int ret;
+
+ if (!sb->s_qcop->get_state)
+ return -ENOSYS;
+ ret = quota_getstate(sb, type, &fqs);
+ if (ret)
+ return ret;
+
+ if (compat_need_64bit_alignment_fixup())
+ return compat_copy_fs_quota_stat(addr, &fqs);
+ if (copy_to_user(addr, &fqs, sizeof(fqs)))
+ return -EFAULT;
+ return 0;
+}
+
+static int quota_getstatev(struct super_block *sb, int type,
+ struct fs_quota_statv *fqs)
+{
+ struct qc_state state;
+ int ret;
+
+ memset(&state, 0, sizeof (struct qc_state));
+ ret = sb->s_qcop->get_state(sb, &state);
+ if (ret < 0)
+ return ret;
+
+ memset(fqs, 0, sizeof(*fqs));
+ fqs->qs_version = FS_QSTAT_VERSION;
+ fqs->qs_flags = quota_state_to_flags(&state);
+ /* No quota enabled? */
+ if (!fqs->qs_flags)
+ return -ENOSYS;
+ fqs->qs_incoredqs = state.s_incoredqs;
+
+ fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
+ fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
+ fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
+ fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
+ fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;
+
+ /* Inodes may be allocated even if inactive; copy out if present */
+ if (state.s_state[USRQUOTA].ino) {
+ fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
+ fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
+ fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
+ }
+ if (state.s_state[GRPQUOTA].ino) {
+ fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
+ fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
+ fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
+ }
+ if (state.s_state[PRJQUOTA].ino) {
+ fqs->qs_pquota.qfs_ino = state.s_state[PRJQUOTA].ino;
+ fqs->qs_pquota.qfs_nblks = state.s_state[PRJQUOTA].blocks;
+ fqs->qs_pquota.qfs_nextents = state.s_state[PRJQUOTA].nextents;
+ }
+ return 0;
+}
+
+static int quota_getxstatev(struct super_block *sb, int type, void __user *addr)
+{
+ struct fs_quota_statv fqs;
+ int ret;
+
+ if (!sb->s_qcop->get_state)
+ return -ENOSYS;
+
+ memset(&fqs, 0, sizeof(fqs));
+ if (copy_from_user(&fqs, addr, 1)) /* Just read qs_version */
+ return -EFAULT;
+
+ /* If this kernel doesn't support user specified version, fail */
+ switch (fqs.qs_version) {
+ case FS_QSTATV_VERSION1:
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = quota_getstatev(sb, type, &fqs);
+ if (!ret && copy_to_user(addr, &fqs, sizeof(fqs)))
+ return -EFAULT;
+ return ret;
+}
+
+/*
+ * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
+ * out of there as xfsprogs relies on the definitions being in that header
+ * file. So just define the same functions here for quota purposes.
+ */
+#define XFS_BB_SHIFT 9
+
+static inline u64 quota_bbtob(u64 blocks)
+{
+ return blocks << XFS_BB_SHIFT;
+}
+
+static inline u64 quota_btobb(u64 bytes)
+{
+ return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
+}
+
+static inline s64 copy_from_xfs_dqblk_ts(const struct fs_disk_quota *d,
+ __s32 timer, __s8 timer_hi)
+{
+ if (d->d_fieldmask & FS_DQ_BIGTIME)
+ return (u32)timer | (s64)timer_hi << 32;
+ return timer;
+}
+
+static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
+{
+ dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
+ dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
+ dst->d_ino_hardlimit = src->d_ino_hardlimit;
+ dst->d_ino_softlimit = src->d_ino_softlimit;
+ dst->d_space = quota_bbtob(src->d_bcount);
+ dst->d_ino_count = src->d_icount;
+ dst->d_ino_timer = copy_from_xfs_dqblk_ts(src, src->d_itimer,
+ src->d_itimer_hi);
+ dst->d_spc_timer = copy_from_xfs_dqblk_ts(src, src->d_btimer,
+ src->d_btimer_hi);
+ dst->d_ino_warns = src->d_iwarns;
+ dst->d_spc_warns = src->d_bwarns;
+ dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
+ dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
+ dst->d_rt_space = quota_bbtob(src->d_rtbcount);
+ dst->d_rt_spc_timer = copy_from_xfs_dqblk_ts(src, src->d_rtbtimer,
+ src->d_rtbtimer_hi);
+ dst->d_rt_spc_warns = src->d_rtbwarns;
+ dst->d_fieldmask = 0;
+ if (src->d_fieldmask & FS_DQ_ISOFT)
+ dst->d_fieldmask |= QC_INO_SOFT;
+ if (src->d_fieldmask & FS_DQ_IHARD)
+ dst->d_fieldmask |= QC_INO_HARD;
+ if (src->d_fieldmask & FS_DQ_BSOFT)
+ dst->d_fieldmask |= QC_SPC_SOFT;
+ if (src->d_fieldmask & FS_DQ_BHARD)
+ dst->d_fieldmask |= QC_SPC_HARD;
+ if (src->d_fieldmask & FS_DQ_RTBSOFT)
+ dst->d_fieldmask |= QC_RT_SPC_SOFT;
+ if (src->d_fieldmask & FS_DQ_RTBHARD)
+ dst->d_fieldmask |= QC_RT_SPC_HARD;
+ if (src->d_fieldmask & FS_DQ_BTIMER)
+ dst->d_fieldmask |= QC_SPC_TIMER;
+ if (src->d_fieldmask & FS_DQ_ITIMER)
+ dst->d_fieldmask |= QC_INO_TIMER;
+ if (src->d_fieldmask & FS_DQ_RTBTIMER)
+ dst->d_fieldmask |= QC_RT_SPC_TIMER;
+ if (src->d_fieldmask & FS_DQ_BWARNS)
+ dst->d_fieldmask |= QC_SPC_WARNS;
+ if (src->d_fieldmask & FS_DQ_IWARNS)
+ dst->d_fieldmask |= QC_INO_WARNS;
+ if (src->d_fieldmask & FS_DQ_RTBWARNS)
+ dst->d_fieldmask |= QC_RT_SPC_WARNS;
+ if (src->d_fieldmask & FS_DQ_BCOUNT)
+ dst->d_fieldmask |= QC_SPACE;
+ if (src->d_fieldmask & FS_DQ_ICOUNT)
+ dst->d_fieldmask |= QC_INO_COUNT;
+ if (src->d_fieldmask & FS_DQ_RTBCOUNT)
+ dst->d_fieldmask |= QC_RT_SPACE;
+}
+
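+/*
+ * In the XFS interface, timer and warning limits set for id 0 act as
+ * filesystem-wide defaults, so they are passed to ->set_info via qc_info
+ * instead of being treated as per-id limits.
+ */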
+static void copy_qcinfo_from_xfs_dqblk(struct qc_info *dst,
+ struct fs_disk_quota *src)
+{
+ memset(dst, 0, sizeof(*dst));
+ dst->i_spc_timelimit = src->d_btimer;
+ dst->i_ino_timelimit = src->d_itimer;
+ dst->i_rt_spc_timelimit = src->d_rtbtimer;
+ dst->i_ino_warnlimit = src->d_iwarns;
+ dst->i_spc_warnlimit = src->d_bwarns;
+ dst->i_rt_spc_warnlimit = src->d_rtbwarns;
+ if (src->d_fieldmask & FS_DQ_BWARNS)
+ dst->i_fieldmask |= QC_SPC_WARNS;
+ if (src->d_fieldmask & FS_DQ_IWARNS)
+ dst->i_fieldmask |= QC_INO_WARNS;
+ if (src->d_fieldmask & FS_DQ_RTBWARNS)
+ dst->i_fieldmask |= QC_RT_SPC_WARNS;
+ if (src->d_fieldmask & FS_DQ_BTIMER)
+ dst->i_fieldmask |= QC_SPC_TIMER;
+ if (src->d_fieldmask & FS_DQ_ITIMER)
+ dst->i_fieldmask |= QC_INO_TIMER;
+ if (src->d_fieldmask & FS_DQ_RTBTIMER)
+ dst->i_fieldmask |= QC_RT_SPC_TIMER;
+}
+
+static int quota_setxquota(struct super_block *sb, int type, qid_t id,
+ void __user *addr)
+{
+ struct fs_disk_quota fdq;
+ struct qc_dqblk qdq;
+ struct kqid qid;
+
+ if (copy_from_user(&fdq, addr, sizeof(fdq)))
+ return -EFAULT;
+ if (!sb->s_qcop->set_dqblk)
+ return -ENOSYS;
+ qid = make_kqid(current_user_ns(), type, id);
+ if (!qid_has_mapping(sb->s_user_ns, qid))
+ return -EINVAL;
+ /* Are we actually setting timer / warning limits for all users? */
+ if (from_kqid(sb->s_user_ns, qid) == 0 &&
+ fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) {
+ struct qc_info qinfo;
+ int ret;
+
+ if (!sb->s_qcop->set_info)
+ return -EINVAL;
+ copy_qcinfo_from_xfs_dqblk(&qinfo, &fdq);
+ ret = sb->s_qcop->set_info(sb, type, &qinfo);
+ if (ret)
+ return ret;
+ /* These are already done */
+ fdq.d_fieldmask &= ~(FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK);
+ }
+ copy_from_xfs_dqblk(&qdq, &fdq);
+ return sb->s_qcop->set_dqblk(sb, qid, &qdq);
+}
+
+static inline void copy_to_xfs_dqblk_ts(const struct fs_disk_quota *d,
+ __s32 *timer_lo, __s8 *timer_hi, s64 timer)
+{
+ *timer_lo = timer;
+ if (d->d_fieldmask & FS_DQ_BIGTIME)
+ *timer_hi = timer >> 32;
+}
+
+static inline bool want_bigtime(s64 timer)
+{
+ return timer > S32_MAX || timer < S32_MIN;
+}
+
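+/*
+ * Reverse direction: fill an fs_disk_quota from the generic qc_dqblk,
+ * enabling FS_DQ_BIGTIME whenever a timer does not fit into 32 bits.
+ */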
+static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
+ int type, qid_t id)
+{
+ memset(dst, 0, sizeof(*dst));
+ if (want_bigtime(src->d_ino_timer) || want_bigtime(src->d_spc_timer) ||
+ want_bigtime(src->d_rt_spc_timer))
+ dst->d_fieldmask |= FS_DQ_BIGTIME;
+ dst->d_version = FS_DQUOT_VERSION;
+ dst->d_id = id;
+ if (type == USRQUOTA)
+ dst->d_flags = FS_USER_QUOTA;
+ else if (type == PRJQUOTA)
+ dst->d_flags = FS_PROJ_QUOTA;
+ else
+ dst->d_flags = FS_GROUP_QUOTA;
+ dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
+ dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
+ dst->d_ino_hardlimit = src->d_ino_hardlimit;
+ dst->d_ino_softlimit = src->d_ino_softlimit;
+ dst->d_bcount = quota_btobb(src->d_space);
+ dst->d_icount = src->d_ino_count;
+ copy_to_xfs_dqblk_ts(dst, &dst->d_itimer, &dst->d_itimer_hi,
+ src->d_ino_timer);
+ copy_to_xfs_dqblk_ts(dst, &dst->d_btimer, &dst->d_btimer_hi,
+ src->d_spc_timer);
+ dst->d_iwarns = src->d_ino_warns;
+ dst->d_bwarns = src->d_spc_warns;
+ dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
+ dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
+ dst->d_rtbcount = quota_btobb(src->d_rt_space);
+ copy_to_xfs_dqblk_ts(dst, &dst->d_rtbtimer, &dst->d_rtbtimer_hi,
+ src->d_rt_spc_timer);
+ dst->d_rtbwarns = src->d_rt_spc_warns;
+}
+
+static int quota_getxquota(struct super_block *sb, int type, qid_t id,
+ void __user *addr)
+{
+ struct fs_disk_quota fdq;
+ struct qc_dqblk qdq;
+ struct kqid qid;
+ int ret;
+
+ if (!sb->s_qcop->get_dqblk)
+ return -ENOSYS;
+ qid = make_kqid(current_user_ns(), type, id);
+ if (!qid_has_mapping(sb->s_user_ns, qid))
+ return -EINVAL;
+ ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
+ if (ret)
+ return ret;
+ copy_to_xfs_dqblk(&fdq, &qdq, type, id);
+ if (copy_to_user(addr, &fdq, sizeof(fdq)))
+ return -EFAULT;
+ return ret;
+}
+
+/*
+ * Return quota for next active quota >= this id, if any exists,
+ * otherwise return -ENOENT via ->get_nextdqblk.
+ */
+static int quota_getnextxquota(struct super_block *sb, int type, qid_t id,
+ void __user *addr)
+{
+ struct fs_disk_quota fdq;
+ struct qc_dqblk qdq;
+ struct kqid qid;
+ qid_t id_out;
+ int ret;
+
+ if (!sb->s_qcop->get_nextdqblk)
+ return -ENOSYS;
+ qid = make_kqid(current_user_ns(), type, id);
+ if (!qid_has_mapping(sb->s_user_ns, qid))
+ return -EINVAL;
+ ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq);
+ if (ret)
+ return ret;
+ id_out = from_kqid(current_user_ns(), qid);
+ copy_to_xfs_dqblk(&fdq, &qdq, type, id_out);
+ if (copy_to_user(addr, &fdq, sizeof(fdq)))
+ return -EFAULT;
+ return ret;
+}
+
+static int quota_rmxquota(struct super_block *sb, void __user *addr)
+{
+ __u32 flags;
+
+ if (copy_from_user(&flags, addr, sizeof(flags)))
+ return -EFAULT;
+ if (!sb->s_qcop->rm_xquota)
+ return -ENOSYS;
+ return sb->s_qcop->rm_xquota(sb, flags);
+}
+
+/* Copy parameters and call proper function */
+static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
+ void __user *addr, const struct path *path)
+{
+ int ret;
+
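+	/* Clamp 'type' under speculation (Spectre v1) before using it as an index */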
+ type = array_index_nospec(type, MAXQUOTAS);
+ /*
+ * Quota not supported on this fs? Check this before s_quota_types
+ * since they needn't be set if quota is not supported at all.
+ */
+ if (!sb->s_qcop)
+ return -ENOSYS;
+ if (!(sb->s_quota_types & (1 << type)))
+ return -EINVAL;
+
+ ret = check_quotactl_permission(sb, type, cmd, id);
+ if (ret < 0)
+ return ret;
+
+ switch (cmd) {
+ case Q_QUOTAON:
+ return quota_quotaon(sb, type, id, path);
+ case Q_QUOTAOFF:
+ return quota_quotaoff(sb, type);
+ case Q_GETFMT:
+ return quota_getfmt(sb, type, addr);
+ case Q_GETINFO:
+ return quota_getinfo(sb, type, addr);
+ case Q_SETINFO:
+ return quota_setinfo(sb, type, addr);
+ case Q_GETQUOTA:
+ return quota_getquota(sb, type, id, addr);
+ case Q_GETNEXTQUOTA:
+ return quota_getnextquota(sb, type, id, addr);
+ case Q_SETQUOTA:
+ return quota_setquota(sb, type, id, addr);
+ case Q_SYNC:
+ if (!sb->s_qcop->quota_sync)
+ return -ENOSYS;
+ return sb->s_qcop->quota_sync(sb, type);
+ case Q_XQUOTAON:
+ return quota_enable(sb, addr);
+ case Q_XQUOTAOFF:
+ return quota_disable(sb, addr);
+ case Q_XQUOTARM:
+ return quota_rmxquota(sb, addr);
+ case Q_XGETQSTAT:
+ return quota_getxstate(sb, type, addr);
+ case Q_XGETQSTATV:
+ return quota_getxstatev(sb, type, addr);
+ case Q_XSETQLIM:
+ return quota_setxquota(sb, type, id, addr);
+ case Q_XGETQUOTA:
+ return quota_getxquota(sb, type, id, addr);
+ case Q_XGETNEXTQUOTA:
+ return quota_getnextxquota(sb, type, id, addr);
+ case Q_XQUOTASYNC:
+ if (sb_rdonly(sb))
+ return -EROFS;
+ /* XFS quotas are fully coherent now, making this call a noop */
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+#ifdef CONFIG_BLOCK
+
+/* Return 1 if 'cmd' will block on frozen filesystem */
+static int quotactl_cmd_write(int cmd)
+{
+ /*
+ * We cannot allow Q_GETQUOTA and Q_GETNEXTQUOTA without write access
+	 * as dquot_acquire() may allocate space for a new structure and OCFS2
+	 * needs to increment the on-disk use count.
+ */
+ switch (cmd) {
+ case Q_GETFMT:
+ case Q_GETINFO:
+ case Q_SYNC:
+ case Q_XGETQSTAT:
+ case Q_XGETQSTATV:
+ case Q_XGETQUOTA:
+ case Q_XGETNEXTQUOTA:
+ case Q_XQUOTASYNC:
+ return 0;
+ }
+ return 1;
+}
+#endif /* CONFIG_BLOCK */
+
+/* Return true if quotactl command is manipulating quota on/off state */
+static bool quotactl_cmd_onoff(int cmd)
+{
+ return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF) ||
+ (cmd == Q_XQUOTAON) || (cmd == Q_XQUOTAOFF);
+}
+
+/*
+ * look up a superblock on which quota ops will be performed
+ * - use the name of a block device to find the superblock thereon
+ */
+static struct super_block *quotactl_block(const char __user *special, int cmd)
+{
+#ifdef CONFIG_BLOCK
+ struct block_device *bdev;
+ struct super_block *sb;
+ struct filename *tmp = getname(special);
+
+ if (IS_ERR(tmp))
+ return ERR_CAST(tmp);
+ bdev = lookup_bdev(tmp->name);
+ putname(tmp);
+ if (IS_ERR(bdev))
+ return ERR_CAST(bdev);
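+	/*
+	 * Quota on/off needs the superblock exclusively, commands that may
+	 * write to the quota files wait until the filesystem is thawed, and
+	 * the remaining commands can use any active superblock.
+	 */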
+ if (quotactl_cmd_onoff(cmd))
+ sb = get_super_exclusive_thawed(bdev);
+ else if (quotactl_cmd_write(cmd))
+ sb = get_super_thawed(bdev);
+ else
+ sb = get_super(bdev);
+ bdput(bdev);
+ if (!sb)
+ return ERR_PTR(-ENODEV);
+
+ return sb;
+#else
+ return ERR_PTR(-ENODEV);
+#endif
+}
+
+/*
+ * This is the system call interface. This communicates with
+ * the user-level programs. Currently this only supports diskquota
+ * calls. Maybe we need to add the process quotas etc. in the future,
+ * but we probably should use rlimits for that.
+ */
+SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
+ qid_t, id, void __user *, addr)
+{
+ uint cmds, type;
+ struct super_block *sb = NULL;
+ struct path path, *pathp = NULL;
+ int ret;
+
+ cmds = cmd >> SUBCMDSHIFT;
+ type = cmd & SUBCMDMASK;
+
+ if (type >= MAXQUOTAS)
+ return -EINVAL;
+
+ /*
+ * As a special case Q_SYNC can be called without a specific device.
+ * It will iterate all superblocks that have quota enabled and call
+ * the sync action on each of them.
+ */
+ if (!special) {
+ if (cmds == Q_SYNC)
+ return quota_sync_all(type);
+ return -ENODEV;
+ }
+
+ /*
+ * Path for quotaon has to be resolved before grabbing superblock
+ * because that gets s_umount sem which is also possibly needed by path
+ * resolution (think about autofs) and thus deadlocks could arise.
+ */
+ if (cmds == Q_QUOTAON) {
+ ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
+ if (ret)
+ pathp = ERR_PTR(ret);
+ else
+ pathp = &path;
+ }
+
+ sb = quotactl_block(special, cmds);
+ if (IS_ERR(sb)) {
+ ret = PTR_ERR(sb);
+ goto out;
+ }
+
+ ret = do_quotactl(sb, type, cmds, id, addr, pathp);
+
+ if (!quotactl_cmd_onoff(cmds))
+ drop_super(sb);
+ else
+ drop_super_exclusive(sb);
+out:
+ if (pathp && !IS_ERR(pathp))
+ path_put(pathp);
+ return ret;
+}
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
new file mode 100644
index 000000000..07948f6ac
--- /dev/null
+++ b/fs/quota/quota_tree.c
@@ -0,0 +1,787 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vfsv0 quota IO operations on file
+ */
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/dqblk_v2.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/quotaops.h>
+
+#include <asm/byteorder.h>
+
+#include "quota_tree.h"
+
+MODULE_AUTHOR("Jan Kara");
+MODULE_DESCRIPTION("Quota trie support");
+MODULE_LICENSE("GPL");
+
+#define __QUOTA_QT_PARANOIA
+
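+/*
+ * Each tree block holds dqi_usable_bs / 4 references, so epb is the number
+ * of entries per block; return the radix digit of 'id' for the given depth.
+ */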
+static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
+{
+ unsigned int epb = info->dqi_usable_bs >> 2;
+
+ depth = info->dqi_qtree_depth - depth - 1;
+ while (depth--)
+ id /= epb;
+ return id % epb;
+}
+
+static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
+{
+ qid_t id = from_kqid(&init_user_ns, qid);
+
+ return __get_index(info, id, depth);
+}
+
+/* Number of entries in one block */
+static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
+{
+ return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
+ / info->dqi_entry_size;
+}
+
+static char *getdqbuf(size_t size)
+{
+ char *buf = kmalloc(size, GFP_NOFS);
+ if (!buf)
+ printk(KERN_WARNING
+ "VFS: Not enough memory for quota buffers.\n");
+ return buf;
+}
+
+static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
+{
+ struct super_block *sb = info->dqi_sb;
+
+ memset(buf, 0, info->dqi_usable_bs);
+ return sb->s_op->quota_read(sb, info->dqi_type, buf,
+ info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
+}
+
+static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
+{
+ struct super_block *sb = info->dqi_sb;
+ ssize_t ret;
+
+ ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
+ info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
+ if (ret != info->dqi_usable_bs) {
+ quota_error(sb, "dquota write failed");
+ if (ret >= 0)
+ ret = -EIO;
+ }
+ return ret;
+}
+
+static inline int do_check_range(struct super_block *sb, const char *val_name,
+ uint val, uint min_val, uint max_val)
+{
+ if (val < min_val || val > max_val) {
+ quota_error(sb, "Getting %s %u out of range %u-%u",
+ val_name, val, min_val, max_val);
+ return -EUCLEAN;
+ }
+
+ return 0;
+}
+
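+/* Sanity check the free-list pointers read from disk before trusting them */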
+static int check_dquot_block_header(struct qtree_mem_dqinfo *info,
+ struct qt_disk_dqdbheader *dh)
+{
+ int err = 0;
+
+ err = do_check_range(info->dqi_sb, "dqdh_next_free",
+ le32_to_cpu(dh->dqdh_next_free), 0,
+ info->dqi_blocks - 1);
+ if (err)
+ return err;
+ err = do_check_range(info->dqi_sb, "dqdh_prev_free",
+ le32_to_cpu(dh->dqdh_prev_free), 0,
+ info->dqi_blocks - 1);
+
+ return err;
+}
+
+/* Remove empty block from list and return it */
+static int get_free_dqblk(struct qtree_mem_dqinfo *info)
+{
+ char *buf = getdqbuf(info->dqi_usable_bs);
+ struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
+ int ret, blk;
+
+ if (!buf)
+ return -ENOMEM;
+ if (info->dqi_free_blk) {
+ blk = info->dqi_free_blk;
+ ret = read_blk(info, blk, buf);
+ if (ret < 0)
+ goto out_buf;
+ ret = check_dquot_block_header(info, dh);
+ if (ret)
+ goto out_buf;
+ info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
+	} else {
+ memset(buf, 0, info->dqi_usable_bs);
+		/* Ensure block allocation... */
+ ret = write_blk(info, info->dqi_blocks, buf);
+ if (ret < 0)
+ goto out_buf;
+ blk = info->dqi_blocks++;
+ }
+ mark_info_dirty(info->dqi_sb, info->dqi_type);
+ ret = blk;
+out_buf:
+ kfree(buf);
+ return ret;
+}
+
+/* Insert empty block to the list */
+static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
+{
+ struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
+ int err;
+
+ dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
+ dh->dqdh_prev_free = cpu_to_le32(0);
+ dh->dqdh_entries = cpu_to_le16(0);
+ err = write_blk(info, blk, buf);
+ if (err < 0)
+ return err;
+ info->dqi_free_blk = blk;
+ mark_info_dirty(info->dqi_sb, info->dqi_type);
+ return 0;
+}
+
+/* Remove given block from the list of blocks with free entries */
+static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
+ uint blk)
+{
+ char *tmpbuf = getdqbuf(info->dqi_usable_bs);
+ struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
+ uint nextblk = le32_to_cpu(dh->dqdh_next_free);
+ uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
+ int err;
+
+ if (!tmpbuf)
+ return -ENOMEM;
+ if (nextblk) {
+ err = read_blk(info, nextblk, tmpbuf);
+ if (err < 0)
+ goto out_buf;
+ ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
+ dh->dqdh_prev_free;
+ err = write_blk(info, nextblk, tmpbuf);
+ if (err < 0)
+ goto out_buf;
+ }
+ if (prevblk) {
+ err = read_blk(info, prevblk, tmpbuf);
+ if (err < 0)
+ goto out_buf;
+ ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
+ dh->dqdh_next_free;
+ err = write_blk(info, prevblk, tmpbuf);
+ if (err < 0)
+ goto out_buf;
+ } else {
+ info->dqi_free_entry = nextblk;
+ mark_info_dirty(info->dqi_sb, info->dqi_type);
+ }
+ kfree(tmpbuf);
+ dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
+	/* No matter whether the write succeeds, the block is out of the list */
+ if (write_blk(info, blk, buf) < 0)
+ quota_error(info->dqi_sb, "Can't write block (%u) "
+ "with free entries", blk);
+ return 0;
+out_buf:
+ kfree(tmpbuf);
+ return err;
+}
+
+/* Insert given block to the beginning of list with free entries */
+static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
+ uint blk)
+{
+ char *tmpbuf = getdqbuf(info->dqi_usable_bs);
+ struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
+ int err;
+
+ if (!tmpbuf)
+ return -ENOMEM;
+ dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
+ dh->dqdh_prev_free = cpu_to_le32(0);
+ err = write_blk(info, blk, buf);
+ if (err < 0)
+ goto out_buf;
+ if (info->dqi_free_entry) {
+ err = read_blk(info, info->dqi_free_entry, tmpbuf);
+ if (err < 0)
+ goto out_buf;
+ ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
+ cpu_to_le32(blk);
+ err = write_blk(info, info->dqi_free_entry, tmpbuf);
+ if (err < 0)
+ goto out_buf;
+ }
+ kfree(tmpbuf);
+ info->dqi_free_entry = blk;
+ mark_info_dirty(info->dqi_sb, info->dqi_type);
+ return 0;
+out_buf:
+ kfree(tmpbuf);
+ return err;
+}
+
+/* Is the entry in the block free? */
+int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
+{
+ int i;
+
+ for (i = 0; i < info->dqi_entry_size; i++)
+ if (disk[i])
+ return 0;
+ return 1;
+}
+EXPORT_SYMBOL(qtree_entry_unused);
+
+/* Find space for dquot */
+static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
+ struct dquot *dquot, int *err)
+{
+ uint blk, i;
+ struct qt_disk_dqdbheader *dh;
+ char *buf = getdqbuf(info->dqi_usable_bs);
+ char *ddquot;
+
+ *err = 0;
+ if (!buf) {
+ *err = -ENOMEM;
+ return 0;
+ }
+ dh = (struct qt_disk_dqdbheader *)buf;
+ if (info->dqi_free_entry) {
+ blk = info->dqi_free_entry;
+ *err = read_blk(info, blk, buf);
+ if (*err < 0)
+ goto out_buf;
+ *err = check_dquot_block_header(info, dh);
+ if (*err)
+ goto out_buf;
+ } else {
+ blk = get_free_dqblk(info);
+ if ((int)blk < 0) {
+ *err = blk;
+ kfree(buf);
+ return 0;
+ }
+ memset(buf, 0, info->dqi_usable_bs);
+ /* This is enough as the block is already zeroed and the entry
+ * list is empty... */
+ info->dqi_free_entry = blk;
+ mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
+ }
+ /* Block will be full? */
+ if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
+ *err = remove_free_dqentry(info, buf, blk);
+ if (*err < 0) {
+ quota_error(dquot->dq_sb, "Can't remove block (%u) "
+ "from entry free list", blk);
+ goto out_buf;
+ }
+ }
+ le16_add_cpu(&dh->dqdh_entries, 1);
+ /* Find free structure in block */
+ ddquot = buf + sizeof(struct qt_disk_dqdbheader);
+ for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
+ if (qtree_entry_unused(info, ddquot))
+ break;
+ ddquot += info->dqi_entry_size;
+ }
+#ifdef __QUOTA_QT_PARANOIA
+ if (i == qtree_dqstr_in_blk(info)) {
+		quota_error(dquot->dq_sb, "Data block full but it shouldn't be");
+ *err = -EIO;
+ goto out_buf;
+ }
+#endif
+ *err = write_blk(info, blk, buf);
+ if (*err < 0) {
+ quota_error(dquot->dq_sb, "Can't write quota data block %u",
+ blk);
+ goto out_buf;
+ }
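+	/* Remember the file offset of the entry this dquot now occupies */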
+ dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
+ sizeof(struct qt_disk_dqdbheader) +
+ i * info->dqi_entry_size;
+ kfree(buf);
+ return blk;
+out_buf:
+ kfree(buf);
+ return 0;
+}
+
+/* Insert reference to structure into the trie */
+static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+ uint *treeblk, int depth)
+{
+ char *buf = getdqbuf(info->dqi_usable_bs);
+ int ret = 0, newson = 0, newact = 0;
+ __le32 *ref;
+ uint newblk;
+
+ if (!buf)
+ return -ENOMEM;
+ if (!*treeblk) {
+ ret = get_free_dqblk(info);
+ if (ret < 0)
+ goto out_buf;
+ *treeblk = ret;
+ memset(buf, 0, info->dqi_usable_bs);
+ newact = 1;
+ } else {
+ ret = read_blk(info, *treeblk, buf);
+ if (ret < 0) {
+ quota_error(dquot->dq_sb, "Can't read tree quota "
+ "block %u", *treeblk);
+ goto out_buf;
+ }
+ }
+ ref = (__le32 *)buf;
+ newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
+ if (!newblk)
+ newson = 1;
+ if (depth == info->dqi_qtree_depth - 1) {
+#ifdef __QUOTA_QT_PARANOIA
+ if (newblk) {
+ quota_error(dquot->dq_sb, "Inserting already present "
+ "quota entry (block %u)",
+ le32_to_cpu(ref[get_index(info,
+ dquot->dq_id, depth)]));
+ ret = -EIO;
+ goto out_buf;
+ }
+#endif
+ newblk = find_free_dqentry(info, dquot, &ret);
+ } else {
+ ret = do_insert_tree(info, dquot, &newblk, depth+1);
+ }
+ if (newson && ret >= 0) {
+ ref[get_index(info, dquot->dq_id, depth)] =
+ cpu_to_le32(newblk);
+ ret = write_blk(info, *treeblk, buf);
+ } else if (newact && ret < 0) {
+ put_free_dqblk(info, buf, *treeblk);
+ }
+out_buf:
+ kfree(buf);
+ return ret;
+}
+
+/* Wrapper for inserting quota structure into tree */
+static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
+ struct dquot *dquot)
+{
+ int tmp = QT_TREEOFF;
+
+#ifdef __QUOTA_QT_PARANOIA
+ if (info->dqi_blocks <= QT_TREEOFF) {
+ quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
+ return -EIO;
+ }
+#endif
+ return do_insert_tree(info, dquot, &tmp, 0);
+}
+
+/*
+ * We don't have to be afraid of deadlocks as we never have quotas on quota
+ * files...
+ */
+int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
+{
+ int type = dquot->dq_id.type;
+ struct super_block *sb = dquot->dq_sb;
+ ssize_t ret;
+ char *ddquot = getdqbuf(info->dqi_entry_size);
+
+ if (!ddquot)
+ return -ENOMEM;
+
+ /* dq_off is guarded by dqio_sem */
+ if (!dquot->dq_off) {
+ ret = dq_insert_tree(info, dquot);
+ if (ret < 0) {
+ quota_error(sb, "Error %zd occurred while creating "
+ "quota", ret);
+ kfree(ddquot);
+ return ret;
+ }
+ }
+ spin_lock(&dquot->dq_dqb_lock);
+ info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
+ spin_unlock(&dquot->dq_dqb_lock);
+ ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
+ dquot->dq_off);
+ if (ret != info->dqi_entry_size) {
+ quota_error(sb, "dquota write failed");
+ if (ret >= 0)
+ ret = -ENOSPC;
+ } else {
+ ret = 0;
+ }
+ dqstats_inc(DQST_WRITES);
+ kfree(ddquot);
+
+ return ret;
+}
+EXPORT_SYMBOL(qtree_write_dquot);
+
+/* Free dquot entry in data block */
+static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+ uint blk)
+{
+ struct qt_disk_dqdbheader *dh;
+ char *buf = getdqbuf(info->dqi_usable_bs);
+ int ret = 0;
+
+ if (!buf)
+ return -ENOMEM;
+ if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
+ quota_error(dquot->dq_sb, "Quota structure has offset to "
+ "other block (%u) than it should (%u)", blk,
+ (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
+ ret = -EIO;
+ goto out_buf;
+ }
+ ret = read_blk(info, blk, buf);
+ if (ret < 0) {
+ quota_error(dquot->dq_sb, "Can't read quota data block %u",
+ blk);
+ goto out_buf;
+ }
+ dh = (struct qt_disk_dqdbheader *)buf;
+ ret = check_dquot_block_header(info, dh);
+ if (ret)
+ goto out_buf;
+ le16_add_cpu(&dh->dqdh_entries, -1);
+ if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? */
+ ret = remove_free_dqentry(info, buf, blk);
+ if (ret >= 0)
+ ret = put_free_dqblk(info, buf, blk);
+ if (ret < 0) {
+ quota_error(dquot->dq_sb, "Can't move quota data block "
+ "(%u) to free list", blk);
+ goto out_buf;
+ }
+ } else {
+ memset(buf +
+ (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
+ 0, info->dqi_entry_size);
+ if (le16_to_cpu(dh->dqdh_entries) ==
+ qtree_dqstr_in_blk(info) - 1) {
+ /* Insert will write block itself */
+ ret = insert_free_dqentry(info, buf, blk);
+ if (ret < 0) {
+ quota_error(dquot->dq_sb, "Can't insert quota "
+ "data block (%u) to free entry list", blk);
+ goto out_buf;
+ }
+ } else {
+ ret = write_blk(info, blk, buf);
+ if (ret < 0) {
+ quota_error(dquot->dq_sb, "Can't write quota "
+ "data block %u", blk);
+ goto out_buf;
+ }
+ }
+ }
+ dquot->dq_off = 0; /* Quota is now unattached */
+out_buf:
+ kfree(buf);
+ return ret;
+}
+
+/* Remove reference to dquot from tree */
+static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
+ uint *blk, int depth)
+{
+ char *buf = getdqbuf(info->dqi_usable_bs);
+ int ret = 0;
+ uint newblk;
+ __le32 *ref = (__le32 *)buf;
+
+ if (!buf)
+ return -ENOMEM;
+ ret = read_blk(info, *blk, buf);
+ if (ret < 0) {
+ quota_error(dquot->dq_sb, "Can't read quota data block %u",
+ *blk);
+ goto out_buf;
+ }
+ newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
+ if (newblk < QT_TREEOFF || newblk >= info->dqi_blocks) {
+ quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)",
+ newblk, info->dqi_blocks);
+ ret = -EUCLEAN;
+ goto out_buf;
+ }
+
+ if (depth == info->dqi_qtree_depth - 1) {
+ ret = free_dqentry(info, dquot, newblk);
+ newblk = 0;
+ } else {
+ ret = remove_tree(info, dquot, &newblk, depth+1);
+ }
+ if (ret >= 0 && !newblk) {
+ int i;
+ ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
+ /* Block got empty? */
+ for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
+ ;
+ /* Don't put the root block into the free block list */
+ if (i == (info->dqi_usable_bs >> 2)
+ && *blk != QT_TREEOFF) {
+ put_free_dqblk(info, buf, *blk);
+ *blk = 0;
+ } else {
+ ret = write_blk(info, *blk, buf);
+ if (ret < 0)
+ quota_error(dquot->dq_sb,
+ "Can't write quota tree block %u",
+ *blk);
+ }
+ }
+out_buf:
+ kfree(buf);
+ return ret;
+}
+
+/* Delete dquot from tree */
+int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
+{
+ uint tmp = QT_TREEOFF;
+
+	if (!dquot->dq_off)	/* Not even allocated? */
+ return 0;
+ return remove_tree(info, dquot, &tmp, 0);
+}
+EXPORT_SYMBOL(qtree_delete_dquot);
+
+/* Find entry in block */
+static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
+ struct dquot *dquot, uint blk)
+{
+ char *buf = getdqbuf(info->dqi_usable_bs);
+ loff_t ret = 0;
+ int i;
+ char *ddquot;
+
+ if (!buf)
+ return -ENOMEM;
+ ret = read_blk(info, blk, buf);
+ if (ret < 0) {
+ quota_error(dquot->dq_sb, "Can't read quota tree "
+ "block %u", blk);
+ goto out_buf;
+ }
+ ddquot = buf + sizeof(struct qt_disk_dqdbheader);
+ for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
+ if (info->dqi_ops->is_id(ddquot, dquot))
+ break;
+ ddquot += info->dqi_entry_size;
+ }
+ if (i == qtree_dqstr_in_blk(info)) {
+ quota_error(dquot->dq_sb,
+ "Quota for id %u referenced but not present",
+ from_kqid(&init_user_ns, dquot->dq_id));
+ ret = -EIO;
+ goto out_buf;
+ } else {
+ ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
+ qt_disk_dqdbheader) + i * info->dqi_entry_size;
+ }
+out_buf:
+ kfree(buf);
+ return ret;
+}
+
+/* Find entry for given id in the tree */
+static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
+ struct dquot *dquot, uint blk, int depth)
+{
+ char *buf = getdqbuf(info->dqi_usable_bs);
+ loff_t ret = 0;
+ __le32 *ref = (__le32 *)buf;
+
+ if (!buf)
+ return -ENOMEM;
+ ret = read_blk(info, blk, buf);
+ if (ret < 0) {
+ quota_error(dquot->dq_sb, "Can't read quota tree block %u",
+ blk);
+ goto out_buf;
+ }
+ ret = 0;
+ blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
+ if (!blk) /* No reference? */
+ goto out_buf;
+ if (blk < QT_TREEOFF || blk >= info->dqi_blocks) {
+ quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)",
+ blk, info->dqi_blocks);
+ ret = -EUCLEAN;
+ goto out_buf;
+ }
+
+ if (depth < info->dqi_qtree_depth - 1)
+ ret = find_tree_dqentry(info, dquot, blk, depth+1);
+ else
+ ret = find_block_dqentry(info, dquot, blk);
+out_buf:
+ kfree(buf);
+ return ret;
+}
+
+/* Find entry for given id in the tree - wrapper function */
+static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
+ struct dquot *dquot)
+{
+ return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
+}
+
+int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
+{
+ int type = dquot->dq_id.type;
+ struct super_block *sb = dquot->dq_sb;
+ loff_t offset;
+ char *ddquot;
+ int ret = 0;
+
+#ifdef __QUOTA_QT_PARANOIA
+ /* Invalidated quota? */
+ if (!sb_dqopt(dquot->dq_sb)->files[type]) {
+ quota_error(sb, "Quota invalidated while reading!");
+ return -EIO;
+ }
+#endif
+ /* Do we know offset of the dquot entry in the quota file? */
+ if (!dquot->dq_off) {
+ offset = find_dqentry(info, dquot);
+ if (offset <= 0) { /* Entry not present? */
+ if (offset < 0)
+				quota_error(sb, "Can't read quota structure "
+ "for id %u",
+ from_kqid(&init_user_ns,
+ dquot->dq_id));
+ dquot->dq_off = 0;
+ set_bit(DQ_FAKE_B, &dquot->dq_flags);
+ memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
+ ret = offset;
+ goto out;
+ }
+ dquot->dq_off = offset;
+ }
+ ddquot = getdqbuf(info->dqi_entry_size);
+ if (!ddquot)
+ return -ENOMEM;
+ ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
+ dquot->dq_off);
+ if (ret != info->dqi_entry_size) {
+ if (ret >= 0)
+ ret = -EIO;
+ quota_error(sb, "Error while reading quota structure for id %u",
+ from_kqid(&init_user_ns, dquot->dq_id));
+ set_bit(DQ_FAKE_B, &dquot->dq_flags);
+ memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
+ kfree(ddquot);
+ goto out;
+ }
+ spin_lock(&dquot->dq_dqb_lock);
+ info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
+ if (!dquot->dq_dqb.dqb_bhardlimit &&
+ !dquot->dq_dqb.dqb_bsoftlimit &&
+ !dquot->dq_dqb.dqb_ihardlimit &&
+ !dquot->dq_dqb.dqb_isoftlimit)
+ set_bit(DQ_FAKE_B, &dquot->dq_flags);
+ spin_unlock(&dquot->dq_dqb_lock);
+ kfree(ddquot);
+out:
+ dqstats_inc(DQST_READS);
+ return ret;
+}
+EXPORT_SYMBOL(qtree_read_dquot);
+
+/* Delete the on-disk dquot entry if it is fake and no longer used. We know
+ * we are the only one operating on dquot (thanks to dq_lock) */
+int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
+{
+ if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
+ !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
+ return qtree_delete_dquot(info, dquot);
+ return 0;
+}
+EXPORT_SYMBOL(qtree_release_dquot);
+
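+/*
+ * Walk the tree for the first allocated id >= *id; level_inc is the number
+ * of ids covered by a single reference at the current depth.
+ */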
+static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
+ unsigned int blk, int depth)
+{
+ char *buf = getdqbuf(info->dqi_usable_bs);
+ __le32 *ref = (__le32 *)buf;
+ ssize_t ret;
+ unsigned int epb = info->dqi_usable_bs >> 2;
+ unsigned int level_inc = 1;
+ int i;
+
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = depth; i < info->dqi_qtree_depth - 1; i++)
+ level_inc *= epb;
+
+ ret = read_blk(info, blk, buf);
+ if (ret < 0) {
+ quota_error(info->dqi_sb,
+ "Can't read quota tree block %u", blk);
+ goto out_buf;
+ }
+ for (i = __get_index(info, *id, depth); i < epb; i++) {
+ if (ref[i] == cpu_to_le32(0)) {
+ *id += level_inc;
+ continue;
+ }
+ if (depth == info->dqi_qtree_depth - 1) {
+ ret = 0;
+ goto out_buf;
+ }
+ ret = find_next_id(info, id, le32_to_cpu(ref[i]), depth + 1);
+ if (ret != -ENOENT)
+ break;
+ }
+ if (i == epb) {
+ ret = -ENOENT;
+ goto out_buf;
+ }
+out_buf:
+ kfree(buf);
+ return ret;
+}
+
+int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
+{
+ qid_t id = from_kqid(&init_user_ns, *qid);
+ int ret;
+
+ ret = find_next_id(info, &id, QT_TREEOFF, 0);
+ if (ret < 0)
+ return ret;
+ *qid = make_kqid(&init_user_ns, qid->type, id);
+ return 0;
+}
+EXPORT_SYMBOL(qtree_get_next_id);
diff --git a/fs/quota/quota_tree.h b/fs/quota/quota_tree.h
new file mode 100644
index 000000000..31cf27e0e
--- /dev/null
+++ b/fs/quota/quota_tree.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions of structures for vfsv0 quota format
+ */
+
+#ifndef _LINUX_QUOTA_TREE_H
+#define _LINUX_QUOTA_TREE_H
+
+#include <linux/types.h>
+#include <linux/quota.h>
+
+/*
+ * Structure of header of block with quota structures. It is padded to 16 bytes so
+ * there will be space for exactly 21 quota-entries in a block
+ */
+struct qt_disk_dqdbheader {
+ __le32 dqdh_next_free; /* Number of next block with free entry */
+ __le32 dqdh_prev_free; /* Number of previous block with free entry */
+ __le16 dqdh_entries; /* Number of valid entries in block */
+ __le16 dqdh_pad1;
+ __le32 dqdh_pad2;
+};
+
+#define QT_TREEOFF 1 /* Offset of tree in file in blocks */
+
+#endif /* _LINUX_QUOTA_TREE_H */
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
new file mode 100644
index 000000000..cd92e5fa0
--- /dev/null
+++ b/fs/quota/quota_v1.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/quota.h>
+#include <linux/quotaops.h>
+#include <linux/dqblk_v1.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/byteorder.h>
+
+#include "quotaio_v1.h"
+
+MODULE_AUTHOR("Jan Kara");
+MODULE_DESCRIPTION("Old quota format support");
+MODULE_LICENSE("GPL");
+
+#define QUOTABLOCK_BITS 10
+#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
+
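+/* Convert between bytes and 1 KiB quota blocks (space rounds up to blocks) */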
+static inline qsize_t v1_stoqb(qsize_t space)
+{
+ return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS;
+}
+
+static inline qsize_t v1_qbtos(qsize_t blocks)
+{
+ return blocks << QUOTABLOCK_BITS;
+}
+
+static void v1_disk2mem_dqblk(struct mem_dqblk *m, struct v1_disk_dqblk *d)
+{
+ m->dqb_ihardlimit = d->dqb_ihardlimit;
+ m->dqb_isoftlimit = d->dqb_isoftlimit;
+ m->dqb_curinodes = d->dqb_curinodes;
+ m->dqb_bhardlimit = v1_qbtos(d->dqb_bhardlimit);
+ m->dqb_bsoftlimit = v1_qbtos(d->dqb_bsoftlimit);
+ m->dqb_curspace = v1_qbtos(d->dqb_curblocks);
+ m->dqb_itime = d->dqb_itime;
+ m->dqb_btime = d->dqb_btime;
+}
+
+static void v1_mem2disk_dqblk(struct v1_disk_dqblk *d, struct mem_dqblk *m)
+{
+ d->dqb_ihardlimit = m->dqb_ihardlimit;
+ d->dqb_isoftlimit = m->dqb_isoftlimit;
+ d->dqb_curinodes = m->dqb_curinodes;
+ d->dqb_bhardlimit = v1_stoqb(m->dqb_bhardlimit);
+ d->dqb_bsoftlimit = v1_stoqb(m->dqb_bsoftlimit);
+ d->dqb_curblocks = v1_stoqb(m->dqb_curspace);
+ d->dqb_itime = m->dqb_itime;
+ d->dqb_btime = m->dqb_btime;
+}
+
+static int v1_read_dqblk(struct dquot *dquot)
+{
+ int type = dquot->dq_id.type;
+ struct v1_disk_dqblk dqblk;
+ struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+
+ if (!dqopt->files[type])
+ return -EINVAL;
+
+ /* Set structure to 0s in case read fails/is after end of file */
+ memset(&dqblk, 0, sizeof(struct v1_disk_dqblk));
+ dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk,
+ sizeof(struct v1_disk_dqblk),
+ v1_dqoff(from_kqid(&init_user_ns, dquot->dq_id)));
+
+ v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk);
+ if (dquot->dq_dqb.dqb_bhardlimit == 0 &&
+ dquot->dq_dqb.dqb_bsoftlimit == 0 &&
+ dquot->dq_dqb.dqb_ihardlimit == 0 &&
+ dquot->dq_dqb.dqb_isoftlimit == 0)
+ set_bit(DQ_FAKE_B, &dquot->dq_flags);
+ dqstats_inc(DQST_READS);
+
+ return 0;
+}
+
+static int v1_commit_dqblk(struct dquot *dquot)
+{
+ short type = dquot->dq_id.type;
+ ssize_t ret;
+ struct v1_disk_dqblk dqblk;
+
+ v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb);
+ if (((type == USRQUOTA) && uid_eq(dquot->dq_id.uid, GLOBAL_ROOT_UID)) ||
+ ((type == GRPQUOTA) && gid_eq(dquot->dq_id.gid, GLOBAL_ROOT_GID))) {
+ dqblk.dqb_btime =
+ sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
+ dqblk.dqb_itime =
+ sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
+ }
+ ret = 0;
+ if (sb_dqopt(dquot->dq_sb)->files[type])
+ ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type,
+ (char *)&dqblk, sizeof(struct v1_disk_dqblk),
+ v1_dqoff(from_kqid(&init_user_ns, dquot->dq_id)));
+ if (ret != sizeof(struct v1_disk_dqblk)) {
+ quota_error(dquot->dq_sb, "dquota write failed");
+ if (ret >= 0)
+ ret = -EIO;
+ goto out;
+ }
+ ret = 0;
+
+out:
+ dqstats_inc(DQST_WRITES);
+
+ return ret;
+}
+
+/* Magics of new quota format */
+#define V2_INITQMAGICS {\
+ 0xd9c01f11, /* USRQUOTA */\
+ 0xd9c01927 /* GRPQUOTA */\
+}
+
+/* Header of new quota format */
+struct v2_disk_dqheader {
+ __le32 dqh_magic; /* Magic number identifying file */
+ __le32 dqh_version; /* File version */
+};
+
+static int v1_check_quota_file(struct super_block *sb, int type)
+{
+ struct inode *inode = sb_dqopt(sb)->files[type];
+ ulong blocks;
+ size_t off;
+ struct v2_disk_dqheader dqhead;
+ ssize_t size;
+ loff_t isize;
+ static const uint quota_magics[] = V2_INITQMAGICS;
+
+ isize = i_size_read(inode);
+ if (!isize)
+ return 0;
+ blocks = isize >> BLOCK_SIZE_BITS;
+ off = isize & (BLOCK_SIZE - 1);
+ if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) %
+ sizeof(struct v1_disk_dqblk))
+ return 0;
+	/* Double-check that we didn't get a file in the new format - with the
+	 * old quotactl() this could happen */
+ size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
+ sizeof(struct v2_disk_dqheader), 0);
+ if (size != sizeof(struct v2_disk_dqheader))
+ return 1; /* Probably not new format */
+ if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type])
+ return 1; /* Definitely not new format */
+ printk(KERN_INFO
+ "VFS: %s: Refusing to turn on old quota format on given file."
+ " It probably contains newer quota format.\n", sb->s_id);
+ return 0; /* Seems like a new format file -> refuse it */
+}
+
+static int v1_read_file_info(struct super_block *sb, int type)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+ struct v1_disk_dqblk dqblk;
+ int ret;
+
+ down_read(&dqopt->dqio_sem);
+ ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
+ sizeof(struct v1_disk_dqblk), v1_dqoff(0));
+ if (ret != sizeof(struct v1_disk_dqblk)) {
+ if (ret >= 0)
+ ret = -EIO;
+ goto out;
+ }
+ ret = 0;
+ /* limits are stored as unsigned 32-bit data */
+ dqopt->info[type].dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
+ dqopt->info[type].dqi_max_ino_limit = 0xffffffff;
+ dqopt->info[type].dqi_igrace =
+ dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
+ dqopt->info[type].dqi_bgrace =
+ dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
+out:
+ up_read(&dqopt->dqio_sem);
+ return ret;
+}
+
+static int v1_write_file_info(struct super_block *sb, int type)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+ struct v1_disk_dqblk dqblk;
+ int ret;
+
+ down_write(&dqopt->dqio_sem);
+ ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
+ sizeof(struct v1_disk_dqblk), v1_dqoff(0));
+ if (ret != sizeof(struct v1_disk_dqblk)) {
+ if (ret >= 0)
+ ret = -EIO;
+ goto out;
+ }
+ spin_lock(&dq_data_lock);
+ dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY;
+ dqblk.dqb_itime = dqopt->info[type].dqi_igrace;
+ dqblk.dqb_btime = dqopt->info[type].dqi_bgrace;
+ spin_unlock(&dq_data_lock);
+ ret = sb->s_op->quota_write(sb, type, (char *)&dqblk,
+ sizeof(struct v1_disk_dqblk), v1_dqoff(0));
+ if (ret == sizeof(struct v1_disk_dqblk))
+ ret = 0;
+ else if (ret > 0)
+ ret = -EIO;
+out:
+ up_write(&dqopt->dqio_sem);
+ return ret;
+}
+
+static const struct quota_format_ops v1_format_ops = {
+ .check_quota_file = v1_check_quota_file,
+ .read_file_info = v1_read_file_info,
+ .write_file_info = v1_write_file_info,
+ .read_dqblk = v1_read_dqblk,
+ .commit_dqblk = v1_commit_dqblk,
+};
+
+static struct quota_format_type v1_quota_format = {
+ .qf_fmt_id = QFMT_VFS_OLD,
+ .qf_ops = &v1_format_ops,
+ .qf_owner = THIS_MODULE
+};
+
+static int __init init_v1_quota_format(void)
+{
+ return register_quota_format(&v1_quota_format);
+}
+
+static void __exit exit_v1_quota_format(void)
+{
+ unregister_quota_format(&v1_quota_format);
+}
+
+module_init(init_v1_quota_format);
+module_exit(exit_v1_quota_format);
+
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
new file mode 100644
index 000000000..b1467f392
--- /dev/null
+++ b/fs/quota/quota_v2.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vfsv0 quota IO operations on file
+ */
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/dqblk_v2.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/quotaops.h>
+
+#include <asm/byteorder.h>
+
+#include "quota_tree.h"
+#include "quotaio_v2.h"
+
+MODULE_AUTHOR("Jan Kara");
+MODULE_DESCRIPTION("Quota format v2 support");
+MODULE_LICENSE("GPL");
+
+static void v2r0_mem2diskdqb(void *dp, struct dquot *dquot);
+static void v2r0_disk2memdqb(struct dquot *dquot, void *dp);
+static int v2r0_is_id(void *dp, struct dquot *dquot);
+static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot);
+static void v2r1_disk2memdqb(struct dquot *dquot, void *dp);
+static int v2r1_is_id(void *dp, struct dquot *dquot);
+
+static const struct qtree_fmt_operations v2r0_qtree_ops = {
+ .mem2disk_dqblk = v2r0_mem2diskdqb,
+ .disk2mem_dqblk = v2r0_disk2memdqb,
+ .is_id = v2r0_is_id,
+};
+
+static const struct qtree_fmt_operations v2r1_qtree_ops = {
+ .mem2disk_dqblk = v2r1_mem2diskdqb,
+ .disk2mem_dqblk = v2r1_disk2memdqb,
+ .is_id = v2r1_is_id,
+};
+
+#define QUOTABLOCK_BITS 10
+#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
+
+static inline qsize_t v2_stoqb(qsize_t space)
+{
+ return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS;
+}
+
+static inline qsize_t v2_qbtos(qsize_t blocks)
+{
+ return blocks << QUOTABLOCK_BITS;
+}
+
+static int v2_read_header(struct super_block *sb, int type,
+ struct v2_disk_dqheader *dqhead)
+{
+ ssize_t size;
+
+ size = sb->s_op->quota_read(sb, type, (char *)dqhead,
+ sizeof(struct v2_disk_dqheader), 0);
+ if (size != sizeof(struct v2_disk_dqheader)) {
+ quota_error(sb, "Failed header read: expected=%zd got=%zd",
+ sizeof(struct v2_disk_dqheader), size);
+ if (size < 0)
+ return size;
+ return -EIO;
+ }
+ return 0;
+}
+
+/* Check whether the given file is really a vfsv0 quota file */
+static int v2_check_quota_file(struct super_block *sb, int type)
+{
+ struct v2_disk_dqheader dqhead;
+ static const uint quota_magics[] = V2_INITQMAGICS;
+ static const uint quota_versions[] = V2_INITQVERSIONS;
+
+ if (v2_read_header(sb, type, &dqhead))
+ return 0;
+ if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] ||
+ le32_to_cpu(dqhead.dqh_version) > quota_versions[type])
+ return 0;
+ return 1;
+}
+
+/* Read information header from quota file */
+static int v2_read_file_info(struct super_block *sb, int type)
+{
+ struct v2_disk_dqinfo dinfo;
+ struct v2_disk_dqheader dqhead;
+ struct quota_info *dqopt = sb_dqopt(sb);
+ struct mem_dqinfo *info = &dqopt->info[type];
+ struct qtree_mem_dqinfo *qinfo;
+ ssize_t size;
+ unsigned int version;
+ int ret;
+
+ down_read(&dqopt->dqio_sem);
+ ret = v2_read_header(sb, type, &dqhead);
+ if (ret < 0)
+ goto out;
+ version = le32_to_cpu(dqhead.dqh_version);
+ if ((info->dqi_fmt_id == QFMT_VFS_V0 && version != 0) ||
+ (info->dqi_fmt_id == QFMT_VFS_V1 && version != 1)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ size = sb->s_op->quota_read(sb, type, (char *)&dinfo,
+ sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
+ if (size != sizeof(struct v2_disk_dqinfo)) {
+ quota_error(sb, "Can't read info structure");
+ if (size < 0)
+ ret = size;
+ else
+ ret = -EIO;
+ goto out;
+ }
+ info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS);
+ if (!info->dqi_priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ qinfo = info->dqi_priv;
+ if (version == 0) {
+ /* limits are stored as unsigned 32-bit data */
+ info->dqi_max_spc_limit = 0xffffffffLL << QUOTABLOCK_BITS;
+ info->dqi_max_ino_limit = 0xffffffff;
+ } else {
+ /*
+ * Used space is stored as unsigned 64-bit value in bytes but
+ * quota core supports only signed 64-bit values so use that
+ * as a limit
+ */
+ info->dqi_max_spc_limit = 0x7fffffffffffffffLL; /* 2^63-1 */
+ info->dqi_max_ino_limit = 0x7fffffffffffffffLL;
+ }
+ info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
+ info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
+ /* No flags currently supported */
+ info->dqi_flags = 0;
+ qinfo->dqi_sb = sb;
+ qinfo->dqi_type = type;
+ qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
+ qinfo->dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
+ qinfo->dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
+ qinfo->dqi_blocksize_bits = V2_DQBLKSIZE_BITS;
+ qinfo->dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS;
+ qinfo->dqi_qtree_depth = qtree_depth(qinfo);
+ if (version == 0) {
+ qinfo->dqi_entry_size = sizeof(struct v2r0_disk_dqblk);
+ qinfo->dqi_ops = &v2r0_qtree_ops;
+ } else {
+ qinfo->dqi_entry_size = sizeof(struct v2r1_disk_dqblk);
+ qinfo->dqi_ops = &v2r1_qtree_ops;
+ }
+ ret = -EUCLEAN;
+ /* Some sanity checks of the read headers... */
+ if ((loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits >
+ i_size_read(sb_dqopt(sb)->files[type])) {
+ quota_error(sb, "Number of blocks too big for quota file size (%llu > %llu).",
+ (loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits,
+ i_size_read(sb_dqopt(sb)->files[type]));
+ goto out_free;
+ }
+ if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) {
+ quota_error(sb, "Free block number too big (%u >= %u).",
+ qinfo->dqi_free_blk, qinfo->dqi_blocks);
+ goto out_free;
+ }
+ if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) {
+ quota_error(sb, "Block with free entry too big (%u >= %u).",
+ qinfo->dqi_free_entry, qinfo->dqi_blocks);
+ goto out_free;
+ }
+ ret = 0;
+out_free:
+ if (ret) {
+ kfree(info->dqi_priv);
+ info->dqi_priv = NULL;
+ }
+out:
+ up_read(&dqopt->dqio_sem);
+ return ret;
+}
+
+/* Write information header to quota file */
+static int v2_write_file_info(struct super_block *sb, int type)
+{
+ struct v2_disk_dqinfo dinfo;
+ struct quota_info *dqopt = sb_dqopt(sb);
+ struct mem_dqinfo *info = &dqopt->info[type];
+ struct qtree_mem_dqinfo *qinfo = info->dqi_priv;
+ ssize_t size;
+
+ down_write(&dqopt->dqio_sem);
+ spin_lock(&dq_data_lock);
+ info->dqi_flags &= ~DQF_INFO_DIRTY;
+ dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
+ dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
+ /* No flags currently supported */
+ dinfo.dqi_flags = cpu_to_le32(0);
+ spin_unlock(&dq_data_lock);
+ dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks);
+ dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk);
+ dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry);
+ size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
+ sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
+ up_write(&dqopt->dqio_sem);
+ if (size != sizeof(struct v2_disk_dqinfo)) {
+ quota_error(sb, "Can't write info structure");
+ return -1;
+ }
+ return 0;
+}
+
+static void v2r0_disk2memdqb(struct dquot *dquot, void *dp)
+{
+ struct v2r0_disk_dqblk *d = dp, empty;
+ struct mem_dqblk *m = &dquot->dq_dqb;
+
+ m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit);
+ m->dqb_isoftlimit = le32_to_cpu(d->dqb_isoftlimit);
+ m->dqb_curinodes = le32_to_cpu(d->dqb_curinodes);
+ m->dqb_itime = le64_to_cpu(d->dqb_itime);
+ m->dqb_bhardlimit = v2_qbtos(le32_to_cpu(d->dqb_bhardlimit));
+ m->dqb_bsoftlimit = v2_qbtos(le32_to_cpu(d->dqb_bsoftlimit));
+ m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
+ m->dqb_btime = le64_to_cpu(d->dqb_btime);
+	/* All-zero entries are escaped on disk with dqb_itime == 1; undo that */
+ memset(&empty, 0, sizeof(struct v2r0_disk_dqblk));
+ empty.dqb_itime = cpu_to_le64(1);
+ if (!memcmp(&empty, dp, sizeof(struct v2r0_disk_dqblk)))
+ m->dqb_itime = 0;
+}
+
+static void v2r0_mem2diskdqb(void *dp, struct dquot *dquot)
+{
+ struct v2r0_disk_dqblk *d = dp;
+ struct mem_dqblk *m = &dquot->dq_dqb;
+ struct qtree_mem_dqinfo *info =
+ sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
+
+ d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit);
+ d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit);
+ d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes);
+ d->dqb_itime = cpu_to_le64(m->dqb_itime);
+ d->dqb_bhardlimit = cpu_to_le32(v2_stoqb(m->dqb_bhardlimit));
+ d->dqb_bsoftlimit = cpu_to_le32(v2_stoqb(m->dqb_bsoftlimit));
+ d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
+ d->dqb_btime = cpu_to_le64(m->dqb_btime);
+ d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
+ if (qtree_entry_unused(info, dp))
+ d->dqb_itime = cpu_to_le64(1);
+}
+
+static int v2r0_is_id(void *dp, struct dquot *dquot)
+{
+ struct v2r0_disk_dqblk *d = dp;
+ struct qtree_mem_dqinfo *info =
+ sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
+
+ if (qtree_entry_unused(info, dp))
+ return 0;
+ return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type,
+ le32_to_cpu(d->dqb_id)),
+ dquot->dq_id);
+}
+
+static void v2r1_disk2memdqb(struct dquot *dquot, void *dp)
+{
+ struct v2r1_disk_dqblk *d = dp, empty;
+ struct mem_dqblk *m = &dquot->dq_dqb;
+
+ m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
+ m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
+ m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
+ m->dqb_itime = le64_to_cpu(d->dqb_itime);
+ m->dqb_bhardlimit = v2_qbtos(le64_to_cpu(d->dqb_bhardlimit));
+ m->dqb_bsoftlimit = v2_qbtos(le64_to_cpu(d->dqb_bsoftlimit));
+ m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
+ m->dqb_btime = le64_to_cpu(d->dqb_btime);
+	/* All-zero entries are escaped on disk with dqb_itime == 1; undo that */
+ memset(&empty, 0, sizeof(struct v2r1_disk_dqblk));
+ empty.dqb_itime = cpu_to_le64(1);
+ if (!memcmp(&empty, dp, sizeof(struct v2r1_disk_dqblk)))
+ m->dqb_itime = 0;
+}
+
+static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot)
+{
+ struct v2r1_disk_dqblk *d = dp;
+ struct mem_dqblk *m = &dquot->dq_dqb;
+ struct qtree_mem_dqinfo *info =
+ sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
+
+ d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
+ d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
+ d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
+ d->dqb_itime = cpu_to_le64(m->dqb_itime);
+ d->dqb_bhardlimit = cpu_to_le64(v2_stoqb(m->dqb_bhardlimit));
+ d->dqb_bsoftlimit = cpu_to_le64(v2_stoqb(m->dqb_bsoftlimit));
+ d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
+ d->dqb_btime = cpu_to_le64(m->dqb_btime);
+ d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
+ d->dqb_pad = 0;
+ if (qtree_entry_unused(info, dp))
+ d->dqb_itime = cpu_to_le64(1);
+}
+
+static int v2r1_is_id(void *dp, struct dquot *dquot)
+{
+ struct v2r1_disk_dqblk *d = dp;
+ struct qtree_mem_dqinfo *info =
+ sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
+
+ if (qtree_entry_unused(info, dp))
+ return 0;
+ return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type,
+ le32_to_cpu(d->dqb_id)),
+ dquot->dq_id);
+}
+
+static int v2_read_dquot(struct dquot *dquot)
+{
+ struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+ int ret;
+
+ down_read(&dqopt->dqio_sem);
+ ret = qtree_read_dquot(
+ sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv,
+ dquot);
+ up_read(&dqopt->dqio_sem);
+ return ret;
+}
+
+static int v2_write_dquot(struct dquot *dquot)
+{
+ struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+ int ret;
+ bool alloc = false;
+
+ /*
+	 * If space for the dquot is already allocated, we don't need any
+	 * protection as we'll only overwrite the dquot's existing place. We
+	 * are still protected against concurrent writes of the same dquot by
+	 * dquot->dq_lock.
+ */
+ if (!dquot->dq_off) {
+ alloc = true;
+ down_write(&dqopt->dqio_sem);
+ } else {
+ down_read(&dqopt->dqio_sem);
+ }
+ ret = qtree_write_dquot(
+ sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv,
+ dquot);
+ if (alloc)
+ up_write(&dqopt->dqio_sem);
+ else
+ up_read(&dqopt->dqio_sem);
+ return ret;
+}
+
+static int v2_release_dquot(struct dquot *dquot)
+{
+ struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+ int ret;
+
+ down_write(&dqopt->dqio_sem);
+ ret = qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot);
+ up_write(&dqopt->dqio_sem);
+
+ return ret;
+}
+
+static int v2_free_file_info(struct super_block *sb, int type)
+{
+ kfree(sb_dqinfo(sb, type)->dqi_priv);
+ return 0;
+}
+
+static int v2_get_next_id(struct super_block *sb, struct kqid *qid)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+ int ret;
+
+ down_read(&dqopt->dqio_sem);
+ ret = qtree_get_next_id(sb_dqinfo(sb, qid->type)->dqi_priv, qid);
+ up_read(&dqopt->dqio_sem);
+ return ret;
+}
+
+static const struct quota_format_ops v2_format_ops = {
+ .check_quota_file = v2_check_quota_file,
+ .read_file_info = v2_read_file_info,
+ .write_file_info = v2_write_file_info,
+ .free_file_info = v2_free_file_info,
+ .read_dqblk = v2_read_dquot,
+ .commit_dqblk = v2_write_dquot,
+ .release_dqblk = v2_release_dquot,
+ .get_next_id = v2_get_next_id,
+};
+
+static struct quota_format_type v2r0_quota_format = {
+ .qf_fmt_id = QFMT_VFS_V0,
+ .qf_ops = &v2_format_ops,
+ .qf_owner = THIS_MODULE
+};
+
+static struct quota_format_type v2r1_quota_format = {
+ .qf_fmt_id = QFMT_VFS_V1,
+ .qf_ops = &v2_format_ops,
+ .qf_owner = THIS_MODULE
+};
+
+static int __init init_v2_quota_format(void)
+{
+ int ret;
+
+ ret = register_quota_format(&v2r0_quota_format);
+ if (ret)
+ return ret;
+ return register_quota_format(&v2r1_quota_format);
+}
+
+static void __exit exit_v2_quota_format(void)
+{
+ unregister_quota_format(&v2r0_quota_format);
+ unregister_quota_format(&v2r1_quota_format);
+}
+
+module_init(init_v2_quota_format);
+module_exit(exit_v2_quota_format);
diff --git a/fs/quota/quotaio_v1.h b/fs/quota/quotaio_v1.h
new file mode 100644
index 000000000..31dca9a89
--- /dev/null
+++ b/fs/quota/quotaio_v1.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_QUOTAIO_V1_H
+#define _LINUX_QUOTAIO_V1_H
+
+#include <linux/types.h>
+
+/*
+ * The following constants define the amount of time given a user
+ * before the soft limits are treated as hard limits (usually resulting
+ * in an allocation failure). The timer is started when the user crosses
+ * their soft limit and is reset when they go back below it.
+ */
+#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */
+#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */
+
+/*
+ * The following structure defines the format of the disk quota file
+ * (as it appears on disk) - the file is an array of these structures
+ * indexed by user or group number.
+ */
+struct v1_disk_dqblk {
+ __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */
+ __u32 dqb_bsoftlimit; /* preferred limit on disk blks */
+ __u32 dqb_curblocks; /* current block count */
+ __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */
+ __u32 dqb_isoftlimit; /* preferred inode limit */
+ __u32 dqb_curinodes; /* current # allocated inodes */
+
+ /* below fields differ in length on 32-bit vs 64-bit architectures */
+ unsigned long dqb_btime; /* time limit for excessive disk use */
+ unsigned long dqb_itime; /* time limit for excessive inode use */
+};
+
+#define v1_dqoff(UID) ((loff_t)((UID) * sizeof (struct v1_disk_dqblk)))
+
+#endif /* _LINUX_QUOTAIO_V1_H */
diff --git a/fs/quota/quotaio_v2.h b/fs/quota/quotaio_v2.h
new file mode 100644
index 000000000..43cf0f0e2
--- /dev/null
+++ b/fs/quota/quotaio_v2.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions of structures for vfsv0 quota format
+ */
+
+#ifndef _LINUX_QUOTAIO_V2_H
+#define _LINUX_QUOTAIO_V2_H
+
+#include <linux/types.h>
+#include <linux/quota.h>
+
+/*
+ * Definitions of magics and versions of current quota files
+ */
+#define V2_INITQMAGICS {\
+ 0xd9c01f11, /* USRQUOTA */\
+ 0xd9c01927, /* GRPQUOTA */\
+ 0xd9c03f14, /* PRJQUOTA */\
+}
+
+#define V2_INITQVERSIONS {\
+ 1, /* USRQUOTA */\
+ 1, /* GRPQUOTA */\
+ 1, /* PRJQUOTA */\
+}
+
+/* First generic header */
+struct v2_disk_dqheader {
+ __le32 dqh_magic; /* Magic number identifying file */
+ __le32 dqh_version; /* File version */
+};
+
+/*
+ * The following structure defines the format of the disk quota file
+ * (as it appears on disk) - the file is a radix tree whose leaves point
+ * to blocks of these structures.
+ */
+struct v2r0_disk_dqblk {
+ __le32 dqb_id; /* id this quota applies to */
+ __le32 dqb_ihardlimit; /* absolute limit on allocated inodes */
+ __le32 dqb_isoftlimit; /* preferred inode limit */
+ __le32 dqb_curinodes; /* current # allocated inodes */
+ __le32 dqb_bhardlimit; /* absolute limit on disk space (in QUOTABLOCK_SIZE) */
+ __le32 dqb_bsoftlimit; /* preferred limit on disk space (in QUOTABLOCK_SIZE) */
+ __le64 dqb_curspace; /* current space occupied (in bytes) */
+ __le64 dqb_btime; /* time limit for excessive disk use */
+ __le64 dqb_itime; /* time limit for excessive inode use */
+};
+
+struct v2r1_disk_dqblk {
+ __le32 dqb_id; /* id this quota applies to */
+ __le32 dqb_pad;
+ __le64 dqb_ihardlimit; /* absolute limit on allocated inodes */
+ __le64 dqb_isoftlimit; /* preferred inode limit */
+ __le64 dqb_curinodes; /* current # allocated inodes */
+ __le64 dqb_bhardlimit; /* absolute limit on disk space (in QUOTABLOCK_SIZE) */
+ __le64 dqb_bsoftlimit; /* preferred limit on disk space (in QUOTABLOCK_SIZE) */
+ __le64 dqb_curspace; /* current space occupied (in bytes) */
+ __le64 dqb_btime; /* time limit for excessive disk use */
+ __le64 dqb_itime; /* time limit for excessive inode use */
+};
+
+/* Header with type and version specific information */
+struct v2_disk_dqinfo {
+ __le32 dqi_bgrace; /* Time before block soft limit becomes hard limit */
+ __le32 dqi_igrace; /* Time before inode soft limit becomes hard limit */
+ __le32 dqi_flags; /* Flags for quotafile (DQF_*) */
+ __le32 dqi_blocks; /* Number of blocks in file */
+ __le32 dqi_free_blk; /* Number of first free block in the list */
+ __le32 dqi_free_entry; /* Number of block with at least one free entry */
+};
+
+#define V2_DQINFOOFF sizeof(struct v2_disk_dqheader) /* Offset of info header in file */
+#define V2_DQBLKSIZE_BITS 10 /* Size of leaf block in tree */
+
+#endif /* _LINUX_QUOTAIO_V2_H */