Diffstat (limited to 'fs/zonefs')
-rw-r--r--  fs/zonefs/Kconfig     11
-rw-r--r--  fs/zonefs/Makefile     6
-rw-r--r--  fs/zonefs/file.c     847
-rw-r--r--  fs/zonefs/super.c   1451
-rw-r--r--  fs/zonefs/sysfs.c    134
-rw-r--r--  fs/zonefs/trace.h    106
-rw-r--r--  fs/zonefs/zonefs.h   289
7 files changed, 2844 insertions(+), 0 deletions(-)
diff --git a/fs/zonefs/Kconfig b/fs/zonefs/Kconfig
new file mode 100644
index 000000000..827278f93
--- /dev/null
+++ b/fs/zonefs/Kconfig
@@ -0,0 +1,11 @@
+config ZONEFS_FS
+ tristate "zonefs filesystem support"
+ depends on BLOCK
+ depends on BLK_DEV_ZONED
+ select FS_IOMAP
+ select CRC32
+ help
+ zonefs is a simple file system which exposes zones of a zoned block
+ device (e.g. host-managed or host-aware SMR disk drives) as files.
+
+ If unsure, say N.
diff --git a/fs/zonefs/Makefile b/fs/zonefs/Makefile
new file mode 100644
index 000000000..645f7229d
--- /dev/null
+++ b/fs/zonefs/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-y += -I$(src)
+
+obj-$(CONFIG_ZONEFS_FS) += zonefs.o
+
+zonefs-y := super.o file.o sysfs.o
diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
new file mode 100644
index 000000000..b2c9b35df
--- /dev/null
+++ b/fs/zonefs/file.c
@@ -0,0 +1,847 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Simple file system for zoned block devices exposing zones as files.
+ *
+ * Copyright (C) 2022 Western Digital Corporation or its affiliates.
+ */
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/iomap.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/statfs.h>
+#include <linux/writeback.h>
+#include <linux/quotaops.h>
+#include <linux/seq_file.h>
+#include <linux/parser.h>
+#include <linux/uio.h>
+#include <linux/mman.h>
+#include <linux/sched/mm.h>
+#include <linux/task_io_accounting_ops.h>
+
+#include "zonefs.h"
+
+#include "trace.h"
+
+static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
+ loff_t length, unsigned int flags,
+ struct iomap *iomap, struct iomap *srcmap)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct super_block *sb = inode->i_sb;
+ loff_t isize;
+
+ /*
+ * All blocks are always mapped below EOF. If reading past EOF,
+ * act as if there is a hole up to the file maximum size.
+ */
+ mutex_lock(&zi->i_truncate_mutex);
+ iomap->bdev = inode->i_sb->s_bdev;
+ iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+ isize = i_size_read(inode);
+ if (iomap->offset >= isize) {
+ iomap->type = IOMAP_HOLE;
+ iomap->addr = IOMAP_NULL_ADDR;
+ iomap->length = length;
+ } else {
+ iomap->type = IOMAP_MAPPED;
+ iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset;
+ iomap->length = isize - iomap->offset;
+ }
+ mutex_unlock(&zi->i_truncate_mutex);
+
+ trace_zonefs_iomap_begin(inode, iomap);
+
+ return 0;
+}
+
+static const struct iomap_ops zonefs_read_iomap_ops = {
+ .iomap_begin = zonefs_read_iomap_begin,
+};
+
+static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
+ loff_t length, unsigned int flags,
+ struct iomap *iomap, struct iomap *srcmap)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct super_block *sb = inode->i_sb;
+ loff_t isize;
+
+ /* All write I/Os should always be within the file maximum size */
+ if (WARN_ON_ONCE(offset + length > z->z_capacity))
+ return -EIO;
+
+ /*
+ * Sequential zones can only accept direct writes. This is already
+ * checked when writes are issued, so warn if we see a page writeback
+ * operation.
+ */
+ if (WARN_ON_ONCE(zonefs_zone_is_seq(z) && !(flags & IOMAP_DIRECT)))
+ return -EIO;
+
+ /*
+	 * For conventional zones, all blocks are always mapped. For sequential
+	 * zones, all blocks are always mapped below the inode size (zone
+	 * write pointer) and unwritten beyond.
+ */
+ mutex_lock(&zi->i_truncate_mutex);
+ iomap->bdev = inode->i_sb->s_bdev;
+ iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+ iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset;
+ isize = i_size_read(inode);
+ if (iomap->offset >= isize) {
+ iomap->type = IOMAP_UNWRITTEN;
+ iomap->length = z->z_capacity - iomap->offset;
+ } else {
+ iomap->type = IOMAP_MAPPED;
+ iomap->length = isize - iomap->offset;
+ }
+ mutex_unlock(&zi->i_truncate_mutex);
+
+ trace_zonefs_iomap_begin(inode, iomap);
+
+ return 0;
+}
+
+static const struct iomap_ops zonefs_write_iomap_ops = {
+ .iomap_begin = zonefs_write_iomap_begin,
+};
+
+static int zonefs_read_folio(struct file *unused, struct folio *folio)
+{
+ return iomap_read_folio(folio, &zonefs_read_iomap_ops);
+}
+
+static void zonefs_readahead(struct readahead_control *rac)
+{
+ iomap_readahead(rac, &zonefs_read_iomap_ops);
+}
+
+/*
+ * Map blocks for page writeback. This is used only on conventional zone files,
+ * which implies that the page range can only be within the fixed inode size.
+ */
+static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
+ struct inode *inode, loff_t offset)
+{
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+
+ if (WARN_ON_ONCE(zonefs_zone_is_seq(z)))
+ return -EIO;
+ if (WARN_ON_ONCE(offset >= i_size_read(inode)))
+ return -EIO;
+
+ /* If the mapping is already OK, nothing needs to be done */
+ if (offset >= wpc->iomap.offset &&
+ offset < wpc->iomap.offset + wpc->iomap.length)
+ return 0;
+
+ return zonefs_write_iomap_begin(inode, offset,
+ z->z_capacity - offset,
+ IOMAP_WRITE, &wpc->iomap, NULL);
+}
+
+static const struct iomap_writeback_ops zonefs_writeback_ops = {
+ .map_blocks = zonefs_write_map_blocks,
+};
+
+static int zonefs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct iomap_writepage_ctx wpc = { };
+
+ return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
+}
+
+static int zonefs_swap_activate(struct swap_info_struct *sis,
+ struct file *swap_file, sector_t *span)
+{
+ struct inode *inode = file_inode(swap_file);
+
+ if (zonefs_inode_is_seq(inode)) {
+ zonefs_err(inode->i_sb,
+ "swap file: not a conventional zone file\n");
+ return -EINVAL;
+ }
+
+ return iomap_swapfile_activate(sis, swap_file, span,
+ &zonefs_read_iomap_ops);
+}
+
+const struct address_space_operations zonefs_file_aops = {
+ .read_folio = zonefs_read_folio,
+ .readahead = zonefs_readahead,
+ .writepages = zonefs_writepages,
+ .dirty_folio = iomap_dirty_folio,
+ .release_folio = iomap_release_folio,
+ .invalidate_folio = iomap_invalidate_folio,
+ .migrate_folio = filemap_migrate_folio,
+ .is_partially_uptodate = iomap_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
+ .swap_activate = zonefs_swap_activate,
+};
+
+int zonefs_file_truncate(struct inode *inode, loff_t isize)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ loff_t old_isize;
+ enum req_op op;
+ int ret = 0;
+
+ /*
+	 * Only sequential zone files can be truncated, and truncation is
+	 * allowed only down to a 0 size (equivalent to a zone reset) or up to
+	 * the maximum file size (equivalent to a zone finish).
+ */
+ if (!zonefs_zone_is_seq(z))
+ return -EPERM;
+
+ if (!isize)
+ op = REQ_OP_ZONE_RESET;
+ else if (isize == z->z_capacity)
+ op = REQ_OP_ZONE_FINISH;
+ else
+ return -EPERM;
+
+ inode_dio_wait(inode);
+
+ /* Serialize against page faults */
+ filemap_invalidate_lock(inode->i_mapping);
+
+ /* Serialize against zonefs_iomap_begin() */
+ mutex_lock(&zi->i_truncate_mutex);
+
+ old_isize = i_size_read(inode);
+ if (isize == old_isize)
+ goto unlock;
+
+ ret = zonefs_inode_zone_mgmt(inode, op);
+ if (ret)
+ goto unlock;
+
+ /*
+ * If the mount option ZONEFS_MNTOPT_EXPLICIT_OPEN is set,
+ * take care of open zones.
+ */
+ if (z->z_flags & ZONEFS_ZONE_OPEN) {
+ /*
+ * Truncating a zone to EMPTY or FULL is the equivalent of
+ * closing the zone. For a truncation to 0, we need to
+ * re-open the zone to ensure new writes can be processed.
+ * For a truncation to the maximum file size, the zone is
+ * closed and writes cannot be accepted anymore, so clear
+ * the open flag.
+ */
+ if (!isize)
+ ret = zonefs_inode_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
+ else
+ z->z_flags &= ~ZONEFS_ZONE_OPEN;
+ }
+
+ zonefs_update_stats(inode, isize);
+ truncate_setsize(inode, isize);
+ z->z_wpoffset = isize;
+ zonefs_inode_account_active(inode);
+
+unlock:
+ mutex_unlock(&zi->i_truncate_mutex);
+ filemap_invalidate_unlock(inode->i_mapping);
+
+ return ret;
+}
+
+static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
+ int datasync)
+{
+ struct inode *inode = file_inode(file);
+ int ret = 0;
+
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return -EPERM;
+
+ /*
+ * Since only direct writes are allowed in sequential files, page cache
+ * flush is needed only for conventional zone files.
+ */
+ if (zonefs_inode_is_cnv(inode))
+ ret = file_write_and_wait_range(file, start, end);
+ if (!ret)
+ ret = blkdev_issue_flush(inode->i_sb->s_bdev);
+
+ if (ret)
+ zonefs_io_error(inode, true);
+
+ return ret;
+}
+
+static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
+{
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+ vm_fault_t ret;
+
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return VM_FAULT_SIGBUS;
+
+ /*
+ * Sanity check: only conventional zone files can have shared
+ * writeable mappings.
+ */
+ if (zonefs_inode_is_seq(inode))
+ return VM_FAULT_NOPAGE;
+
+ sb_start_pagefault(inode->i_sb);
+ file_update_time(vmf->vma->vm_file);
+
+ /* Serialize against truncates */
+ filemap_invalidate_lock_shared(inode->i_mapping);
+ ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
+ filemap_invalidate_unlock_shared(inode->i_mapping);
+
+ sb_end_pagefault(inode->i_sb);
+ return ret;
+}
+
+static const struct vm_operations_struct zonefs_file_vm_ops = {
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = zonefs_filemap_page_mkwrite,
+};
+
+static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ /*
+ * Conventional zones accept random writes, so their files can support
+ * shared writable mappings. For sequential zone files, only read
+ * mappings are possible since there are no guarantees for write
+ * ordering between msync() and page cache writeback.
+ */
+ if (zonefs_inode_is_seq(file_inode(file)) &&
+ (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
+ return -EINVAL;
+
+ file_accessed(file);
+ vma->vm_ops = &zonefs_file_vm_ops;
+
+ return 0;
+}
+
+static loff_t zonefs_file_llseek(struct file *file, loff_t offset, int whence)
+{
+ loff_t isize = i_size_read(file_inode(file));
+
+ /*
+ * Seeks are limited to below the zone size for conventional zones
+ * and below the zone write pointer for sequential zones. In both
+ * cases, this limit is the inode size.
+ */
+ return generic_file_llseek_size(file, offset, whence, isize, isize);
+}
+
+static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
+ int error, unsigned int flags)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+
+ if (error) {
+ zonefs_io_error(inode, true);
+ return error;
+ }
+
+ if (size && zonefs_inode_is_seq(inode)) {
+ /*
+ * Note that we may be seeing completions out of order,
+ * but that is not a problem since a write completed
+ * successfully necessarily means that all preceding writes
+ * were also successful. So we can safely increase the inode
+ * size to the write end location.
+ */
+ mutex_lock(&zi->i_truncate_mutex);
+ if (i_size_read(inode) < iocb->ki_pos + size) {
+ zonefs_update_stats(inode, iocb->ki_pos + size);
+ zonefs_i_size_write(inode, iocb->ki_pos + size);
+ }
+ mutex_unlock(&zi->i_truncate_mutex);
+ }
+
+ return 0;
+}
+
+static const struct iomap_dio_ops zonefs_write_dio_ops = {
+ .end_io = zonefs_file_write_dio_end_io,
+};
+
+/*
+ * Do not exceed the LFS limits nor the file zone size. If pos is under a
+ * limit, shorten the access count as needed; if pos is at or beyond a limit,
+ * return -EFBIG.
+ */
+static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
+ loff_t count)
+{
+ struct inode *inode = file_inode(file);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ loff_t limit = rlimit(RLIMIT_FSIZE);
+ loff_t max_size = z->z_capacity;
+
+ if (limit != RLIM_INFINITY) {
+ if (pos >= limit) {
+ send_sig(SIGXFSZ, current, 0);
+ return -EFBIG;
+ }
+ count = min(count, limit - pos);
+ }
+
+ if (!(file->f_flags & O_LARGEFILE))
+ max_size = min_t(loff_t, MAX_NON_LFS, max_size);
+
+ if (unlikely(pos >= max_size))
+ return -EFBIG;
+
+ return min(count, max_size - pos);
+}
+
+static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ loff_t count;
+
+ if (IS_SWAPFILE(inode))
+ return -ETXTBSY;
+
+ if (!iov_iter_count(from))
+ return 0;
+
+ if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
+ return -EINVAL;
+
+ if (iocb->ki_flags & IOCB_APPEND) {
+ if (zonefs_zone_is_cnv(z))
+ return -EINVAL;
+ mutex_lock(&zi->i_truncate_mutex);
+ iocb->ki_pos = z->z_wpoffset;
+ mutex_unlock(&zi->i_truncate_mutex);
+ }
+
+ count = zonefs_write_check_limits(file, iocb->ki_pos,
+ iov_iter_count(from));
+ if (count < 0)
+ return count;
+
+ iov_iter_truncate(from, count);
+ return iov_iter_count(from);
+}
+
+/*
+ * Handle direct writes. For sequential zone files, this is the only possible
+ * write path. For these files, check that the user is issuing writes
+ * sequentially from the end of the file. This code assumes that the block layer
+ * delivers write requests to the device in sequential order. This is always the
+ * case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
+ * elevator feature is being used (e.g. mq-deadline). The block layer
+ * automatically selects such an elevator for zoned block devices during
+ * device initialization.
+ */
+static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct super_block *sb = inode->i_sb;
+ ssize_t ret, count;
+
+ /*
+ * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
+	 * as this can cause write reordering (e.g. the first aio gets EAGAIN
+	 * on the inode lock while the second goes through and is then
+	 * unaligned with the zone write pointer).
+ */
+ if (zonefs_zone_is_seq(z) && !is_sync_kiocb(iocb) &&
+ (iocb->ki_flags & IOCB_NOWAIT))
+ return -EOPNOTSUPP;
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock(inode))
+ return -EAGAIN;
+ } else {
+ inode_lock(inode);
+ }
+
+ count = zonefs_write_checks(iocb, from);
+ if (count <= 0) {
+ ret = count;
+ goto inode_unlock;
+ }
+
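+	/* Direct IO position and length must be aligned to the block size */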
+ if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
+ ret = -EINVAL;
+ goto inode_unlock;
+ }
+
+ /* Enforce sequential writes (append only) in sequential zones */
+ if (zonefs_zone_is_seq(z)) {
+ mutex_lock(&zi->i_truncate_mutex);
+ if (iocb->ki_pos != z->z_wpoffset) {
+ mutex_unlock(&zi->i_truncate_mutex);
+ ret = -EINVAL;
+ goto inode_unlock;
+ }
+ mutex_unlock(&zi->i_truncate_mutex);
+ }
+
+ /*
+ * iomap_dio_rw() may return ENOTBLK if there was an issue with
+ * page invalidation. Overwrite that error code with EBUSY so that
+ * the user can make sense of the error.
+ */
+ ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
+ &zonefs_write_dio_ops, 0, NULL, 0);
+ if (ret == -ENOTBLK)
+ ret = -EBUSY;
+
+ if (zonefs_zone_is_seq(z) &&
+ (ret > 0 || ret == -EIOCBQUEUED)) {
+ if (ret > 0)
+ count = ret;
+
+ /*
+ * Update the zone write pointer offset assuming the write
+ * operation succeeded. If it did not, the error recovery path
+ * will correct it. Also do active seq file accounting.
+ */
+ mutex_lock(&zi->i_truncate_mutex);
+ z->z_wpoffset += count;
+ zonefs_inode_account_active(inode);
+ mutex_unlock(&zi->i_truncate_mutex);
+ }
+
+inode_unlock:
+ inode_unlock(inode);
+
+ return ret;
+}
+
+static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ ssize_t ret;
+
+ /*
+ * Direct IO writes are mandatory for sequential zone files so that the
+ * write IO issuing order is preserved.
+ */
+ if (zonefs_inode_is_seq(inode))
+ return -EIO;
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock(inode))
+ return -EAGAIN;
+ } else {
+ inode_lock(inode);
+ }
+
+ ret = zonefs_write_checks(iocb, from);
+ if (ret <= 0)
+ goto inode_unlock;
+
+ ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
+ if (ret == -EIO)
+ zonefs_io_error(inode, true);
+
+inode_unlock:
+ inode_unlock(inode);
+ if (ret > 0)
+ ret = generic_write_sync(iocb, ret);
+
+ return ret;
+}
+
+static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return -EPERM;
+
+ if (sb_rdonly(inode->i_sb))
+ return -EROFS;
+
+ /* Write operations beyond the zone capacity are not allowed */
+ if (iocb->ki_pos >= z->z_capacity)
+ return -EFBIG;
+
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ ssize_t ret = zonefs_file_dio_write(iocb, from);
+
+ if (ret != -ENOTBLK)
+ return ret;
+ }
+
+ return zonefs_file_buffered_write(iocb, from);
+}
+
+static int zonefs_file_read_dio_end_io(struct kiocb *iocb, ssize_t size,
+ int error, unsigned int flags)
+{
+ if (error) {
+ zonefs_io_error(file_inode(iocb->ki_filp), false);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct iomap_dio_ops zonefs_read_dio_ops = {
+ .end_io = zonefs_file_read_dio_end_io,
+};
+
+static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct super_block *sb = inode->i_sb;
+ loff_t isize;
+ ssize_t ret;
+
+ /* Offline zones cannot be read */
+ if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
+ return -EPERM;
+
+ if (iocb->ki_pos >= z->z_capacity)
+ return 0;
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock_shared(inode))
+ return -EAGAIN;
+ } else {
+ inode_lock_shared(inode);
+ }
+
+ /* Limit read operations to written data */
+ mutex_lock(&zi->i_truncate_mutex);
+ isize = i_size_read(inode);
+ if (iocb->ki_pos >= isize) {
+ mutex_unlock(&zi->i_truncate_mutex);
+ ret = 0;
+ goto inode_unlock;
+ }
+ iov_iter_truncate(to, isize - iocb->ki_pos);
+ mutex_unlock(&zi->i_truncate_mutex);
+
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ size_t count = iov_iter_count(to);
+
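+		/* The read position and length must be aligned to the block size */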
+ if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
+ ret = -EINVAL;
+ goto inode_unlock;
+ }
+ file_accessed(iocb->ki_filp);
+ ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
+ &zonefs_read_dio_ops, 0, NULL, 0);
+ } else {
+ ret = generic_file_read_iter(iocb, to);
+ if (ret == -EIO)
+ zonefs_io_error(inode, false);
+ }
+
+inode_unlock:
+ inode_unlock_shared(inode);
+
+ return ret;
+}
+
+static ssize_t zonefs_file_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags)
+{
+ struct inode *inode = file_inode(in);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ loff_t isize;
+ ssize_t ret = 0;
+
+ /* Offline zones cannot be read */
+ if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
+ return -EPERM;
+
+ if (*ppos >= z->z_capacity)
+ return 0;
+
+ inode_lock_shared(inode);
+
+ /* Limit read operations to written data */
+ mutex_lock(&zi->i_truncate_mutex);
+ isize = i_size_read(inode);
+ if (*ppos >= isize)
+ len = 0;
+ else
+ len = min_t(loff_t, len, isize - *ppos);
+ mutex_unlock(&zi->i_truncate_mutex);
+
+ if (len > 0) {
+ ret = filemap_splice_read(in, ppos, pipe, len, flags);
+ if (ret == -EIO)
+ zonefs_io_error(inode, false);
+ }
+
+ inode_unlock_shared(inode);
+ return ret;
+}
+
+/*
+ * Write open accounting is done only for sequential files.
+ */
+static inline bool zonefs_seq_file_need_wro(struct inode *inode,
+ struct file *file)
+{
+ if (zonefs_inode_is_cnv(inode))
+ return false;
+
+ if (!(file->f_mode & FMODE_WRITE))
+ return false;
+
+ return true;
+}
+
+static int zonefs_seq_file_write_open(struct inode *inode)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ int ret = 0;
+
+ mutex_lock(&zi->i_truncate_mutex);
+
+ if (!zi->i_wr_refcnt) {
+ struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
+ unsigned int wro = atomic_inc_return(&sbi->s_wro_seq_files);
+
+ if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
+
+ if (sbi->s_max_wro_seq_files
+ && wro > sbi->s_max_wro_seq_files) {
+ atomic_dec(&sbi->s_wro_seq_files);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (i_size_read(inode) < z->z_capacity) {
+ ret = zonefs_inode_zone_mgmt(inode,
+ REQ_OP_ZONE_OPEN);
+ if (ret) {
+ atomic_dec(&sbi->s_wro_seq_files);
+ goto unlock;
+ }
+ z->z_flags |= ZONEFS_ZONE_OPEN;
+ zonefs_inode_account_active(inode);
+ }
+ }
+ }
+
+ zi->i_wr_refcnt++;
+
+unlock:
+ mutex_unlock(&zi->i_truncate_mutex);
+
+ return ret;
+}
+
+static int zonefs_file_open(struct inode *inode, struct file *file)
+{
+ int ret;
+
+ file->f_mode |= FMODE_CAN_ODIRECT;
+ ret = generic_file_open(inode, file);
+ if (ret)
+ return ret;
+
+ if (zonefs_seq_file_need_wro(inode, file))
+ return zonefs_seq_file_write_open(inode);
+
+ return 0;
+}
+
+static void zonefs_seq_file_write_close(struct inode *inode)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct super_block *sb = inode->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ int ret = 0;
+
+ mutex_lock(&zi->i_truncate_mutex);
+
+ zi->i_wr_refcnt--;
+ if (zi->i_wr_refcnt)
+ goto unlock;
+
+ /*
+ * The file zone may not be open anymore (e.g. the file was truncated to
+ * its maximum size or it was fully written). For this case, we only
+ * need to decrement the write open count.
+ */
+ if (z->z_flags & ZONEFS_ZONE_OPEN) {
+ ret = zonefs_inode_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
+ if (ret) {
+ __zonefs_io_error(inode, false);
+ /*
+ * Leaving zones explicitly open may lead to a state
+ * where most zones cannot be written (zone resources
+ * exhausted). So take preventive action by remounting
+ * read-only.
+ */
+ if (z->z_flags & ZONEFS_ZONE_OPEN &&
+ !(sb->s_flags & SB_RDONLY)) {
+ zonefs_warn(sb,
+ "closing zone at %llu failed %d\n",
+ z->z_sector, ret);
+ zonefs_warn(sb,
+ "remounting filesystem read-only\n");
+ sb->s_flags |= SB_RDONLY;
+ }
+ goto unlock;
+ }
+
+ z->z_flags &= ~ZONEFS_ZONE_OPEN;
+ zonefs_inode_account_active(inode);
+ }
+
+ atomic_dec(&sbi->s_wro_seq_files);
+
+unlock:
+ mutex_unlock(&zi->i_truncate_mutex);
+}
+
+static int zonefs_file_release(struct inode *inode, struct file *file)
+{
+ /*
+ * If we explicitly open a zone we must close it again as well, but the
+ * zone management operation can fail (either due to an IO error or as
+ * the zone has gone offline or read-only). Make sure we don't fail the
+ * close(2) for user-space.
+ */
+ if (zonefs_seq_file_need_wro(inode, file))
+ zonefs_seq_file_write_close(inode);
+
+ return 0;
+}
+
+const struct file_operations zonefs_file_operations = {
+ .open = zonefs_file_open,
+ .release = zonefs_file_release,
+ .fsync = zonefs_file_fsync,
+ .mmap = zonefs_file_mmap,
+ .llseek = zonefs_file_llseek,
+ .read_iter = zonefs_file_read_iter,
+ .write_iter = zonefs_file_write_iter,
+ .splice_read = zonefs_file_splice_read,
+ .splice_write = iter_file_splice_write,
+ .iopoll = iocb_bio_iopoll,
+};
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
new file mode 100644
index 000000000..9d1a9808f
--- /dev/null
+++ b/fs/zonefs/super.c
@@ -0,0 +1,1451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Simple file system for zoned block devices exposing zones as files.
+ *
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ */
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/magic.h>
+#include <linux/iomap.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/statfs.h>
+#include <linux/writeback.h>
+#include <linux/quotaops.h>
+#include <linux/seq_file.h>
+#include <linux/parser.h>
+#include <linux/uio.h>
+#include <linux/mman.h>
+#include <linux/sched/mm.h>
+#include <linux/crc32.h>
+#include <linux/task_io_accounting_ops.h>
+
+#include "zonefs.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+/*
+ * Get the name of a zone group directory.
+ */
+static const char *zonefs_zgroup_name(enum zonefs_ztype ztype)
+{
+ switch (ztype) {
+ case ZONEFS_ZTYPE_CNV:
+ return "cnv";
+ case ZONEFS_ZTYPE_SEQ:
+ return "seq";
+ default:
+ WARN_ON_ONCE(1);
+ return "???";
+ }
+}
+
+/*
+ * Manage the active zone count.
+ */
+static void zonefs_account_active(struct super_block *sb,
+ struct zonefs_zone *z)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+
+ if (zonefs_zone_is_cnv(z))
+ return;
+
+ /*
+ * For zones that transitioned to the offline or readonly condition,
+ * we only need to clear the active state.
+ */
+ if (z->z_flags & (ZONEFS_ZONE_OFFLINE | ZONEFS_ZONE_READONLY))
+ goto out;
+
+ /*
+ * If the zone is active, that is, if it is explicitly open or
+ * partially written, check if it was already accounted as active.
+ */
+ if ((z->z_flags & ZONEFS_ZONE_OPEN) ||
+ (z->z_wpoffset > 0 && z->z_wpoffset < z->z_capacity)) {
+ if (!(z->z_flags & ZONEFS_ZONE_ACTIVE)) {
+ z->z_flags |= ZONEFS_ZONE_ACTIVE;
+ atomic_inc(&sbi->s_active_seq_files);
+ }
+ return;
+ }
+
+out:
+ /* The zone is not active. If it was, update the active count */
+ if (z->z_flags & ZONEFS_ZONE_ACTIVE) {
+ z->z_flags &= ~ZONEFS_ZONE_ACTIVE;
+ atomic_dec(&sbi->s_active_seq_files);
+ }
+}
+
+/*
+ * Manage the active zone count. Called with zi->i_truncate_mutex held.
+ */
+void zonefs_inode_account_active(struct inode *inode)
+{
+ lockdep_assert_held(&ZONEFS_I(inode)->i_truncate_mutex);
+
+ return zonefs_account_active(inode->i_sb, zonefs_inode_zone(inode));
+}
+
+/*
+ * Execute a zone management operation.
+ */
+static int zonefs_zone_mgmt(struct super_block *sb,
+ struct zonefs_zone *z, enum req_op op)
+{
+ int ret;
+
+ /*
+ * With ZNS drives, closing an explicitly open zone that has not been
+ * written will change the zone state to "closed", that is, the zone
+ * will remain active. Since this can then cause failure of explicit
+ * open operation on other zones if the drive active zone resources
+ * are exceeded, make sure that the zone does not remain active by
+ * resetting it.
+ */
+ if (op == REQ_OP_ZONE_CLOSE && !z->z_wpoffset)
+ op = REQ_OP_ZONE_RESET;
+
+ trace_zonefs_zone_mgmt(sb, z, op);
+ ret = blkdev_zone_mgmt(sb->s_bdev, op, z->z_sector,
+ z->z_size >> SECTOR_SHIFT, GFP_NOFS);
+ if (ret) {
+ zonefs_err(sb,
+ "Zone management operation %s at %llu failed %d\n",
+ blk_op_str(op), z->z_sector, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int zonefs_inode_zone_mgmt(struct inode *inode, enum req_op op)
+{
+ lockdep_assert_held(&ZONEFS_I(inode)->i_truncate_mutex);
+
+ return zonefs_zone_mgmt(inode->i_sb, zonefs_inode_zone(inode), op);
+}
+
+void zonefs_i_size_write(struct inode *inode, loff_t isize)
+{
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+
+ i_size_write(inode, isize);
+
+ /*
+ * A full zone is no longer open/active and does not need
+ * explicit closing.
+ */
+ if (isize >= z->z_capacity) {
+ struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
+
+ if (z->z_flags & ZONEFS_ZONE_ACTIVE)
+ atomic_dec(&sbi->s_active_seq_files);
+ z->z_flags &= ~(ZONEFS_ZONE_OPEN | ZONEFS_ZONE_ACTIVE);
+ }
+}
+
+void zonefs_update_stats(struct inode *inode, loff_t new_isize)
+{
+ struct super_block *sb = inode->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ loff_t old_isize = i_size_read(inode);
+ loff_t nr_blocks;
+
+ if (new_isize == old_isize)
+ return;
+
+ spin_lock(&sbi->s_lock);
+
+ /*
+	 * This may be called for an update after an IO error, so the new size
+	 * may be smaller or larger than the current size: clamp the used
+	 * block count accordingly.
+ */
+ if (new_isize < old_isize) {
+ nr_blocks = (old_isize - new_isize) >> sb->s_blocksize_bits;
+ if (sbi->s_used_blocks > nr_blocks)
+ sbi->s_used_blocks -= nr_blocks;
+ else
+ sbi->s_used_blocks = 0;
+ } else {
+ sbi->s_used_blocks +=
+ (new_isize - old_isize) >> sb->s_blocksize_bits;
+ if (sbi->s_used_blocks > sbi->s_blocks)
+ sbi->s_used_blocks = sbi->s_blocks;
+ }
+
+ spin_unlock(&sbi->s_lock);
+}
+
+/*
+ * Check a zone condition. Return the amount of written (and still readable)
+ * data in the zone.
+ */
+static loff_t zonefs_check_zone_condition(struct super_block *sb,
+ struct zonefs_zone *z,
+ struct blk_zone *zone)
+{
+ switch (zone->cond) {
+ case BLK_ZONE_COND_OFFLINE:
+ zonefs_warn(sb, "Zone %llu: offline zone\n",
+ z->z_sector);
+ z->z_flags |= ZONEFS_ZONE_OFFLINE;
+ return 0;
+ case BLK_ZONE_COND_READONLY:
+ /*
+ * The write pointer of read-only zones is invalid, so we cannot
+ * determine the zone wpoffset (inode size). We thus keep the
+ * zone wpoffset as is, which leads to an empty file
+ * (wpoffset == 0) on mount. For a runtime error, this keeps
+ * the inode size as it was when last updated so that the user
+ * can recover data.
+ */
+ zonefs_warn(sb, "Zone %llu: read-only zone\n",
+ z->z_sector);
+ z->z_flags |= ZONEFS_ZONE_READONLY;
+ if (zonefs_zone_is_cnv(z))
+ return z->z_capacity;
+ return z->z_wpoffset;
+ case BLK_ZONE_COND_FULL:
+ /* The write pointer of full zones is invalid. */
+ return z->z_capacity;
+ default:
+ if (zonefs_zone_is_cnv(z))
+ return z->z_capacity;
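+		/* For sequential zones, readable data ends at the zone write pointer */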
+ return (zone->wp - zone->start) << SECTOR_SHIFT;
+ }
+}
+
+/*
+ * Check a zone condition and adjust its inode access permissions for
+ * offline and readonly zones.
+ */
+static void zonefs_inode_update_mode(struct inode *inode)
+{
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+
+ if (z->z_flags & ZONEFS_ZONE_OFFLINE) {
+ /* Offline zones cannot be read nor written */
+ inode->i_flags |= S_IMMUTABLE;
+ inode->i_mode &= ~0777;
+ } else if (z->z_flags & ZONEFS_ZONE_READONLY) {
+ /* Readonly zones cannot be written */
+ inode->i_flags |= S_IMMUTABLE;
+ if (z->z_flags & ZONEFS_ZONE_INIT_MODE)
+ inode->i_mode &= ~0777;
+ else
+ inode->i_mode &= ~0222;
+ }
+
+ z->z_flags &= ~ZONEFS_ZONE_INIT_MODE;
+ z->z_mode = inode->i_mode;
+}
+
+struct zonefs_ioerr_data {
+ struct inode *inode;
+ bool write;
+};
+
+static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ void *data)
+{
+ struct zonefs_ioerr_data *err = data;
+ struct inode *inode = err->inode;
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct super_block *sb = inode->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ loff_t isize, data_size;
+
+ /*
+ * Check the zone condition: if the zone is not "bad" (offline or
+ * read-only), read errors are simply signaled to the IO issuer as long
+ * as there is no inconsistency between the inode size and the amount of
+	 * data written in the zone (data_size).
+ */
+ data_size = zonefs_check_zone_condition(sb, z, zone);
+ isize = i_size_read(inode);
+ if (!(z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)) &&
+ !err->write && isize == data_size)
+ return 0;
+
+ /*
+ * At this point, we detected either a bad zone or an inconsistency
+ * between the inode size and the amount of data written in the zone.
+ * For the latter case, the cause may be a write IO error or an external
+ * action on the device. Two error patterns exist:
+ * 1) The inode size is lower than the amount of data in the zone:
+	 *    a write operation partially failed and data was written at the end
+ * of the file. This can happen in the case of a large direct IO
+ * needing several BIOs and/or write requests to be processed.
+ * 2) The inode size is larger than the amount of data in the zone:
+ * this can happen with a deferred write error with the use of the
+ * device side write cache after getting successful write IO
+ * completions. Other possibilities are (a) an external corruption,
+ * e.g. an application reset the zone directly, or (b) the device
+ * has a serious problem (e.g. firmware bug).
+ *
+ * In all cases, warn about inode size inconsistency and handle the
+ * IO error according to the zone condition and to the mount options.
+ */
+ if (zonefs_zone_is_seq(z) && isize != data_size)
+ zonefs_warn(sb,
+ "inode %lu: invalid size %lld (should be %lld)\n",
+ inode->i_ino, isize, data_size);
+
+ /*
+ * First handle bad zones signaled by hardware. The mount options
+ * errors=zone-ro and errors=zone-offline result in changing the
+ * zone condition to read-only and offline respectively, as if the
+ * condition was signaled by the hardware.
+ */
+ if ((z->z_flags & ZONEFS_ZONE_OFFLINE) ||
+ (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)) {
+ zonefs_warn(sb, "inode %lu: read/write access disabled\n",
+ inode->i_ino);
+ if (!(z->z_flags & ZONEFS_ZONE_OFFLINE))
+ z->z_flags |= ZONEFS_ZONE_OFFLINE;
+ zonefs_inode_update_mode(inode);
+ data_size = 0;
+ } else if ((z->z_flags & ZONEFS_ZONE_READONLY) ||
+ (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)) {
+ zonefs_warn(sb, "inode %lu: write access disabled\n",
+ inode->i_ino);
+ if (!(z->z_flags & ZONEFS_ZONE_READONLY))
+ z->z_flags |= ZONEFS_ZONE_READONLY;
+ zonefs_inode_update_mode(inode);
+ data_size = isize;
+ } else if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO &&
+ data_size > isize) {
+ /* Do not expose garbage data */
+ data_size = isize;
+ }
+
+ /*
+ * If the filesystem is mounted with the explicit-open mount option, we
+ * need to clear the ZONEFS_ZONE_OPEN flag if the zone transitioned to
+ * the read-only or offline condition, to avoid attempting an explicit
+ * close of the zone when the inode file is closed.
+ */
+ if ((sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) &&
+ (z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)))
+ z->z_flags &= ~ZONEFS_ZONE_OPEN;
+
+ /*
+	 * If errors=remount-ro was specified, any error results in remounting
+ * the volume as read-only.
+ */
+ if ((sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO) && !sb_rdonly(sb)) {
+ zonefs_warn(sb, "remounting filesystem read-only\n");
+ sb->s_flags |= SB_RDONLY;
+ }
+
+ /*
+ * Update block usage stats and the inode size to prevent access to
+ * invalid data.
+ */
+ zonefs_update_stats(inode, data_size);
+ zonefs_i_size_write(inode, data_size);
+ z->z_wpoffset = data_size;
+ zonefs_inode_account_active(inode);
+
+ return 0;
+}
+
+/*
+ * When a file IO error occurs, check the file zone to see if there is a change
+ * in the zone condition (e.g. offline or read-only). For a failed write to a
+ * sequential zone, the zone write pointer position must also be checked to
+ * correct, if needed, the file size and zonefs inode write pointer offset
+ * (which can be out of sync with the drive due to partial write failures).
+ */
+void __zonefs_io_error(struct inode *inode, bool write)
+{
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct super_block *sb = inode->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ unsigned int noio_flag;
+ unsigned int nr_zones = 1;
+ struct zonefs_ioerr_data err = {
+ .inode = inode,
+ .write = write,
+ };
+ int ret;
+
+ /*
+ * The only files that have more than one zone are conventional zone
+ * files with aggregated conventional zones, for which the inode zone
+ * size is always larger than the device zone size.
+ */
+ if (z->z_size > bdev_zone_sectors(sb->s_bdev))
+ nr_zones = z->z_size >>
+ (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
+
+ /*
+ * Memory allocations in blkdev_report_zones() can trigger a memory
+ * reclaim which may in turn cause a recursion into zonefs as well as
+ * struct request allocations for the same device. The former case may
+ * end up in a deadlock on the inode truncate mutex, while the latter
+ * may prevent IO forward progress. Executing the report zones under
+ * the GFP_NOIO context avoids both problems.
+ */
+ noio_flag = memalloc_noio_save();
+ ret = blkdev_report_zones(sb->s_bdev, z->z_sector, nr_zones,
+ zonefs_io_error_cb, &err);
+ if (ret != nr_zones)
+ zonefs_err(sb, "Get inode %lu zone information failed %d\n",
+ inode->i_ino, ret);
+ memalloc_noio_restore(noio_flag);
+}
+
+static struct kmem_cache *zonefs_inode_cachep;
+
+static struct inode *zonefs_alloc_inode(struct super_block *sb)
+{
+ struct zonefs_inode_info *zi;
+
+ zi = alloc_inode_sb(sb, zonefs_inode_cachep, GFP_KERNEL);
+ if (!zi)
+ return NULL;
+
+ inode_init_once(&zi->i_vnode);
+ mutex_init(&zi->i_truncate_mutex);
+ zi->i_wr_refcnt = 0;
+
+ return &zi->i_vnode;
+}
+
+static void zonefs_free_inode(struct inode *inode)
+{
+ kmem_cache_free(zonefs_inode_cachep, ZONEFS_I(inode));
+}
+
+/*
+ * File system stat.
+ */
+static int zonefs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct super_block *sb = dentry->d_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ enum zonefs_ztype t;
+
+ buf->f_type = ZONEFS_MAGIC;
+ buf->f_bsize = sb->s_blocksize;
+ buf->f_namelen = ZONEFS_NAME_MAX;
+
+ spin_lock(&sbi->s_lock);
+
+ buf->f_blocks = sbi->s_blocks;
+ if (WARN_ON(sbi->s_used_blocks > sbi->s_blocks))
+ buf->f_bfree = 0;
+ else
+ buf->f_bfree = buf->f_blocks - sbi->s_used_blocks;
+ buf->f_bavail = buf->f_bfree;
+
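+	/* One inode per zone file, plus one for each non-empty zone group directory */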
+ for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
+ if (sbi->s_zgroup[t].g_nr_zones)
+ buf->f_files += sbi->s_zgroup[t].g_nr_zones + 1;
+ }
+ buf->f_ffree = 0;
+
+ spin_unlock(&sbi->s_lock);
+
+ buf->f_fsid = uuid_to_fsid(sbi->s_uuid.b);
+
+ return 0;
+}
+
+enum {
+ Opt_errors_ro, Opt_errors_zro, Opt_errors_zol, Opt_errors_repair,
+ Opt_explicit_open, Opt_err,
+};
+
+static const match_table_t tokens = {
+ { Opt_errors_ro, "errors=remount-ro"},
+ { Opt_errors_zro, "errors=zone-ro"},
+ { Opt_errors_zol, "errors=zone-offline"},
+ { Opt_errors_repair, "errors=repair"},
+ { Opt_explicit_open, "explicit-open" },
+ { Opt_err, NULL}
+};
+
+static int zonefs_parse_options(struct super_block *sb, char *options)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ substring_t args[MAX_OPT_ARGS];
+ char *p;
+
+ if (!options)
+ return 0;
+
+ while ((p = strsep(&options, ",")) != NULL) {
+ int token;
+
+ if (!*p)
+ continue;
+
+ token = match_token(p, tokens, args);
+ switch (token) {
+ case Opt_errors_ro:
+ sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
+ sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_RO;
+ break;
+ case Opt_errors_zro:
+ sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
+ sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZRO;
+ break;
+ case Opt_errors_zol:
+ sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
+ sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZOL;
+ break;
+ case Opt_errors_repair:
+ sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
+ sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_REPAIR;
+ break;
+ case Opt_explicit_open:
+ sbi->s_mount_opts |= ZONEFS_MNTOPT_EXPLICIT_OPEN;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int zonefs_show_options(struct seq_file *seq, struct dentry *root)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(root->d_sb);
+
+ if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO)
+ seq_puts(seq, ",errors=remount-ro");
+ if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)
+ seq_puts(seq, ",errors=zone-ro");
+ if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)
+ seq_puts(seq, ",errors=zone-offline");
+ if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_REPAIR)
+ seq_puts(seq, ",errors=repair");
+
+ return 0;
+}
+
+static int zonefs_remount(struct super_block *sb, int *flags, char *data)
+{
+ sync_filesystem(sb);
+
+ return zonefs_parse_options(sb, data);
+}
+
+static int zonefs_inode_setattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct iattr *iattr)
+{
+ struct inode *inode = d_inode(dentry);
+ int ret;
+
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return -EPERM;
+
+ ret = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
+ if (ret)
+ return ret;
+
+ /*
+ * Since files and directories cannot be created nor deleted, do not
+ * allow setting any write attributes on the sub-directories grouping
+ * files by zone type.
+ */
+ if ((iattr->ia_valid & ATTR_MODE) && S_ISDIR(inode->i_mode) &&
+ (iattr->ia_mode & 0222))
+ return -EPERM;
+
+ if (((iattr->ia_valid & ATTR_UID) &&
+ !uid_eq(iattr->ia_uid, inode->i_uid)) ||
+ ((iattr->ia_valid & ATTR_GID) &&
+ !gid_eq(iattr->ia_gid, inode->i_gid))) {
+ ret = dquot_transfer(&nop_mnt_idmap, inode, iattr);
+ if (ret)
+ return ret;
+ }
+
+ if (iattr->ia_valid & ATTR_SIZE) {
+ ret = zonefs_file_truncate(inode, iattr->ia_size);
+ if (ret)
+ return ret;
+ }
+
+ setattr_copy(&nop_mnt_idmap, inode, iattr);
+
+ if (S_ISREG(inode->i_mode)) {
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+
+ z->z_mode = inode->i_mode;
+ z->z_uid = inode->i_uid;
+ z->z_gid = inode->i_gid;
+ }
+
+ return 0;
+}
+
+static const struct inode_operations zonefs_file_inode_operations = {
+ .setattr = zonefs_inode_setattr,
+};
+
+static long zonefs_fname_to_fno(const struct qstr *fname)
+{
+ const char *name = fname->name;
+ unsigned int len = fname->len;
+ long fno = 0, shift = 1;
+ const char *rname;
+ char c = *name;
+ unsigned int i;
+
+ /*
+ * File names are always a base-10 number string without any
+ * leading 0s.
+ */
+ if (!isdigit(c))
+ return -ENOENT;
+
+ if (len > 1 && c == '0')
+ return -ENOENT;
+
+ if (len == 1)
+ return c - '0';
+
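+	/* Parse digits from least to most significant, e.g. "123" = 3*1 + 2*10 + 1*100 */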
+ for (i = 0, rname = name + len - 1; i < len; i++, rname--) {
+ c = *rname;
+ if (!isdigit(c))
+ return -ENOENT;
+ fno += (c - '0') * shift;
+ shift *= 10;
+ }
+
+ return fno;
+}
+
+static struct inode *zonefs_get_file_inode(struct inode *dir,
+ struct dentry *dentry)
+{
+ struct zonefs_zone_group *zgroup = dir->i_private;
+ struct super_block *sb = dir->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ struct zonefs_zone *z;
+ struct inode *inode;
+ ino_t ino;
+ long fno;
+
+ /* Get the file number from the file name */
+ fno = zonefs_fname_to_fno(&dentry->d_name);
+ if (fno < 0)
+ return ERR_PTR(fno);
+
+ if (!zgroup->g_nr_zones || fno >= zgroup->g_nr_zones)
+ return ERR_PTR(-ENOENT);
+
+ z = &zgroup->g_zones[fno];
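+	/* The inode number is the file's zone number (start sector / zone size) */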
+ ino = z->z_sector >> sbi->s_zone_sectors_shift;
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW)) {
+ WARN_ON_ONCE(inode->i_private != z);
+ return inode;
+ }
+
+ inode->i_ino = ino;
+ inode->i_mode = z->z_mode;
+ inode->i_mtime = inode->i_atime = inode_set_ctime_to_ts(inode,
+ inode_get_ctime(dir));
+ inode->i_uid = z->z_uid;
+ inode->i_gid = z->z_gid;
+ inode->i_size = z->z_wpoffset;
+ inode->i_blocks = z->z_capacity >> SECTOR_SHIFT;
+ inode->i_private = z;
+
+ inode->i_op = &zonefs_file_inode_operations;
+ inode->i_fop = &zonefs_file_operations;
+ inode->i_mapping->a_ops = &zonefs_file_aops;
+
+ /* Update the inode access rights depending on the zone condition */
+ zonefs_inode_update_mode(inode);
+
+ unlock_new_inode(inode);
+
+ return inode;
+}
+
+static struct inode *zonefs_get_zgroup_inode(struct super_block *sb,
+ enum zonefs_ztype ztype)
+{
+ struct inode *root = d_inode(sb->s_root);
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ struct inode *inode;
+ ino_t ino = bdev_nr_zones(sb->s_bdev) + ztype + 1;
+
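+	/* Group directory inode numbers come right after the root inode number */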
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ inode->i_ino = ino;
+ inode_init_owner(&nop_mnt_idmap, inode, root, S_IFDIR | 0555);
+ inode->i_size = sbi->s_zgroup[ztype].g_nr_zones;
+ inode->i_mtime = inode->i_atime = inode_set_ctime_to_ts(inode,
+ inode_get_ctime(root));
+ inode->i_private = &sbi->s_zgroup[ztype];
+ set_nlink(inode, 2);
+
+ inode->i_op = &zonefs_dir_inode_operations;
+ inode->i_fop = &zonefs_dir_operations;
+
+ unlock_new_inode(inode);
+
+ return inode;
+}
+
+
+static struct inode *zonefs_get_dir_inode(struct inode *dir,
+ struct dentry *dentry)
+{
+ struct super_block *sb = dir->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ const char *name = dentry->d_name.name;
+ enum zonefs_ztype ztype;
+
+ /*
+ * We only need to check for the "seq" directory and
+ * the "cnv" directory if we have conventional zones.
+ */
+ if (dentry->d_name.len != 3)
+ return ERR_PTR(-ENOENT);
+
+ for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
+ if (sbi->s_zgroup[ztype].g_nr_zones &&
+ memcmp(name, zonefs_zgroup_name(ztype), 3) == 0)
+ break;
+ }
+ if (ztype == ZONEFS_ZTYPE_MAX)
+ return ERR_PTR(-ENOENT);
+
+ return zonefs_get_zgroup_inode(sb, ztype);
+}
+
+static struct dentry *zonefs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+{
+ struct inode *inode;
+
+ if (dentry->d_name.len > ZONEFS_NAME_MAX)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ if (dir == d_inode(dir->i_sb->s_root))
+ inode = zonefs_get_dir_inode(dir, dentry);
+ else
+ inode = zonefs_get_file_inode(dir, dentry);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
+ return d_splice_alias(inode, dentry);
+}
+
+static int zonefs_readdir_root(struct file *file, struct dir_context *ctx)
+{
+ struct inode *inode = file_inode(file);
+ struct super_block *sb = inode->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ enum zonefs_ztype ztype = ZONEFS_ZTYPE_CNV;
+ ino_t base_ino = bdev_nr_zones(sb->s_bdev) + 1;
+
+ if (ctx->pos >= inode->i_size)
+ return 0;
+
+ if (!dir_emit_dots(file, ctx))
+ return 0;
+
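+	/* Position 2 is the first zone group directory: "cnv" if it exists, "seq" otherwise */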
+ if (ctx->pos == 2) {
+ if (!sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones)
+ ztype = ZONEFS_ZTYPE_SEQ;
+
+ if (!dir_emit(ctx, zonefs_zgroup_name(ztype), 3,
+ base_ino + ztype, DT_DIR))
+ return 0;
+ ctx->pos++;
+ }
+
+ if (ctx->pos == 3 && ztype != ZONEFS_ZTYPE_SEQ) {
+ ztype = ZONEFS_ZTYPE_SEQ;
+ if (!dir_emit(ctx, zonefs_zgroup_name(ztype), 3,
+ base_ino + ztype, DT_DIR))
+ return 0;
+ ctx->pos++;
+ }
+
+ return 0;
+}
+
+static int zonefs_readdir_zgroup(struct file *file,
+ struct dir_context *ctx)
+{
+ struct inode *inode = file_inode(file);
+ struct zonefs_zone_group *zgroup = inode->i_private;
+ struct super_block *sb = inode->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ struct zonefs_zone *z;
+ int fname_len;
+ char *fname;
+ ino_t ino;
+ int f;
+
+ /*
+ * The size of zone group directories is equal to the number
+	 * of zone files in the group and does not include the "." and
+ * ".." entries. Hence the "+ 2" here.
+ */
+ if (ctx->pos >= inode->i_size + 2)
+ return 0;
+
+ if (!dir_emit_dots(file, ctx))
+ return 0;
+
+ fname = kmalloc(ZONEFS_NAME_MAX, GFP_KERNEL);
+ if (!fname)
+ return -ENOMEM;
+
+ for (f = ctx->pos - 2; f < zgroup->g_nr_zones; f++) {
+ z = &zgroup->g_zones[f];
+ ino = z->z_sector >> sbi->s_zone_sectors_shift;
+ fname_len = snprintf(fname, ZONEFS_NAME_MAX - 1, "%u", f);
+ if (!dir_emit(ctx, fname, fname_len, ino, DT_REG))
+ break;
+ ctx->pos++;
+ }
+
+ kfree(fname);
+
+ return 0;
+}
+
+static int zonefs_readdir(struct file *file, struct dir_context *ctx)
+{
+ struct inode *inode = file_inode(file);
+
+ if (inode == d_inode(inode->i_sb->s_root))
+ return zonefs_readdir_root(file, ctx);
+
+ return zonefs_readdir_zgroup(file, ctx);
+}
+
+const struct inode_operations zonefs_dir_inode_operations = {
+ .lookup = zonefs_lookup,
+ .setattr = zonefs_inode_setattr,
+};
+
+const struct file_operations zonefs_dir_operations = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .iterate_shared = zonefs_readdir,
+};
+
+struct zonefs_zone_data {
+ struct super_block *sb;
+ unsigned int nr_zones[ZONEFS_ZTYPE_MAX];
+ sector_t cnv_zone_start;
+ struct blk_zone *zones;
+};
+
+static int zonefs_get_zone_info_cb(struct blk_zone *zone, unsigned int idx,
+ void *data)
+{
+ struct zonefs_zone_data *zd = data;
+ struct super_block *sb = zd->sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+
+ /*
+ * We do not care about the first zone: it contains the super block
+	 * and is not exposed as a file.
+ */
+ if (!idx)
+ return 0;
+
+ /*
+ * Count the number of zones that will be exposed as files.
+ * For sequential zones, we always have as many files as zones.
+	 * For conventional zones, the number of files depends on whether
+	 * conventional zone aggregation is enabled.
+ */
+ switch (zone->type) {
+ case BLK_ZONE_TYPE_CONVENTIONAL:
+ if (sbi->s_features & ZONEFS_F_AGGRCNV) {
+ /* One file per set of contiguous conventional zones */
+ if (!(sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones) ||
+ zone->start != zd->cnv_zone_start)
+ sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones++;
+ zd->cnv_zone_start = zone->start + zone->len;
+ } else {
+ /* One file per zone */
+ sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones++;
+ }
+ break;
+ case BLK_ZONE_TYPE_SEQWRITE_REQ:
+ case BLK_ZONE_TYPE_SEQWRITE_PREF:
+ sbi->s_zgroup[ZONEFS_ZTYPE_SEQ].g_nr_zones++;
+ break;
+ default:
+ zonefs_err(zd->sb, "Unsupported zone type 0x%x\n",
+ zone->type);
+ return -EIO;
+ }
+
+ memcpy(&zd->zones[idx], zone, sizeof(struct blk_zone));
+
+ return 0;
+}
+
+static int zonefs_get_zone_info(struct zonefs_zone_data *zd)
+{
+ struct block_device *bdev = zd->sb->s_bdev;
+ int ret;
+
+ zd->zones = kvcalloc(bdev_nr_zones(bdev), sizeof(struct blk_zone),
+ GFP_KERNEL);
+ if (!zd->zones)
+ return -ENOMEM;
+
+ /* Get zones information from the device */
+ ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
+ zonefs_get_zone_info_cb, zd);
+ if (ret < 0) {
+ zonefs_err(zd->sb, "Zone report failed %d\n", ret);
+ return ret;
+ }
+
+ if (ret != bdev_nr_zones(bdev)) {
+ zonefs_err(zd->sb, "Invalid zone report (%d/%u zones)\n",
+ ret, bdev_nr_zones(bdev));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static inline void zonefs_free_zone_info(struct zonefs_zone_data *zd)
+{
+ kvfree(zd->zones);
+}
+
+/*
+ * Create a zone group and populate it with zone files.
+ */
+static int zonefs_init_zgroup(struct super_block *sb,
+ struct zonefs_zone_data *zd,
+ enum zonefs_ztype ztype)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ struct zonefs_zone_group *zgroup = &sbi->s_zgroup[ztype];
+ struct blk_zone *zone, *next, *end;
+ struct zonefs_zone *z;
+ unsigned int n = 0;
+ int ret;
+
+ /* Allocate the zone group. If it is empty, we have nothing to do. */
+ if (!zgroup->g_nr_zones)
+ return 0;
+
+ zgroup->g_zones = kvcalloc(zgroup->g_nr_zones,
+ sizeof(struct zonefs_zone), GFP_KERNEL);
+ if (!zgroup->g_zones)
+ return -ENOMEM;
+
+ /*
+ * Initialize the zone groups using the device zone information.
+ * We always skip the first zone as it contains the super block
+	 * and is not used to back a file.
+ */
+ end = zd->zones + bdev_nr_zones(sb->s_bdev);
+ for (zone = &zd->zones[1]; zone < end; zone = next) {
+
+ next = zone + 1;
+ if (zonefs_zone_type(zone) != ztype)
+ continue;
+
+ if (WARN_ON_ONCE(n >= zgroup->g_nr_zones))
+ return -EINVAL;
+
+ /*
+ * For conventional zones, contiguous zones can be aggregated
+ * together to form larger files. Note that this overwrites the
+ * length of the first zone of the set of contiguous zones
+ * aggregated together. If one offline or read-only zone is
+ * found, assume that all zones aggregated have the same
+ * condition.
+ */
+ if (ztype == ZONEFS_ZTYPE_CNV &&
+ (sbi->s_features & ZONEFS_F_AGGRCNV)) {
+ for (; next < end; next++) {
+ if (zonefs_zone_type(next) != ztype)
+ break;
+ zone->len += next->len;
+ zone->capacity += next->capacity;
+ if (next->cond == BLK_ZONE_COND_READONLY &&
+ zone->cond != BLK_ZONE_COND_OFFLINE)
+ zone->cond = BLK_ZONE_COND_READONLY;
+ else if (next->cond == BLK_ZONE_COND_OFFLINE)
+ zone->cond = BLK_ZONE_COND_OFFLINE;
+ }
+ }
+
+ z = &zgroup->g_zones[n];
+ if (ztype == ZONEFS_ZTYPE_CNV)
+ z->z_flags |= ZONEFS_ZONE_CNV;
+ z->z_sector = zone->start;
+ z->z_size = zone->len << SECTOR_SHIFT;
+ if (z->z_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT &&
+ !(sbi->s_features & ZONEFS_F_AGGRCNV)) {
+ zonefs_err(sb,
+ "Invalid zone size %llu (device zone sectors %llu)\n",
+ z->z_size,
+ bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT);
+ return -EINVAL;
+ }
+
+ z->z_capacity = min_t(loff_t, MAX_LFS_FILESIZE,
+ zone->capacity << SECTOR_SHIFT);
+ z->z_wpoffset = zonefs_check_zone_condition(sb, z, zone);
+
+ z->z_mode = S_IFREG | sbi->s_perm;
+ z->z_uid = sbi->s_uid;
+ z->z_gid = sbi->s_gid;
+
+ /*
+ * Let zonefs_inode_update_mode() know that we will need
+ * special initialization of the inode mode the first time
+ * it is accessed.
+ */
+ z->z_flags |= ZONEFS_ZONE_INIT_MODE;
+
+ sb->s_maxbytes = max(z->z_capacity, sb->s_maxbytes);
+ sbi->s_blocks += z->z_capacity >> sb->s_blocksize_bits;
+ sbi->s_used_blocks += z->z_wpoffset >> sb->s_blocksize_bits;
+
+ /*
+ * For sequential zones, make sure that any open zone is closed
+ * first to ensure that the initial number of open zones is 0,
+ * in sync with the open zone accounting done when the mount
+ * option ZONEFS_MNTOPT_EXPLICIT_OPEN is used.
+ */
+ if (ztype == ZONEFS_ZTYPE_SEQ &&
+ (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
+ zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
+ ret = zonefs_zone_mgmt(sb, z, REQ_OP_ZONE_CLOSE);
+ if (ret)
+ return ret;
+ }
+
+ zonefs_account_active(sb, z);
+
+ n++;
+ }
+
+ if (WARN_ON_ONCE(n != zgroup->g_nr_zones))
+ return -EINVAL;
+
+ zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
+ zonefs_zgroup_name(ztype),
+ zgroup->g_nr_zones,
+ zgroup->g_nr_zones > 1 ? "s" : "");
+
+ return 0;
+}
+
+static void zonefs_free_zgroups(struct super_block *sb)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ enum zonefs_ztype ztype;
+
+ if (!sbi)
+ return;
+
+ for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
+ kvfree(sbi->s_zgroup[ztype].g_zones);
+ sbi->s_zgroup[ztype].g_zones = NULL;
+ }
+}
+
+/*
+ * Create the zone groups and populate them with zone files.
+ */
+static int zonefs_init_zgroups(struct super_block *sb)
+{
+ struct zonefs_zone_data zd;
+ enum zonefs_ztype ztype;
+ int ret;
+
+ /* First get the device zone information */
+ memset(&zd, 0, sizeof(struct zonefs_zone_data));
+ zd.sb = sb;
+ ret = zonefs_get_zone_info(&zd);
+ if (ret)
+ goto cleanup;
+
+ /* Allocate and initialize the zone groups */
+ for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
+ ret = zonefs_init_zgroup(sb, &zd, ztype);
+ if (ret) {
+ zonefs_info(sb,
+ "Zone group \"%s\" initialization failed\n",
+ zonefs_zgroup_name(ztype));
+ break;
+ }
+ }
+
+cleanup:
+ zonefs_free_zone_info(&zd);
+ if (ret)
+ zonefs_free_zgroups(sb);
+
+ return ret;
+}
+
+/*
+ * Read super block information from the device.
+ */
+static int zonefs_read_super(struct super_block *sb)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ struct zonefs_super *super;
+ u32 crc, stored_crc;
+ struct page *page;
+ struct bio_vec bio_vec;
+ struct bio bio;
+ int ret;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
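+	/* Synchronously read the on-disk super block stored in the device's first block */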
+ bio_init(&bio, sb->s_bdev, &bio_vec, 1, REQ_OP_READ);
+ bio.bi_iter.bi_sector = 0;
+ __bio_add_page(&bio, page, PAGE_SIZE, 0);
+
+ ret = submit_bio_wait(&bio);
+ if (ret)
+ goto free_page;
+
+ super = page_address(page);
+
+ ret = -EINVAL;
+ if (le32_to_cpu(super->s_magic) != ZONEFS_MAGIC)
+ goto free_page;
+
+ stored_crc = le32_to_cpu(super->s_crc);
+ super->s_crc = 0;
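+	/* The CRC covers the whole super block and is computed with the CRC field zeroed */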
+ crc = crc32(~0U, (unsigned char *)super, sizeof(struct zonefs_super));
+ if (crc != stored_crc) {
+ zonefs_err(sb, "Invalid checksum (Expected 0x%08x, got 0x%08x)",
+ crc, stored_crc);
+ goto free_page;
+ }
+
+ sbi->s_features = le64_to_cpu(super->s_features);
+ if (sbi->s_features & ~ZONEFS_F_DEFINED_FEATURES) {
+ zonefs_err(sb, "Unknown features set 0x%llx\n",
+ sbi->s_features);
+ goto free_page;
+ }
+
+ if (sbi->s_features & ZONEFS_F_UID) {
+ sbi->s_uid = make_kuid(current_user_ns(),
+ le32_to_cpu(super->s_uid));
+ if (!uid_valid(sbi->s_uid)) {
+ zonefs_err(sb, "Invalid UID feature\n");
+ goto free_page;
+ }
+ }
+
+ if (sbi->s_features & ZONEFS_F_GID) {
+ sbi->s_gid = make_kgid(current_user_ns(),
+ le32_to_cpu(super->s_gid));
+ if (!gid_valid(sbi->s_gid)) {
+ zonefs_err(sb, "Invalid GID feature\n");
+ goto free_page;
+ }
+ }
+
+ if (sbi->s_features & ZONEFS_F_PERM)
+ sbi->s_perm = le32_to_cpu(super->s_perm);
+
+ if (memchr_inv(super->s_reserved, 0, sizeof(super->s_reserved))) {
+ zonefs_err(sb, "Reserved area is being used\n");
+ goto free_page;
+ }
+
+ import_uuid(&sbi->s_uuid, super->s_uuid);
+ ret = 0;
+
+free_page:
+ __free_page(page);
+
+ return ret;
+}
+
+static const struct super_operations zonefs_sops = {
+ .alloc_inode = zonefs_alloc_inode,
+ .free_inode = zonefs_free_inode,
+ .statfs = zonefs_statfs,
+ .remount_fs = zonefs_remount,
+ .show_options = zonefs_show_options,
+};
+
+static int zonefs_get_zgroup_inodes(struct super_block *sb)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ struct inode *dir_inode;
+ enum zonefs_ztype ztype;
+
+ for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
+ if (!sbi->s_zgroup[ztype].g_nr_zones)
+ continue;
+
+ dir_inode = zonefs_get_zgroup_inode(sb, ztype);
+ if (IS_ERR(dir_inode))
+ return PTR_ERR(dir_inode);
+
+ sbi->s_zgroup[ztype].g_inode = dir_inode;
+ }
+
+ return 0;
+}
+
+static void zonefs_release_zgroup_inodes(struct super_block *sb)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ enum zonefs_ztype ztype;
+
+ if (!sbi)
+ return;
+
+ for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
+ if (sbi->s_zgroup[ztype].g_inode) {
+ iput(sbi->s_zgroup[ztype].g_inode);
+ sbi->s_zgroup[ztype].g_inode = NULL;
+ }
+ }
+}
+
+/*
+ * Check that the device is zoned. If it is, get the list of zones and create
+ * sub-directories and files according to the device zone configuration and
+ * format options.
+ */
+static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct zonefs_sb_info *sbi;
+ struct inode *inode;
+ enum zonefs_ztype ztype;
+ int ret;
+
+ if (!bdev_is_zoned(sb->s_bdev)) {
+ zonefs_err(sb, "Not a zoned block device\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Initialize super block information: the maximum file size is updated
+ * when the zone files are created so that the format option
+ * ZONEFS_F_AGGRCNV, which increases the maximum size of a file beyond
+ * the zone size, is taken into account.
+ */
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ if (!sbi)
+ return -ENOMEM;
+
+ spin_lock_init(&sbi->s_lock);
+ sb->s_fs_info = sbi;
+ sb->s_magic = ZONEFS_MAGIC;
+ sb->s_maxbytes = 0;
+ sb->s_op = &zonefs_sops;
+ sb->s_time_gran = 1;
+
+ /*
+ * The block size is set to the device zone write granularity to ensure
+ * that write operations are always aligned according to the device
+ * interface constraints.
+ */
+ sb_set_blocksize(sb, bdev_zone_write_granularity(sb->s_bdev));
+ sbi->s_zone_sectors_shift = ilog2(bdev_zone_sectors(sb->s_bdev));
+ sbi->s_uid = GLOBAL_ROOT_UID;
+ sbi->s_gid = GLOBAL_ROOT_GID;
+ sbi->s_perm = 0640;
+ sbi->s_mount_opts = ZONEFS_MNTOPT_ERRORS_RO;
+
+ atomic_set(&sbi->s_wro_seq_files, 0);
+ sbi->s_max_wro_seq_files = bdev_max_open_zones(sb->s_bdev);
+ atomic_set(&sbi->s_active_seq_files, 0);
+ sbi->s_max_active_seq_files = bdev_max_active_zones(sb->s_bdev);
+
+ ret = zonefs_read_super(sb);
+ if (ret)
+ return ret;
+
+ ret = zonefs_parse_options(sb, data);
+ if (ret)
+ return ret;
+
+ zonefs_info(sb, "Mounting %u zones", bdev_nr_zones(sb->s_bdev));
+
+ if (!sbi->s_max_wro_seq_files &&
+ !sbi->s_max_active_seq_files &&
+ sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
+ zonefs_info(sb,
+ "No open and active zone limits. Ignoring explicit_open mount option\n");
+ sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
+ }
+
+ /* Initialize the zone groups */
+ ret = zonefs_init_zgroups(sb);
+ if (ret)
+ goto cleanup;
+
+ /* Create the root directory inode */
+ ret = -ENOMEM;
+ inode = new_inode(sb);
+ if (!inode)
+ goto cleanup;
+
+ inode->i_ino = bdev_nr_zones(sb->s_bdev);
+ inode->i_mode = S_IFDIR | 0555;
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ inode->i_op = &zonefs_dir_inode_operations;
+ inode->i_fop = &zonefs_dir_operations;
+ inode->i_size = 2;
+ set_nlink(inode, 2);
+ for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
+ if (sbi->s_zgroup[ztype].g_nr_zones) {
+ inc_nlink(inode);
+ inode->i_size++;
+ }
+ }
+
+ sb->s_root = d_make_root(inode);
+ if (!sb->s_root)
+ goto cleanup;
+
+ /*
+ * Take a reference on the zone groups directory inodes
+ * to keep them in the inode cache.
+ */
+ ret = zonefs_get_zgroup_inodes(sb);
+ if (ret)
+ goto cleanup;
+
+ ret = zonefs_sysfs_register(sb);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ zonefs_release_zgroup_inodes(sb);
+ zonefs_free_zgroups(sb);
+
+ return ret;
+}
+
+static struct dentry *zonefs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return mount_bdev(fs_type, flags, dev_name, data, zonefs_fill_super);
+}
+
+static void zonefs_kill_super(struct super_block *sb)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+
+ /* Release the reference on the zone group directory inodes */
+ zonefs_release_zgroup_inodes(sb);
+
+ kill_block_super(sb);
+
+ zonefs_sysfs_unregister(sb);
+ zonefs_free_zgroups(sb);
+ kfree(sbi);
+}
+
+/*
+ * File system definition and registration.
+ */
+static struct file_system_type zonefs_type = {
+ .owner = THIS_MODULE,
+ .name = "zonefs",
+ .mount = zonefs_mount,
+ .kill_sb = zonefs_kill_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+
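+/*
+ * Example usage once the module is registered (the device name is
+ * hypothetical and the device must have been formatted beforehand with a
+ * zonefs formatting tool such as mkzonefs):
+ *
+ * # modprobe zonefs
+ * # mount -t zonefs /dev/nvme0n2 /mnt
+ *
+ * mount_bdev() resolves the device path and calls zonefs_fill_super() to
+ * build the zone group directories and zone files.
+ */
+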
+static int __init zonefs_init_inodecache(void)
+{
+ zonefs_inode_cachep = kmem_cache_create("zonefs_inode_cache",
+ sizeof(struct zonefs_inode_info), 0,
+ (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT),
+ NULL);
+ if (zonefs_inode_cachep == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+static void zonefs_destroy_inodecache(void)
+{
+ /*
+ * Make sure all delayed rcu free inodes are flushed before we
+ * destroy the inode cache.
+ */
+ rcu_barrier();
+ kmem_cache_destroy(zonefs_inode_cachep);
+}
+
+static int __init zonefs_init(void)
+{
+ int ret;
+
+ BUILD_BUG_ON(sizeof(struct zonefs_super) != ZONEFS_SUPER_SIZE);
+
+ ret = zonefs_init_inodecache();
+ if (ret)
+ return ret;
+
+ ret = zonefs_sysfs_init();
+ if (ret)
+ goto destroy_inodecache;
+
+ ret = register_filesystem(&zonefs_type);
+ if (ret)
+ goto sysfs_exit;
+
+ return 0;
+
+sysfs_exit:
+ zonefs_sysfs_exit();
+destroy_inodecache:
+ zonefs_destroy_inodecache();
+
+ return ret;
+}
+
+static void __exit zonefs_exit(void)
+{
+ unregister_filesystem(&zonefs_type);
+ zonefs_sysfs_exit();
+ zonefs_destroy_inodecache();
+}
+
+MODULE_AUTHOR("Damien Le Moal");
+MODULE_DESCRIPTION("Zone file system for zoned block devices");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_FS("zonefs");
+module_init(zonefs_init);
+module_exit(zonefs_exit);
diff --git a/fs/zonefs/sysfs.c b/fs/zonefs/sysfs.c
new file mode 100644
index 000000000..8ccb65c2b
--- /dev/null
+++ b/fs/zonefs/sysfs.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Simple file system for zoned block devices exposing zones as files.
+ *
+ * Copyright (C) 2022 Western Digital Corporation or its affiliates.
+ */
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/blkdev.h>
+
+#include "zonefs.h"
+
+struct zonefs_sysfs_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct zonefs_sb_info *sbi, char *buf);
+};
+
+#define ZONEFS_SYSFS_ATTR_RO(name) \
+static struct zonefs_sysfs_attr zonefs_sysfs_attr_##name = __ATTR_RO(name)
+
+#define ATTR_LIST(name) &zonefs_sysfs_attr_##name.attr
+
+static ssize_t zonefs_sysfs_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct zonefs_sb_info *sbi =
+ container_of(kobj, struct zonefs_sb_info, s_kobj);
+ struct zonefs_sysfs_attr *zonefs_attr =
+ container_of(attr, struct zonefs_sysfs_attr, attr);
+
+ if (!zonefs_attr->show)
+ return 0;
+
+ return zonefs_attr->show(sbi, buf);
+}
+
+static ssize_t max_wro_seq_files_show(struct zonefs_sb_info *sbi, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", sbi->s_max_wro_seq_files);
+}
+ZONEFS_SYSFS_ATTR_RO(max_wro_seq_files);
+
+static ssize_t nr_wro_seq_files_show(struct zonefs_sb_info *sbi, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", atomic_read(&sbi->s_wro_seq_files));
+}
+ZONEFS_SYSFS_ATTR_RO(nr_wro_seq_files);
+
+static ssize_t max_active_seq_files_show(struct zonefs_sb_info *sbi, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", sbi->s_max_active_seq_files);
+}
+ZONEFS_SYSFS_ATTR_RO(max_active_seq_files);
+
+static ssize_t nr_active_seq_files_show(struct zonefs_sb_info *sbi, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", atomic_read(&sbi->s_active_seq_files));
+}
+ZONEFS_SYSFS_ATTR_RO(nr_active_seq_files);
+
+static struct attribute *zonefs_sysfs_attrs[] = {
+ ATTR_LIST(max_wro_seq_files),
+ ATTR_LIST(nr_wro_seq_files),
+ ATTR_LIST(max_active_seq_files),
+ ATTR_LIST(nr_active_seq_files),
+ NULL,
+};
+ATTRIBUTE_GROUPS(zonefs_sysfs);
+
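+/*
+ * These attributes are exposed under /sys/fs/zonefs/<device>/ once
+ * zonefs_sysfs_register() runs at mount time. Example (hypothetical
+ * device and values):
+ *
+ * # cat /sys/fs/zonefs/nvme0n2/max_wro_seq_files
+ * 14
+ * # cat /sys/fs/zonefs/nvme0n2/nr_wro_seq_files
+ * 1
+ */
+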
+static void zonefs_sysfs_sb_release(struct kobject *kobj)
+{
+ struct zonefs_sb_info *sbi =
+ container_of(kobj, struct zonefs_sb_info, s_kobj);
+
+ complete(&sbi->s_kobj_unregister);
+}
+
+static const struct sysfs_ops zonefs_sysfs_attr_ops = {
+ .show = zonefs_sysfs_attr_show,
+};
+
+static const struct kobj_type zonefs_sb_ktype = {
+ .default_groups = zonefs_sysfs_groups,
+ .sysfs_ops = &zonefs_sysfs_attr_ops,
+ .release = zonefs_sysfs_sb_release,
+};
+
+static struct kobject *zonefs_sysfs_root;
+
+int zonefs_sysfs_register(struct super_block *sb)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ int ret;
+
+ init_completion(&sbi->s_kobj_unregister);
+ ret = kobject_init_and_add(&sbi->s_kobj, &zonefs_sb_ktype,
+ zonefs_sysfs_root, "%s", sb->s_id);
+ if (ret) {
+ kobject_put(&sbi->s_kobj);
+ wait_for_completion(&sbi->s_kobj_unregister);
+ return ret;
+ }
+
+ sbi->s_sysfs_registered = true;
+
+ return 0;
+}
+
+void zonefs_sysfs_unregister(struct super_block *sb)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+
+ if (!sbi || !sbi->s_sysfs_registered)
+ return;
+
+ kobject_del(&sbi->s_kobj);
+ kobject_put(&sbi->s_kobj);
+ wait_for_completion(&sbi->s_kobj_unregister);
+}
+
+int __init zonefs_sysfs_init(void)
+{
+ zonefs_sysfs_root = kobject_create_and_add("zonefs", fs_kobj);
+ if (!zonefs_sysfs_root)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void zonefs_sysfs_exit(void)
+{
+ kobject_put(zonefs_sysfs_root);
+ zonefs_sysfs_root = NULL;
+}
diff --git a/fs/zonefs/trace.h b/fs/zonefs/trace.h
new file mode 100644
index 000000000..9969db3a9
--- /dev/null
+++ b/fs/zonefs/trace.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * zonefs filesystem driver tracepoints.
+ *
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM zonefs
+
+#if !defined(_TRACE_ZONEFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ZONEFS_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+#include <linux/blkdev.h>
+
+#include "zonefs.h"
+
+#define show_dev(dev) MAJOR(dev), MINOR(dev)
+
+TRACE_EVENT(zonefs_zone_mgmt,
+ TP_PROTO(struct super_block *sb, struct zonefs_zone *z,
+ enum req_op op),
+ TP_ARGS(sb, z, op),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(enum req_op, op)
+ __field(sector_t, sector)
+ __field(sector_t, nr_sectors)
+ ),
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->ino =
+ z->z_sector >> ZONEFS_SB(sb)->s_zone_sectors_shift;
+ __entry->op = op;
+ __entry->sector = z->z_sector;
+ __entry->nr_sectors = z->z_size >> SECTOR_SHIFT;
+ ),
+ TP_printk("bdev=(%d,%d), ino=%lu op=%s, sector=%llu, nr_sectors=%llu",
+ show_dev(__entry->dev), (unsigned long)__entry->ino,
+ blk_op_str(__entry->op), __entry->sector,
+ __entry->nr_sectors
+ )
+);
+
+TRACE_EVENT(zonefs_file_dio_append,
+ TP_PROTO(struct inode *inode, ssize_t size, ssize_t ret),
+ TP_ARGS(inode, size, ret),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(sector_t, sector)
+ __field(ssize_t, size)
+ __field(loff_t, wpoffset)
+ __field(ssize_t, ret)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->sector = zonefs_inode_zone(inode)->z_sector;
+ __entry->size = size;
+ __entry->wpoffset =
+ zonefs_inode_zone(inode)->z_wpoffset;
+ __entry->ret = ret;
+ ),
+ TP_printk("bdev=(%d, %d), ino=%lu, sector=%llu, size=%zu, wpoffset=%llu, ret=%zu",
+ show_dev(__entry->dev), (unsigned long)__entry->ino,
+ __entry->sector, __entry->size, __entry->wpoffset,
+ __entry->ret
+ )
+);
+
+TRACE_EVENT(zonefs_iomap_begin,
+ TP_PROTO(struct inode *inode, struct iomap *iomap),
+ TP_ARGS(inode, iomap),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(u64, addr)
+ __field(loff_t, offset)
+ __field(u64, length)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->addr = iomap->addr;
+ __entry->offset = iomap->offset;
+ __entry->length = iomap->length;
+ ),
+ TP_printk("bdev=(%d,%d), ino=%lu, addr=%llu, offset=%llu, length=%llu",
+ show_dev(__entry->dev), (unsigned long)__entry->ino,
+ __entry->addr, __entry->offset, __entry->length
+ )
+);
+
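+/*
+ * These events are consumed like any other tracepoints, e.g. (assuming
+ * tracefs is mounted at /sys/kernel/tracing):
+ *
+ * # echo 1 > /sys/kernel/tracing/events/zonefs/enable
+ * # cat /sys/kernel/tracing/trace_pipe
+ */
+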
+#endif /* _TRACE_ZONEFS_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/fs/zonefs/zonefs.h b/fs/zonefs/zonefs.h
new file mode 100644
index 000000000..817565224
--- /dev/null
+++ b/fs/zonefs/zonefs.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Simple zone file system for zoned block devices.
+ *
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ */
+#ifndef __ZONEFS_H__
+#define __ZONEFS_H__
+
+#include <linux/fs.h>
+#include <linux/magic.h>
+#include <linux/uuid.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/kobject.h>
+
+/*
+ * Maximum length of file names: this only needs to be large enough to fit
+ * the zone group directory names and a decimal zone number for file names.
+ * 16 characters is plenty.
+ */
+#define ZONEFS_NAME_MAX 16
+
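+/*
+ * For example, a device with both conventional and sequential zones is
+ * expected to be exposed as a tree of the form (zone numbers are
+ * illustrative):
+ *
+ * /mnt/cnv/0 ... /mnt/cnv/<n>
+ * /mnt/seq/0 ... /mnt/seq/<m>
+ */
+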
+/*
+ * Zone types: ZONEFS_ZTYPE_SEQ is used for all sequential zone types
+ * defined in linux/blkzoned.h, that is, BLK_ZONE_TYPE_SEQWRITE_REQ and
+ * BLK_ZONE_TYPE_SEQWRITE_PREF.
+ */
+enum zonefs_ztype {
+ ZONEFS_ZTYPE_CNV,
+ ZONEFS_ZTYPE_SEQ,
+ ZONEFS_ZTYPE_MAX,
+};
+
+static inline enum zonefs_ztype zonefs_zone_type(struct blk_zone *zone)
+{
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return ZONEFS_ZTYPE_CNV;
+ return ZONEFS_ZTYPE_SEQ;
+}
+
+#define ZONEFS_ZONE_INIT_MODE (1U << 0)
+#define ZONEFS_ZONE_OPEN (1U << 1)
+#define ZONEFS_ZONE_ACTIVE (1U << 2)
+#define ZONEFS_ZONE_OFFLINE (1U << 3)
+#define ZONEFS_ZONE_READONLY (1U << 4)
+#define ZONEFS_ZONE_CNV (1U << 31)
+
+/*
+ * In-memory per-file inode zone data.
+ */
+struct zonefs_zone {
+ /* Zone state flags */
+ unsigned int z_flags;
+
+ /* Zone start sector (512B unit) */
+ sector_t z_sector;
+
+ /* Zone size (bytes) */
+ loff_t z_size;
+
+ /* Zone capacity (file maximum size, bytes) */
+ loff_t z_capacity;
+
+ /* Write pointer offset in the zone (sequential zones only, bytes) */
+ loff_t z_wpoffset;
+
+ /* Saved inode uid, gid and access rights */
+ umode_t z_mode;
+ kuid_t z_uid;
+ kgid_t z_gid;
+};
+
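+/*
+ * Zone files are indexed by their zone start sector: as done by the
+ * tracepoints in trace.h, a zone's file number within its group is
+ * z_sector >> s_zone_sectors_shift. For example, assuming 256 MiB zones
+ * (2^19 sectors of 512 B), the zone starting at sector 1048576 backs
+ * file number 2.
+ */
+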
+/*
+ * In-memory zone group information: all zones of a group are exposed
+ * as files, one file per zone.
+ */
+struct zonefs_zone_group {
+ struct inode *g_inode;
+ unsigned int g_nr_zones;
+ struct zonefs_zone *g_zones;
+};
+
+/*
+ * In-memory inode data.
+ */
+struct zonefs_inode_info {
+ struct inode i_vnode;
+
+ /*
+ * To serialise fully against both syscall and mmap based IO and
+ * sequential file truncation, two locks are used. For serializing
+ * zonefs_seq_file_truncate() against zonefs_iomap_begin(), that is,
+ * file truncate operations against block mapping, i_truncate_mutex is
+ * used. i_truncate_mutex also protects against concurrent accesses
+ * and changes to the inode private data, and in particular changes to
+ * a sequential file size on completion of direct IO writes.
+ * Serialization of mmap read IOs with truncate and syscall IO
+ * operations is done with invalidate_lock in addition to
+ * i_truncate_mutex. Only zonefs_seq_file_truncate() takes both locks
+ * (invalidate_lock first, i_truncate_mutex second).
+ */
+ struct mutex i_truncate_mutex;
+
+ /* guarded by i_truncate_mutex */
+ unsigned int i_wr_refcnt;
+};
+
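+/*
+ * A sketch of the lock ordering described above for a sequential file
+ * truncate (simplified, error handling omitted):
+ *
+ * filemap_invalidate_lock(inode->i_mapping);
+ * mutex_lock(&ZONEFS_I(inode)->i_truncate_mutex);
+ * ...reset the zone and update the inode size...
+ * mutex_unlock(&ZONEFS_I(inode)->i_truncate_mutex);
+ * filemap_invalidate_unlock(inode->i_mapping);
+ */
+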
+static inline struct zonefs_inode_info *ZONEFS_I(struct inode *inode)
+{
+ return container_of(inode, struct zonefs_inode_info, i_vnode);
+}
+
+static inline bool zonefs_zone_is_cnv(struct zonefs_zone *z)
+{
+ return z->z_flags & ZONEFS_ZONE_CNV;
+}
+
+static inline bool zonefs_zone_is_seq(struct zonefs_zone *z)
+{
+ return !zonefs_zone_is_cnv(z);
+}
+
+static inline struct zonefs_zone *zonefs_inode_zone(struct inode *inode)
+{
+ return inode->i_private;
+}
+
+static inline bool zonefs_inode_is_cnv(struct inode *inode)
+{
+ return zonefs_zone_is_cnv(zonefs_inode_zone(inode));
+}
+
+static inline bool zonefs_inode_is_seq(struct inode *inode)
+{
+ return zonefs_zone_is_seq(zonefs_inode_zone(inode));
+}
+
+/*
+ * On-disk super block (block 0).
+ */
+#define ZONEFS_LABEL_LEN 64
+#define ZONEFS_UUID_SIZE 16
+#define ZONEFS_SUPER_SIZE 4096
+
+struct zonefs_super {
+
+ /* Magic number */
+ __le32 s_magic;
+
+ /* Checksum */
+ __le32 s_crc;
+
+ /* Volume label */
+ char s_label[ZONEFS_LABEL_LEN];
+
+ /* 128-bit uuid */
+ __u8 s_uuid[ZONEFS_UUID_SIZE];
+
+ /* Features */
+ __le64 s_features;
+
+ /* UID/GID to use for files */
+ __le32 s_uid;
+ __le32 s_gid;
+
+ /* File permissions */
+ __le32 s_perm;
+
+ /* Padding to ZONEFS_SUPER_SIZE bytes */
+ __u8 s_reserved[3988];
+
+} __packed;
+
+/*
+ * Feature flags: specified in the s_features field of the on-disk super
+ * block struct zonefs_super and in-memory in the s_features field of
+ * struct zonefs_sb_info.
+ */
+enum zonefs_features {
+ /*
+ * Aggregate contiguous conventional zones into a single file.
+ */
+ ZONEFS_F_AGGRCNV = 1ULL << 0,
+ /*
+ * Use super block specified UID for files instead of default 0.
+ */
+ ZONEFS_F_UID = 1ULL << 1,
+ /*
+ * Use super block specified GID for files instead of default 0.
+ */
+ ZONEFS_F_GID = 1ULL << 2,
+ /*
+ * Use super block specified file permissions instead of default 640.
+ */
+ ZONEFS_F_PERM = 1ULL << 3,
+};
+
+#define ZONEFS_F_DEFINED_FEATURES \
+ (ZONEFS_F_AGGRCNV | ZONEFS_F_UID | ZONEFS_F_GID | ZONEFS_F_PERM)
+
+/*
+ * Mount options for zone write pointer error handling.
+ */
+#define ZONEFS_MNTOPT_ERRORS_RO (1 << 0) /* Remount read-only */
+#define ZONEFS_MNTOPT_ERRORS_ZRO (1 << 1) /* Make zone file readonly */
+#define ZONEFS_MNTOPT_ERRORS_ZOL (1 << 2) /* Make zone file offline */
+#define ZONEFS_MNTOPT_ERRORS_REPAIR (1 << 3) /* Truncate file to the zone write pointer */
+#define ZONEFS_MNTOPT_ERRORS_MASK \
+ (ZONEFS_MNTOPT_ERRORS_RO | ZONEFS_MNTOPT_ERRORS_ZRO | \
+ ZONEFS_MNTOPT_ERRORS_ZOL | ZONEFS_MNTOPT_ERRORS_REPAIR)
+#define ZONEFS_MNTOPT_EXPLICIT_OPEN (1 << 4) /* Explicit open/close of zones on open/close */
+
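+/*
+ * These flags map to the "errors=" and "explicit-open" mount options,
+ * e.g. (hypothetical device):
+ *
+ * # mount -t zonefs -o errors=zone-ro,explicit-open /dev/nvme0n2 /mnt
+ */
+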
+/*
+ * In-memory super block information.
+ */
+struct zonefs_sb_info {
+
+ unsigned long s_mount_opts;
+
+ spinlock_t s_lock;
+
+ unsigned long long s_features;
+ kuid_t s_uid;
+ kgid_t s_gid;
+ umode_t s_perm;
+ uuid_t s_uuid;
+ unsigned int s_zone_sectors_shift;
+
+ struct zonefs_zone_group s_zgroup[ZONEFS_ZTYPE_MAX];
+
+ loff_t s_blocks;
+ loff_t s_used_blocks;
+
+ unsigned int s_max_wro_seq_files;
+ atomic_t s_wro_seq_files;
+
+ unsigned int s_max_active_seq_files;
+ atomic_t s_active_seq_files;
+
+ bool s_sysfs_registered;
+ struct kobject s_kobj;
+ struct completion s_kobj_unregister;
+};
+
+static inline struct zonefs_sb_info *ZONEFS_SB(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+#define zonefs_info(sb, format, args...) \
+ pr_info("zonefs (%s): " format, sb->s_id, ## args)
+#define zonefs_err(sb, format, args...) \
+ pr_err("zonefs (%s) ERROR: " format, sb->s_id, ## args)
+#define zonefs_warn(sb, format, args...) \
+ pr_warn("zonefs (%s) WARNING: " format, sb->s_id, ## args)
+
+/* In super.c */
+void zonefs_inode_account_active(struct inode *inode);
+int zonefs_inode_zone_mgmt(struct inode *inode, enum req_op op);
+void zonefs_i_size_write(struct inode *inode, loff_t isize);
+void zonefs_update_stats(struct inode *inode, loff_t new_isize);
+void __zonefs_io_error(struct inode *inode, bool write);
+
+static inline void zonefs_io_error(struct inode *inode, bool write)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+
+ mutex_lock(&zi->i_truncate_mutex);
+ __zonefs_io_error(inode, write);
+ mutex_unlock(&zi->i_truncate_mutex);
+}
+
+/* In super.c */
+extern const struct inode_operations zonefs_dir_inode_operations;
+extern const struct file_operations zonefs_dir_operations;
+
+/* In file.c */
+extern const struct address_space_operations zonefs_file_aops;
+extern const struct file_operations zonefs_file_operations;
+int zonefs_file_truncate(struct inode *inode, loff_t isize);
+
+/* In sysfs.c */
+int zonefs_sysfs_register(struct super_block *sb);
+void zonefs_sysfs_unregister(struct super_block *sb);
+int zonefs_sysfs_init(void);
+void zonefs_sysfs_exit(void);
+
+#endif