Diffstat (limited to 'fs/squashfs')
-rw-r--r--  fs/squashfs/Kconfig | 266
-rw-r--r--  fs/squashfs/Makefile | 19
-rw-r--r--  fs/squashfs/block.c | 359
-rw-r--r--  fs/squashfs/cache.c | 448
-rw-r--r--  fs/squashfs/decompressor.c | 141
-rw-r--r--  fs/squashfs/decompressor.h | 54
-rw-r--r--  fs/squashfs/decompressor_multi.c | 203
-rw-r--r--  fs/squashfs/decompressor_multi_percpu.c | 110
-rw-r--r--  fs/squashfs/decompressor_single.c | 91
-rw-r--r--  fs/squashfs/dir.c | 223
-rw-r--r--  fs/squashfs/export.c | 179
-rw-r--r--  fs/squashfs/file.c | 638
-rw-r--r--  fs/squashfs/file_cache.c | 36
-rw-r--r--  fs/squashfs/file_direct.c | 124
-rw-r--r--  fs/squashfs/fragment.c | 89
-rw-r--r--  fs/squashfs/id.c | 115
-rw-r--r--  fs/squashfs/inode.c | 417
-rw-r--r--  fs/squashfs/lz4_wrapper.c | 146
-rw-r--r--  fs/squashfs/lzo_wrapper.c | 123
-rw-r--r--  fs/squashfs/namei.c | 238
-rw-r--r--  fs/squashfs/page_actor.c | 136
-rw-r--r--  fs/squashfs/page_actor.h | 57
-rw-r--r--  fs/squashfs/squashfs.h | 114
-rw-r--r--  fs/squashfs/squashfs_fs.h | 452
-rw-r--r--  fs/squashfs/squashfs_fs_i.h | 41
-rw-r--r--  fs/squashfs/squashfs_fs_sb.h | 73
-rw-r--r--  fs/squashfs/super.c | 698
-rw-r--r--  fs/squashfs/symlink.c | 112
-rw-r--r--  fs/squashfs/xattr.c | 271
-rw-r--r--  fs/squashfs/xattr.h | 41
-rw-r--r--  fs/squashfs/xattr_id.c | 132
-rw-r--r--  fs/squashfs/xz_wrapper.c | 196
-rw-r--r--  fs/squashfs/zlib_wrapper.c | 137
-rw-r--r--  fs/squashfs/zstd_wrapper.c | 154
34 files changed, 6633 insertions(+), 0 deletions(-)
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
new file mode 100644
index 0000000000..60fc98bdf4
--- /dev/null
+++ b/fs/squashfs/Kconfig
@@ -0,0 +1,266 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SQUASHFS
+ tristate "SquashFS 4.0 - Squashed file system support"
+ depends on BLOCK
+ help
+ Saying Y here includes support for SquashFS 4.0 (a Compressed
+ Read-Only File System). Squashfs is a highly compressed read-only
+ filesystem for Linux. It uses zlib, lz4, lzo, xz or zstd compression
+ to compress files, inodes and directories. Inodes in the system
+ are very small and all blocks are packed to minimise data overhead.
+ Block sizes greater than 4K are supported up to a maximum of 1 Mbyte
+ (default block size 128K). SquashFS 4.0 supports 64 bit filesystems
+ and files (larger than 4GB), full uid/gid information, hard links and
+ timestamps.
+
+ Squashfs is intended for general read-only filesystem use, for
+ archival use (i.e. in cases where a .tar.gz file may be used), and in
+ embedded systems where low overhead is needed. Further information
+ and tools are available from http://squashfs.sourceforge.net.
+
+ If you want to compile this as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want),
+ say M here. The module will be called squashfs. Note that the root
+ file system (the one containing the directory /) cannot be compiled
+ as a module.
+
+ If unsure, say N.
+
+choice
+ prompt "File decompression options"
+ depends on SQUASHFS
+ help
+ Squashfs now supports two options for decompressing file
+ data. Traditionally Squashfs has decompressed into an
+ intermediate buffer and then memcopied it into the page cache.
+ It now also supports decompressing directly into
+ the page cache.
+
+ If unsure, select "Decompress file data into an intermediate buffer"
+
+config SQUASHFS_FILE_CACHE
+ bool "Decompress file data into an intermediate buffer"
+ help
+ Decompress file data into an intermediate buffer and then
+ memcopy it into the page cache.
+
+config SQUASHFS_FILE_DIRECT
+ bool "Decompress files directly into the page cache"
+ help
+ Directly decompress file data into the page cache.
+ Doing so can significantly improve performance because
+ it eliminates a memcpy and it also removes the lock contention
+ on the single buffer.
+
+endchoice
+
+config SQUASHFS_DECOMP_SINGLE
+ depends on SQUASHFS
+ def_bool n
+
+config SQUASHFS_DECOMP_MULTI
+ depends on SQUASHFS
+ def_bool n
+
+config SQUASHFS_DECOMP_MULTI_PERCPU
+ depends on SQUASHFS
+ def_bool n
+
+config SQUASHFS_CHOICE_DECOMP_BY_MOUNT
+ bool "Select the parallel decompression mode during mount"
+ depends on SQUASHFS
+ default n
+ select SQUASHFS_DECOMP_SINGLE
+ select SQUASHFS_DECOMP_MULTI
+ select SQUASHFS_DECOMP_MULTI_PERCPU
+ select SQUASHFS_MOUNT_DECOMP_THREADS
+ help
+ Compile all parallel decompression modes and specify the
+ decompression mode by setting "threads=" during mount.
+ The default decompressor parallelisation mode is SQUASHFS_DECOMP_SINGLE.
+
+choice
+ prompt "Select decompression parallel mode at compile time"
+ depends on SQUASHFS
+ depends on !SQUASHFS_CHOICE_DECOMP_BY_MOUNT
+ help
+ Squashfs now supports three parallelisation options for
+ decompression. Each one exhibits various trade-offs between
+ decompression performance and CPU and memory usage.
+
+ If in doubt, select "Single threaded decompression"
+
+config SQUASHFS_COMPILE_DECOMP_SINGLE
+ bool "Single threaded compression"
+ select SQUASHFS_DECOMP_SINGLE
+ help
+ Traditionally Squashfs has used single-threaded decompression.
+ Only one block (data or metadata) can be decompressed at any
+ one time. This limits CPU and memory usage to a minimum.
+
+config SQUASHFS_COMPILE_DECOMP_MULTI
+ bool "Use multiple decompressors for parallel I/O"
+ select SQUASHFS_DECOMP_MULTI
+ help
+ By default Squashfs uses a single decompressor, which gives
+ poor performance on parallel I/O workloads on multi-CPU
+ machines due to waiting on decompressor availability.
+
+ If you have a parallel I/O workload and your system has enough memory,
+ using this option may improve overall I/O performance.
+
+ This decompressor implementation uses up to two parallel
+ decompressors per core. It dynamically allocates decompressors
+ on a demand basis.
+
+config SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU
+ bool "Use percpu multiple decompressors for parallel I/O"
+ select SQUASHFS_DECOMP_MULTI_PERCPU
+ help
+ By default Squashfs uses a single decompressor, which gives
+ poor performance on parallel I/O workloads on multi-CPU
+ machines due to waiting on decompressor availability.
+
+ This decompressor implementation uses a maximum of one
+ decompressor per core. It uses percpu variables to ensure
+ decompression is load-balanced across the cores.
+endchoice
+
+config SQUASHFS_MOUNT_DECOMP_THREADS
+ bool "Add the mount parameter 'threads=' for squashfs"
+ depends on SQUASHFS
+ depends on SQUASHFS_DECOMP_MULTI
+ default n
+ help
+ Use threads= to set the decompression parallel mode and the number of threads.
+ If SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
+ threads=<single|multi|percpu|1|2|3|...>
+ else
+ threads=<2|3|...>
+ The upper limit is num_online_cpus() * 2.
+
+config SQUASHFS_XATTR
+ bool "Squashfs XATTR support"
+ depends on SQUASHFS
+ help
+ Saying Y here includes support for extended attributes (xattrs).
+ Xattrs are name:value pairs associated with inodes by
+ the kernel or by users (see the attr(5) manual page).
+
+ If unsure, say N.
+
+config SQUASHFS_ZLIB
+ bool "Include support for ZLIB compressed file systems"
+ depends on SQUASHFS
+ select ZLIB_INFLATE
+ default y
+ help
+ ZLIB compression is the standard compression used by Squashfs
+ file systems. It offers a good trade-off between compression
+ achieved and the amount of CPU time and memory necessary to
+ compress and decompress.
+
+ If unsure, say Y.
+
+config SQUASHFS_LZ4
+ bool "Include support for LZ4 compressed file systems"
+ depends on SQUASHFS
+ select LZ4_DECOMPRESS
+ help
+ Saying Y here includes support for reading Squashfs file systems
+ compressed with LZ4 compression. LZ4 compression is mainly
+ aimed at embedded systems with slower CPUs where the overheads
+ of zlib are too high.
+
+ LZ4 is not the standard compression used in Squashfs and so most
+ file systems will be readable without selecting this option.
+
+ If unsure, say N.
+
+config SQUASHFS_LZO
+ bool "Include support for LZO compressed file systems"
+ depends on SQUASHFS
+ select LZO_DECOMPRESS
+ help
+ Saying Y here includes support for reading Squashfs file systems
+ compressed with LZO compression. LZO compression is mainly
+ aimed at embedded systems with slower CPUs where the overheads
+ of zlib are too high.
+
+ LZO is not the standard compression used in Squashfs and so most
+ file systems will be readable without selecting this option.
+
+ If unsure, say N.
+
+config SQUASHFS_XZ
+ bool "Include support for XZ compressed file systems"
+ depends on SQUASHFS
+ select XZ_DEC
+ help
+ Saying Y here includes support for reading Squashfs file systems
+ compressed with XZ compression. XZ gives better compression than
+ the default zlib compression, at the expense of greater CPU and
+ memory overhead.
+
+ XZ is not the standard compression used in Squashfs and so most
+ file systems will be readable without selecting this option.
+
+ If unsure, say N.
+
+config SQUASHFS_ZSTD
+ bool "Include support for ZSTD compressed file systems"
+ depends on SQUASHFS
+ select ZSTD_DECOMPRESS
+ help
+ Saying Y here includes support for reading Squashfs file systems
+ compressed with ZSTD compression. ZSTD gives better compression than
+ the default ZLIB compression, while using less CPU.
+
+ ZSTD is not the standard compression used in Squashfs and so most
+ file systems will be readable without selecting this option.
+
+ If unsure, say N.
+
+config SQUASHFS_4K_DEVBLK_SIZE
+ bool "Use 4K device block size?"
+ depends on SQUASHFS
+ help
+ By default Squashfs sets the dev block size (sb_min_blocksize)
+ to 1K or the smallest block size supported by the block device
+ (if larger). Because blocks are packed together and unaligned
+ in Squashfs, this should reduce latency.
+
+ This, however, gives poor performance on MTD NAND devices where
+ the optimal I/O size is 4K (even though the devices can support
+ smaller block sizes).
+
+ Using a 4K device block size may also improve overall I/O
+ performance for some file access patterns (e.g. sequential
+ accesses of files in filesystem order) on all media.
+
+ Setting this option will force Squashfs to use a 4K device block
+ size by default.
+
+ If unsure, say N.
+
+config SQUASHFS_EMBEDDED
+ bool "Additional option for memory-constrained systems"
+ depends on SQUASHFS
+ help
+ Saying Y here allows you to specify the cache size.
+
+ If unsure, say N.
+
+config SQUASHFS_FRAGMENT_CACHE_SIZE
+ int "Number of fragments cached" if SQUASHFS_EMBEDDED
+ depends on SQUASHFS
+ default "3"
+ help
+ By default SquashFS caches the last 3 fragments read from
+ the filesystem. Increasing this amount may mean SquashFS
+ has to re-read fragments less often from disk, at the expense
+ of extra system memory. Decreasing this amount will mean
+ SquashFS uses less memory at the expense of extra reads from disk.
+
+ Note there must be at least one cached fragment. Anything
+ much more than three will probably not make much difference.
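For illustration, here is a minimal userspace sketch of selecting the parallel decompression mode at mount time with the "threads=" option documented above. It assumes a kernel built with SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y; the device node and mount point paths are placeholders.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Mount a squashfs image read-only, asking for the "multi"
	 * parallel decompression mode via the threads= option. */
	if (mount("/dev/loop0", "/mnt/squash", "squashfs", MS_RDONLY,
		  "threads=multi") < 0) {
		perror("mount");
		return 1;
	}
	return 0;
}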
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
new file mode 100644
index 0000000000..477c89a519
--- /dev/null
+++ b/fs/squashfs/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux squashfs routines.
+#
+
+obj-$(CONFIG_SQUASHFS) += squashfs.o
+squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
+squashfs-y += namei.o super.o symlink.o decompressor.o page_actor.o
+squashfs-$(CONFIG_SQUASHFS_FILE_CACHE) += file_cache.o
+squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o
+squashfs-$(CONFIG_SQUASHFS_DECOMP_SINGLE) += decompressor_single.o
+squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI) += decompressor_multi.o
+squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU) += decompressor_multi_percpu.o
+squashfs-$(CONFIG_SQUASHFS_XATTR) += xattr.o xattr_id.o
+squashfs-$(CONFIG_SQUASHFS_LZ4) += lz4_wrapper.o
+squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o
+squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o
+squashfs-$(CONFIG_SQUASHFS_ZLIB) += zlib_wrapper.o
+squashfs-$(CONFIG_SQUASHFS_ZSTD) += zstd_wrapper.o
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
new file mode 100644
index 0000000000..581ce95193
--- /dev/null
+++ b/fs/squashfs/block.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * block.c
+ */
+
+/*
+ * This file implements the low-level routines to read and decompress
+ * datablocks and metadata blocks.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/string.h>
+#include <linux/bio.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs.h"
+#include "decompressor.h"
+#include "page_actor.h"
+
+/*
+ * Returns the number of bytes copied to the page actor.
+ */
+static int copy_bio_to_actor(struct bio *bio,
+ struct squashfs_page_actor *actor,
+ int offset, int req_length)
+{
+ void *actor_addr;
+ struct bvec_iter_all iter_all = {};
+ struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
+ int copied_bytes = 0;
+ int actor_offset = 0;
+
+ squashfs_actor_nobuff(actor);
+ actor_addr = squashfs_first_page(actor);
+
+ if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all)))
+ return 0;
+
+ while (copied_bytes < req_length) {
+ int bytes_to_copy = min_t(int, bvec->bv_len - offset,
+ PAGE_SIZE - actor_offset);
+
+ bytes_to_copy = min_t(int, bytes_to_copy,
+ req_length - copied_bytes);
+ if (!IS_ERR(actor_addr))
+ memcpy(actor_addr + actor_offset, bvec_virt(bvec) +
+ offset, bytes_to_copy);
+
+ actor_offset += bytes_to_copy;
+ copied_bytes += bytes_to_copy;
+ offset += bytes_to_copy;
+
+ if (actor_offset >= PAGE_SIZE) {
+ actor_addr = squashfs_next_page(actor);
+ if (!actor_addr)
+ break;
+ actor_offset = 0;
+ }
+ if (offset >= bvec->bv_len) {
+ if (!bio_next_segment(bio, &iter_all))
+ break;
+ offset = 0;
+ }
+ }
+ squashfs_finish_page(actor);
+ return copied_bytes;
+}
+
+static int squashfs_bio_read_cached(struct bio *fullbio,
+ struct address_space *cache_mapping, u64 index, int length,
+ u64 read_start, u64 read_end, int page_count)
+{
+ struct page *head_to_cache = NULL, *tail_to_cache = NULL;
+ struct block_device *bdev = fullbio->bi_bdev;
+ int start_idx = 0, end_idx = 0;
+ struct bvec_iter_all iter_all;
+ struct bio *bio = NULL;
+ struct bio_vec *bv;
+ int idx = 0;
+ int err = 0;
+
+ bio_for_each_segment_all(bv, fullbio, iter_all) {
+ struct page *page = bv->bv_page;
+
+ if (page->mapping == cache_mapping) {
+ idx++;
+ continue;
+ }
+
+ /*
+ * We only use this when the device block size is the same as
+ * the page size, so read_start and read_end cover full pages.
+ *
+ * Compare these to the original required index and length to
+ * only cache pages which were requested partially, since these
+ * are the ones which are likely to be needed when reading
+ * adjacent blocks.
+ */
+ if (idx == 0 && index != read_start)
+ head_to_cache = page;
+ else if (idx == page_count - 1 && index + length != read_end)
+ tail_to_cache = page;
+
+ if (!bio || idx != end_idx) {
+ struct bio *new = bio_alloc_clone(bdev, fullbio,
+ GFP_NOIO, &fs_bio_set);
+
+ if (bio) {
+ bio_trim(bio, start_idx * PAGE_SECTORS,
+ (end_idx - start_idx) * PAGE_SECTORS);
+ bio_chain(bio, new);
+ submit_bio(bio);
+ }
+
+ bio = new;
+ start_idx = idx;
+ }
+
+ idx++;
+ end_idx = idx;
+ }
+
+ if (bio) {
+ bio_trim(bio, start_idx * PAGE_SECTORS,
+ (end_idx - start_idx) * PAGE_SECTORS);
+ err = submit_bio_wait(bio);
+ bio_put(bio);
+ }
+
+ if (err)
+ return err;
+
+ if (head_to_cache) {
+ int ret = add_to_page_cache_lru(head_to_cache, cache_mapping,
+ read_start >> PAGE_SHIFT,
+ GFP_NOIO);
+
+ if (!ret) {
+ SetPageUptodate(head_to_cache);
+ unlock_page(head_to_cache);
+ }
+
+ }
+
+ if (tail_to_cache) {
+ int ret = add_to_page_cache_lru(tail_to_cache, cache_mapping,
+ (read_end >> PAGE_SHIFT) - 1,
+ GFP_NOIO);
+
+ if (!ret) {
+ SetPageUptodate(tail_to_cache);
+ unlock_page(tail_to_cache);
+ }
+ }
+
+ return 0;
+}
+
+static struct page *squashfs_get_cache_page(struct address_space *mapping,
+ pgoff_t index)
+{
+ struct page *page;
+
+ if (!mapping)
+ return NULL;
+
+ page = find_get_page(mapping, index);
+ if (!page)
+ return NULL;
+
+ if (!PageUptodate(page)) {
+ put_page(page);
+ return NULL;
+ }
+
+ return page;
+}
+
+static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
+ struct bio **biop, int *block_offset)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ struct address_space *cache_mapping = msblk->cache_mapping;
+ const u64 read_start = round_down(index, msblk->devblksize);
+ const sector_t block = read_start >> msblk->devblksize_log2;
+ const u64 read_end = round_up(index + length, msblk->devblksize);
+ const sector_t block_end = read_end >> msblk->devblksize_log2;
+ int offset = read_start - round_down(index, PAGE_SIZE);
+ int total_len = (block_end - block) << msblk->devblksize_log2;
+ const int page_count = DIV_ROUND_UP(total_len + offset, PAGE_SIZE);
+ int error, i;
+ struct bio *bio;
+
+ bio = bio_kmalloc(page_count, GFP_NOIO);
+ if (!bio)
+ return -ENOMEM;
+ bio_init(bio, sb->s_bdev, bio->bi_inline_vecs, page_count, REQ_OP_READ);
+ bio->bi_iter.bi_sector = block * (msblk->devblksize >> SECTOR_SHIFT);
+
+ for (i = 0; i < page_count; ++i) {
+ unsigned int len =
+ min_t(unsigned int, PAGE_SIZE - offset, total_len);
+ pgoff_t index = (read_start >> PAGE_SHIFT) + i;
+ struct page *page;
+
+ page = squashfs_get_cache_page(cache_mapping, index);
+ if (!page)
+ page = alloc_page(GFP_NOIO);
+
+ if (!page) {
+ error = -ENOMEM;
+ goto out_free_bio;
+ }
+
+ /*
+ * Use the __ version to avoid merging since we need each page
+ * to be separate when we check for and avoid cached pages.
+ */
+ __bio_add_page(bio, page, len, offset);
+ offset = 0;
+ total_len -= len;
+ }
+
+ if (cache_mapping)
+ error = squashfs_bio_read_cached(bio, cache_mapping, index,
+ length, read_start, read_end,
+ page_count);
+ else
+ error = submit_bio_wait(bio);
+ if (error)
+ goto out_free_bio;
+
+ *biop = bio;
+ *block_offset = index & ((1 << msblk->devblksize_log2) - 1);
+ return 0;
+
+out_free_bio:
+ bio_free_pages(bio);
+ bio_uninit(bio);
+ kfree(bio);
+ return error;
+}
+
+/*
+ * Read and decompress a metadata block or datablock. Length is non-zero
+ * if a datablock is being read (the size is stored elsewhere in the
+ * filesystem), otherwise the length is obtained from the first two bytes of
+ * the metadata block. A bit in the length field indicates if the block
+ * is stored uncompressed in the filesystem (usually because compression
+ * generated a larger block - this does occasionally happen with compression
+ * algorithms).
+ */
+int squashfs_read_data(struct super_block *sb, u64 index, int length,
+ u64 *next_index, struct squashfs_page_actor *output)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ struct bio *bio = NULL;
+ int compressed;
+ int res;
+ int offset;
+
+ if (length) {
+ /*
+ * Datablock.
+ */
+ compressed = SQUASHFS_COMPRESSED_BLOCK(length);
+ length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length);
+ TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n",
+ index, compressed ? "" : "un", length, output->length);
+ } else {
+ /*
+ * Metadata block.
+ */
+ const u8 *data;
+ struct bvec_iter_all iter_all = {};
+ struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
+
+ if (index + 2 > msblk->bytes_used) {
+ res = -EIO;
+ goto out;
+ }
+ res = squashfs_bio_read(sb, index, 2, &bio, &offset);
+ if (res)
+ goto out;
+
+ if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
+ res = -EIO;
+ goto out_free_bio;
+ }
+ /* Extract the length of the metadata block */
+ data = bvec_virt(bvec);
+ length = data[offset];
+ if (offset < bvec->bv_len - 1) {
+ length |= data[offset + 1] << 8;
+ } else {
+ if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) {
+ res = -EIO;
+ goto out_free_bio;
+ }
+ data = bvec_virt(bvec);
+ length |= data[0] << 8;
+ }
+ bio_free_pages(bio);
+ bio_uninit(bio);
+ kfree(bio);
+
+ compressed = SQUASHFS_COMPRESSED(length);
+ length = SQUASHFS_COMPRESSED_SIZE(length);
+ index += 2;
+
+ TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2,
+ compressed ? "" : "un", length);
+ }
+ if (length < 0 || length > output->length ||
+ (index + length) > msblk->bytes_used) {
+ res = -EIO;
+ goto out;
+ }
+
+ if (next_index)
+ *next_index = index + length;
+
+ res = squashfs_bio_read(sb, index, length, &bio, &offset);
+ if (res)
+ goto out;
+
+ if (compressed) {
+ if (!msblk->stream) {
+ res = -EIO;
+ goto out_free_bio;
+ }
+ res = msblk->thread_ops->decompress(msblk, bio, offset, length, output);
+ } else {
+ res = copy_bio_to_actor(bio, output, offset, length);
+ }
+
+out_free_bio:
+ bio_free_pages(bio);
+ bio_uninit(bio);
+ kfree(bio);
+out:
+ if (res < 0) {
+ ERROR("Failed to read block 0x%llx: %d\n", index, res);
+ if (msblk->panic_on_errors)
+ panic("squashfs read failed");
+ }
+
+ return res;
+}
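To make the metadata length encoding described in the comment above squashfs_read_data() concrete, here is a small, hedged sketch. The macro and helper names below are invented for illustration; the real definitions live in squashfs_fs.h (included above, but not part of this hunk), and the exact flag bit position is an assumption.

/* Sketch of the metadata length encoding (flag bit position assumed). */
#define SQFS_META_UNCOMPRESSED_FLAG	(1 << 15)

/* The flag bit is set when the block was stored uncompressed. */
static inline int sqfs_meta_is_compressed(unsigned int length_field)
{
	return !(length_field & SQFS_META_UNCOMPRESSED_FLAG);
}

static inline unsigned int sqfs_meta_size(unsigned int length_field)
{
	return length_field & ~SQFS_META_UNCOMPRESSED_FLAG;
}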
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
new file mode 100644
index 0000000000..5062326d0e
--- /dev/null
+++ b/fs/squashfs/cache.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * cache.c
+ */
+
+/*
+ * Blocks in Squashfs are compressed. To avoid repeatedly decompressing
+ * recently accessed data Squashfs uses two small metadata and fragment caches.
+ *
+ * This file implements a generic cache implementation used for both caches,
+ * plus functions layered on top of the generic cache implementation to
+ * access the metadata and fragment caches.
+ *
+ * To avoid out of memory and fragmentation issues with vmalloc the cache
+ * uses sequences of kmalloced PAGE_SIZE buffers.
+ *
+ * It should be noted that the cache is not used for file datablocks; these
+ * are decompressed and cached in the page-cache in the normal way. The
+ * cache is only used to temporarily cache fragment and metadata blocks
+ * which have been read as a result of a metadata (i.e. inode or
+ * directory) or fragment access. Because metadata and fragments are packed
+ * together into blocks (to gain greater compression) the read of a particular
+ * piece of metadata or fragment will retrieve other metadata/fragments which
+ * have been packed with it; because of locality-of-reference these may be read
+ * in the near future. Temporarily caching them ensures they are available for
+ * near future access without requiring an additional read and decompress.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/pagemap.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs.h"
+#include "page_actor.h"
+
+/*
+ * Look-up block in cache, and increment usage count. If not in cache, read
+ * and decompress it from disk.
+ */
+struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
+ struct squashfs_cache *cache, u64 block, int length)
+{
+ int i, n;
+ struct squashfs_cache_entry *entry;
+
+ spin_lock(&cache->lock);
+
+ while (1) {
+ for (i = cache->curr_blk, n = 0; n < cache->entries; n++) {
+ if (cache->entry[i].block == block) {
+ cache->curr_blk = i;
+ break;
+ }
+ i = (i + 1) % cache->entries;
+ }
+
+ if (n == cache->entries) {
+ /*
+ * Block not in cache, if all cache entries are used
+ * go to sleep waiting for one to become available.
+ */
+ if (cache->unused == 0) {
+ cache->num_waiters++;
+ spin_unlock(&cache->lock);
+ wait_event(cache->wait_queue, cache->unused);
+ spin_lock(&cache->lock);
+ cache->num_waiters--;
+ continue;
+ }
+
+ /*
+ * At least one unused cache entry. A simple
+ * round-robin strategy is used to choose the entry to
+ * be evicted from the cache.
+ */
+ i = cache->next_blk;
+ for (n = 0; n < cache->entries; n++) {
+ if (cache->entry[i].refcount == 0)
+ break;
+ i = (i + 1) % cache->entries;
+ }
+
+ cache->next_blk = (i + 1) % cache->entries;
+ entry = &cache->entry[i];
+
+ /*
+ * Initialise chosen cache entry, and fill it in from
+ * disk.
+ */
+ cache->unused--;
+ entry->block = block;
+ entry->refcount = 1;
+ entry->pending = 1;
+ entry->num_waiters = 0;
+ entry->error = 0;
+ spin_unlock(&cache->lock);
+
+ entry->length = squashfs_read_data(sb, block, length,
+ &entry->next_index, entry->actor);
+
+ spin_lock(&cache->lock);
+
+ if (entry->length < 0)
+ entry->error = entry->length;
+
+ entry->pending = 0;
+
+ /*
+ * While filling this entry one or more other processes
+ * have looked it up in the cache, and have slept
+ * waiting for it to become available.
+ */
+ if (entry->num_waiters) {
+ spin_unlock(&cache->lock);
+ wake_up_all(&entry->wait_queue);
+ } else
+ spin_unlock(&cache->lock);
+
+ goto out;
+ }
+
+ /*
+ * Block already in cache. Increment refcount so it doesn't
+ * get reused until we're finished with it; if it was
+ * previously unused there's one less cache entry available
+ * for reuse.
+ */
+ entry = &cache->entry[i];
+ if (entry->refcount == 0)
+ cache->unused--;
+ entry->refcount++;
+
+ /*
+ * If the entry is currently being filled in by another process
+ * go to sleep waiting for it to become available.
+ */
+ if (entry->pending) {
+ entry->num_waiters++;
+ spin_unlock(&cache->lock);
+ wait_event(entry->wait_queue, !entry->pending);
+ } else
+ spin_unlock(&cache->lock);
+
+ goto out;
+ }
+
+out:
+ TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
+ cache->name, i, entry->block, entry->refcount, entry->error);
+
+ if (entry->error)
+ ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
+ block);
+ return entry;
+}
+
+
+/*
+ * Release cache entry, once usage count is zero it can be reused.
+ */
+void squashfs_cache_put(struct squashfs_cache_entry *entry)
+{
+ struct squashfs_cache *cache = entry->cache;
+
+ spin_lock(&cache->lock);
+ entry->refcount--;
+ if (entry->refcount == 0) {
+ cache->unused++;
+ /*
+ * If there are any processes waiting for a block to become
+ * available, wake one up.
+ */
+ if (cache->num_waiters) {
+ spin_unlock(&cache->lock);
+ wake_up(&cache->wait_queue);
+ return;
+ }
+ }
+ spin_unlock(&cache->lock);
+}
+
+/*
+ * Delete cache reclaiming all kmalloced buffers.
+ */
+void squashfs_cache_delete(struct squashfs_cache *cache)
+{
+ int i, j;
+
+ if (cache == NULL)
+ return;
+
+ for (i = 0; i < cache->entries; i++) {
+ if (cache->entry[i].data) {
+ for (j = 0; j < cache->pages; j++)
+ kfree(cache->entry[i].data[j]);
+ kfree(cache->entry[i].data);
+ }
+ kfree(cache->entry[i].actor);
+ }
+
+ kfree(cache->entry);
+ kfree(cache);
+}
+
+
+/*
+ * Initialise cache allocating the specified number of entries, each of
+ * size block_size. To avoid vmalloc fragmentation issues each entry
+ * is allocated as a sequence of kmalloced PAGE_SIZE buffers.
+ */
+struct squashfs_cache *squashfs_cache_init(char *name, int entries,
+ int block_size)
+{
+ int i, j;
+ struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+
+ if (cache == NULL) {
+ ERROR("Failed to allocate %s cache\n", name);
+ return NULL;
+ }
+
+ cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
+ if (cache->entry == NULL) {
+ ERROR("Failed to allocate %s cache\n", name);
+ goto cleanup;
+ }
+
+ cache->curr_blk = 0;
+ cache->next_blk = 0;
+ cache->unused = entries;
+ cache->entries = entries;
+ cache->block_size = block_size;
+ cache->pages = block_size >> PAGE_SHIFT;
+ cache->pages = cache->pages ? cache->pages : 1;
+ cache->name = name;
+ cache->num_waiters = 0;
+ spin_lock_init(&cache->lock);
+ init_waitqueue_head(&cache->wait_queue);
+
+ for (i = 0; i < entries; i++) {
+ struct squashfs_cache_entry *entry = &cache->entry[i];
+
+ init_waitqueue_head(&cache->entry[i].wait_queue);
+ entry->cache = cache;
+ entry->block = SQUASHFS_INVALID_BLK;
+ entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
+ if (entry->data == NULL) {
+ ERROR("Failed to allocate %s cache entry\n", name);
+ goto cleanup;
+ }
+
+ for (j = 0; j < cache->pages; j++) {
+ entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (entry->data[j] == NULL) {
+ ERROR("Failed to allocate %s buffer\n", name);
+ goto cleanup;
+ }
+ }
+
+ entry->actor = squashfs_page_actor_init(entry->data,
+ cache->pages, 0);
+ if (entry->actor == NULL) {
+ ERROR("Failed to allocate %s cache entry\n", name);
+ goto cleanup;
+ }
+ }
+
+ return cache;
+
+cleanup:
+ squashfs_cache_delete(cache);
+ return NULL;
+}
+
+
+/*
+ * Copy up to length bytes from cache entry to buffer starting at offset bytes
+ * into the cache entry. If fewer than length bytes are available, copy the
+ * number of bytes available. In all cases return the number of bytes copied.
+ */
+int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
+ int offset, int length)
+{
+ int remaining = length;
+
+ if (length == 0)
+ return 0;
+ else if (buffer == NULL)
+ return min(length, entry->length - offset);
+
+ while (offset < entry->length) {
+ void *buff = entry->data[offset / PAGE_SIZE]
+ + (offset % PAGE_SIZE);
+ int bytes = min_t(int, entry->length - offset,
+ PAGE_SIZE - (offset % PAGE_SIZE));
+
+ if (bytes >= remaining) {
+ memcpy(buffer, buff, remaining);
+ remaining = 0;
+ break;
+ }
+
+ memcpy(buffer, buff, bytes);
+ buffer += bytes;
+ remaining -= bytes;
+ offset += bytes;
+ }
+
+ return length - remaining;
+}
+
+
+/*
+ * Read length bytes from metadata position <block, offset> (block is the
+ * start of the compressed block on disk, and offset is the offset into
+ * the block once decompressed). Data is packed into consecutive blocks,
+ * and length bytes may require reading more than one block.
+ */
+int squashfs_read_metadata(struct super_block *sb, void *buffer,
+ u64 *block, int *offset, int length)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ int bytes, res = length;
+ struct squashfs_cache_entry *entry;
+
+ TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
+
+ if (unlikely(length < 0))
+ return -EIO;
+
+ while (length) {
+ entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
+ if (entry->error) {
+ res = entry->error;
+ goto error;
+ } else if (*offset >= entry->length) {
+ res = -EIO;
+ goto error;
+ }
+
+ bytes = squashfs_copy_data(buffer, entry, *offset, length);
+ if (buffer)
+ buffer += bytes;
+ length -= bytes;
+ *offset += bytes;
+
+ if (*offset == entry->length) {
+ *block = entry->next_index;
+ *offset = 0;
+ }
+
+ squashfs_cache_put(entry);
+ }
+
+ return res;
+
+error:
+ squashfs_cache_put(entry);
+ return res;
+}
+
+
+/*
+ * Look-up in the fragment cache the fragment located at <start_block> in the
+ * filesystem. If necessary read and decompress it from disk.
+ */
+struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb,
+ u64 start_block, int length)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+
+ return squashfs_cache_get(sb, msblk->fragment_cache, start_block,
+ length);
+}
+
+
+/*
+ * Read and decompress the datablock located at <start_block> in the
+ * filesystem. The cache is used here to avoid duplicating locking and
+ * read/decompress code.
+ */
+struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
+ u64 start_block, int length)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+
+ return squashfs_cache_get(sb, msblk->read_page, start_block, length);
+}
+
+
+/*
+ * Read a filesystem table (uncompressed sequence of bytes) from disk
+ */
+void *squashfs_read_table(struct super_block *sb, u64 block, int length)
+{
+ int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ int i, res;
+ void *table, *buffer, **data;
+ struct squashfs_page_actor *actor;
+
+ table = buffer = kmalloc(length, GFP_KERNEL);
+ if (table == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
+ if (data == NULL) {
+ res = -ENOMEM;
+ goto failed;
+ }
+
+ actor = squashfs_page_actor_init(data, pages, length);
+ if (actor == NULL) {
+ res = -ENOMEM;
+ goto failed2;
+ }
+
+ for (i = 0; i < pages; i++, buffer += PAGE_SIZE)
+ data[i] = buffer;
+
+ res = squashfs_read_data(sb, block, length |
+ SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor);
+
+ kfree(data);
+ kfree(actor);
+
+ if (res < 0)
+ goto failed;
+
+ return table;
+
+failed2:
+ kfree(data);
+failed:
+ kfree(table);
+ return ERR_PTR(res);
+}
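As a usage illustration of the cache interfaces defined in this file, the sketch below shows the typical caller pattern for reading a file's tail-end out of the fragment cache. The function name and simplified error handling are illustrative only; the real callers live in file.c.

/* Illustrative caller of the fragment cache (sketch, not the real file.c code). */
static int read_tail_end(struct super_block *sb, u64 fragment_block,
			 int fragment_size, void *dest, int offset, int bytes)
{
	struct squashfs_cache_entry *fragment;
	int copied;

	/* Look the fragment up in the cache, reading and decompressing it
	 * from disk if necessary (the refcount is incremented for us). */
	fragment = squashfs_get_fragment(sb, fragment_block, fragment_size);
	if (fragment->error) {
		int err = fragment->error;

		squashfs_cache_put(fragment);
		return err;
	}

	/* Copy the requested span out of the decompressed fragment. */
	copied = squashfs_copy_data(dest, fragment, offset, bytes);

	/* Drop our reference so the entry can eventually be reused. */
	squashfs_cache_put(fragment);
	return copied;
}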
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
new file mode 100644
index 0000000000..a676084be2
--- /dev/null
+++ b/fs/squashfs/decompressor.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * decompressor.c
+ */
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "decompressor.h"
+#include "squashfs.h"
+#include "page_actor.h"
+
+/*
+ * This file (and decompressor.h) implements a decompressor framework for
+ * Squashfs, allowing multiple decompressors to be easily supported
+ */
+
+static const struct squashfs_decompressor squashfs_lzma_unsupported_comp_ops = {
+ NULL, NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0
+};
+
+#ifndef CONFIG_SQUASHFS_LZ4
+static const struct squashfs_decompressor squashfs_lz4_comp_ops = {
+ NULL, NULL, NULL, NULL, LZ4_COMPRESSION, "lz4", 0
+};
+#endif
+
+#ifndef CONFIG_SQUASHFS_LZO
+static const struct squashfs_decompressor squashfs_lzo_comp_ops = {
+ NULL, NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
+};
+#endif
+
+#ifndef CONFIG_SQUASHFS_XZ
+static const struct squashfs_decompressor squashfs_xz_comp_ops = {
+ NULL, NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0
+};
+#endif
+
+#ifndef CONFIG_SQUASHFS_ZLIB
+static const struct squashfs_decompressor squashfs_zlib_comp_ops = {
+ NULL, NULL, NULL, NULL, ZLIB_COMPRESSION, "zlib", 0
+};
+#endif
+
+#ifndef CONFIG_SQUASHFS_ZSTD
+static const struct squashfs_decompressor squashfs_zstd_comp_ops = {
+ NULL, NULL, NULL, NULL, ZSTD_COMPRESSION, "zstd", 0
+};
+#endif
+
+static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
+ NULL, NULL, NULL, NULL, 0, "unknown", 0
+};
+
+static const struct squashfs_decompressor *decompressor[] = {
+ &squashfs_zlib_comp_ops,
+ &squashfs_lz4_comp_ops,
+ &squashfs_lzo_comp_ops,
+ &squashfs_xz_comp_ops,
+ &squashfs_lzma_unsupported_comp_ops,
+ &squashfs_zstd_comp_ops,
+ &squashfs_unknown_comp_ops
+};
+
+
+const struct squashfs_decompressor *squashfs_lookup_decompressor(int id)
+{
+ int i;
+
+ for (i = 0; decompressor[i]->id; i++)
+ if (id == decompressor[i]->id)
+ break;
+
+ return decompressor[i];
+}
+
+
+static void *get_comp_opts(struct super_block *sb, unsigned short flags)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ void *buffer = NULL, *comp_opts;
+ struct squashfs_page_actor *actor = NULL;
+ int length = 0;
+
+ /*
+ * Read decompressor specific options from file system if present
+ */
+ if (SQUASHFS_COMP_OPTS(flags)) {
+ buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (buffer == NULL) {
+ comp_opts = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ actor = squashfs_page_actor_init(&buffer, 1, 0);
+ if (actor == NULL) {
+ comp_opts = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ length = squashfs_read_data(sb,
+ sizeof(struct squashfs_super_block), 0, NULL, actor);
+
+ if (length < 0) {
+ comp_opts = ERR_PTR(length);
+ goto out;
+ }
+ }
+
+ comp_opts = squashfs_comp_opts(msblk, buffer, length);
+
+out:
+ kfree(actor);
+ kfree(buffer);
+ return comp_opts;
+}
+
+
+void *squashfs_decompressor_setup(struct super_block *sb, unsigned short flags)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ void *stream, *comp_opts = get_comp_opts(sb, flags);
+
+ if (IS_ERR(comp_opts))
+ return comp_opts;
+
+ stream = msblk->thread_ops->create(msblk, comp_opts);
+ if (IS_ERR(stream))
+ kfree(comp_opts);
+
+ return stream;
+}
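A hedged sketch of how the decompressor table above is typically consumed at mount time. The surrounding context (super.c) is not part of this hunk, so the function below is illustrative; it only relies on squashfs_lookup_decompressor() and the ops fields shown in this file.

/* Illustrative id-to-ops lookup (sketch of the super.c side). */
static int select_decompressor(struct squashfs_sb_info *msblk, int id)
{
	msblk->decompressor = squashfs_lookup_decompressor(id);

	/*
	 * Ids that are unknown, or known but not compiled in, resolve to a
	 * stub ops table whose decompress callback is NULL, so reject them.
	 */
	if (msblk->decompressor->decompress == NULL) {
		ERROR("Filesystem uses \"%s\" compression. This is not supported\n",
		      msblk->decompressor->name);
		return -EINVAL;
	}
	return 0;
}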
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
new file mode 100644
index 0000000000..19ab608343
--- /dev/null
+++ b/fs/squashfs/decompressor.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef DECOMPRESSOR_H
+#define DECOMPRESSOR_H
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * decompressor.h
+ */
+
+#include <linux/bio.h>
+
+struct squashfs_decompressor {
+ void *(*init)(struct squashfs_sb_info *, void *);
+ void *(*comp_opts)(struct squashfs_sb_info *, void *, int);
+ void (*free)(void *);
+ int (*decompress)(struct squashfs_sb_info *, void *,
+ struct bio *, int, int, struct squashfs_page_actor *);
+ int id;
+ char *name;
+ int alloc_buffer;
+ int supported;
+};
+
+static inline void *squashfs_comp_opts(struct squashfs_sb_info *msblk,
+ void *buff, int length)
+{
+ return msblk->decompressor->comp_opts ?
+ msblk->decompressor->comp_opts(msblk, buff, length) : NULL;
+}
+
+#ifdef CONFIG_SQUASHFS_XZ
+extern const struct squashfs_decompressor squashfs_xz_comp_ops;
+#endif
+
+#ifdef CONFIG_SQUASHFS_LZ4
+extern const struct squashfs_decompressor squashfs_lz4_comp_ops;
+#endif
+
+#ifdef CONFIG_SQUASHFS_LZO
+extern const struct squashfs_decompressor squashfs_lzo_comp_ops;
+#endif
+
+#ifdef CONFIG_SQUASHFS_ZLIB
+extern const struct squashfs_decompressor squashfs_zlib_comp_ops;
+#endif
+
+#ifdef CONFIG_SQUASHFS_ZSTD
+extern const struct squashfs_decompressor squashfs_zstd_comp_ops;
+#endif
+
+#endif
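To illustrate how a compression wrapper plugs into the struct squashfs_decompressor ops table declared above, here is a hedged, purely hypothetical example; "null" is not a real Squashfs compression type and the id value is invented.

/* Hypothetical pass-through wrapper (illustration only). */
static void *null_init(struct squashfs_sb_info *msblk, void *comp_opts)
{
	return NULL;	/* no per-stream state needed for this sketch */
}

static void null_free(void *strm)
{
}

static int null_decompress(struct squashfs_sb_info *msblk, void *strm,
	struct bio *bio, int offset, int length,
	struct squashfs_page_actor *output)
{
	/* A real wrapper would decode the bio contents into the page actor. */
	return -EOPNOTSUPP;
}

static const struct squashfs_decompressor squashfs_null_comp_ops = {
	.init		= null_init,
	.free		= null_free,
	.decompress	= null_decompress,
	.id		= 99,		/* invented id, not an on-disk value */
	.name		= "null",
	.alloc_buffer	= 0,
	.supported	= 1
};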
diff --git a/fs/squashfs/decompressor_multi.c b/fs/squashfs/decompressor_multi.c
new file mode 100644
index 0000000000..416c53eedb
--- /dev/null
+++ b/fs/squashfs/decompressor_multi.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013
+ * Minchan Kim <minchan@kernel.org>
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/bio.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/cpumask.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "decompressor.h"
+#include "squashfs.h"
+
+/*
+ * This file implements multi-threaded decompression in the
+ * decompressor framework
+ */
+
+
+/*
+ * The reason for multiplying by two is that a CPU can request new I/O
+ * while it is waiting for a previous request to complete.
+ */
+#define MAX_DECOMPRESSOR (num_online_cpus() * 2)
+
+
+static int squashfs_max_decompressors(void)
+{
+ return MAX_DECOMPRESSOR;
+}
+
+struct squashfs_stream {
+ void *comp_opts;
+ struct list_head strm_list;
+ struct mutex mutex;
+ int avail_decomp;
+ wait_queue_head_t wait;
+};
+
+
+struct decomp_stream {
+ void *stream;
+ struct list_head list;
+};
+
+
+static void put_decomp_stream(struct decomp_stream *decomp_strm,
+ struct squashfs_stream *stream)
+{
+ mutex_lock(&stream->mutex);
+ list_add(&decomp_strm->list, &stream->strm_list);
+ mutex_unlock(&stream->mutex);
+ wake_up(&stream->wait);
+}
+
+static void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
+ void *comp_opts)
+{
+ struct squashfs_stream *stream;
+ struct decomp_stream *decomp_strm = NULL;
+ int err = -ENOMEM;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ goto out;
+
+ stream->comp_opts = comp_opts;
+ mutex_init(&stream->mutex);
+ INIT_LIST_HEAD(&stream->strm_list);
+ init_waitqueue_head(&stream->wait);
+
+ /*
+ * We should have at least one decompressor by default,
+ * so that if we fail to allocate a new decompressor dynamically
+ * we can always fall back to the default one and the
+ * file system keeps working.
+ */
+ decomp_strm = kmalloc(sizeof(*decomp_strm), GFP_KERNEL);
+ if (!decomp_strm)
+ goto out;
+
+ decomp_strm->stream = msblk->decompressor->init(msblk,
+ stream->comp_opts);
+ if (IS_ERR(decomp_strm->stream)) {
+ err = PTR_ERR(decomp_strm->stream);
+ goto out;
+ }
+
+ list_add(&decomp_strm->list, &stream->strm_list);
+ stream->avail_decomp = 1;
+ return stream;
+
+out:
+ kfree(decomp_strm);
+ kfree(stream);
+ return ERR_PTR(err);
+}
+
+
+static void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
+{
+ struct squashfs_stream *stream = msblk->stream;
+ if (stream) {
+ struct decomp_stream *decomp_strm;
+
+ while (!list_empty(&stream->strm_list)) {
+ decomp_strm = list_entry(stream->strm_list.prev,
+ struct decomp_stream, list);
+ list_del(&decomp_strm->list);
+ msblk->decompressor->free(decomp_strm->stream);
+ kfree(decomp_strm);
+ stream->avail_decomp--;
+ }
+ WARN_ON(stream->avail_decomp);
+ kfree(stream->comp_opts);
+ kfree(stream);
+ }
+}
+
+
+static struct decomp_stream *get_decomp_stream(struct squashfs_sb_info *msblk,
+ struct squashfs_stream *stream)
+{
+ struct decomp_stream *decomp_strm;
+
+ while (1) {
+ mutex_lock(&stream->mutex);
+
+ /* There is an available decomp_stream */
+ if (!list_empty(&stream->strm_list)) {
+ decomp_strm = list_entry(stream->strm_list.prev,
+ struct decomp_stream, list);
+ list_del(&decomp_strm->list);
+ mutex_unlock(&stream->mutex);
+ break;
+ }
+
+ /*
+ * If there is no available decompressor and the pool is
+ * already full, wait for another user to release one.
+ */
+ if (stream->avail_decomp >= msblk->max_thread_num)
+ goto wait;
+
+ /* Let's allocate new decomp */
+ decomp_strm = kmalloc(sizeof(*decomp_strm), GFP_KERNEL);
+ if (!decomp_strm)
+ goto wait;
+
+ decomp_strm->stream = msblk->decompressor->init(msblk,
+ stream->comp_opts);
+ if (IS_ERR(decomp_strm->stream)) {
+ kfree(decomp_strm);
+ goto wait;
+ }
+
+ stream->avail_decomp++;
+ WARN_ON(stream->avail_decomp > msblk->max_thread_num);
+
+ mutex_unlock(&stream->mutex);
+ break;
+wait:
+ /*
+ * If system memory is under pressure, wait for another
+ * user to release a decompressor instead of pressuring the
+ * VM further, which could cause page cache thrashing.
+ */
+ mutex_unlock(&stream->mutex);
+ wait_event(stream->wait,
+ !list_empty(&stream->strm_list));
+ }
+
+ return decomp_strm;
+}
+
+
+static int squashfs_decompress(struct squashfs_sb_info *msblk, struct bio *bio,
+ int offset, int length,
+ struct squashfs_page_actor *output)
+{
+ int res;
+ struct squashfs_stream *stream = msblk->stream;
+ struct decomp_stream *decomp_stream = get_decomp_stream(msblk, stream);
+ res = msblk->decompressor->decompress(msblk, decomp_stream->stream,
+ bio, offset, length, output);
+ put_decomp_stream(decomp_stream, stream);
+ if (res < 0)
+ ERROR("%s decompression failed, data probably corrupt\n",
+ msblk->decompressor->name);
+ return res;
+}
+
+const struct squashfs_decompressor_thread_ops squashfs_decompressor_multi = {
+ .create = squashfs_decompressor_create,
+ .destroy = squashfs_decompressor_destroy,
+ .decompress = squashfs_decompress,
+ .max_decompressors = squashfs_max_decompressors,
+};
diff --git a/fs/squashfs/decompressor_multi_percpu.c b/fs/squashfs/decompressor_multi_percpu.c
new file mode 100644
index 0000000000..8a218e7c23
--- /dev/null
+++ b/fs/squashfs/decompressor_multi_percpu.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/percpu.h>
+#include <linux/local_lock.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "decompressor.h"
+#include "squashfs.h"
+
+/*
+ * This file implements multi-threaded decompression using percpu
+ * variables, one thread per cpu core.
+ */
+
+struct squashfs_stream {
+ void *stream;
+ local_lock_t lock;
+};
+
+static void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
+ void *comp_opts)
+{
+ struct squashfs_stream *stream;
+ struct squashfs_stream __percpu *percpu;
+ int err, cpu;
+
+ percpu = alloc_percpu(struct squashfs_stream);
+ if (percpu == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ for_each_possible_cpu(cpu) {
+ stream = per_cpu_ptr(percpu, cpu);
+ stream->stream = msblk->decompressor->init(msblk, comp_opts);
+ if (IS_ERR(stream->stream)) {
+ err = PTR_ERR(stream->stream);
+ goto out;
+ }
+ local_lock_init(&stream->lock);
+ }
+
+ kfree(comp_opts);
+ return (__force void *) percpu;
+
+out:
+ for_each_possible_cpu(cpu) {
+ stream = per_cpu_ptr(percpu, cpu);
+ if (!IS_ERR_OR_NULL(stream->stream))
+ msblk->decompressor->free(stream->stream);
+ }
+ free_percpu(percpu);
+ return ERR_PTR(err);
+}
+
+static void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
+{
+ struct squashfs_stream __percpu *percpu =
+ (struct squashfs_stream __percpu *) msblk->stream;
+ struct squashfs_stream *stream;
+ int cpu;
+
+ if (msblk->stream) {
+ for_each_possible_cpu(cpu) {
+ stream = per_cpu_ptr(percpu, cpu);
+ msblk->decompressor->free(stream->stream);
+ }
+ free_percpu(percpu);
+ }
+}
+
+static int squashfs_decompress(struct squashfs_sb_info *msblk, struct bio *bio,
+ int offset, int length, struct squashfs_page_actor *output)
+{
+ struct squashfs_stream *stream;
+ struct squashfs_stream __percpu *percpu =
+ (struct squashfs_stream __percpu *) msblk->stream;
+ int res;
+
+ local_lock(&percpu->lock);
+ stream = this_cpu_ptr(percpu);
+
+ res = msblk->decompressor->decompress(msblk, stream->stream, bio,
+ offset, length, output);
+
+ local_unlock(&percpu->lock);
+
+ if (res < 0)
+ ERROR("%s decompression failed, data probably corrupt\n",
+ msblk->decompressor->name);
+
+ return res;
+}
+
+static int squashfs_max_decompressors(void)
+{
+ return num_possible_cpus();
+}
+
+const struct squashfs_decompressor_thread_ops squashfs_decompressor_percpu = {
+ .create = squashfs_decompressor_create,
+ .destroy = squashfs_decompressor_destroy,
+ .decompress = squashfs_decompress,
+ .max_decompressors = squashfs_max_decompressors,
+};
diff --git a/fs/squashfs/decompressor_single.c b/fs/squashfs/decompressor_single.c
new file mode 100644
index 0000000000..6f16188771
--- /dev/null
+++ b/fs/squashfs/decompressor_single.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ */
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/bio.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "decompressor.h"
+#include "squashfs.h"
+
+/*
+ * This file implements single-threaded decompression in the
+ * decompressor framework
+ */
+
+struct squashfs_stream {
+ void *stream;
+ struct mutex mutex;
+};
+
+static void *squashfs_decompressor_create(struct squashfs_sb_info *msblk,
+ void *comp_opts)
+{
+ struct squashfs_stream *stream;
+ int err = -ENOMEM;
+
+ stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+ if (stream == NULL)
+ goto out;
+
+ stream->stream = msblk->decompressor->init(msblk, comp_opts);
+ if (IS_ERR(stream->stream)) {
+ err = PTR_ERR(stream->stream);
+ goto out;
+ }
+
+ kfree(comp_opts);
+ mutex_init(&stream->mutex);
+ return stream;
+
+out:
+ kfree(stream);
+ return ERR_PTR(err);
+}
+
+static void squashfs_decompressor_destroy(struct squashfs_sb_info *msblk)
+{
+ struct squashfs_stream *stream = msblk->stream;
+
+ if (stream) {
+ msblk->decompressor->free(stream->stream);
+ kfree(stream);
+ }
+}
+
+static int squashfs_decompress(struct squashfs_sb_info *msblk, struct bio *bio,
+ int offset, int length,
+ struct squashfs_page_actor *output)
+{
+ int res;
+ struct squashfs_stream *stream = msblk->stream;
+
+ mutex_lock(&stream->mutex);
+ res = msblk->decompressor->decompress(msblk, stream->stream, bio,
+ offset, length, output);
+ mutex_unlock(&stream->mutex);
+
+ if (res < 0)
+ ERROR("%s decompression failed, data probably corrupt\n",
+ msblk->decompressor->name);
+
+ return res;
+}
+
+static int squashfs_max_decompressors(void)
+{
+ return 1;
+}
+
+const struct squashfs_decompressor_thread_ops squashfs_decompressor_single = {
+ .create = squashfs_decompressor_create,
+ .destroy = squashfs_decompressor_destroy,
+ .decompress = squashfs_decompress,
+ .max_decompressors = squashfs_max_decompressors,
+};
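The single, multi and percpu implementations each export a const squashfs_decompressor_thread_ops table with the same four callbacks. The sketch below shows one plausible way a mount path could choose between them; the real selection logic lives in super.c (not in this hunk), so treat the function and its string values as assumptions.

/* Illustrative selection of a decompressor thread model (sketch only). */
static const struct squashfs_decompressor_thread_ops *
pick_thread_ops(const char *mode)
{
#ifdef CONFIG_SQUASHFS_DECOMP_MULTI
	if (strcmp(mode, "multi") == 0)
		return &squashfs_decompressor_multi;
#endif
#ifdef CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU
	if (strcmp(mode, "percpu") == 0)
		return &squashfs_decompressor_percpu;
#endif
	/* "single" and anything unrecognised fall back to one thread. */
	return &squashfs_decompressor_single;
}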
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c
new file mode 100644
index 0000000000..a2ade63ecc
--- /dev/null
+++ b/fs/squashfs/dir.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * dir.c
+ */
+
+/*
+ * This file implements code to read directories from disk.
+ *
+ * See namei.c for a description of directory organisation on disk.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+static const unsigned char squashfs_filetype_table[] = {
+ DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
+};
+
+/*
+ * Lookup offset (f_pos) in the directory index, returning the
+ * metadata block containing it.
+ *
+ * If we get an error reading the index then return the part of the index
+ * (if any) we have managed to read - the index isn't essential, just
+ * quicker.
+ */
+static int get_dir_index_using_offset(struct super_block *sb,
+ u64 *next_block, int *next_offset, u64 index_start, int index_offset,
+ int i_count, u64 f_pos)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ int err, i, index, length = 0;
+ unsigned int size;
+ struct squashfs_dir_index dir_index;
+
+ TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %lld\n",
+ i_count, f_pos);
+
+ /*
+ * Translate from external f_pos to the internal f_pos. This
+ * is offset by 3 because we invent "." and ".." entries which are
+ * not actually stored in the directory.
+ */
+ if (f_pos <= 3)
+ return f_pos;
+ f_pos -= 3;
+
+ for (i = 0; i < i_count; i++) {
+ err = squashfs_read_metadata(sb, &dir_index, &index_start,
+ &index_offset, sizeof(dir_index));
+ if (err < 0)
+ break;
+
+ index = le32_to_cpu(dir_index.index);
+ if (index > f_pos)
+ /*
+ * Found the index we're looking for.
+ */
+ break;
+
+ size = le32_to_cpu(dir_index.size) + 1;
+
+ /* size should never be larger than SQUASHFS_NAME_LEN */
+ if (size > SQUASHFS_NAME_LEN)
+ break;
+
+ err = squashfs_read_metadata(sb, NULL, &index_start,
+ &index_offset, size);
+ if (err < 0)
+ break;
+
+ length = index;
+ *next_block = le32_to_cpu(dir_index.start_block) +
+ msblk->directory_table;
+ }
+
+ *next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
+
+ /*
+ * Translate back from internal f_pos to external f_pos.
+ */
+ return length + 3;
+}
+
+
+static int squashfs_readdir(struct file *file, struct dir_context *ctx)
+{
+ struct inode *inode = file_inode(file);
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ u64 block = squashfs_i(inode)->start + msblk->directory_table;
+ int offset = squashfs_i(inode)->offset, length, err;
+ unsigned int inode_number, dir_count, size, type;
+ struct squashfs_dir_header dirh;
+ struct squashfs_dir_entry *dire;
+
+ TRACE("Entered squashfs_readdir [%llx:%x]\n", block, offset);
+
+ dire = kmalloc(sizeof(*dire) + SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
+ if (dire == NULL) {
+ ERROR("Failed to allocate squashfs_dir_entry\n");
+ goto finish;
+ }
+
+ /*
+ * Return "." and ".." entries as the first two filenames in the
+ * directory. To maximise compression these two entries are not
+ * stored in the directory, and so we invent them here.
+ *
+ * It also means that the external f_pos is offset by 3 from the
+ * on-disk directory f_pos.
+ */
+ while (ctx->pos < 3) {
+ char *name;
+ int i_ino;
+
+ if (ctx->pos == 0) {
+ name = ".";
+ size = 1;
+ i_ino = inode->i_ino;
+ } else {
+ name = "..";
+ size = 2;
+ i_ino = squashfs_i(inode)->parent;
+ }
+
+ if (!dir_emit(ctx, name, size, i_ino,
+ squashfs_filetype_table[1]))
+ goto finish;
+
+ ctx->pos += size;
+ }
+
+ length = get_dir_index_using_offset(inode->i_sb, &block, &offset,
+ squashfs_i(inode)->dir_idx_start,
+ squashfs_i(inode)->dir_idx_offset,
+ squashfs_i(inode)->dir_idx_cnt,
+ ctx->pos);
+
+ while (length < i_size_read(inode)) {
+ /*
+ * Read directory header
+ */
+ err = squashfs_read_metadata(inode->i_sb, &dirh, &block,
+ &offset, sizeof(dirh));
+ if (err < 0)
+ goto failed_read;
+
+ length += sizeof(dirh);
+
+ dir_count = le32_to_cpu(dirh.count) + 1;
+
+ if (dir_count > SQUASHFS_DIR_COUNT)
+ goto failed_read;
+
+ while (dir_count--) {
+ /*
+ * Read directory entry.
+ */
+ err = squashfs_read_metadata(inode->i_sb, dire, &block,
+ &offset, sizeof(*dire));
+ if (err < 0)
+ goto failed_read;
+
+ size = le16_to_cpu(dire->size) + 1;
+
+ /* size should never be larger than SQUASHFS_NAME_LEN */
+ if (size > SQUASHFS_NAME_LEN)
+ goto failed_read;
+
+ err = squashfs_read_metadata(inode->i_sb, dire->name,
+ &block, &offset, size);
+ if (err < 0)
+ goto failed_read;
+
+ length += sizeof(*dire) + size;
+
+ if (ctx->pos >= length)
+ continue;
+
+ dire->name[size] = '\0';
+ inode_number = le32_to_cpu(dirh.inode_number) +
+ ((short) le16_to_cpu(dire->inode_number));
+ type = le16_to_cpu(dire->type);
+
+ if (type > SQUASHFS_MAX_DIR_TYPE)
+ goto failed_read;
+
+ if (!dir_emit(ctx, dire->name, size,
+ inode_number,
+ squashfs_filetype_table[type]))
+ goto finish;
+
+ ctx->pos = length;
+ }
+ }
+
+finish:
+ kfree(dire);
+ return 0;
+
+failed_read:
+ ERROR("Unable to read directory block [%llx:%x]\n", block, offset);
+ kfree(dire);
+ return 0;
+}
+
+
+const struct file_operations squashfs_dir_ops = {
+ .read = generic_read_dir,
+ .iterate_shared = squashfs_readdir,
+ .llseek = generic_file_llseek,
+};
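As a small aside on the external/internal position mapping described in the comments above: external positions 0 and 1 are the invented "." and ".." entries (advancing ctx->pos by 1 and 2 respectively), so on-disk directory data starts at external position 3. The hypothetical helper below makes the arithmetic explicit; the real code simply open-codes the +/- 3 adjustment.

/* Illustrative mapping from external f_pos to on-disk directory offset. */
static inline u64 squashfs_dir_external_to_disk(u64 f_pos)
{
	/* Positions 0..2 are covered by the synthetic "." and ".." entries. */
	return f_pos <= 3 ? 0 : f_pos - 3;
}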
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
new file mode 100644
index 0000000000..7237637462
--- /dev/null
+++ b/fs/squashfs/export.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * export.c
+ */
+
+/*
+ * This file implements code to make Squashfs filesystems exportable (NFS etc.)
+ *
+ * The export code uses an inode lookup table to map inode numbers passed in
+ * filehandles to an inode location on disk. This table is stored compressed
+ * into metadata blocks. A second index table is used to locate these. This
+ * second index table, for speed of access (and because it is small), is read
+ * at mount time and cached in memory.
+ *
+ * The inode lookup table is used only by the export code, inode disk
+ * locations are directly encoded in directories, enabling direct access
+ * without an intermediate lookup for all operations except the export ops.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/dcache.h>
+#include <linux/exportfs.h>
+#include <linux/slab.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/*
+ * Look-up inode number (ino) in table, returning the inode location.
+ */
+static long long squashfs_inode_lookup(struct super_block *sb, int ino_num)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1);
+ int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1);
+ u64 start;
+ __le64 ino;
+ int err;
+
+ TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num);
+
+ if (ino_num == 0 || (ino_num - 1) >= msblk->inodes)
+ return -EINVAL;
+
+ start = le64_to_cpu(msblk->inode_lookup_table[blk]);
+
+ err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino));
+ if (err < 0)
+ return err;
+
+ TRACE("squashfs_inode_lookup, inode = 0x%llx\n",
+ (u64) le64_to_cpu(ino));
+
+ return le64_to_cpu(ino);
+}
+
+
+static struct dentry *squashfs_export_iget(struct super_block *sb,
+ unsigned int ino_num)
+{
+ long long ino;
+ struct dentry *dentry = ERR_PTR(-ENOENT);
+
+ TRACE("Entered squashfs_export_iget\n");
+
+ ino = squashfs_inode_lookup(sb, ino_num);
+ if (ino >= 0)
+ dentry = d_obtain_alias(squashfs_iget(sb, ino, ino_num));
+
+ return dentry;
+}
+
+
+static struct dentry *squashfs_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len, int fh_type)
+{
+ if ((fh_type != FILEID_INO32_GEN && fh_type != FILEID_INO32_GEN_PARENT)
+ || fh_len < 2)
+ return NULL;
+
+ return squashfs_export_iget(sb, fid->i32.ino);
+}
+
+
+static struct dentry *squashfs_fh_to_parent(struct super_block *sb,
+ struct fid *fid, int fh_len, int fh_type)
+{
+ if (fh_type != FILEID_INO32_GEN_PARENT || fh_len < 4)
+ return NULL;
+
+ return squashfs_export_iget(sb, fid->i32.parent_ino);
+}
+
+
+static struct dentry *squashfs_get_parent(struct dentry *child)
+{
+ struct inode *inode = d_inode(child);
+ unsigned int parent_ino = squashfs_i(inode)->parent;
+
+ return squashfs_export_iget(inode->i_sb, parent_ino);
+}
+
+
+/*
+ * Read uncompressed inode lookup table indexes off disk into memory
+ */
+__le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
+ u64 lookup_table_start, u64 next_table, unsigned int inodes)
+{
+ unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
+ unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes);
+ int n;
+ __le64 *table;
+ u64 start, end;
+
+ TRACE("In read_inode_lookup_table, length %d\n", length);
+
+ /* Sanity check values */
+
+ /* there should always be at least one inode */
+ if (inodes == 0)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * The computed size of the lookup table (length bytes) should exactly
+ * match the table start and end points
+ */
+ if (length != (next_table - lookup_table_start))
+ return ERR_PTR(-EINVAL);
+
+ table = squashfs_read_table(sb, lookup_table_start, length);
+ if (IS_ERR(table))
+ return table;
+
+ /*
+ * table[0], table[1], ... table[indexes - 1] store the locations
+ * of the compressed inode lookup blocks. Each entry should be
+ * less than the next (i.e. table[0] < table[1]), and the difference
+ * between them should be SQUASHFS_METADATA_SIZE or less.
+ * table[indexes - 1] should be less than lookup_table_start, and
+ * again the difference should be SQUASHFS_METADATA_SIZE or less
+ */
+ for (n = 0; n < (indexes - 1); n++) {
+ start = le64_to_cpu(table[n]);
+ end = le64_to_cpu(table[n + 1]);
+
+ if (start >= end
+ || (end - start) >
+ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ start = le64_to_cpu(table[indexes - 1]);
+ if (start >= lookup_table_start ||
+ (lookup_table_start - start) >
+ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return table;
+}
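The monotonicity check above can be pulled out into a small self-contained helper. This is only a sketch in host-endian userspace terms, with max_gap standing in for SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET and an invented table.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool index_table_sane(const uint64_t *table, size_t indexes,
				     uint64_t table_start, uint64_t max_gap)
	{
		/* each entry must be below the next, with no gap larger than max_gap */
		for (size_t n = 0; n + 1 < indexes; n++)
			if (table[n] >= table[n + 1] ||
			    table[n + 1] - table[n] > max_gap)
				return false;

		/* the last entry must sit just before the index table itself */
		return table[indexes - 1] < table_start &&
		       table_start - table[indexes - 1] <= max_gap;
	}

	int main(void)
	{
		uint64_t table[] = { 1000, 7000, 14000 };	/* invented */

		printf("sane: %d\n", index_table_sane(table, 3, 20000, 8192));
		return 0;
	}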
+
+
+const struct export_operations squashfs_export_ops = {
+ .fh_to_dentry = squashfs_fh_to_dentry,
+ .fh_to_parent = squashfs_fh_to_parent,
+ .get_parent = squashfs_get_parent
+};
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
new file mode 100644
index 0000000000..8ba8c4c507
--- /dev/null
+++ b/fs/squashfs/file.c
@@ -0,0 +1,638 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * file.c
+ */
+
+/*
+ * This file contains code for handling regular files. A regular file
+ * consists of a sequence of contiguous compressed blocks, and/or a
+ * compressed fragment block (tail-end packed block). The compressed size
+ * of each datablock is stored in a block list contained within the
+ * file inode (itself stored in one or more compressed metadata blocks).
+ *
+ * To speed up access to datablocks when reading 'large' files (256 Mbytes or
+ * larger), the code implements an index cache that caches the mapping from
+ * block index to datablock location on disk.
+ *
+ * The index cache allows Squashfs to handle large files (up to 1.75 TiB) while
+ * retaining a simple and space-efficient block list on disk. The cache
+ * is split into slots, caching up to eight 224 GiB files (128 KiB blocks).
+ * Larger files use multiple slots, with 1.75 TiB files using all 8 slots.
+ * The index cache is designed to be memory efficient, and by default uses
+ * 16 KiB.
+ */
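For orientation, a tiny sketch of how a byte offset maps onto the layout described above, assuming the default 128 KiB block size; whether the tail end actually lives in a fragment depends on how the filesystem was built.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t block_size = 128 * 1024;	/* default block size */
		uint64_t file_size  = 300 * 1024;	/* two full blocks plus a tail */
		uint64_t offset     = 280 * 1024;	/* read position */

		uint64_t index    = offset / block_size;
		uint64_t file_end = file_size / block_size;

		if (index == file_end)
			printf("offset %llu lies in the tail end (fragment, if any)\n",
			       (unsigned long long)offset);
		else
			printf("offset %llu lies in datablock %llu\n",
			       (unsigned long long)offset, (unsigned long long)index);
		return 0;
	}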
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/mutex.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+#include "page_actor.h"
+
+/*
+ * Locate cache slot in range [offset, index] for specified inode. If
+ * there's more than one, return the slot closest to index.
+ */
+static struct meta_index *locate_meta_index(struct inode *inode, int offset,
+ int index)
+{
+ struct meta_index *meta = NULL;
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ int i;
+
+ mutex_lock(&msblk->meta_index_mutex);
+
+ TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
+
+ if (msblk->meta_index == NULL)
+ goto not_allocated;
+
+ for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
+ if (msblk->meta_index[i].inode_number == inode->i_ino &&
+ msblk->meta_index[i].offset >= offset &&
+ msblk->meta_index[i].offset <= index &&
+ msblk->meta_index[i].locked == 0) {
+ TRACE("locate_meta_index: entry %d, offset %d\n", i,
+ msblk->meta_index[i].offset);
+ meta = &msblk->meta_index[i];
+ offset = meta->offset;
+ }
+ }
+
+ if (meta)
+ meta->locked = 1;
+
+not_allocated:
+ mutex_unlock(&msblk->meta_index_mutex);
+
+ return meta;
+}
+
+
+/*
+ * Find and initialise an empty cache slot for index offset.
+ */
+static struct meta_index *empty_meta_index(struct inode *inode, int offset,
+ int skip)
+{
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ struct meta_index *meta = NULL;
+ int i;
+
+ mutex_lock(&msblk->meta_index_mutex);
+
+ TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);
+
+ if (msblk->meta_index == NULL) {
+ /*
+ * First time cache index has been used, allocate and
+ * initialise. The cache index could be allocated at
+ * mount time but doing it here means it is allocated only
+ * if a 'large' file is read.
+ */
+ msblk->meta_index = kcalloc(SQUASHFS_META_SLOTS,
+ sizeof(*(msblk->meta_index)), GFP_KERNEL);
+ if (msblk->meta_index == NULL) {
+ ERROR("Failed to allocate meta_index\n");
+ goto failed;
+ }
+ for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
+ msblk->meta_index[i].inode_number = 0;
+ msblk->meta_index[i].locked = 0;
+ }
+ msblk->next_meta_index = 0;
+ }
+
+ for (i = SQUASHFS_META_SLOTS; i &&
+ msblk->meta_index[msblk->next_meta_index].locked; i--)
+ msblk->next_meta_index = (msblk->next_meta_index + 1) %
+ SQUASHFS_META_SLOTS;
+
+ if (i == 0) {
+ TRACE("empty_meta_index: failed!\n");
+ goto failed;
+ }
+
+ TRACE("empty_meta_index: returned meta entry %d, %p\n",
+ msblk->next_meta_index,
+ &msblk->meta_index[msblk->next_meta_index]);
+
+ meta = &msblk->meta_index[msblk->next_meta_index];
+ msblk->next_meta_index = (msblk->next_meta_index + 1) %
+ SQUASHFS_META_SLOTS;
+
+ meta->inode_number = inode->i_ino;
+ meta->offset = offset;
+ meta->skip = skip;
+ meta->entries = 0;
+ meta->locked = 1;
+
+failed:
+ mutex_unlock(&msblk->meta_index_mutex);
+ return meta;
+}
+
+
+static void release_meta_index(struct inode *inode, struct meta_index *meta)
+{
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ mutex_lock(&msblk->meta_index_mutex);
+ meta->locked = 0;
+ mutex_unlock(&msblk->meta_index_mutex);
+}
+
+
+/*
+ * Read the next n blocks from the block list, starting from
+ * metadata block <start_block, offset>.
+ */
+static long long read_indexes(struct super_block *sb, int n,
+ u64 *start_block, int *offset)
+{
+ int err, i;
+ long long block = 0;
+ __le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+ if (blist == NULL) {
+ ERROR("read_indexes: Failed to allocate block_list\n");
+ return -ENOMEM;
+ }
+
+ while (n) {
+ int blocks = min_t(int, n, PAGE_SIZE >> 2);
+
+ err = squashfs_read_metadata(sb, blist, start_block,
+ offset, blocks << 2);
+ if (err < 0) {
+ ERROR("read_indexes: reading block [%llx:%x]\n",
+ *start_block, *offset);
+ goto failure;
+ }
+
+ for (i = 0; i < blocks; i++) {
+ int size = squashfs_block_size(blist[i]);
+ if (size < 0) {
+ err = size;
+ goto failure;
+ }
+ block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
+ }
+ n -= blocks;
+ }
+
+ kfree(blist);
+ return block;
+
+failure:
+ kfree(blist);
+ return err;
+}
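A compressed-size accumulation like the loop above, in miniature: each 32-bit block-list entry carries a compressed length, and summing the lengths of the preceding entries gives a datablock's offset from the file's first datablock. The bit-24 "stored uncompressed" flag is an assumption about the on-disk size word, and the sizes themselves are invented.

	#include <stdint.h>
	#include <stdio.h>

	#define UNCOMPRESSED_FLAG (1u << 24)	/* assumed on-disk flag bit */

	int main(void)
	{
		/* three block-list entries: two compressed, one stored uncompressed */
		uint32_t blist[] = { 0x18000, 0x1fa30, UNCOMPRESSED_FLAG | 0x20000 };
		uint64_t block = 0;

		for (int i = 0; i < 3; i++)
			block += blist[i] & ~UNCOMPRESSED_FLAG;

		printf("datablock 3 begins %llu bytes after the start of datablock 0\n",
		       (unsigned long long)block);
		return 0;
	}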
+
+
+/*
+ * Each cache index slot has SQUASHFS_META_ENTRIES entries, each of which
+ * can cache one index -> datablock/blocklist-block mapping. We wish
+ * to distribute these over the length of the file: entry[0] maps index x,
+ * entry[1] maps index x + skip, entry[2] maps index x + 2 * skip, and so on.
+ * The larger the file, the greater the skip factor. The skip factor is
+ * limited to the size of the metadata cache (SQUASHFS_CACHED_BLKS) to ensure
+ * the number of metadata blocks that need to be read fits into the cache.
+ * If the skip factor is limited in this way then the file will use multiple
+ * slots.
+ */
+static inline int calculate_skip(u64 blocks)
+{
+ u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
+ * SQUASHFS_META_INDEXES);
+ return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
+}
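Roughly the same arithmetic in userspace, with stand-in values for the SQUASHFS_* constants (the values below are assumptions, not the authoritative definitions), showing how the skip factor grows with file size and is then clamped.

	#include <stdint.h>
	#include <stdio.h>

	#define META_ENTRIES 127	/* entries per cache slot (assumed) */
	#define META_INDEXES 2048	/* block indexes covered per entry (assumed) */
	#define CACHED_BLKS  8		/* metadata cache size in blocks (assumed) */

	static uint64_t calc_skip(uint64_t blocks)
	{
		uint64_t skip = blocks / ((META_ENTRIES + 1) * META_INDEXES) + 1;

		return skip < CACHED_BLKS - 1 ? skip : CACHED_BLKS - 1;
	}

	int main(void)
	{
		/* block counts for 1 GiB and 1 TiB files with 128 KiB blocks */
		printf("skip(1 GiB) = %llu\n",
		       (unsigned long long)calc_skip((1ULL << 30) >> 17));
		printf("skip(1 TiB) = %llu\n",
		       (unsigned long long)calc_skip((1ULL << 40) >> 17));
		return 0;
	}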
+
+
+/*
+ * Search and grow the index cache for the specified inode, returning the
+ * on-disk locations of the datablock and block list metadata block
+ * <index_block, index_offset> for index (scaled to nearest cache index).
+ */
+static int fill_meta_index(struct inode *inode, int index,
+ u64 *index_block, int *index_offset, u64 *data_block)
+{
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ int skip = calculate_skip(i_size_read(inode) >> msblk->block_log);
+ int offset = 0;
+ struct meta_index *meta;
+ struct meta_entry *meta_entry;
+ u64 cur_index_block = squashfs_i(inode)->block_list_start;
+ int cur_offset = squashfs_i(inode)->offset;
+ u64 cur_data_block = squashfs_i(inode)->start;
+ int err, i;
+
+ /*
+ * Scale index to cache index (cache slot entry)
+ */
+ index /= SQUASHFS_META_INDEXES * skip;
+
+ while (offset < index) {
+ meta = locate_meta_index(inode, offset + 1, index);
+
+ if (meta == NULL) {
+ meta = empty_meta_index(inode, offset + 1, skip);
+ if (meta == NULL)
+ goto all_done;
+ } else {
+ offset = index < meta->offset + meta->entries ? index :
+ meta->offset + meta->entries - 1;
+ meta_entry = &meta->meta_entry[offset - meta->offset];
+ cur_index_block = meta_entry->index_block +
+ msblk->inode_table;
+ cur_offset = meta_entry->offset;
+ cur_data_block = meta_entry->data_block;
+ TRACE("get_meta_index: offset %d, meta->offset %d, "
+ "meta->entries %d\n", offset, meta->offset,
+ meta->entries);
+ TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
+ " data_block 0x%llx\n", cur_index_block,
+ cur_offset, cur_data_block);
+ }
+
+ /*
+ * If necessary grow cache slot by reading block list. Cache
+ * slot is extended up to index or to the end of the slot, in
+ * which case further slots will be used.
+ */
+ for (i = meta->offset + meta->entries; i <= index &&
+ i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
+ int blocks = skip * SQUASHFS_META_INDEXES;
+ long long res = read_indexes(inode->i_sb, blocks,
+ &cur_index_block, &cur_offset);
+
+ if (res < 0) {
+ if (meta->entries == 0)
+ /*
+ * Don't leave an empty slot on read
+ * error allocated to this inode...
+ */
+ meta->inode_number = 0;
+ err = res;
+ goto failed;
+ }
+
+ cur_data_block += res;
+ meta_entry = &meta->meta_entry[i - meta->offset];
+ meta_entry->index_block = cur_index_block -
+ msblk->inode_table;
+ meta_entry->offset = cur_offset;
+ meta_entry->data_block = cur_data_block;
+ meta->entries++;
+ offset++;
+ }
+
+ TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
+ meta->offset, meta->entries);
+
+ release_meta_index(inode, meta);
+ }
+
+all_done:
+ *index_block = cur_index_block;
+ *index_offset = cur_offset;
+ *data_block = cur_data_block;
+
+ /*
+ * Scale cache index (cache slot entry) to index
+ */
+ return offset * SQUASHFS_META_INDEXES * skip;
+
+failed:
+ release_meta_index(inode, meta);
+ return err;
+}
+
+
+/*
+ * Get the on-disk location and compressed size of the datablock
+ * specified by index. Fill_meta_index() does most of the work.
+ */
+static int read_blocklist(struct inode *inode, int index, u64 *block)
+{
+ u64 start;
+ long long blks;
+ int offset;
+ __le32 size;
+ int res = fill_meta_index(inode, index, &start, &offset, block);
+
+ TRACE("read_blocklist: res %d, index %d, start 0x%llx, offset"
+ " 0x%x, block 0x%llx\n", res, index, start, offset,
+ *block);
+
+ if (res < 0)
+ return res;
+
+ /*
+ * res contains the index of the mapping returned by fill_meta_index();
+ * this will likely be less than the desired index (because the
+ * meta_index cache works at a higher granularity). Read any
+ * extra block indexes needed.
+ */
+ if (res < index) {
+ blks = read_indexes(inode->i_sb, index - res, &start, &offset);
+ if (blks < 0)
+ return (int) blks;
+ *block += blks;
+ }
+
+ /*
+ * Read length of block specified by index.
+ */
+ res = squashfs_read_metadata(inode->i_sb, &size, &start, &offset,
+ sizeof(size));
+ if (res < 0)
+ return res;
+ return squashfs_block_size(size);
+}
+
+void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
+{
+ int copied;
+ void *pageaddr;
+
+ pageaddr = kmap_atomic(page);
+ copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
+ memset(pageaddr + copied, 0, PAGE_SIZE - copied);
+ kunmap_atomic(pageaddr);
+
+ flush_dcache_page(page);
+ if (copied == avail)
+ SetPageUptodate(page);
+ else
+ SetPageError(page);
+}
+
+/* Copy data into page cache */
+void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
+ int bytes, int offset)
+{
+ struct inode *inode = page->mapping->host;
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
+ int start_index = page->index & ~mask, end_index = start_index | mask;
+
+ /*
+ * Loop copying datablock into pages. As the datablock likely covers
+ * many PAGE_SIZE pages (default block size is 128 KiB) explicitly
+ * grab the pages from the page cache, except for the page that we've
+ * been called to fill.
+ */
+ for (i = start_index; i <= end_index && bytes > 0; i++,
+ bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
+ struct page *push_page;
+ int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0;
+
+ TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
+
+ push_page = (i == page->index) ? page :
+ grab_cache_page_nowait(page->mapping, i);
+
+ if (!push_page)
+ continue;
+
+ if (PageUptodate(push_page))
+ goto skip_page;
+
+ squashfs_fill_page(push_page, buffer, offset, avail);
+skip_page:
+ unlock_page(push_page);
+ if (i != page->index)
+ put_page(push_page);
+ }
+}
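The start_index/end_index arithmetic above, shown on its own: with 128 KiB blocks and 4 KiB pages (assumed here) a datablock spans 32 page-cache pages, and the mask rounds a page index down and up to the first and last page of its block.

	#include <stdio.h>

	int main(void)
	{
		unsigned int block_log = 17, page_shift = 12;	/* 128 KiB, 4 KiB */
		unsigned long page_index = 45;			/* page being filled */
		unsigned long mask = (1UL << (block_log - page_shift)) - 1;

		printf("page %lu shares its datablock with pages %lu..%lu\n",
		       page_index, page_index & ~mask, page_index | mask);
		return 0;
	}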
+
+/* Read datablock stored packed inside a fragment (tail-end packed block) */
+static int squashfs_readpage_fragment(struct page *page, int expected)
+{
+ struct inode *inode = page->mapping->host;
+ struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
+ squashfs_i(inode)->fragment_block,
+ squashfs_i(inode)->fragment_size);
+ int res = buffer->error;
+
+ if (res)
+ ERROR("Unable to read page, block %llx, size %x\n",
+ squashfs_i(inode)->fragment_block,
+ squashfs_i(inode)->fragment_size);
+ else
+ squashfs_copy_cache(page, buffer, expected,
+ squashfs_i(inode)->fragment_offset);
+
+ squashfs_cache_put(buffer);
+ return res;
+}
+
+static int squashfs_readpage_sparse(struct page *page, int expected)
+{
+ squashfs_copy_cache(page, NULL, expected, 0);
+ return 0;
+}
+
+static int squashfs_read_folio(struct file *file, struct folio *folio)
+{
+ struct page *page = &folio->page;
+ struct inode *inode = page->mapping->host;
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ int index = page->index >> (msblk->block_log - PAGE_SHIFT);
+ int file_end = i_size_read(inode) >> msblk->block_log;
+ int expected = index == file_end ?
+ (i_size_read(inode) & (msblk->block_size - 1)) :
+ msblk->block_size;
+ int res = 0;
+ void *pageaddr;
+
+ TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
+ page->index, squashfs_i(inode)->start);
+
+ if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
+ PAGE_SHIFT))
+ goto out;
+
+ if (index < file_end || squashfs_i(inode)->fragment_block ==
+ SQUASHFS_INVALID_BLK) {
+ u64 block = 0;
+
+ res = read_blocklist(inode, index, &block);
+ if (res < 0)
+ goto error_out;
+
+ if (res == 0)
+ res = squashfs_readpage_sparse(page, expected);
+ else
+ res = squashfs_readpage_block(page, block, res, expected);
+ } else
+ res = squashfs_readpage_fragment(page, expected);
+
+ if (!res)
+ return 0;
+
+error_out:
+ SetPageError(page);
+out:
+ pageaddr = kmap_atomic(page);
+ memset(pageaddr, 0, PAGE_SIZE);
+ kunmap_atomic(pageaddr);
+ flush_dcache_page(page);
+ if (res == 0)
+ SetPageUptodate(page);
+ unlock_page(page);
+
+ return res;
+}
+
+static int squashfs_readahead_fragment(struct page **page,
+ unsigned int pages, unsigned int expected)
+{
+ struct inode *inode = page[0]->mapping->host;
+ struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
+ squashfs_i(inode)->fragment_block,
+ squashfs_i(inode)->fragment_size);
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ unsigned int n, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
+ int error = buffer->error;
+
+ if (error)
+ goto out;
+
+ expected += squashfs_i(inode)->fragment_offset;
+
+ for (n = 0; n < pages; n++) {
+ unsigned int base = (page[n]->index & mask) << PAGE_SHIFT;
+ unsigned int offset = base + squashfs_i(inode)->fragment_offset;
+
+ if (expected > offset) {
+ unsigned int avail = min_t(unsigned int, expected -
+ offset, PAGE_SIZE);
+
+ squashfs_fill_page(page[n], buffer, offset, avail);
+ }
+
+ unlock_page(page[n]);
+ put_page(page[n]);
+ }
+
+out:
+ squashfs_cache_put(buffer);
+ return error;
+}
+
+static void squashfs_readahead(struct readahead_control *ractl)
+{
+ struct inode *inode = ractl->mapping->host;
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+ size_t mask = (1UL << msblk->block_log) - 1;
+ unsigned short shift = msblk->block_log - PAGE_SHIFT;
+ loff_t start = readahead_pos(ractl) & ~mask;
+ size_t len = readahead_length(ractl) + readahead_pos(ractl) - start;
+ struct squashfs_page_actor *actor;
+ unsigned int nr_pages = 0;
+ struct page **pages;
+ int i, file_end = i_size_read(inode) >> msblk->block_log;
+ unsigned int max_pages = 1UL << shift;
+
+ readahead_expand(ractl, start, (len | mask) + 1);
+
+ pages = kmalloc_array(max_pages, sizeof(void *), GFP_KERNEL);
+ if (!pages)
+ return;
+
+ for (;;) {
+ pgoff_t index;
+ int res, bsize;
+ u64 block = 0;
+ unsigned int expected;
+ struct page *last_page;
+
+ expected = start >> msblk->block_log == file_end ?
+ (i_size_read(inode) & (msblk->block_size - 1)) :
+ msblk->block_size;
+
+ max_pages = (expected + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ nr_pages = __readahead_batch(ractl, pages, max_pages);
+ if (!nr_pages)
+ break;
+
+ if (readahead_pos(ractl) >= i_size_read(inode))
+ goto skip_pages;
+
+ index = pages[0]->index >> shift;
+
+ if ((pages[nr_pages - 1]->index >> shift) != index)
+ goto skip_pages;
+
+ if (index == file_end && squashfs_i(inode)->fragment_block !=
+ SQUASHFS_INVALID_BLK) {
+ res = squashfs_readahead_fragment(pages, nr_pages,
+ expected);
+ if (res)
+ goto skip_pages;
+ continue;
+ }
+
+ bsize = read_blocklist(inode, index, &block);
+ if (bsize == 0)
+ goto skip_pages;
+
+ actor = squashfs_page_actor_init_special(msblk, pages, nr_pages,
+ expected);
+ if (!actor)
+ goto skip_pages;
+
+ res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
+
+ last_page = squashfs_page_actor_free(actor);
+
+ if (res == expected) {
+ int bytes;
+
+ /* Last page (if present) may have trailing bytes not filled */
+ bytes = res % PAGE_SIZE;
+ if (index == file_end && bytes && last_page)
+ memzero_page(last_page, bytes,
+ PAGE_SIZE - bytes);
+
+ for (i = 0; i < nr_pages; i++) {
+ flush_dcache_page(pages[i]);
+ SetPageUptodate(pages[i]);
+ }
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ unlock_page(pages[i]);
+ put_page(pages[i]);
+ }
+ }
+
+ kfree(pages);
+ return;
+
+skip_pages:
+ for (i = 0; i < nr_pages; i++) {
+ unlock_page(pages[i]);
+ put_page(pages[i]);
+ }
+ kfree(pages);
+}
+
+const struct address_space_operations squashfs_aops = {
+ .read_folio = squashfs_read_folio,
+ .readahead = squashfs_readahead
+};
diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c
new file mode 100644
index 0000000000..54c17b7c85
--- /dev/null
+++ b/fs/squashfs/file_cache.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/mutex.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+/* Read separately compressed datablock and memcopy into page cache */
+int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected)
+{
+ struct inode *i = page->mapping->host;
+ struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
+ block, bsize);
+ int res = buffer->error;
+
+ if (res)
+ ERROR("Unable to read page, block %llx, size %x\n", block,
+ bsize);
+ else
+ squashfs_copy_cache(page, buffer, expected, 0);
+
+ squashfs_cache_put(buffer);
+ return res;
+}
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
new file mode 100644
index 0000000000..f1ccad519e
--- /dev/null
+++ b/fs/squashfs/file_direct.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/mutex.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+#include "page_actor.h"
+
+/* Read separately compressed datablock directly into page cache */
+int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
+ int expected)
+
+{
+ struct inode *inode = target_page->mapping->host;
+ struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+
+ int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
+ int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
+ int start_index = target_page->index & ~mask;
+ int end_index = start_index | mask;
+ int i, n, pages, bytes, res = -ENOMEM;
+ struct page **page;
+ struct squashfs_page_actor *actor;
+ void *pageaddr;
+
+ if (end_index > file_end)
+ end_index = file_end;
+
+ pages = end_index - start_index + 1;
+
+ page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
+ if (page == NULL)
+ return res;
+
+ /* Try to grab all the pages covered by the Squashfs block */
+ for (i = 0, n = start_index; n <= end_index; n++) {
+ page[i] = (n == target_page->index) ? target_page :
+ grab_cache_page_nowait(target_page->mapping, n);
+
+ if (page[i] == NULL)
+ continue;
+
+ if (PageUptodate(page[i])) {
+ unlock_page(page[i]);
+ put_page(page[i]);
+ continue;
+ }
+
+ i++;
+ }
+
+ pages = i;
+
+ /*
+ * Create a "page actor" which will kmap and kunmap the
+ * page cache pages appropriately within the decompressor
+ */
+ actor = squashfs_page_actor_init_special(msblk, page, pages, expected);
+ if (actor == NULL)
+ goto out;
+
+ /* Decompress directly into the page cache buffers */
+ res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
+
+ squashfs_page_actor_free(actor);
+
+ if (res < 0)
+ goto mark_errored;
+
+ if (res != expected) {
+ res = -EIO;
+ goto mark_errored;
+ }
+
+ /* Last page (if present) may have trailing bytes not filled */
+ bytes = res % PAGE_SIZE;
+ if (page[pages - 1]->index == end_index && bytes) {
+ pageaddr = kmap_local_page(page[pages - 1]);
+ memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
+ kunmap_local(pageaddr);
+ }
+
+ /* Mark pages as uptodate, unlock and release */
+ for (i = 0; i < pages; i++) {
+ flush_dcache_page(page[i]);
+ SetPageUptodate(page[i]);
+ unlock_page(page[i]);
+ if (page[i] != target_page)
+ put_page(page[i]);
+ }
+
+ kfree(page);
+
+ return 0;
+
+mark_errored:
+ /* Decompression failed, mark pages as errored. Target_page is
+ * dealt with by the caller
+ */
+ for (i = 0; i < pages; i++) {
+ if (page[i] == NULL || page[i] == target_page)
+ continue;
+ flush_dcache_page(page[i]);
+ SetPageError(page[i]);
+ unlock_page(page[i]);
+ put_page(page[i]);
+ }
+
+out:
+ kfree(page);
+ return res;
+}
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
new file mode 100644
index 0000000000..49602b9a42
--- /dev/null
+++ b/fs/squashfs/fragment.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * fragment.c
+ */
+
+/*
+ * This file implements code to handle compressed fragments (tail-end packed
+ * datablocks).
+ *
+ * Regular files contain a fragment index which is mapped to a fragment
+ * location on disk and compressed size using a fragment lookup table.
+ * Like everything in Squashfs this fragment lookup table is itself stored
+ * compressed into metadata blocks. A second index table is used to locate
+ * these. For speed of access (and because it is small) this second index
+ * table is read at mount time and cached in memory.
+ */
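A sketch of the fragment-number to table-location mapping described above. The 16-byte entry size is an assumption about struct squashfs_fragment_entry, and the 8 KiB metadata block size mirrors the usual SQUASHFS_METADATA_SIZE.

	#include <stdio.h>

	#define META_SIZE  8192	/* assumed SQUASHFS_METADATA_SIZE */
	#define ENTRY_SIZE 16	/* assumed sizeof(struct squashfs_fragment_entry) */

	int main(void)
	{
		unsigned int fragment = 1000;
		unsigned int block  = (fragment * ENTRY_SIZE) / META_SIZE;
		unsigned int offset = (fragment * ENTRY_SIZE) % META_SIZE;

		printf("fragment %u -> fragment table block %u, byte offset %u\n",
		       fragment, block, offset);
		return 0;
	}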
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs.h"
+
+/*
+ * Look-up fragment using the fragment index table. Return the on disk
+ * location of the fragment and its compressed size
+ */
+int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
+ u64 *fragment_block)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ int block, offset, size;
+ struct squashfs_fragment_entry fragment_entry;
+ u64 start_block;
+
+ if (fragment >= msblk->fragments)
+ return -EIO;
+ block = SQUASHFS_FRAGMENT_INDEX(fragment);
+ offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
+
+ start_block = le64_to_cpu(msblk->fragment_index[block]);
+
+ size = squashfs_read_metadata(sb, &fragment_entry, &start_block,
+ &offset, sizeof(fragment_entry));
+ if (size < 0)
+ return size;
+
+ *fragment_block = le64_to_cpu(fragment_entry.start_block);
+ return squashfs_block_size(fragment_entry.size);
+}
+
+
+/*
+ * Read the uncompressed fragment lookup table indexes off disk into memory
+ */
+__le64 *squashfs_read_fragment_index_table(struct super_block *sb,
+ u64 fragment_table_start, u64 next_table, unsigned int fragments)
+{
+ unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(fragments);
+ __le64 *table;
+
+ /*
+ * Sanity check: length bytes should not extend into the next table -
+ * this check also traps instances where fragment_table_start is
+ * incorrectly larger than the next table start
+ */
+ if (fragment_table_start + length > next_table)
+ return ERR_PTR(-EINVAL);
+
+ table = squashfs_read_table(sb, fragment_table_start, length);
+
+ /*
+ * table[0] points to the first fragment table metadata block, this
+ * should be less than fragment_table_start
+ */
+ if (!IS_ERR(table) && le64_to_cpu(table[0]) >= fragment_table_start) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return table;
+}
diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
new file mode 100644
index 0000000000..ea53876797
--- /dev/null
+++ b/fs/squashfs/id.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * id.c
+ */
+
+/*
+ * This file implements code to handle uids and gids.
+ *
+ * For space efficiency regular files store uid and gid indexes, which are
+ * converted to 32-bit uids/gids using an id look up table. This table is
+ * stored compressed into metadata blocks. A second index table is used to
+ * locate these. For speed of access (and because it is small) this second
+ * index table is read at mount time and cached in memory.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs.h"
+
+/*
+ * Map uid/gid index into real 32-bit uid/gid using the id look up table
+ */
+int squashfs_get_id(struct super_block *sb, unsigned int index,
+ unsigned int *id)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ int block = SQUASHFS_ID_BLOCK(index);
+ int offset = SQUASHFS_ID_BLOCK_OFFSET(index);
+ u64 start_block;
+ __le32 disk_id;
+ int err;
+
+ if (index >= msblk->ids)
+ return -EINVAL;
+
+ start_block = le64_to_cpu(msblk->id_table[block]);
+
+ err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset,
+ sizeof(disk_id));
+ if (err < 0)
+ return err;
+
+ *id = le32_to_cpu(disk_id);
+ return 0;
+}
+
+
+/*
+ * Read uncompressed id lookup table indexes from disk into memory
+ */
+__le64 *squashfs_read_id_index_table(struct super_block *sb,
+ u64 id_table_start, u64 next_table, unsigned short no_ids)
+{
+ unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
+ unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids);
+ int n;
+ __le64 *table;
+ u64 start, end;
+
+ TRACE("In read_id_index_table, length %d\n", length);
+
+ /* Sanity check values */
+
+ /* there should always be at least one id */
+ if (no_ids == 0)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * The computed size of the index table (length bytes) should exactly
+ * match the table start and end points
+ */
+ if (length != (next_table - id_table_start))
+ return ERR_PTR(-EINVAL);
+
+ table = squashfs_read_table(sb, id_table_start, length);
+ if (IS_ERR(table))
+ return table;
+
+ /*
+ * table[0], table[1], ... table[indexes - 1] store the locations
+ * of the compressed id blocks. Each entry should be less than
+ * the next (i.e. table[0] < table[1]), and the difference between them
+ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
+ * should be less than id_table_start, and again the difference
+ * should be SQUASHFS_METADATA_SIZE or less
+ */
+ for (n = 0; n < (indexes - 1); n++) {
+ start = le64_to_cpu(table[n]);
+ end = le64_to_cpu(table[n + 1]);
+
+ if (start >= end || (end - start) >
+ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ start = le64_to_cpu(table[indexes - 1]);
+ if (start >= id_table_start || (id_table_start - start) >
+ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return table;
+}
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
new file mode 100644
index 0000000000..c6e626b005
--- /dev/null
+++ b/fs/squashfs/inode.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * inode.c
+ */
+
+/*
+ * This file implements code to create and read inodes from disk.
+ *
+ * Inodes in Squashfs are identified by a 48-bit inode which encodes the
+ * location of the compressed metadata block containing the inode, and the byte
+ * offset into that block where the inode is placed (<block, offset>).
+ *
+ * To maximise compression there are different inodes for each file type
+ * (regular file, directory, device, etc.), the inode contents and length
+ * varying with the type.
+ *
+ * To further maximise compression, two types of regular file inode and
+ * directory inode are defined: inodes optimised for frequently occurring
+ * regular files and directories, and extended types where extra
+ * information has to be stored.
+ */
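The <block, offset> packing is simple enough to show in isolation. The sketch below mirrors the 48-bit encoding described above, with the 16-bit byte offset in the low bits; the exact macros live in squashfs_fs.h.

	#include <stdint.h>
	#include <stdio.h>

	static int64_t mkinode(uint32_t block, uint16_t offset)
	{
		return ((int64_t)block << 16) | offset;	/* <block, offset> */
	}

	int main(void)
	{
		int64_t ino = mkinode(0x12345, 0x678);

		printf("ino 0x%llx -> block 0x%llx, offset 0x%llx\n",
		       (unsigned long long)ino,
		       (unsigned long long)(ino >> 16),
		       (unsigned long long)(ino & 0xffff));
		return 0;
	}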
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/xattr.h>
+#include <linux/pagemap.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+#include "xattr.h"
+
+/*
+ * Initialise VFS inode with the base inode information common to all
+ * Squashfs inode types. Sqsh_ino contains the unswapped base inode
+ * off disk.
+ */
+static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
+ struct squashfs_base_inode *sqsh_ino)
+{
+ uid_t i_uid;
+ gid_t i_gid;
+ int err;
+
+ err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &i_uid);
+ if (err)
+ return err;
+
+ err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->guid), &i_gid);
+ if (err)
+ return err;
+
+ i_uid_write(inode, i_uid);
+ i_gid_write(inode, i_gid);
+ inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
+ inode->i_mtime.tv_sec = le32_to_cpu(sqsh_ino->mtime);
+ inode->i_atime.tv_sec = inode->i_mtime.tv_sec;
+ inode_set_ctime(inode, inode->i_mtime.tv_sec, 0);
+ inode->i_mode = le16_to_cpu(sqsh_ino->mode);
+ inode->i_size = 0;
+
+ return err;
+}
+
+
+struct inode *squashfs_iget(struct super_block *sb, long long ino,
+ unsigned int ino_number)
+{
+ struct inode *inode = iget_locked(sb, ino_number);
+ int err;
+
+ TRACE("Entered squashfs_iget\n");
+
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ err = squashfs_read_inode(inode, ino);
+ if (err) {
+ iget_failed(inode);
+ return ERR_PTR(err);
+ }
+
+ unlock_new_inode(inode);
+ return inode;
+}
+
+
+/*
+ * Initialise VFS inode by reading inode from inode table (compressed
+ * metadata). The format and amount of data read depends on type.
+ */
+int squashfs_read_inode(struct inode *inode, long long ino)
+{
+ struct super_block *sb = inode->i_sb;
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ u64 block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table;
+ int err, type, offset = SQUASHFS_INODE_OFFSET(ino);
+ union squashfs_inode squashfs_ino;
+ struct squashfs_base_inode *sqshb_ino = &squashfs_ino.base;
+ int xattr_id = SQUASHFS_INVALID_XATTR;
+
+ TRACE("Entered squashfs_read_inode\n");
+
+ /*
+ * Read inode base common to all inode types.
+ */
+ err = squashfs_read_metadata(sb, sqshb_ino, &block,
+ &offset, sizeof(*sqshb_ino));
+ if (err < 0)
+ goto failed_read;
+
+ err = squashfs_new_inode(sb, inode, sqshb_ino);
+ if (err)
+ goto failed_read;
+
+ block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table;
+ offset = SQUASHFS_INODE_OFFSET(ino);
+
+ type = le16_to_cpu(sqshb_ino->inode_type);
+ switch (type) {
+ case SQUASHFS_REG_TYPE: {
+ unsigned int frag_offset, frag;
+ int frag_size;
+ u64 frag_blk;
+ struct squashfs_reg_inode *sqsh_ino = &squashfs_ino.reg;
+
+ err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+ sizeof(*sqsh_ino));
+ if (err < 0)
+ goto failed_read;
+
+ frag = le32_to_cpu(sqsh_ino->fragment);
+ if (frag != SQUASHFS_INVALID_FRAG) {
+ frag_offset = le32_to_cpu(sqsh_ino->offset);
+ frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
+ if (frag_size < 0) {
+ err = frag_size;
+ goto failed_read;
+ }
+ } else {
+ frag_blk = SQUASHFS_INVALID_BLK;
+ frag_size = 0;
+ frag_offset = 0;
+ }
+
+ set_nlink(inode, 1);
+ inode->i_size = le32_to_cpu(sqsh_ino->file_size);
+ inode->i_fop = &generic_ro_fops;
+ inode->i_mode |= S_IFREG;
+ inode->i_blocks = ((inode->i_size - 1) >> 9) + 1;
+ squashfs_i(inode)->fragment_block = frag_blk;
+ squashfs_i(inode)->fragment_size = frag_size;
+ squashfs_i(inode)->fragment_offset = frag_offset;
+ squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
+ squashfs_i(inode)->block_list_start = block;
+ squashfs_i(inode)->offset = offset;
+ inode->i_data.a_ops = &squashfs_aops;
+
+ TRACE("File inode %x:%x, start_block %llx, block_list_start "
+ "%llx, offset %x\n", SQUASHFS_INODE_BLK(ino),
+ offset, squashfs_i(inode)->start, block, offset);
+ break;
+ }
+ case SQUASHFS_LREG_TYPE: {
+ unsigned int frag_offset, frag;
+ int frag_size;
+ u64 frag_blk;
+ struct squashfs_lreg_inode *sqsh_ino = &squashfs_ino.lreg;
+
+ err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+ sizeof(*sqsh_ino));
+ if (err < 0)
+ goto failed_read;
+
+ frag = le32_to_cpu(sqsh_ino->fragment);
+ if (frag != SQUASHFS_INVALID_FRAG) {
+ frag_offset = le32_to_cpu(sqsh_ino->offset);
+ frag_size = squashfs_frag_lookup(sb, frag, &frag_blk);
+ if (frag_size < 0) {
+ err = frag_size;
+ goto failed_read;
+ }
+ } else {
+ frag_blk = SQUASHFS_INVALID_BLK;
+ frag_size = 0;
+ frag_offset = 0;
+ }
+
+ xattr_id = le32_to_cpu(sqsh_ino->xattr);
+ set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ inode->i_size = le64_to_cpu(sqsh_ino->file_size);
+ inode->i_op = &squashfs_inode_ops;
+ inode->i_fop = &generic_ro_fops;
+ inode->i_mode |= S_IFREG;
+ inode->i_blocks = (inode->i_size -
+ le64_to_cpu(sqsh_ino->sparse) + 511) >> 9;
+
+ squashfs_i(inode)->fragment_block = frag_blk;
+ squashfs_i(inode)->fragment_size = frag_size;
+ squashfs_i(inode)->fragment_offset = frag_offset;
+ squashfs_i(inode)->start = le64_to_cpu(sqsh_ino->start_block);
+ squashfs_i(inode)->block_list_start = block;
+ squashfs_i(inode)->offset = offset;
+ inode->i_data.a_ops = &squashfs_aops;
+
+ TRACE("File inode %x:%x, start_block %llx, block_list_start "
+ "%llx, offset %x\n", SQUASHFS_INODE_BLK(ino),
+ offset, squashfs_i(inode)->start, block, offset);
+ break;
+ }
+ case SQUASHFS_DIR_TYPE: {
+ struct squashfs_dir_inode *sqsh_ino = &squashfs_ino.dir;
+
+ err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+ sizeof(*sqsh_ino));
+ if (err < 0)
+ goto failed_read;
+
+ set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ inode->i_size = le16_to_cpu(sqsh_ino->file_size);
+ inode->i_op = &squashfs_dir_inode_ops;
+ inode->i_fop = &squashfs_dir_ops;
+ inode->i_mode |= S_IFDIR;
+ squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
+ squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset);
+ squashfs_i(inode)->dir_idx_cnt = 0;
+ squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode);
+
+ TRACE("Directory inode %x:%x, start_block %llx, offset %x\n",
+ SQUASHFS_INODE_BLK(ino), offset,
+ squashfs_i(inode)->start,
+ le16_to_cpu(sqsh_ino->offset));
+ break;
+ }
+ case SQUASHFS_LDIR_TYPE: {
+ struct squashfs_ldir_inode *sqsh_ino = &squashfs_ino.ldir;
+
+ err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+ sizeof(*sqsh_ino));
+ if (err < 0)
+ goto failed_read;
+
+ xattr_id = le32_to_cpu(sqsh_ino->xattr);
+ set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ inode->i_size = le32_to_cpu(sqsh_ino->file_size);
+ inode->i_op = &squashfs_dir_inode_ops;
+ inode->i_fop = &squashfs_dir_ops;
+ inode->i_mode |= S_IFDIR;
+ squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block);
+ squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset);
+ squashfs_i(inode)->dir_idx_start = block;
+ squashfs_i(inode)->dir_idx_offset = offset;
+ squashfs_i(inode)->dir_idx_cnt = le16_to_cpu(sqsh_ino->i_count);
+ squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode);
+
+ TRACE("Long directory inode %x:%x, start_block %llx, offset "
+ "%x\n", SQUASHFS_INODE_BLK(ino), offset,
+ squashfs_i(inode)->start,
+ le16_to_cpu(sqsh_ino->offset));
+ break;
+ }
+ case SQUASHFS_SYMLINK_TYPE:
+ case SQUASHFS_LSYMLINK_TYPE: {
+ struct squashfs_symlink_inode *sqsh_ino = &squashfs_ino.symlink;
+
+ err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+ sizeof(*sqsh_ino));
+ if (err < 0)
+ goto failed_read;
+
+ set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
+ inode->i_op = &squashfs_symlink_inode_ops;
+ inode_nohighmem(inode);
+ inode->i_data.a_ops = &squashfs_symlink_aops;
+ inode->i_mode |= S_IFLNK;
+ squashfs_i(inode)->start = block;
+ squashfs_i(inode)->offset = offset;
+
+ if (type == SQUASHFS_LSYMLINK_TYPE) {
+ __le32 xattr;
+
+ err = squashfs_read_metadata(sb, NULL, &block,
+ &offset, inode->i_size);
+ if (err < 0)
+ goto failed_read;
+ err = squashfs_read_metadata(sb, &xattr, &block,
+ &offset, sizeof(xattr));
+ if (err < 0)
+ goto failed_read;
+ xattr_id = le32_to_cpu(xattr);
+ }
+
+ TRACE("Symbolic link inode %x:%x, start_block %llx, offset "
+ "%x\n", SQUASHFS_INODE_BLK(ino), offset,
+ block, offset);
+ break;
+ }
+ case SQUASHFS_BLKDEV_TYPE:
+ case SQUASHFS_CHRDEV_TYPE: {
+ struct squashfs_dev_inode *sqsh_ino = &squashfs_ino.dev;
+ unsigned int rdev;
+
+ err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+ sizeof(*sqsh_ino));
+ if (err < 0)
+ goto failed_read;
+
+ if (type == SQUASHFS_CHRDEV_TYPE)
+ inode->i_mode |= S_IFCHR;
+ else
+ inode->i_mode |= S_IFBLK;
+ set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ rdev = le32_to_cpu(sqsh_ino->rdev);
+ init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
+
+ TRACE("Device inode %x:%x, rdev %x\n",
+ SQUASHFS_INODE_BLK(ino), offset, rdev);
+ break;
+ }
+ case SQUASHFS_LBLKDEV_TYPE:
+ case SQUASHFS_LCHRDEV_TYPE: {
+ struct squashfs_ldev_inode *sqsh_ino = &squashfs_ino.ldev;
+ unsigned int rdev;
+
+ err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+ sizeof(*sqsh_ino));
+ if (err < 0)
+ goto failed_read;
+
+ if (type == SQUASHFS_LCHRDEV_TYPE)
+ inode->i_mode |= S_IFCHR;
+ else
+ inode->i_mode |= S_IFBLK;
+ xattr_id = le32_to_cpu(sqsh_ino->xattr);
+ inode->i_op = &squashfs_inode_ops;
+ set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ rdev = le32_to_cpu(sqsh_ino->rdev);
+ init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
+
+ TRACE("Device inode %x:%x, rdev %x\n",
+ SQUASHFS_INODE_BLK(ino), offset, rdev);
+ break;
+ }
+ case SQUASHFS_FIFO_TYPE:
+ case SQUASHFS_SOCKET_TYPE: {
+ struct squashfs_ipc_inode *sqsh_ino = &squashfs_ino.ipc;
+
+ err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+ sizeof(*sqsh_ino));
+ if (err < 0)
+ goto failed_read;
+
+ if (type == SQUASHFS_FIFO_TYPE)
+ inode->i_mode |= S_IFIFO;
+ else
+ inode->i_mode |= S_IFSOCK;
+ set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ init_special_inode(inode, inode->i_mode, 0);
+ break;
+ }
+ case SQUASHFS_LFIFO_TYPE:
+ case SQUASHFS_LSOCKET_TYPE: {
+ struct squashfs_lipc_inode *sqsh_ino = &squashfs_ino.lipc;
+
+ err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
+ sizeof(*sqsh_ino));
+ if (err < 0)
+ goto failed_read;
+
+ if (type == SQUASHFS_LFIFO_TYPE)
+ inode->i_mode |= S_IFIFO;
+ else
+ inode->i_mode |= S_IFSOCK;
+ xattr_id = le32_to_cpu(sqsh_ino->xattr);
+ inode->i_op = &squashfs_inode_ops;
+ set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ init_special_inode(inode, inode->i_mode, 0);
+ break;
+ }
+ default:
+ ERROR("Unknown inode type %d in squashfs_iget!\n", type);
+ return -EINVAL;
+ }
+
+ if (xattr_id != SQUASHFS_INVALID_XATTR && msblk->xattr_id_table) {
+ err = squashfs_xattr_lookup(sb, xattr_id,
+ &squashfs_i(inode)->xattr_count,
+ &squashfs_i(inode)->xattr_size,
+ &squashfs_i(inode)->xattr);
+ if (err < 0)
+ goto failed_read;
+ inode->i_blocks += ((squashfs_i(inode)->xattr_size - 1) >> 9)
+ + 1;
+ } else
+ squashfs_i(inode)->xattr_count = 0;
+
+ return 0;
+
+failed_read:
+ ERROR("Unable to read inode 0x%llx\n", ino);
+ return err;
+}
+
+
+const struct inode_operations squashfs_inode_ops = {
+ .listxattr = squashfs_listxattr
+};
+
diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c
new file mode 100644
index 0000000000..49797729f1
--- /dev/null
+++ b/fs/squashfs/lz4_wrapper.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013, 2014
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ */
+
+#include <linux/bio.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/lz4.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs.h"
+#include "decompressor.h"
+#include "page_actor.h"
+
+#define LZ4_LEGACY 1
+
+struct lz4_comp_opts {
+ __le32 version;
+ __le32 flags;
+};
+
+struct squashfs_lz4 {
+ void *input;
+ void *output;
+};
+
+
+static void *lz4_comp_opts(struct squashfs_sb_info *msblk,
+ void *buff, int len)
+{
+ struct lz4_comp_opts *comp_opts = buff;
+
+ /* LZ4 compressed filesystems always have compression options */
+ if (comp_opts == NULL || len < sizeof(*comp_opts))
+ return ERR_PTR(-EIO);
+
+ if (le32_to_cpu(comp_opts->version) != LZ4_LEGACY) {
+ /* LZ4 format currently used by the kernel is the 'legacy'
+ * format */
+ ERROR("Unknown LZ4 version\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return NULL;
+}
+
+
+static void *lz4_init(struct squashfs_sb_info *msblk, void *buff)
+{
+ int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
+ struct squashfs_lz4 *stream;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (stream == NULL)
+ goto failed;
+ stream->input = vmalloc(block_size);
+ if (stream->input == NULL)
+ goto failed2;
+ stream->output = vmalloc(block_size);
+ if (stream->output == NULL)
+ goto failed3;
+
+ return stream;
+
+failed3:
+ vfree(stream->input);
+failed2:
+ kfree(stream);
+failed:
+ ERROR("Failed to initialise LZ4 decompressor\n");
+ return ERR_PTR(-ENOMEM);
+}
+
+
+static void lz4_free(void *strm)
+{
+ struct squashfs_lz4 *stream = strm;
+
+ if (stream) {
+ vfree(stream->input);
+ vfree(stream->output);
+ }
+ kfree(stream);
+}
+
+
+static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
+ struct bio *bio, int offset, int length,
+ struct squashfs_page_actor *output)
+{
+ struct bvec_iter_all iter_all = {};
+ struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
+ struct squashfs_lz4 *stream = strm;
+ void *buff = stream->input, *data;
+ int bytes = length, res;
+
+ while (bio_next_segment(bio, &iter_all)) {
+ int avail = min(bytes, ((int)bvec->bv_len) - offset);
+
+ data = bvec_virt(bvec);
+ memcpy(buff, data + offset, avail);
+ buff += avail;
+ bytes -= avail;
+ offset = 0;
+ }
+
+ res = LZ4_decompress_safe(stream->input, stream->output,
+ length, output->length);
+
+ if (res < 0)
+ return -EIO;
+
+ bytes = res;
+ data = squashfs_first_page(output);
+ buff = stream->output;
+ while (data) {
+ if (bytes <= PAGE_SIZE) {
+ if (!IS_ERR(data))
+ memcpy(data, buff, bytes);
+ break;
+ }
+ if (!IS_ERR(data))
+ memcpy(data, buff, PAGE_SIZE);
+ buff += PAGE_SIZE;
+ bytes -= PAGE_SIZE;
+ data = squashfs_next_page(output);
+ }
+ squashfs_finish_page(output);
+
+ return res;
+}
+
+const struct squashfs_decompressor squashfs_lz4_comp_ops = {
+ .init = lz4_init,
+ .comp_opts = lz4_comp_opts,
+ .free = lz4_free,
+ .decompress = lz4_uncompress,
+ .id = LZ4_COMPRESSION,
+ .name = "lz4",
+ .alloc_buffer = 0,
+ .supported = 1
+};
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
new file mode 100644
index 0000000000..d216aeefa8
--- /dev/null
+++ b/fs/squashfs/lzo_wrapper.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2010 LG Electronics
+ * Chan Jeong <chan.jeong@lge.com>
+ *
+ * lzo_wrapper.c
+ */
+
+#include <linux/mutex.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/lzo.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs.h"
+#include "decompressor.h"
+#include "page_actor.h"
+
+struct squashfs_lzo {
+ void *input;
+ void *output;
+};
+
+static void *lzo_init(struct squashfs_sb_info *msblk, void *buff)
+{
+ int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
+
+ struct squashfs_lzo *stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (stream == NULL)
+ goto failed;
+ stream->input = vmalloc(block_size);
+ if (stream->input == NULL)
+ goto failed;
+ stream->output = vmalloc(block_size);
+ if (stream->output == NULL)
+ goto failed2;
+
+ return stream;
+
+failed2:
+ vfree(stream->input);
+failed:
+ ERROR("Failed to allocate lzo workspace\n");
+ kfree(stream);
+ return ERR_PTR(-ENOMEM);
+}
+
+
+static void lzo_free(void *strm)
+{
+ struct squashfs_lzo *stream = strm;
+
+ if (stream) {
+ vfree(stream->input);
+ vfree(stream->output);
+ }
+ kfree(stream);
+}
+
+
+static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
+ struct bio *bio, int offset, int length,
+ struct squashfs_page_actor *output)
+{
+ struct bvec_iter_all iter_all = {};
+ struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
+ struct squashfs_lzo *stream = strm;
+ void *buff = stream->input, *data;
+ int bytes = length, res;
+ size_t out_len = output->length;
+
+ while (bio_next_segment(bio, &iter_all)) {
+ int avail = min(bytes, ((int)bvec->bv_len) - offset);
+
+ data = bvec_virt(bvec);
+ memcpy(buff, data + offset, avail);
+ buff += avail;
+ bytes -= avail;
+ offset = 0;
+ }
+
+ res = lzo1x_decompress_safe(stream->input, (size_t)length,
+ stream->output, &out_len);
+ if (res != LZO_E_OK)
+ goto failed;
+
+ res = bytes = (int)out_len;
+ data = squashfs_first_page(output);
+ buff = stream->output;
+ while (data) {
+ if (bytes <= PAGE_SIZE) {
+ if (!IS_ERR(data))
+ memcpy(data, buff, bytes);
+ break;
+ } else {
+ if (!IS_ERR(data))
+ memcpy(data, buff, PAGE_SIZE);
+ buff += PAGE_SIZE;
+ bytes -= PAGE_SIZE;
+ data = squashfs_next_page(output);
+ }
+ }
+ squashfs_finish_page(output);
+
+ return res;
+
+failed:
+ return -EIO;
+}
+
+const struct squashfs_decompressor squashfs_lzo_comp_ops = {
+ .init = lzo_init,
+ .free = lzo_free,
+ .decompress = lzo_uncompress,
+ .id = LZO_COMPRESSION,
+ .name = "lzo",
+ .alloc_buffer = 0,
+ .supported = 1
+};
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
new file mode 100644
index 0000000000..11e4539b9e
--- /dev/null
+++ b/fs/squashfs/namei.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * namei.c
+ */
+
+/*
+ * This file implements code to do filename lookup in directories.
+ *
+ * Like inodes, directories are packed into compressed metadata blocks, stored
+ * in a directory table. Directories are accessed using the start address of
+ * the metablock containing the directory and the offset into the
+ * decompressed block (<block, offset>).
+ *
+ * Directories are organised in a slightly complex way, and are not simply
+ * a list of file names. The organisation takes advantage of the
+ * fact that (in most cases) the inodes of the files will be in the same
+ * compressed metadata block and so can share the start block.
+ * Directories are therefore organised as a two-level list: a directory
+ * header containing the shared start block value, and a sequence of directory
+ * entries, each of which shares that start block. A new directory header
+ * is written once/if the inode start block changes. The directory
+ * header/directory entry list is repeated as many times as necessary.
+ *
+ * Directories are sorted, and can contain a directory index to speed up
+ * file lookup. Directory indexes store one entry per metablock, each entry
+ * storing the index/filename mapping to the first directory header
+ * in each metadata block. Directories are sorted in alphabetical order,
+ * and at lookup the index is scanned linearly looking for the first filename
+ * alphabetically larger than the filename being looked up. At this point the
+ * location of the metadata block the filename is in has been found.
+ * The general idea of the index is to ensure only one metadata block needs to
+ * decompressed to do a lookup irrespective of the length of the directory.
+ * This scheme has the advantage that it doesn't require extra memory overhead
+ * and doesn't require much extra storage on disk.
+ */
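The index scan can be pictured with a toy table: walk the entries in order and stop at the first name sorting after the target, keeping the previous entry's block as the place to start reading the directory proper. The names and block positions below are invented.

	#include <stdio.h>
	#include <string.h>

	struct dir_index { const char *name; unsigned int start_block; };

	int main(void)
	{
		const struct dir_index index[] = {
			{ "apple", 0 }, { "mango", 8192 }, { "zebra", 16384 },
		};
		const char *target = "pear";
		unsigned int start = 0;

		for (size_t i = 0; i < sizeof(index) / sizeof(index[0]); i++) {
			if (strcmp(index[i].name, target) > 0)
				break;		/* first name sorting after target */
			start = index[i].start_block;
		}

		printf("lookup of \"%s\" starts in the directory block at %u\n",
		       target, start);
		return 0;
	}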
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/dcache.h>
+#include <linux/xattr.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+#include "xattr.h"
+
+/*
+ * Lookup name in the directory index, returning the location of the metadata
+ * block containing it, and the directory index this represents.
+ *
+ * If we get an error reading the index then return the part of the index
+ * (if any) we have managed to read - the index isn't essential, just
+ * quicker.
+ */
+static int get_dir_index_using_name(struct super_block *sb,
+ u64 *next_block, int *next_offset, u64 index_start,
+ int index_offset, int i_count, const char *name,
+ int len)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ int i, length = 0, err;
+ unsigned int size;
+ struct squashfs_dir_index *index;
+ char *str;
+
+ TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
+
+ index = kmalloc(sizeof(*index) + SQUASHFS_NAME_LEN * 2 + 2, GFP_KERNEL);
+ if (index == NULL) {
+ ERROR("Failed to allocate squashfs_dir_index\n");
+ goto out;
+ }
+
+ str = &index->name[SQUASHFS_NAME_LEN + 1];
+ strncpy(str, name, len);
+ str[len] = '\0';
+
+ for (i = 0; i < i_count; i++) {
+ err = squashfs_read_metadata(sb, index, &index_start,
+ &index_offset, sizeof(*index));
+ if (err < 0)
+ break;
+
+
+ size = le32_to_cpu(index->size) + 1;
+ if (size > SQUASHFS_NAME_LEN)
+ break;
+
+ err = squashfs_read_metadata(sb, index->name, &index_start,
+ &index_offset, size);
+ if (err < 0)
+ break;
+
+ index->name[size] = '\0';
+
+ if (strcmp(index->name, str) > 0)
+ break;
+
+ length = le32_to_cpu(index->index);
+ *next_block = le32_to_cpu(index->start_block) +
+ msblk->directory_table;
+ }
+
+ *next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
+ kfree(index);
+
+out:
+ /*
+ * Return index (f_pos) of the looked up metadata block. Translate
+ * from internal f_pos to external f_pos which is offset by 3 because
+ * we invent "." and ".." entries which are not actually stored in the
+ * directory.
+ */
+ return length + 3;
+}
+
+
+static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+{
+ const unsigned char *name = dentry->d_name.name;
+ int len = dentry->d_name.len;
+ struct inode *inode = NULL;
+ struct squashfs_sb_info *msblk = dir->i_sb->s_fs_info;
+ struct squashfs_dir_header dirh;
+ struct squashfs_dir_entry *dire;
+ u64 block = squashfs_i(dir)->start + msblk->directory_table;
+ int offset = squashfs_i(dir)->offset;
+ int err, length;
+ unsigned int dir_count, size;
+
+ TRACE("Entered squashfs_lookup [%llx:%x]\n", block, offset);
+
+ dire = kmalloc(sizeof(*dire) + SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
+ if (dire == NULL) {
+ ERROR("Failed to allocate squashfs_dir_entry\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (len > SQUASHFS_NAME_LEN) {
+ err = -ENAMETOOLONG;
+ goto failed;
+ }
+
+ length = get_dir_index_using_name(dir->i_sb, &block, &offset,
+ squashfs_i(dir)->dir_idx_start,
+ squashfs_i(dir)->dir_idx_offset,
+ squashfs_i(dir)->dir_idx_cnt, name, len);
+
+ while (length < i_size_read(dir)) {
+ /*
+ * Read directory header.
+ */
+ err = squashfs_read_metadata(dir->i_sb, &dirh, &block,
+ &offset, sizeof(dirh));
+ if (err < 0)
+ goto read_failure;
+
+ length += sizeof(dirh);
+
+ dir_count = le32_to_cpu(dirh.count) + 1;
+
+ if (dir_count > SQUASHFS_DIR_COUNT)
+ goto data_error;
+
+ while (dir_count--) {
+ /*
+ * Read directory entry.
+ */
+ err = squashfs_read_metadata(dir->i_sb, dire, &block,
+ &offset, sizeof(*dire));
+ if (err < 0)
+ goto read_failure;
+
+ size = le16_to_cpu(dire->size) + 1;
+
+ /* size should never be larger than SQUASHFS_NAME_LEN */
+ if (size > SQUASHFS_NAME_LEN)
+ goto data_error;
+
+ err = squashfs_read_metadata(dir->i_sb, dire->name,
+ &block, &offset, size);
+ if (err < 0)
+ goto read_failure;
+
+ length += sizeof(*dire) + size;
+
+ if (name[0] < dire->name[0])
+ goto exit_lookup;
+
+ if (len == size && !strncmp(name, dire->name, len)) {
+ unsigned int blk, off, ino_num;
+ long long ino;
+ blk = le32_to_cpu(dirh.start_block);
+ off = le16_to_cpu(dire->offset);
+ ino_num = le32_to_cpu(dirh.inode_number) +
+ (short) le16_to_cpu(dire->inode_number);
+ ino = SQUASHFS_MKINODE(blk, off);
+
+ TRACE("calling squashfs_iget for directory "
+ "entry %s, inode %x:%x, %d\n", name,
+ blk, off, ino_num);
+
+ inode = squashfs_iget(dir->i_sb, ino, ino_num);
+ goto exit_lookup;
+ }
+ }
+ }
+
+exit_lookup:
+ kfree(dire);
+ return d_splice_alias(inode, dentry);
+
+data_error:
+ err = -EIO;
+
+read_failure:
+ ERROR("Unable to read directory block [%llx:%x]\n",
+ squashfs_i(dir)->start + msblk->directory_table,
+ squashfs_i(dir)->offset);
+failed:
+ kfree(dire);
+ return ERR_PTR(err);
+}
+
+
+const struct inode_operations squashfs_dir_inode_ops = {
+ .lookup = squashfs_lookup,
+ .listxattr = squashfs_listxattr
+};
diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c
new file mode 100644
index 0000000000..81af6c4ca1
--- /dev/null
+++ b/fs/squashfs/page_actor.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include "squashfs_fs_sb.h"
+#include "decompressor.h"
+#include "page_actor.h"
+
+/*
+ * This file contains implementations of page_actor for decompressing into
+ * an intermediate buffer, and for decompressing directly into the
+ * page cache.
+ *
+ * Calling code should avoid sleeping between calls to squashfs_first_page()
+ * and squashfs_finish_page().
+ */
+
+/* Implementation of page_actor for decompressing into intermediate buffer */
+static void *cache_first_page(struct squashfs_page_actor *actor)
+{
+ actor->next_page = 1;
+ return actor->buffer[0];
+}
+
+static void *cache_next_page(struct squashfs_page_actor *actor)
+{
+ if (actor->next_page == actor->pages)
+ return NULL;
+
+ return actor->buffer[actor->next_page++];
+}
+
+static void cache_finish_page(struct squashfs_page_actor *actor)
+{
+ /* empty */
+}
+
+struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
+ int pages, int length)
+{
+ struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
+
+ if (actor == NULL)
+ return NULL;
+
+ actor->length = length ? : pages * PAGE_SIZE;
+ actor->buffer = buffer;
+ actor->pages = pages;
+ actor->next_page = 0;
+ actor->tmp_buffer = NULL;
+ actor->squashfs_first_page = cache_first_page;
+ actor->squashfs_next_page = cache_next_page;
+ actor->squashfs_finish_page = cache_finish_page;
+ return actor;
+}
+
+/* Implementation of page_actor for decompressing directly into page cache. */
+static void *handle_next_page(struct squashfs_page_actor *actor)
+{
+ int max_pages = (actor->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ if (actor->returned_pages == max_pages)
+ return NULL;
+
+ if ((actor->next_page == actor->pages) ||
+ (actor->next_index != actor->page[actor->next_page]->index)) {
+ actor->next_index++;
+ actor->returned_pages++;
+ actor->last_page = NULL;
+ return actor->alloc_buffer ? actor->tmp_buffer : ERR_PTR(-ENOMEM);
+ }
+
+ actor->next_index++;
+ actor->returned_pages++;
+ actor->last_page = actor->page[actor->next_page];
+ return actor->pageaddr = kmap_local_page(actor->page[actor->next_page++]);
+}
+
+static void *direct_first_page(struct squashfs_page_actor *actor)
+{
+ return handle_next_page(actor);
+}
+
+static void *direct_next_page(struct squashfs_page_actor *actor)
+{
+ if (actor->pageaddr) {
+ kunmap_local(actor->pageaddr);
+ actor->pageaddr = NULL;
+ }
+
+ return handle_next_page(actor);
+}
+
+static void direct_finish_page(struct squashfs_page_actor *actor)
+{
+ if (actor->pageaddr)
+ kunmap_local(actor->pageaddr);
+}
+
+struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk,
+ struct page **page, int pages, int length)
+{
+ struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
+
+ if (actor == NULL)
+ return NULL;
+
+ if (msblk->decompressor->alloc_buffer) {
+ actor->tmp_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+ if (actor->tmp_buffer == NULL) {
+ kfree(actor);
+ return NULL;
+ }
+ } else
+ actor->tmp_buffer = NULL;
+
+ actor->length = length ? : pages * PAGE_SIZE;
+ actor->page = page;
+ actor->pages = pages;
+ actor->next_page = 0;
+ actor->returned_pages = 0;
+ actor->next_index = page[0]->index & ~((1 << (msblk->block_log - PAGE_SHIFT)) - 1);
+ actor->pageaddr = NULL;
+ actor->last_page = NULL;
+ actor->alloc_buffer = msblk->decompressor->alloc_buffer;
+ actor->squashfs_first_page = direct_first_page;
+ actor->squashfs_next_page = direct_next_page;
+ actor->squashfs_finish_page = direct_finish_page;
+ return actor;
+}
diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h
new file mode 100644
index 0000000000..97d4983559
--- /dev/null
+++ b/fs/squashfs/page_actor.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef PAGE_ACTOR_H
+#define PAGE_ACTOR_H
+/*
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ */
+
+struct squashfs_page_actor {
+ union {
+ void **buffer;
+ struct page **page;
+ };
+ void *pageaddr;
+ void *tmp_buffer;
+ void *(*squashfs_first_page)(struct squashfs_page_actor *);
+ void *(*squashfs_next_page)(struct squashfs_page_actor *);
+ void (*squashfs_finish_page)(struct squashfs_page_actor *);
+ struct page *last_page;
+ int pages;
+ int length;
+ int next_page;
+ int alloc_buffer;
+ int returned_pages;
+ pgoff_t next_index;
+};
+
+extern struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
+ int pages, int length);
+extern struct squashfs_page_actor *squashfs_page_actor_init_special(
+ struct squashfs_sb_info *msblk,
+ struct page **page, int pages, int length);
+static inline struct page *squashfs_page_actor_free(struct squashfs_page_actor *actor)
+{
+ struct page *last_page = actor->last_page;
+
+ kfree(actor->tmp_buffer);
+ kfree(actor);
+ return last_page;
+}
+static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
+{
+ return actor->squashfs_first_page(actor);
+}
+static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
+{
+ return actor->squashfs_next_page(actor);
+}
+static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
+{
+ actor->squashfs_finish_page(actor);
+}
+static inline void squashfs_actor_nobuff(struct squashfs_page_actor *actor)
+{
+ actor->alloc_buffer = 0;
+}
+#endif
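
The three inline wrappers above are the only interface a decompressor wrapper sees; the xz wrapper later in this series drives them in exactly this first/next/finish order. Below is a minimal consumption-loop sketch (illustrative only, error handling trimmed, and assuming the usual squashfs wrapper includes; decompress_chunk() is a hypothetical stand-in for the real per-page decompression step):

/* Sketch: walk a page actor one output page at a time. */
static int fill_output(struct squashfs_page_actor *output, int length)
{
	void *out = squashfs_first_page(output);
	int bytes = 0;

	while (!IS_ERR(out) && out != NULL && bytes < length) {
		int n = min_t(int, length - bytes, PAGE_SIZE);

		decompress_chunk(out, n);	/* hypothetical: fill up to n bytes */
		bytes += n;
		out = squashfs_next_page(output);
	}

	squashfs_finish_page(output);	/* always paired with squashfs_first_page() */
	return IS_ERR(out) ? PTR_ERR(out) : bytes;
}

The comment at the top of page_actor.c applies to any such loop: nothing between the first_page and finish_page calls may sleep, because the direct actor keeps a kmap_local mapping of the current page cache page across them.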
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
new file mode 100644
index 0000000000..a6164fdf94
--- /dev/null
+++ b/fs/squashfs/squashfs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * squashfs.h
+ */
+
+#define TRACE(s, args...) pr_debug("SQUASHFS: "s, ## args)
+
+#define ERROR(s, args...) pr_err("SQUASHFS error: "s, ## args)
+
+#define WARNING(s, args...) pr_warn("SQUASHFS: "s, ## args)
+
+/* block.c */
+extern int squashfs_read_data(struct super_block *, u64, int, u64 *,
+ struct squashfs_page_actor *);
+
+/* cache.c */
+extern struct squashfs_cache *squashfs_cache_init(char *, int, int);
+extern void squashfs_cache_delete(struct squashfs_cache *);
+extern struct squashfs_cache_entry *squashfs_cache_get(struct super_block *,
+ struct squashfs_cache *, u64, int);
+extern void squashfs_cache_put(struct squashfs_cache_entry *);
+extern int squashfs_copy_data(void *, struct squashfs_cache_entry *, int, int);
+extern int squashfs_read_metadata(struct super_block *, void *, u64 *,
+ int *, int);
+extern struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *,
+ u64, int);
+extern struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *,
+ u64, int);
+extern void *squashfs_read_table(struct super_block *, u64, int);
+
+/* decompressor.c */
+extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int);
+extern void *squashfs_decompressor_setup(struct super_block *, unsigned short);
+
+/* decompressor_xxx.c */
+
+struct squashfs_decompressor_thread_ops {
+ void * (*create)(struct squashfs_sb_info *msblk, void *comp_opts);
+ void (*destroy)(struct squashfs_sb_info *msblk);
+ int (*decompress)(struct squashfs_sb_info *msblk, struct bio *bio,
+ int offset, int length, struct squashfs_page_actor *output);
+ int (*max_decompressors)(void);
+};
+
+#ifdef CONFIG_SQUASHFS_DECOMP_SINGLE
+extern const struct squashfs_decompressor_thread_ops squashfs_decompressor_single;
+#endif
+#ifdef CONFIG_SQUASHFS_DECOMP_MULTI
+extern const struct squashfs_decompressor_thread_ops squashfs_decompressor_multi;
+#endif
+#ifdef CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU
+extern const struct squashfs_decompressor_thread_ops squashfs_decompressor_percpu;
+#endif
+
+/* export.c */
+extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, u64,
+ unsigned int);
+
+/* fragment.c */
+extern int squashfs_frag_lookup(struct super_block *, unsigned int, u64 *);
+extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
+ u64, u64, unsigned int);
+
+/* file.c */
+void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int);
+void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
+ int);
+
+/* file_xxx.c */
+extern int squashfs_readpage_block(struct page *, u64, int, int);
+
+/* id.c */
+extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
+extern __le64 *squashfs_read_id_index_table(struct super_block *, u64, u64,
+ unsigned short);
+
+/* inode.c */
+extern struct inode *squashfs_iget(struct super_block *, long long,
+ unsigned int);
+extern int squashfs_read_inode(struct inode *, long long);
+
+/* xattr.c */
+extern ssize_t squashfs_listxattr(struct dentry *, char *, size_t);
+
+/*
+ * Inodes, files, decompressor and xattr operations
+ */
+
+/* dir.c */
+extern const struct file_operations squashfs_dir_ops;
+
+/* export.c */
+extern const struct export_operations squashfs_export_ops;
+
+/* file.c */
+extern const struct address_space_operations squashfs_aops;
+
+/* inode.c */
+extern const struct inode_operations squashfs_inode_ops;
+
+/* namei.c */
+extern const struct inode_operations squashfs_dir_inode_ops;
+
+/* symlink.c */
+extern const struct address_space_operations squashfs_symlink_aops;
+extern const struct inode_operations squashfs_symlink_inode_ops;
+
+/* xattr.c */
+extern const struct xattr_handler *squashfs_xattr_handlers[];
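
The squashfs_decompressor_thread_ops table declared above hides how many decompression streams exist behind create/destroy/decompress/max_decompressors. A rough single-stream implementation of that interface might look as follows (a sketch only, assuming the usual squashfs includes; the in-tree decompressor_single.c is the real reference, and the struct and function names here are illustrative):

/* Illustrative single-stream backend: one decompressor state behind a mutex. */
struct single_stream {
	void *stream;		/* one decompressor state (e.g. a squashfs_xz) */
	struct mutex mutex;	/* serialise every decompression through it */
};

static void *single_create(struct squashfs_sb_info *msblk, void *comp_opts)
{
	struct single_stream *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (s == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_init(&s->mutex);
	s->stream = msblk->decompressor->init(msblk, comp_opts);
	if (IS_ERR(s->stream)) {
		void *err = s->stream;

		kfree(s);
		return err;
	}
	return s;
}

static void single_destroy(struct squashfs_sb_info *msblk)
{
	struct single_stream *s = msblk->stream;

	if (s) {
		msblk->decompressor->free(s->stream);
		kfree(s);
	}
}

static int single_decompress(struct squashfs_sb_info *msblk, struct bio *bio,
			     int offset, int length,
			     struct squashfs_page_actor *output)
{
	struct single_stream *s = msblk->stream;
	int res;

	mutex_lock(&s->mutex);
	res = msblk->decompressor->decompress(msblk, s->stream, bio,
					      offset, length, output);
	mutex_unlock(&s->mutex);
	return res;
}

static int single_max_decompressors(void)
{
	return 1;
}

static const struct squashfs_decompressor_thread_ops example_single_ops = {
	.create			= single_create,
	.destroy		= single_destroy,
	.decompress		= single_decompress,
	.max_decompressors	= single_max_decompressors,
};

super.c selects one of the real tables (single, multi or percpu) as the default at mount time and stores it in msblk->thread_ops; every data block read then goes through its decompress op.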
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
new file mode 100644
index 0000000000..95f8e89017
--- /dev/null
+++ b/fs/squashfs/squashfs_fs.h
@@ -0,0 +1,452 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef SQUASHFS_FS
+#define SQUASHFS_FS
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * squashfs_fs.h
+ */
+
+#define SQUASHFS_CACHED_FRAGMENTS CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE
+#define SQUASHFS_MAJOR 4
+#define SQUASHFS_MINOR 0
+#define SQUASHFS_START 0
+
+/* size of metadata (inode and directory) blocks */
+#define SQUASHFS_METADATA_SIZE 8192
+#define SQUASHFS_BLOCK_OFFSET 2
+
+/* default size of block device I/O */
+#ifdef CONFIG_SQUASHFS_4K_DEVBLK_SIZE
+#define SQUASHFS_DEVBLK_SIZE 4096
+#else
+#define SQUASHFS_DEVBLK_SIZE 1024
+#endif
+
+#define SQUASHFS_FILE_MAX_SIZE 1048576
+#define SQUASHFS_FILE_MAX_LOG 20
+
+/* Max length of filename (not 255) */
+#define SQUASHFS_NAME_LEN 256
+
+/* Max value for directory header count */
+#define SQUASHFS_DIR_COUNT 256
+
+#define SQUASHFS_INVALID_FRAG (0xffffffffU)
+#define SQUASHFS_INVALID_XATTR (0xffffffffU)
+#define SQUASHFS_INVALID_BLK (-1LL)
+
+/* Filesystem flags */
+#define SQUASHFS_NOI 0
+#define SQUASHFS_NOD 1
+#define SQUASHFS_NOF 3
+#define SQUASHFS_NO_FRAG 4
+#define SQUASHFS_ALWAYS_FRAG 5
+#define SQUASHFS_DUPLICATE 6
+#define SQUASHFS_EXPORT 7
+#define SQUASHFS_COMP_OPT 10
+
+#define SQUASHFS_BIT(flag, bit) ((flag >> bit) & 1)
+
+#define SQUASHFS_UNCOMPRESSED_INODES(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_NOI)
+
+#define SQUASHFS_UNCOMPRESSED_DATA(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_NOD)
+
+#define SQUASHFS_UNCOMPRESSED_FRAGMENTS(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_NOF)
+
+#define SQUASHFS_NO_FRAGMENTS(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_NO_FRAG)
+
+#define SQUASHFS_ALWAYS_FRAGMENTS(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_ALWAYS_FRAG)
+
+#define SQUASHFS_DUPLICATES(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_DUPLICATE)
+
+#define SQUASHFS_EXPORTABLE(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_EXPORT)
+
+#define SQUASHFS_COMP_OPTS(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_COMP_OPT)
+
+/* Inode types including extended types */
+#define SQUASHFS_DIR_TYPE 1
+#define SQUASHFS_REG_TYPE 2
+#define SQUASHFS_SYMLINK_TYPE 3
+#define SQUASHFS_BLKDEV_TYPE 4
+#define SQUASHFS_CHRDEV_TYPE 5
+#define SQUASHFS_FIFO_TYPE 6
+#define SQUASHFS_SOCKET_TYPE 7
+#define SQUASHFS_LDIR_TYPE 8
+#define SQUASHFS_LREG_TYPE 9
+#define SQUASHFS_LSYMLINK_TYPE 10
+#define SQUASHFS_LBLKDEV_TYPE 11
+#define SQUASHFS_LCHRDEV_TYPE 12
+#define SQUASHFS_LFIFO_TYPE 13
+#define SQUASHFS_LSOCKET_TYPE 14
+
+/* Max type value stored in directory entry */
+#define SQUASHFS_MAX_DIR_TYPE 7
+
+/* Xattr types */
+#define SQUASHFS_XATTR_USER 0
+#define SQUASHFS_XATTR_TRUSTED 1
+#define SQUASHFS_XATTR_SECURITY 2
+#define SQUASHFS_XATTR_VALUE_OOL 256
+#define SQUASHFS_XATTR_PREFIX_MASK 0xff
+
+/* Flag whether block is compressed or uncompressed, bit is set if block is
+ * uncompressed */
+#define SQUASHFS_COMPRESSED_BIT (1 << 15)
+
+#define SQUASHFS_COMPRESSED_SIZE(B) (((B) & ~SQUASHFS_COMPRESSED_BIT) ? \
+ (B) & ~SQUASHFS_COMPRESSED_BIT : SQUASHFS_COMPRESSED_BIT)
+
+#define SQUASHFS_COMPRESSED(B) (!((B) & SQUASHFS_COMPRESSED_BIT))
+
+#define SQUASHFS_COMPRESSED_BIT_BLOCK (1 << 24)
+
+#define SQUASHFS_COMPRESSED_SIZE_BLOCK(B) ((B) & \
+ ~SQUASHFS_COMPRESSED_BIT_BLOCK)
+
+#define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
+
+static inline int squashfs_block_size(__le32 raw)
+{
+ u32 size = le32_to_cpu(raw);
+ return (size >> 25) ? -EIO : size;
+}
+
+/*
+ * Inode number ops. Inodes consist of a compressed block number, and an
+ * uncompressed offset within that block
+ */
+#define SQUASHFS_INODE_BLK(A) ((unsigned int) ((A) >> 16))
+
+#define SQUASHFS_INODE_OFFSET(A) ((unsigned int) ((A) & 0xffff))
+
+#define SQUASHFS_MKINODE(A, B) ((long long)(((long long) (A)\
+ << 16) + (B)))
+
+/* fragment and fragment table defines */
+#define SQUASHFS_FRAGMENT_BYTES(A) \
+ ((A) * sizeof(struct squashfs_fragment_entry))
+
+#define SQUASHFS_FRAGMENT_INDEX(A) (SQUASHFS_FRAGMENT_BYTES(A) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEX_OFFSET(A) (SQUASHFS_FRAGMENT_BYTES(A) % \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEXES(A) ((SQUASHFS_FRAGMENT_BYTES(A) + \
+ SQUASHFS_METADATA_SIZE - 1) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEX_BYTES(A) (SQUASHFS_FRAGMENT_INDEXES(A) *\
+ sizeof(u64))
+
+/* inode lookup table defines */
+#define SQUASHFS_LOOKUP_BYTES(A) ((A) * sizeof(u64))
+
+#define SQUASHFS_LOOKUP_BLOCK(A) (SQUASHFS_LOOKUP_BYTES(A) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCK_OFFSET(A) (SQUASHFS_LOOKUP_BYTES(A) % \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCKS(A) ((SQUASHFS_LOOKUP_BYTES(A) + \
+ SQUASHFS_METADATA_SIZE - 1) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCK_BYTES(A) (SQUASHFS_LOOKUP_BLOCKS(A) *\
+ sizeof(u64))
+
+/* uid/gid lookup table defines */
+#define SQUASHFS_ID_BYTES(A) ((A) * sizeof(unsigned int))
+
+#define SQUASHFS_ID_BLOCK(A) (SQUASHFS_ID_BYTES(A) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_ID_BLOCK_OFFSET(A) (SQUASHFS_ID_BYTES(A) % \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_ID_BLOCKS(A) ((SQUASHFS_ID_BYTES(A) + \
+ SQUASHFS_METADATA_SIZE - 1) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_ID_BLOCK_BYTES(A) (SQUASHFS_ID_BLOCKS(A) *\
+ sizeof(u64))
+/* xattr id lookup table defines */
+#define SQUASHFS_XATTR_BYTES(A) (((u64) (A)) * sizeof(struct squashfs_xattr_id))
+
+#define SQUASHFS_XATTR_BLOCK(A) (SQUASHFS_XATTR_BYTES(A) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_XATTR_BLOCK_OFFSET(A) (SQUASHFS_XATTR_BYTES(A) % \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_XATTR_BLOCKS(A) ((SQUASHFS_XATTR_BYTES(A) + \
+ SQUASHFS_METADATA_SIZE - 1) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_XATTR_BLOCK_BYTES(A) (SQUASHFS_XATTR_BLOCKS(A) *\
+ sizeof(u64))
+#define SQUASHFS_XATTR_BLK(A) ((unsigned int) ((A) >> 16))
+
+#define SQUASHFS_XATTR_OFFSET(A) ((unsigned int) ((A) & 0xffff))
+
+/* cached data constants for filesystem */
+#define SQUASHFS_CACHED_BLKS 8
+
+/* meta index cache */
+#define SQUASHFS_META_INDEXES (SQUASHFS_METADATA_SIZE / sizeof(unsigned int))
+#define SQUASHFS_META_ENTRIES 127
+#define SQUASHFS_META_SLOTS 8
+
+struct meta_entry {
+ u64 data_block;
+ unsigned int index_block;
+ unsigned short offset;
+ unsigned short pad;
+};
+
+struct meta_index {
+ unsigned int inode_number;
+ unsigned int offset;
+ unsigned short entries;
+ unsigned short skip;
+ unsigned short locked;
+ unsigned short pad;
+ struct meta_entry meta_entry[SQUASHFS_META_ENTRIES];
+};
+
+
+/*
+ * definitions for structures on disk
+ */
+#define ZLIB_COMPRESSION 1
+#define LZMA_COMPRESSION 2
+#define LZO_COMPRESSION 3
+#define XZ_COMPRESSION 4
+#define LZ4_COMPRESSION 5
+#define ZSTD_COMPRESSION 6
+
+struct squashfs_super_block {
+ __le32 s_magic;
+ __le32 inodes;
+ __le32 mkfs_time;
+ __le32 block_size;
+ __le32 fragments;
+ __le16 compression;
+ __le16 block_log;
+ __le16 flags;
+ __le16 no_ids;
+ __le16 s_major;
+ __le16 s_minor;
+ __le64 root_inode;
+ __le64 bytes_used;
+ __le64 id_table_start;
+ __le64 xattr_id_table_start;
+ __le64 inode_table_start;
+ __le64 directory_table_start;
+ __le64 fragment_table_start;
+ __le64 lookup_table_start;
+};
+
+struct squashfs_dir_index {
+ __le32 index;
+ __le32 start_block;
+ __le32 size;
+ unsigned char name[];
+};
+
+struct squashfs_base_inode {
+ __le16 inode_type;
+ __le16 mode;
+ __le16 uid;
+ __le16 guid;
+ __le32 mtime;
+ __le32 inode_number;
+};
+
+struct squashfs_ipc_inode {
+ __le16 inode_type;
+ __le16 mode;
+ __le16 uid;
+ __le16 guid;
+ __le32 mtime;
+ __le32 inode_number;
+ __le32 nlink;
+};
+
+struct squashfs_lipc_inode {
+ __le16 inode_type;
+ __le16 mode;
+ __le16 uid;
+ __le16 guid;
+ __le32 mtime;
+ __le32 inode_number;
+ __le32 nlink;
+ __le32 xattr;
+};
+
+struct squashfs_dev_inode {
+ __le16 inode_type;
+ __le16 mode;
+ __le16 uid;
+ __le16 guid;
+ __le32 mtime;
+ __le32 inode_number;
+ __le32 nlink;
+ __le32 rdev;
+};
+
+struct squashfs_ldev_inode {
+ __le16 inode_type;
+ __le16 mode;
+ __le16 uid;
+ __le16 guid;
+ __le32 mtime;
+ __le32 inode_number;
+ __le32 nlink;
+ __le32 rdev;
+ __le32 xattr;
+};
+
+struct squashfs_symlink_inode {
+ __le16 inode_type;
+ __le16 mode;
+ __le16 uid;
+ __le16 guid;
+ __le32 mtime;
+ __le32 inode_number;
+ __le32 nlink;
+ __le32 symlink_size;
+ char symlink[];
+};
+
+struct squashfs_reg_inode {
+ __le16 inode_type;
+ __le16 mode;
+ __le16 uid;
+ __le16 guid;
+ __le32 mtime;
+ __le32 inode_number;
+ __le32 start_block;
+ __le32 fragment;
+ __le32 offset;
+ __le32 file_size;
+ __le16 block_list[];
+};
+
+struct squashfs_lreg_inode {
+ __le16 inode_type;
+ __le16 mode;
+ __le16 uid;
+ __le16 guid;
+ __le32 mtime;
+ __le32 inode_number;
+ __le64 start_block;
+ __le64 file_size;
+ __le64 sparse;
+ __le32 nlink;
+ __le32 fragment;
+ __le32 offset;
+ __le32 xattr;
+ __le16 block_list[];
+};
+
+struct squashfs_dir_inode {
+ __le16 inode_type;
+ __le16 mode;
+ __le16 uid;
+ __le16 guid;
+ __le32 mtime;
+ __le32 inode_number;
+ __le32 start_block;
+ __le32 nlink;
+ __le16 file_size;
+ __le16 offset;
+ __le32 parent_inode;
+};
+
+struct squashfs_ldir_inode {
+ __le16 inode_type;
+ __le16 mode;
+ __le16 uid;
+ __le16 guid;
+ __le32 mtime;
+ __le32 inode_number;
+ __le32 nlink;
+ __le32 file_size;
+ __le32 start_block;
+ __le32 parent_inode;
+ __le16 i_count;
+ __le16 offset;
+ __le32 xattr;
+ struct squashfs_dir_index index[];
+};
+
+union squashfs_inode {
+ struct squashfs_base_inode base;
+ struct squashfs_dev_inode dev;
+ struct squashfs_ldev_inode ldev;
+ struct squashfs_symlink_inode symlink;
+ struct squashfs_reg_inode reg;
+ struct squashfs_lreg_inode lreg;
+ struct squashfs_dir_inode dir;
+ struct squashfs_ldir_inode ldir;
+ struct squashfs_ipc_inode ipc;
+ struct squashfs_lipc_inode lipc;
+};
+
+struct squashfs_dir_entry {
+ __le16 offset;
+ __le16 inode_number;
+ __le16 type;
+ __le16 size;
+ char name[];
+};
+
+struct squashfs_dir_header {
+ __le32 count;
+ __le32 start_block;
+ __le32 inode_number;
+};
+
+struct squashfs_fragment_entry {
+ __le64 start_block;
+ __le32 size;
+ unsigned int unused;
+};
+
+struct squashfs_xattr_entry {
+ __le16 type;
+ __le16 size;
+ char data[];
+};
+
+struct squashfs_xattr_val {
+ __le32 vsize;
+ char value[];
+};
+
+struct squashfs_xattr_id {
+ __le64 xattr;
+ __le32 count;
+ __le32 size;
+};
+
+struct squashfs_xattr_id_table {
+ __le64 xattr_table_start;
+ __le32 xattr_ids;
+ __le32 unused;
+};
+
+#endif
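
The inode-number macros in this header pack a metadata block address into the upper bits and a within-block offset into the low 16 bits of one long long. A quick round-trip illustration (a sketch; the values are arbitrary):

/* Round-trip an inode reference through the packing macros. */
static void inode_ref_example(void)
{
	unsigned int blk = 0x1234;	/* start of the metadata block */
	unsigned int off = 0x0abc;	/* offset within the uncompressed block */
	long long ino = SQUASHFS_MKINODE(blk, off);	/* 0x12340abc */

	WARN_ON(SQUASHFS_INODE_BLK(ino) != blk);
	WARN_ON(SQUASHFS_INODE_OFFSET(ino) != off);
}

namei.c uses exactly this encoding when it builds the value passed to squashfs_iget() from dirh.start_block and dire->offset.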
diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h
new file mode 100644
index 0000000000..2c82d6f2a4
--- /dev/null
+++ b/fs/squashfs/squashfs_fs_i.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef SQUASHFS_FS_I
+#define SQUASHFS_FS_I
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * squashfs_fs_i.h
+ */
+
+struct squashfs_inode_info {
+ u64 start;
+ int offset;
+ u64 xattr;
+ unsigned int xattr_size;
+ int xattr_count;
+ union {
+ struct {
+ u64 fragment_block;
+ int fragment_size;
+ int fragment_offset;
+ u64 block_list_start;
+ };
+ struct {
+ u64 dir_idx_start;
+ int dir_idx_offset;
+ int dir_idx_cnt;
+ int parent;
+ };
+ };
+ struct inode vfs_inode;
+};
+
+
+static inline struct squashfs_inode_info *squashfs_i(struct inode *inode)
+{
+ return container_of(inode, struct squashfs_inode_info, vfs_inode);
+}
+#endif
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
new file mode 100644
index 0000000000..c01998eec1
--- /dev/null
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef SQUASHFS_FS_SB
+#define SQUASHFS_FS_SB
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * squashfs_fs_sb.h
+ */
+
+#include "squashfs_fs.h"
+
+struct squashfs_cache {
+ char *name;
+ int entries;
+ int curr_blk;
+ int next_blk;
+ int num_waiters;
+ int unused;
+ int block_size;
+ int pages;
+ spinlock_t lock;
+ wait_queue_head_t wait_queue;
+ struct squashfs_cache_entry *entry;
+};
+
+struct squashfs_cache_entry {
+ u64 block;
+ int length;
+ int refcount;
+ u64 next_index;
+ int pending;
+ int error;
+ int num_waiters;
+ wait_queue_head_t wait_queue;
+ struct squashfs_cache *cache;
+ void **data;
+ struct squashfs_page_actor *actor;
+};
+
+struct squashfs_sb_info {
+ const struct squashfs_decompressor *decompressor;
+ int devblksize;
+ int devblksize_log2;
+ struct squashfs_cache *block_cache;
+ struct squashfs_cache *fragment_cache;
+ struct squashfs_cache *read_page;
+ struct address_space *cache_mapping;
+ int next_meta_index;
+ __le64 *id_table;
+ __le64 *fragment_index;
+ __le64 *xattr_id_table;
+ struct mutex meta_index_mutex;
+ struct meta_index *meta_index;
+ void *stream;
+ __le64 *inode_lookup_table;
+ u64 inode_table;
+ u64 directory_table;
+ u64 xattr_table;
+ unsigned int block_size;
+ unsigned short block_log;
+ long long bytes_used;
+ unsigned int inodes;
+ unsigned int fragments;
+ unsigned int xattr_ids;
+ unsigned int ids;
+ bool panic_on_errors;
+ const struct squashfs_decompressor_thread_ops *thread_ops;
+ int max_thread_num;
+};
+#endif
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
new file mode 100644
index 0000000000..22e812808e
--- /dev/null
+++ b/fs/squashfs/super.c
@@ -0,0 +1,698 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * super.c
+ */
+
+/*
+ * This file implements code to read the superblock, read and initialise
+ * in-memory structures at mount time, and all the VFS glue code to register
+ * the filesystem.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/blkdev.h>
+#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/magic.h>
+#include <linux/xattr.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+#include "decompressor.h"
+#include "xattr.h"
+
+static struct file_system_type squashfs_fs_type;
+static const struct super_operations squashfs_super_ops;
+
+enum Opt_errors {
+ Opt_errors_continue,
+ Opt_errors_panic,
+};
+
+enum squashfs_param {
+ Opt_errors,
+ Opt_threads,
+};
+
+struct squashfs_mount_opts {
+ enum Opt_errors errors;
+ const struct squashfs_decompressor_thread_ops *thread_ops;
+ int thread_num;
+};
+
+static const struct constant_table squashfs_param_errors[] = {
+ {"continue", Opt_errors_continue },
+ {"panic", Opt_errors_panic },
+ {}
+};
+
+static const struct fs_parameter_spec squashfs_fs_parameters[] = {
+ fsparam_enum("errors", Opt_errors, squashfs_param_errors),
+ fsparam_string("threads", Opt_threads),
+ {}
+};
+
+
+static int squashfs_parse_param_threads_str(const char *str, struct squashfs_mount_opts *opts)
+{
+#ifdef CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT
+ if (strcmp(str, "single") == 0) {
+ opts->thread_ops = &squashfs_decompressor_single;
+ return 0;
+ }
+ if (strcmp(str, "multi") == 0) {
+ opts->thread_ops = &squashfs_decompressor_multi;
+ return 0;
+ }
+ if (strcmp(str, "percpu") == 0) {
+ opts->thread_ops = &squashfs_decompressor_percpu;
+ return 0;
+ }
+#endif
+ return -EINVAL;
+}
+
+static int squashfs_parse_param_threads_num(const char *str, struct squashfs_mount_opts *opts)
+{
+#ifdef CONFIG_SQUASHFS_MOUNT_DECOMP_THREADS
+ int ret;
+ unsigned long num;
+
+ ret = kstrtoul(str, 0, &num);
+ if (ret != 0)
+ return -EINVAL;
+ if (num > 1) {
+ opts->thread_ops = &squashfs_decompressor_multi;
+ if (num > opts->thread_ops->max_decompressors())
+ return -EINVAL;
+ opts->thread_num = (int)num;
+ return 0;
+ }
+#ifdef CONFIG_SQUASHFS_DECOMP_SINGLE
+ if (num == 1) {
+ opts->thread_ops = &squashfs_decompressor_single;
+ opts->thread_num = 1;
+ return 0;
+ }
+#endif
+#endif /* !CONFIG_SQUASHFS_MOUNT_DECOMP_THREADS */
+ return -EINVAL;
+}
+
+static int squashfs_parse_param_threads(const char *str, struct squashfs_mount_opts *opts)
+{
+ int ret = squashfs_parse_param_threads_str(str, opts);
+
+ if (ret == 0)
+ return ret;
+ return squashfs_parse_param_threads_num(str, opts);
+}
+
+static int squashfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct squashfs_mount_opts *opts = fc->fs_private;
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, squashfs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_errors:
+ opts->errors = result.uint_32;
+ break;
+ case Opt_threads:
+ if (squashfs_parse_param_threads(param->string, opts) != 0)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct squashfs_decompressor *supported_squashfs_filesystem(
+ struct fs_context *fc,
+ short major, short minor, short id)
+{
+ const struct squashfs_decompressor *decompressor;
+
+ if (major < SQUASHFS_MAJOR) {
+ errorf(fc, "Major/Minor mismatch, older Squashfs %d.%d "
+ "filesystems are unsupported", major, minor);
+ return NULL;
+ } else if (major > SQUASHFS_MAJOR || minor > SQUASHFS_MINOR) {
+ errorf(fc, "Major/Minor mismatch, trying to mount newer "
+ "%d.%d filesystem", major, minor);
+ errorf(fc, "Please update your kernel");
+ return NULL;
+ }
+
+ decompressor = squashfs_lookup_decompressor(id);
+ if (!decompressor->supported) {
+ errorf(fc, "Filesystem uses \"%s\" compression. This is not supported",
+ decompressor->name);
+ return NULL;
+ }
+
+ return decompressor;
+}
+
+
+static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ struct squashfs_mount_opts *opts = fc->fs_private;
+ struct squashfs_sb_info *msblk;
+ struct squashfs_super_block *sblk = NULL;
+ struct inode *root;
+ long long root_inode;
+ unsigned short flags;
+ unsigned int fragments;
+ u64 lookup_table_start, xattr_id_table_start, next_table;
+ int err;
+
+ TRACE("Entered squashfs_fill_superblock\n");
+
+ sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL);
+ if (sb->s_fs_info == NULL) {
+ ERROR("Failed to allocate squashfs_sb_info\n");
+ return -ENOMEM;
+ }
+ msblk = sb->s_fs_info;
+ msblk->thread_ops = opts->thread_ops;
+
+ msblk->panic_on_errors = (opts->errors == Opt_errors_panic);
+
+ msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
+ msblk->devblksize_log2 = ffz(~msblk->devblksize);
+
+ mutex_init(&msblk->meta_index_mutex);
+
+ /*
+ * msblk->bytes_used is checked in squashfs_read_table to ensure reads
+ * are not beyond filesystem end. But as we're using
+ * squashfs_read_table here to read the superblock (including the value
+ * of bytes_used) we need to set it to an initial sensible dummy value
+ */
+ msblk->bytes_used = sizeof(*sblk);
+ sblk = squashfs_read_table(sb, SQUASHFS_START, sizeof(*sblk));
+
+ if (IS_ERR(sblk)) {
+ errorf(fc, "unable to read squashfs_super_block");
+ err = PTR_ERR(sblk);
+ sblk = NULL;
+ goto failed_mount;
+ }
+
+ err = -EINVAL;
+
+ /* Check it is a SQUASHFS superblock */
+ sb->s_magic = le32_to_cpu(sblk->s_magic);
+ if (sb->s_magic != SQUASHFS_MAGIC) {
+ if (!(fc->sb_flags & SB_SILENT))
+ errorf(fc, "Can't find a SQUASHFS superblock on %pg",
+ sb->s_bdev);
+ goto failed_mount;
+ }
+
+ if (opts->thread_num == 0) {
+ msblk->max_thread_num = msblk->thread_ops->max_decompressors();
+ } else {
+ msblk->max_thread_num = opts->thread_num;
+ }
+
+ /* Check the MAJOR & MINOR versions and lookup compression type */
+ msblk->decompressor = supported_squashfs_filesystem(
+ fc,
+ le16_to_cpu(sblk->s_major),
+ le16_to_cpu(sblk->s_minor),
+ le16_to_cpu(sblk->compression));
+ if (msblk->decompressor == NULL)
+ goto failed_mount;
+
+ /* Check the filesystem does not extend beyond the end of the
+ block device */
+ msblk->bytes_used = le64_to_cpu(sblk->bytes_used);
+ if (msblk->bytes_used < 0 ||
+ msblk->bytes_used > bdev_nr_bytes(sb->s_bdev))
+ goto failed_mount;
+
+ /* Check block size for sanity */
+ msblk->block_size = le32_to_cpu(sblk->block_size);
+ if (msblk->block_size > SQUASHFS_FILE_MAX_SIZE)
+ goto insanity;
+
+ /*
+ * Check the system page size is not larger than the filesystem
+ * block size (by default 128K). This is currently not supported.
+ */
+ if (PAGE_SIZE > msblk->block_size) {
+ errorf(fc, "Page size > filesystem block size (%d). This is "
+ "currently not supported!", msblk->block_size);
+ goto failed_mount;
+ }
+
+ /* Check block log for sanity */
+ msblk->block_log = le16_to_cpu(sblk->block_log);
+ if (msblk->block_log > SQUASHFS_FILE_MAX_LOG)
+ goto failed_mount;
+
+ /* Check that block_size and block_log match */
+ if (msblk->block_size != (1 << msblk->block_log))
+ goto insanity;
+
+ /* Check the root inode for sanity */
+ root_inode = le64_to_cpu(sblk->root_inode);
+ if (SQUASHFS_INODE_OFFSET(root_inode) > SQUASHFS_METADATA_SIZE)
+ goto insanity;
+
+ msblk->inode_table = le64_to_cpu(sblk->inode_table_start);
+ msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
+ msblk->inodes = le32_to_cpu(sblk->inodes);
+ msblk->fragments = le32_to_cpu(sblk->fragments);
+ msblk->ids = le16_to_cpu(sblk->no_ids);
+ flags = le16_to_cpu(sblk->flags);
+
+ TRACE("Found valid superblock on %pg\n", sb->s_bdev);
+ TRACE("Inodes are %scompressed\n", SQUASHFS_UNCOMPRESSED_INODES(flags)
+ ? "un" : "");
+ TRACE("Data is %scompressed\n", SQUASHFS_UNCOMPRESSED_DATA(flags)
+ ? "un" : "");
+ TRACE("Filesystem size %lld bytes\n", msblk->bytes_used);
+ TRACE("Block size %d\n", msblk->block_size);
+ TRACE("Number of inodes %d\n", msblk->inodes);
+ TRACE("Number of fragments %d\n", msblk->fragments);
+ TRACE("Number of ids %d\n", msblk->ids);
+ TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
+ TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
+ TRACE("sblk->fragment_table_start %llx\n",
+ (u64) le64_to_cpu(sblk->fragment_table_start));
+ TRACE("sblk->id_table_start %llx\n",
+ (u64) le64_to_cpu(sblk->id_table_start));
+
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_time_min = 0;
+ sb->s_time_max = U32_MAX;
+ sb->s_flags |= SB_RDONLY;
+ sb->s_op = &squashfs_super_ops;
+
+ err = -ENOMEM;
+
+ msblk->block_cache = squashfs_cache_init("metadata",
+ SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
+ if (msblk->block_cache == NULL)
+ goto failed_mount;
+
+ /* Allocate read_page block */
+ msblk->read_page = squashfs_cache_init("data",
+ msblk->max_thread_num, msblk->block_size);
+ if (msblk->read_page == NULL) {
+ errorf(fc, "Failed to allocate read_page block");
+ goto failed_mount;
+ }
+
+ if (msblk->devblksize == PAGE_SIZE) {
+ struct inode *cache = new_inode(sb);
+
+ if (cache == NULL)
+ goto failed_mount;
+
+ set_nlink(cache, 1);
+ cache->i_size = OFFSET_MAX;
+ mapping_set_gfp_mask(cache->i_mapping, GFP_NOFS);
+
+ msblk->cache_mapping = cache->i_mapping;
+ }
+
+ msblk->stream = squashfs_decompressor_setup(sb, flags);
+ if (IS_ERR(msblk->stream)) {
+ err = PTR_ERR(msblk->stream);
+ msblk->stream = NULL;
+ goto insanity;
+ }
+
+ /* Handle xattrs */
+ sb->s_xattr = squashfs_xattr_handlers;
+ xattr_id_table_start = le64_to_cpu(sblk->xattr_id_table_start);
+ if (xattr_id_table_start == SQUASHFS_INVALID_BLK) {
+ next_table = msblk->bytes_used;
+ goto allocate_id_index_table;
+ }
+
+ /* Allocate and read xattr id lookup table */
+ msblk->xattr_id_table = squashfs_read_xattr_id_table(sb,
+ xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids);
+ if (IS_ERR(msblk->xattr_id_table)) {
+ errorf(fc, "unable to read xattr id index table");
+ err = PTR_ERR(msblk->xattr_id_table);
+ msblk->xattr_id_table = NULL;
+ if (err != -ENOTSUPP)
+ goto failed_mount;
+ }
+ next_table = msblk->xattr_table;
+
+allocate_id_index_table:
+ /* Allocate and read id index table */
+ msblk->id_table = squashfs_read_id_index_table(sb,
+ le64_to_cpu(sblk->id_table_start), next_table, msblk->ids);
+ if (IS_ERR(msblk->id_table)) {
+ errorf(fc, "unable to read id index table");
+ err = PTR_ERR(msblk->id_table);
+ msblk->id_table = NULL;
+ goto failed_mount;
+ }
+ next_table = le64_to_cpu(msblk->id_table[0]);
+
+ /* Handle inode lookup table */
+ lookup_table_start = le64_to_cpu(sblk->lookup_table_start);
+ if (lookup_table_start == SQUASHFS_INVALID_BLK)
+ goto handle_fragments;
+
+ /* Allocate and read inode lookup table */
+ msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
+ lookup_table_start, next_table, msblk->inodes);
+ if (IS_ERR(msblk->inode_lookup_table)) {
+ errorf(fc, "unable to read inode lookup table");
+ err = PTR_ERR(msblk->inode_lookup_table);
+ msblk->inode_lookup_table = NULL;
+ goto failed_mount;
+ }
+ next_table = le64_to_cpu(msblk->inode_lookup_table[0]);
+
+ sb->s_export_op = &squashfs_export_ops;
+
+handle_fragments:
+ fragments = msblk->fragments;
+ if (fragments == 0)
+ goto check_directory_table;
+
+ msblk->fragment_cache = squashfs_cache_init("fragment",
+ SQUASHFS_CACHED_FRAGMENTS, msblk->block_size);
+ if (msblk->fragment_cache == NULL) {
+ err = -ENOMEM;
+ goto failed_mount;
+ }
+
+ /* Allocate and read fragment index table */
+ msblk->fragment_index = squashfs_read_fragment_index_table(sb,
+ le64_to_cpu(sblk->fragment_table_start), next_table, fragments);
+ if (IS_ERR(msblk->fragment_index)) {
+ errorf(fc, "unable to read fragment index table");
+ err = PTR_ERR(msblk->fragment_index);
+ msblk->fragment_index = NULL;
+ goto failed_mount;
+ }
+ next_table = le64_to_cpu(msblk->fragment_index[0]);
+
+check_directory_table:
+ /* Sanity check directory_table */
+ if (msblk->directory_table > next_table) {
+ err = -EINVAL;
+ goto insanity;
+ }
+
+ /* Sanity check inode_table */
+ if (msblk->inode_table >= msblk->directory_table) {
+ err = -EINVAL;
+ goto insanity;
+ }
+
+ /* allocate root */
+ root = new_inode(sb);
+ if (!root) {
+ err = -ENOMEM;
+ goto failed_mount;
+ }
+
+ err = squashfs_read_inode(root, root_inode);
+ if (err) {
+ make_bad_inode(root);
+ iput(root);
+ goto failed_mount;
+ }
+ insert_inode_hash(root);
+
+ sb->s_root = d_make_root(root);
+ if (sb->s_root == NULL) {
+ ERROR("Root inode create failed\n");
+ err = -ENOMEM;
+ goto failed_mount;
+ }
+
+ TRACE("Leaving squashfs_fill_super\n");
+ kfree(sblk);
+ return 0;
+
+insanity:
+ errorf(fc, "squashfs image failed sanity check");
+failed_mount:
+ squashfs_cache_delete(msblk->block_cache);
+ squashfs_cache_delete(msblk->fragment_cache);
+ squashfs_cache_delete(msblk->read_page);
+ if (msblk->cache_mapping)
+ iput(msblk->cache_mapping->host);
+ msblk->thread_ops->destroy(msblk);
+ kfree(msblk->inode_lookup_table);
+ kfree(msblk->fragment_index);
+ kfree(msblk->id_table);
+ kfree(msblk->xattr_id_table);
+ kfree(sb->s_fs_info);
+ sb->s_fs_info = NULL;
+ kfree(sblk);
+ return err;
+}
+
+static int squashfs_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, squashfs_fill_super);
+}
+
+static int squashfs_reconfigure(struct fs_context *fc)
+{
+ struct super_block *sb = fc->root->d_sb;
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ struct squashfs_mount_opts *opts = fc->fs_private;
+
+ sync_filesystem(fc->root->d_sb);
+ fc->sb_flags |= SB_RDONLY;
+
+ msblk->panic_on_errors = (opts->errors == Opt_errors_panic);
+
+ return 0;
+}
+
+static void squashfs_free_fs_context(struct fs_context *fc)
+{
+ kfree(fc->fs_private);
+}
+
+static const struct fs_context_operations squashfs_context_ops = {
+ .get_tree = squashfs_get_tree,
+ .free = squashfs_free_fs_context,
+ .parse_param = squashfs_parse_param,
+ .reconfigure = squashfs_reconfigure,
+};
+
+static int squashfs_show_options(struct seq_file *s, struct dentry *root)
+{
+ struct super_block *sb = root->d_sb;
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+
+ if (msblk->panic_on_errors)
+ seq_puts(s, ",errors=panic");
+ else
+ seq_puts(s, ",errors=continue");
+
+#ifdef CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT
+ if (msblk->thread_ops == &squashfs_decompressor_single) {
+ seq_puts(s, ",threads=single");
+ return 0;
+ }
+ if (msblk->thread_ops == &squashfs_decompressor_percpu) {
+ seq_puts(s, ",threads=percpu");
+ return 0;
+ }
+#endif
+#ifdef CONFIG_SQUASHFS_MOUNT_DECOMP_THREADS
+ seq_printf(s, ",threads=%d", msblk->max_thread_num);
+#endif
+ return 0;
+}
+
+static int squashfs_init_fs_context(struct fs_context *fc)
+{
+ struct squashfs_mount_opts *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+#ifdef CONFIG_SQUASHFS_DECOMP_SINGLE
+ opts->thread_ops = &squashfs_decompressor_single;
+#elif defined(CONFIG_SQUASHFS_DECOMP_MULTI)
+ opts->thread_ops = &squashfs_decompressor_multi;
+#elif defined(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU)
+ opts->thread_ops = &squashfs_decompressor_percpu;
+#else
+#error "fail: unknown squashfs decompression thread mode?"
+#endif
+ opts->thread_num = 0;
+ fc->fs_private = opts;
+ fc->ops = &squashfs_context_ops;
+ return 0;
+}
+
+static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct squashfs_sb_info *msblk = dentry->d_sb->s_fs_info;
+ u64 id = huge_encode_dev(dentry->d_sb->s_bdev->bd_dev);
+
+ TRACE("Entered squashfs_statfs\n");
+
+ buf->f_type = SQUASHFS_MAGIC;
+ buf->f_bsize = msblk->block_size;
+ buf->f_blocks = ((msblk->bytes_used - 1) >> msblk->block_log) + 1;
+ buf->f_bfree = buf->f_bavail = 0;
+ buf->f_files = msblk->inodes;
+ buf->f_ffree = 0;
+ buf->f_namelen = SQUASHFS_NAME_LEN;
+ buf->f_fsid = u64_to_fsid(id);
+
+ return 0;
+}
+
+
+static void squashfs_put_super(struct super_block *sb)
+{
+ if (sb->s_fs_info) {
+ struct squashfs_sb_info *sbi = sb->s_fs_info;
+ squashfs_cache_delete(sbi->block_cache);
+ squashfs_cache_delete(sbi->fragment_cache);
+ squashfs_cache_delete(sbi->read_page);
+ if (sbi->cache_mapping)
+ iput(sbi->cache_mapping->host);
+ sbi->thread_ops->destroy(sbi);
+ kfree(sbi->id_table);
+ kfree(sbi->fragment_index);
+ kfree(sbi->meta_index);
+ kfree(sbi->inode_lookup_table);
+ kfree(sbi->xattr_id_table);
+ kfree(sb->s_fs_info);
+ sb->s_fs_info = NULL;
+ }
+}
+
+static struct kmem_cache *squashfs_inode_cachep;
+
+
+static void init_once(void *foo)
+{
+ struct squashfs_inode_info *ei = foo;
+
+ inode_init_once(&ei->vfs_inode);
+}
+
+
+static int __init init_inodecache(void)
+{
+ squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
+ sizeof(struct squashfs_inode_info), 0,
+ SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT,
+ init_once);
+
+ return squashfs_inode_cachep ? 0 : -ENOMEM;
+}
+
+
+static void destroy_inodecache(void)
+{
+ /*
+ * Make sure all delayed rcu free inodes are flushed before we
+ * destroy cache.
+ */
+ rcu_barrier();
+ kmem_cache_destroy(squashfs_inode_cachep);
+}
+
+
+static int __init init_squashfs_fs(void)
+{
+ int err = init_inodecache();
+
+ if (err)
+ return err;
+
+ err = register_filesystem(&squashfs_fs_type);
+ if (err) {
+ destroy_inodecache();
+ return err;
+ }
+
+ pr_info("version 4.0 (2009/01/31) Phillip Lougher\n");
+
+ return 0;
+}
+
+
+static void __exit exit_squashfs_fs(void)
+{
+ unregister_filesystem(&squashfs_fs_type);
+ destroy_inodecache();
+}
+
+
+static struct inode *squashfs_alloc_inode(struct super_block *sb)
+{
+ struct squashfs_inode_info *ei =
+ alloc_inode_sb(sb, squashfs_inode_cachep, GFP_KERNEL);
+
+ return ei ? &ei->vfs_inode : NULL;
+}
+
+
+static void squashfs_free_inode(struct inode *inode)
+{
+ kmem_cache_free(squashfs_inode_cachep, squashfs_i(inode));
+}
+
+static struct file_system_type squashfs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "squashfs",
+ .init_fs_context = squashfs_init_fs_context,
+ .parameters = squashfs_fs_parameters,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+};
+MODULE_ALIAS_FS("squashfs");
+
+static const struct super_operations squashfs_super_ops = {
+ .alloc_inode = squashfs_alloc_inode,
+ .free_inode = squashfs_free_inode,
+ .statfs = squashfs_statfs,
+ .put_super = squashfs_put_super,
+ .show_options = squashfs_show_options,
+};
+
+module_init(init_squashfs_fs);
+module_exit(exit_squashfs_fs);
+MODULE_DESCRIPTION("squashfs 4.0, a compressed read-only filesystem");
+MODULE_AUTHOR("Phillip Lougher <phillip@squashfs.org.uk>");
+MODULE_LICENSE("GPL");
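
Both mount parameters declared in this file ("errors" and "threads") travel through the normal fs_context path, so they can be supplied as the data string of mount(2) or via mount -o. A small userspace illustration (the device and mountpoint are placeholders, and "threads=" is only accepted when the corresponding SQUASHFS decompressor Kconfig options are enabled):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* /dev/loop0 and /mnt/sq are placeholders for a real image and mountpoint */
	if (mount("/dev/loop0", "/mnt/sq", "squashfs", MS_RDONLY,
		  "errors=panic,threads=4") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}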
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
new file mode 100644
index 0000000000..2bf977a52c
--- /dev/null
+++ b/fs/squashfs/symlink.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * symlink.c
+ */
+
+/*
+ * This file implements code to handle symbolic links.
+ *
+ * The data contents of symbolic links are stored inside the symbolic
+ * link inode within the inode table. This allows the normally small symbolic
+ * link to be compressed as part of the inode table, achieving much greater
+ * compression than if the symbolic link was compressed individually.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/xattr.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+#include "xattr.h"
+
+static int squashfs_symlink_read_folio(struct file *file, struct folio *folio)
+{
+ struct page *page = &folio->page;
+ struct inode *inode = page->mapping->host;
+ struct super_block *sb = inode->i_sb;
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ int index = page->index << PAGE_SHIFT;
+ u64 block = squashfs_i(inode)->start;
+ int offset = squashfs_i(inode)->offset;
+ int length = min_t(int, i_size_read(inode) - index, PAGE_SIZE);
+ int bytes, copied;
+ void *pageaddr;
+ struct squashfs_cache_entry *entry;
+
+ TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
+ "%llx, offset %x\n", page->index, block, offset);
+
+ /*
+ * Skip index bytes into symlink metadata.
+ */
+ if (index) {
+ bytes = squashfs_read_metadata(sb, NULL, &block, &offset,
+ index);
+ if (bytes < 0) {
+ ERROR("Unable to read symlink [%llx:%x]\n",
+ squashfs_i(inode)->start,
+ squashfs_i(inode)->offset);
+ goto error_out;
+ }
+ }
+
+ /*
+ * Read length bytes from symlink metadata. Squashfs_read_metadata
+ * is not used here because it can sleep and we want to use
+ * kmap_atomic to map the page. Instead call the underlying
+ * squashfs_cache_get routine. As length bytes may overlap metadata
+ * blocks, we may need to call squashfs_cache_get multiple times.
+ */
+ for (bytes = 0; bytes < length; offset = 0, bytes += copied) {
+ entry = squashfs_cache_get(sb, msblk->block_cache, block, 0);
+ if (entry->error) {
+ ERROR("Unable to read symlink [%llx:%x]\n",
+ squashfs_i(inode)->start,
+ squashfs_i(inode)->offset);
+ squashfs_cache_put(entry);
+ goto error_out;
+ }
+
+ pageaddr = kmap_atomic(page);
+ copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
+ length - bytes);
+ if (copied == length - bytes)
+ memset(pageaddr + length, 0, PAGE_SIZE - length);
+ else
+ block = entry->next_index;
+ kunmap_atomic(pageaddr);
+ squashfs_cache_put(entry);
+ }
+
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ unlock_page(page);
+ return 0;
+
+error_out:
+ SetPageError(page);
+ unlock_page(page);
+ return 0;
+}
+
+
+const struct address_space_operations squashfs_symlink_aops = {
+ .read_folio = squashfs_symlink_read_folio
+};
+
+const struct inode_operations squashfs_symlink_inode_ops = {
+ .get_link = page_get_link,
+ .listxattr = squashfs_listxattr
+};
+
diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
new file mode 100644
index 0000000000..e1e3f3dd5a
--- /dev/null
+++ b/fs/squashfs/xattr.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2010
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * xattr.c
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/xattr.h>
+#include <linux/slab.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+
+static const struct xattr_handler *squashfs_xattr_handler(int);
+
+ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
+ size_t buffer_size)
+{
+ struct inode *inode = d_inode(d);
+ struct super_block *sb = inode->i_sb;
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr)
+ + msblk->xattr_table;
+ int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr);
+ int count = squashfs_i(inode)->xattr_count;
+ size_t rest = buffer_size;
+ int err;
+
+ /* check that the file system has xattrs */
+ if (msblk->xattr_id_table == NULL)
+ return -EOPNOTSUPP;
+
+ /* loop reading each xattr name */
+ while (count--) {
+ struct squashfs_xattr_entry entry;
+ struct squashfs_xattr_val val;
+ const struct xattr_handler *handler;
+ int name_size;
+
+ err = squashfs_read_metadata(sb, &entry, &start, &offset,
+ sizeof(entry));
+ if (err < 0)
+ goto failed;
+
+ name_size = le16_to_cpu(entry.size);
+ handler = squashfs_xattr_handler(le16_to_cpu(entry.type));
+ if (handler && (!handler->list || handler->list(d))) {
+ const char *prefix = handler->prefix ?: handler->name;
+ size_t prefix_size = strlen(prefix);
+
+ if (buffer) {
+ if (prefix_size + name_size + 1 > rest) {
+ err = -ERANGE;
+ goto failed;
+ }
+ memcpy(buffer, prefix, prefix_size);
+ buffer += prefix_size;
+ }
+ err = squashfs_read_metadata(sb, buffer, &start,
+ &offset, name_size);
+ if (err < 0)
+ goto failed;
+ if (buffer) {
+ buffer[name_size] = '\0';
+ buffer += name_size + 1;
+ }
+ rest -= prefix_size + name_size + 1;
+ } else {
+			/* no handler or insufficient privileges, so skip */
+ err = squashfs_read_metadata(sb, NULL, &start,
+ &offset, name_size);
+ if (err < 0)
+ goto failed;
+ }
+
+
+ /* skip remaining xattr entry */
+ err = squashfs_read_metadata(sb, &val, &start, &offset,
+ sizeof(val));
+ if (err < 0)
+ goto failed;
+
+ err = squashfs_read_metadata(sb, NULL, &start, &offset,
+ le32_to_cpu(val.vsize));
+ if (err < 0)
+ goto failed;
+ }
+ err = buffer_size - rest;
+
+failed:
+ return err;
+}
+
+
+static int squashfs_xattr_get(struct inode *inode, int name_index,
+ const char *name, void *buffer, size_t buffer_size)
+{
+ struct super_block *sb = inode->i_sb;
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr)
+ + msblk->xattr_table;
+ int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr);
+ int count = squashfs_i(inode)->xattr_count;
+ int name_len = strlen(name);
+ int err, vsize;
+ char *target = kmalloc(name_len, GFP_KERNEL);
+
+ if (target == NULL)
+ return -ENOMEM;
+
+ /* loop reading each xattr name */
+ for (; count; count--) {
+ struct squashfs_xattr_entry entry;
+ struct squashfs_xattr_val val;
+ int type, prefix, name_size;
+
+ err = squashfs_read_metadata(sb, &entry, &start, &offset,
+ sizeof(entry));
+ if (err < 0)
+ goto failed;
+
+ name_size = le16_to_cpu(entry.size);
+ type = le16_to_cpu(entry.type);
+ prefix = type & SQUASHFS_XATTR_PREFIX_MASK;
+
+ if (prefix == name_index && name_size == name_len)
+ err = squashfs_read_metadata(sb, target, &start,
+ &offset, name_size);
+ else
+ err = squashfs_read_metadata(sb, NULL, &start,
+ &offset, name_size);
+ if (err < 0)
+ goto failed;
+
+ if (prefix == name_index && name_size == name_len &&
+ strncmp(target, name, name_size) == 0) {
+ /* found xattr */
+ if (type & SQUASHFS_XATTR_VALUE_OOL) {
+ __le64 xattr_val;
+ u64 xattr;
+ /* val is a reference to the real location */
+ err = squashfs_read_metadata(sb, &val, &start,
+ &offset, sizeof(val));
+ if (err < 0)
+ goto failed;
+ err = squashfs_read_metadata(sb, &xattr_val,
+ &start, &offset, sizeof(xattr_val));
+ if (err < 0)
+ goto failed;
+ xattr = le64_to_cpu(xattr_val);
+ start = SQUASHFS_XATTR_BLK(xattr) +
+ msblk->xattr_table;
+ offset = SQUASHFS_XATTR_OFFSET(xattr);
+ }
+ /* read xattr value */
+ err = squashfs_read_metadata(sb, &val, &start, &offset,
+ sizeof(val));
+ if (err < 0)
+ goto failed;
+
+ vsize = le32_to_cpu(val.vsize);
+ if (buffer) {
+ if (vsize > buffer_size) {
+ err = -ERANGE;
+ goto failed;
+ }
+ err = squashfs_read_metadata(sb, buffer, &start,
+ &offset, vsize);
+ if (err < 0)
+ goto failed;
+ }
+ break;
+ }
+
+ /* no match, skip remaining xattr entry */
+ err = squashfs_read_metadata(sb, &val, &start, &offset,
+ sizeof(val));
+ if (err < 0)
+ goto failed;
+ err = squashfs_read_metadata(sb, NULL, &start, &offset,
+ le32_to_cpu(val.vsize));
+ if (err < 0)
+ goto failed;
+ }
+ err = count ? vsize : -ENODATA;
+
+failed:
+ kfree(target);
+ return err;
+}
+
+
+static int squashfs_xattr_handler_get(const struct xattr_handler *handler,
+ struct dentry *unused,
+ struct inode *inode,
+ const char *name,
+ void *buffer, size_t size)
+{
+ return squashfs_xattr_get(inode, handler->flags, name,
+ buffer, size);
+}
+
+/*
+ * User namespace support
+ */
+static const struct xattr_handler squashfs_xattr_user_handler = {
+ .prefix = XATTR_USER_PREFIX,
+ .flags = SQUASHFS_XATTR_USER,
+ .get = squashfs_xattr_handler_get
+};
+
+/*
+ * Trusted namespace support
+ */
+static bool squashfs_trusted_xattr_handler_list(struct dentry *d)
+{
+ return capable(CAP_SYS_ADMIN);
+}
+
+static const struct xattr_handler squashfs_xattr_trusted_handler = {
+ .prefix = XATTR_TRUSTED_PREFIX,
+ .flags = SQUASHFS_XATTR_TRUSTED,
+ .list = squashfs_trusted_xattr_handler_list,
+ .get = squashfs_xattr_handler_get
+};
+
+/*
+ * Security namespace support
+ */
+static const struct xattr_handler squashfs_xattr_security_handler = {
+ .prefix = XATTR_SECURITY_PREFIX,
+ .flags = SQUASHFS_XATTR_SECURITY,
+ .get = squashfs_xattr_handler_get
+};
+
+static const struct xattr_handler *squashfs_xattr_handler(int type)
+{
+ if (type & ~(SQUASHFS_XATTR_PREFIX_MASK | SQUASHFS_XATTR_VALUE_OOL))
+ /* ignore unrecognised type */
+ return NULL;
+
+ switch (type & SQUASHFS_XATTR_PREFIX_MASK) {
+ case SQUASHFS_XATTR_USER:
+ return &squashfs_xattr_user_handler;
+ case SQUASHFS_XATTR_TRUSTED:
+ return &squashfs_xattr_trusted_handler;
+ case SQUASHFS_XATTR_SECURITY:
+ return &squashfs_xattr_security_handler;
+ default:
+ /* ignore unrecognised type */
+ return NULL;
+ }
+}
+
+const struct xattr_handler *squashfs_xattr_handlers[] = {
+ &squashfs_xattr_user_handler,
+ &squashfs_xattr_trusted_handler,
+ &squashfs_xattr_security_handler,
+ NULL
+};
+
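
Since squashfs is read-only, these handlers only implement the get side; from userspace the interface is the ordinary listxattr(2)/getxattr(2) calls, which end up in squashfs_listxattr() and squashfs_xattr_handler_get() above. A small illustration (the path is a placeholder):

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	char names[1024];
	/* /mnt/sq/file is a placeholder for a file inside a mounted squashfs image */
	ssize_t len = listxattr("/mnt/sq/file", names, sizeof(names));

	if (len < 0) {
		perror("listxattr");
		return 1;
	}

	/* names holds NUL-separated entries, e.g. "user.comment\0security.selinux\0" */
	for (ssize_t i = 0; i < len; i += strlen(names + i) + 1)
		printf("%s\n", names + i);

	return 0;
}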
diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h
new file mode 100644
index 0000000000..f1a463d8bf
--- /dev/null
+++ b/fs/squashfs/xattr.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2010
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * xattr.h
+ */
+
+#ifdef CONFIG_SQUASHFS_XATTR
+extern __le64 *squashfs_read_xattr_id_table(struct super_block *, u64,
+ u64 *, unsigned int *);
+extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
+ unsigned int *, unsigned long long *);
+#else
+static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
+ u64 start, u64 *xattr_table_start, unsigned int *xattr_ids)
+{
+ struct squashfs_xattr_id_table *id_table;
+
+ id_table = squashfs_read_table(sb, start, sizeof(*id_table));
+ if (IS_ERR(id_table))
+ return (__le64 *) id_table;
+
+ *xattr_table_start = le64_to_cpu(id_table->xattr_table_start);
+ kfree(id_table);
+
+ ERROR("Xattrs in filesystem, these will be ignored\n");
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline int squashfs_xattr_lookup(struct super_block *sb,
+ unsigned int index, int *count, unsigned int *size,
+ unsigned long long *xattr)
+{
+ return 0;
+}
+#define squashfs_listxattr NULL
+#define squashfs_xattr_handlers NULL
+#endif
diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
new file mode 100644
index 0000000000..c8469c656e
--- /dev/null
+++ b/fs/squashfs/xattr_id.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2010
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * xattr_id.c
+ */
+
+/*
+ * This file implements code to map the 32-bit xattr id stored in the inode
+ * into the on disk location of the xattr data.
+ */
+
+#include <linux/fs.h>
+#include <linux/vfs.h>
+#include <linux/slab.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs.h"
+#include "xattr.h"
+
+/*
+ * Map xattr id using the xattr id look up table
+ */
+int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
+ int *count, unsigned int *size, unsigned long long *xattr)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ int block = SQUASHFS_XATTR_BLOCK(index);
+ int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index);
+ u64 start_block;
+ struct squashfs_xattr_id id;
+ int err;
+
+ if (index >= msblk->xattr_ids)
+ return -EINVAL;
+
+ start_block = le64_to_cpu(msblk->xattr_id_table[block]);
+
+ err = squashfs_read_metadata(sb, &id, &start_block, &offset,
+ sizeof(id));
+ if (err < 0)
+ return err;
+
+ *xattr = le64_to_cpu(id.xattr);
+ *size = le32_to_cpu(id.size);
+ *count = le32_to_cpu(id.count);
+ return 0;
+}
+
+
+/*
+ * Read uncompressed xattr id lookup table indexes from disk into memory
+ */
+__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
+ u64 *xattr_table_start, unsigned int *xattr_ids)
+{
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ unsigned int len, indexes;
+ struct squashfs_xattr_id_table *id_table;
+ __le64 *table;
+ u64 start, end;
+ int n;
+
+ id_table = squashfs_read_table(sb, table_start, sizeof(*id_table));
+ if (IS_ERR(id_table))
+ return (__le64 *) id_table;
+
+ *xattr_table_start = le64_to_cpu(id_table->xattr_table_start);
+ *xattr_ids = le32_to_cpu(id_table->xattr_ids);
+ kfree(id_table);
+
+ /* Sanity check values */
+
+ /* there is always at least one xattr id */
+ if (*xattr_ids == 0)
+ return ERR_PTR(-EINVAL);
+
+ len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
+ indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids);
+
+ /*
+ * The computed size of the index table (len bytes) should exactly
+ * match the table start and end points
+ */
+ start = table_start + sizeof(*id_table);
+ end = msblk->bytes_used;
+
+ if (len != (end - start))
+ return ERR_PTR(-EINVAL);
+
+ table = squashfs_read_table(sb, start, len);
+ if (IS_ERR(table))
+ return table;
+
+ /* table[0], table[1], ... table[indexes - 1] store the locations
+ * of the compressed xattr id blocks. Each entry should be less than
+ * the next (i.e. table[0] < table[1]), and the difference between them
+ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
+ * should be less than table_start, and again the difference
+	 * should be SQUASHFS_METADATA_SIZE or less.
+ *
+ * Finally xattr_table_start should be less than table[0].
+ */
+ for (n = 0; n < (indexes - 1); n++) {
+ start = le64_to_cpu(table[n]);
+ end = le64_to_cpu(table[n + 1]);
+
+ if (start >= end || (end - start) >
+ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ start = le64_to_cpu(table[indexes - 1]);
+ if (start >= table_start || (table_start - start) >
+ (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (*xattr_table_start >= le64_to_cpu(table[0])) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return table;
+}
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
new file mode 100644
index 0000000000..6c49481a2f
--- /dev/null
+++ b/fs/squashfs/xz_wrapper.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * xz_wrapper.c
+ */
+
+
+#include <linux/mutex.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+#include <linux/xz.h>
+#include <linux/bitops.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs.h"
+#include "decompressor.h"
+#include "page_actor.h"
+
+struct squashfs_xz {
+ struct xz_dec *state;
+ struct xz_buf buf;
+};
+
+struct disk_comp_opts {
+ __le32 dictionary_size;
+ __le32 flags;
+};
+
+struct comp_opts {
+ int dict_size;
+};
+
+static void *squashfs_xz_comp_opts(struct squashfs_sb_info *msblk,
+ void *buff, int len)
+{
+ struct disk_comp_opts *comp_opts = buff;
+ struct comp_opts *opts;
+ int err = 0, n;
+
+ opts = kmalloc(sizeof(*opts), GFP_KERNEL);
+ if (opts == NULL) {
+ err = -ENOMEM;
+ goto out2;
+ }
+
+ if (comp_opts) {
+		/* check compressor options are at least the expected length */
+ if (len < sizeof(*comp_opts)) {
+ err = -EIO;
+ goto out;
+ }
+
+ opts->dict_size = le32_to_cpu(comp_opts->dictionary_size);
+
+ /* the dictionary size should be 2^n or 2^n+2^(n+1) */
+ n = ffs(opts->dict_size) - 1;
+ if (opts->dict_size != (1 << n) && opts->dict_size != (1 << n) +
+ (1 << (n + 1))) {
+ err = -EIO;
+ goto out;
+ }
+ } else
+ /* use defaults */
+ opts->dict_size = max_t(int, msblk->block_size,
+ SQUASHFS_METADATA_SIZE);
+
+ return opts;
+
+out:
+ kfree(opts);
+out2:
+ return ERR_PTR(err);
+}
+
+
+static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff)
+{
+ struct comp_opts *comp_opts = buff;
+ struct squashfs_xz *stream;
+ int err;
+
+ stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+ if (stream == NULL) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ stream->state = xz_dec_init(XZ_PREALLOC, comp_opts->dict_size);
+ if (stream->state == NULL) {
+ kfree(stream);
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ return stream;
+
+failed:
+ ERROR("Failed to initialise xz decompressor\n");
+ return ERR_PTR(err);
+}
+
+
+static void squashfs_xz_free(void *strm)
+{
+ struct squashfs_xz *stream = strm;
+
+ if (stream) {
+ xz_dec_end(stream->state);
+ kfree(stream);
+ }
+}
+
+
+static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
+ struct bio *bio, int offset, int length,
+ struct squashfs_page_actor *output)
+{
+ struct bvec_iter_all iter_all = {};
+ struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
+ int total = 0, error = 0;
+ struct squashfs_xz *stream = strm;
+
+ xz_dec_reset(stream->state);
+ stream->buf.in_pos = 0;
+ stream->buf.in_size = 0;
+ stream->buf.out_pos = 0;
+ stream->buf.out_size = PAGE_SIZE;
+ stream->buf.out = squashfs_first_page(output);
+ if (IS_ERR(stream->buf.out)) {
+ error = PTR_ERR(stream->buf.out);
+ goto finish;
+ }
+
+ for (;;) {
+ enum xz_ret xz_err;
+
+ if (stream->buf.in_pos == stream->buf.in_size) {
+ const void *data;
+ int avail;
+
+ if (!bio_next_segment(bio, &iter_all)) {
+ /* XZ_STREAM_END must be reached. */
+ error = -EIO;
+ break;
+ }
+
+ avail = min(length, ((int)bvec->bv_len) - offset);
+ data = bvec_virt(bvec);
+ length -= avail;
+ stream->buf.in = data + offset;
+ stream->buf.in_size = avail;
+ stream->buf.in_pos = 0;
+ offset = 0;
+ }
+
+ if (stream->buf.out_pos == stream->buf.out_size) {
+ stream->buf.out = squashfs_next_page(output);
+ if (IS_ERR(stream->buf.out)) {
+ error = PTR_ERR(stream->buf.out);
+ break;
+ } else if (stream->buf.out != NULL) {
+ stream->buf.out_pos = 0;
+ total += PAGE_SIZE;
+ }
+ }
+
+ xz_err = xz_dec_run(stream->state, &stream->buf);
+ if (xz_err == XZ_STREAM_END)
+ break;
+ if (xz_err != XZ_OK) {
+ error = -EIO;
+ break;
+ }
+ }
+
+finish:
+ squashfs_finish_page(output);
+
+ return error ? error : total + stream->buf.out_pos;
+}
+
+const struct squashfs_decompressor squashfs_xz_comp_ops = {
+ .init = squashfs_xz_init,
+ .comp_opts = squashfs_xz_comp_opts,
+ .free = squashfs_xz_free,
+ .decompress = squashfs_xz_uncompress,
+ .id = XZ_COMPRESSION,
+ .name = "xz",
+ .alloc_buffer = 1,
+ .supported = 1
+};
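
Editor's note: the compressor-options check in squashfs_xz_comp_opts() enforces that the LZMA2 dictionary size is either a power of two or the sum of two consecutive powers of two; with n taken from the lowest set bit, that is 2^n or 2^n + 2^(n+1). A small user-space sketch of the same test is shown below (hypothetical sizes; ffs() from strings.h stands in for the kernel's bitops, and the zero guard is added for the sketch only).

/* Editorial sketch only, not part of the patch. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

static int dict_size_valid(int size)
{
	int n;

	if (size <= 0)
		return 0;
	n = ffs(size) - 1;
	return size == (1 << n) || size == (1 << n) + (1 << (n + 1));
}

int main(void)
{
	printf("%d\n", dict_size_valid(1 << 20));		 /* 1 MiB: valid */
	printf("%d\n", dict_size_valid((1 << 20) + (1 << 19))); /* 1.5 MiB: valid */
	printf("%d\n", dict_size_valid((1 << 20) + 1));		 /* invalid */
	return 0;
}
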
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
new file mode 100644
index 0000000000..cbb7afe7bc
--- /dev/null
+++ b/fs/squashfs/zlib_wrapper.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * zlib_wrapper.c
+ */
+
+
+#include <linux/mutex.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+#include <linux/zlib.h>
+#include <linux/vmalloc.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs.h"
+#include "decompressor.h"
+#include "page_actor.h"
+
+static void *zlib_init(struct squashfs_sb_info *dummy, void *buff)
+{
+ z_stream *stream = kmalloc(sizeof(z_stream), GFP_KERNEL);
+ if (stream == NULL)
+ goto failed;
+ stream->workspace = vmalloc(zlib_inflate_workspacesize());
+ if (stream->workspace == NULL)
+ goto failed;
+
+ return stream;
+
+failed:
+ ERROR("Failed to allocate zlib workspace\n");
+ kfree(stream);
+ return ERR_PTR(-ENOMEM);
+}
+
+
+static void zlib_free(void *strm)
+{
+ z_stream *stream = strm;
+
+ if (stream)
+ vfree(stream->workspace);
+ kfree(stream);
+}
+
+
+static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
+ struct bio *bio, int offset, int length,
+ struct squashfs_page_actor *output)
+{
+ struct bvec_iter_all iter_all = {};
+ struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
+ int zlib_init = 0, error = 0;
+ z_stream *stream = strm;
+
+ stream->avail_out = PAGE_SIZE;
+ stream->next_out = squashfs_first_page(output);
+ stream->avail_in = 0;
+
+ if (IS_ERR(stream->next_out)) {
+ error = PTR_ERR(stream->next_out);
+ goto finish;
+ }
+
+ for (;;) {
+ int zlib_err;
+
+ if (stream->avail_in == 0) {
+ const void *data;
+ int avail;
+
+ if (!bio_next_segment(bio, &iter_all)) {
+ /* Z_STREAM_END must be reached. */
+ error = -EIO;
+ break;
+ }
+
+ avail = min(length, ((int)bvec->bv_len) - offset);
+ data = bvec_virt(bvec);
+ length -= avail;
+ stream->next_in = data + offset;
+ stream->avail_in = avail;
+ offset = 0;
+ }
+
+ if (stream->avail_out == 0) {
+ stream->next_out = squashfs_next_page(output);
+ if (IS_ERR(stream->next_out)) {
+ error = PTR_ERR(stream->next_out);
+ break;
+ } else if (stream->next_out != NULL)
+ stream->avail_out = PAGE_SIZE;
+ }
+
+ if (!zlib_init) {
+ zlib_err = zlib_inflateInit(stream);
+ if (zlib_err != Z_OK) {
+ error = -EIO;
+ break;
+ }
+ zlib_init = 1;
+ }
+
+ zlib_err = zlib_inflate(stream, Z_SYNC_FLUSH);
+ if (zlib_err == Z_STREAM_END)
+ break;
+ if (zlib_err != Z_OK) {
+ error = -EIO;
+ break;
+ }
+ }
+
+finish:
+ squashfs_finish_page(output);
+
+ if (!error)
+ if (zlib_inflateEnd(stream) != Z_OK)
+ error = -EIO;
+
+ return error ? error : stream->total_out;
+}
+
+const struct squashfs_decompressor squashfs_zlib_comp_ops = {
+ .init = zlib_init,
+ .free = zlib_free,
+ .decompress = zlib_uncompress,
+ .id = ZLIB_COMPRESSION,
+ .name = "zlib",
+ .alloc_buffer = 1,
+ .supported = 1
+};
+
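
Editor's note: all three wrappers share the same input-handling shape. The compressed block arrives as a chain of bio segments; the first segment is consumed from offset onwards, later segments from the start, and at most length bytes are fed to the decompressor in total. The stripped-down, user-space illustration below uses plain memory segments in place of a struct bio; the names and sizes are made up for the example.

/* Editorial sketch only, not part of the patch. */
#include <stdio.h>

struct segment {
	const unsigned char *data;
	int len;
};

static void feed_segments(const struct segment *seg, int nsegs,
			  int offset, int length)
{
	int i;

	for (i = 0; i < nsegs && length > 0; i++) {
		/* only the first segment is consumed from 'offset' onwards */
		int avail = seg[i].len - offset;

		if (avail > length)
			avail = length;
		/* real code hands seg[i].data + offset, avail bytes, to the
		 * decompressor here
		 */
		printf("feeding %d bytes from segment %d\n", avail, i);
		length -= avail;
		offset = 0;	/* later segments are consumed from the start */
	}
}

int main(void)
{
	unsigned char a[4096], b[4096];
	struct segment segs[] = { { a, sizeof(a) }, { b, sizeof(b) } };

	feed_segments(segs, 2, 100, 6000);	/* hypothetical offset/length */
	return 0;
}
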
diff --git a/fs/squashfs/zstd_wrapper.c b/fs/squashfs/zstd_wrapper.c
new file mode 100644
index 0000000000..0e407c4d8b
--- /dev/null
+++ b/fs/squashfs/zstd_wrapper.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2016-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * zstd_wrapper.c
+ */
+
+#include <linux/mutex.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+#include <linux/zstd.h>
+#include <linux/vmalloc.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs.h"
+#include "decompressor.h"
+#include "page_actor.h"
+
+struct workspace {
+ void *mem;
+ size_t mem_size;
+ size_t window_size;
+};
+
+static void *zstd_init(struct squashfs_sb_info *msblk, void *buff)
+{
+ struct workspace *wksp = kmalloc(sizeof(*wksp), GFP_KERNEL);
+
+ if (wksp == NULL)
+ goto failed;
+ wksp->window_size = max_t(size_t,
+ msblk->block_size, SQUASHFS_METADATA_SIZE);
+ wksp->mem_size = zstd_dstream_workspace_bound(wksp->window_size);
+ wksp->mem = vmalloc(wksp->mem_size);
+ if (wksp->mem == NULL)
+ goto failed;
+
+ return wksp;
+
+failed:
+ ERROR("Failed to allocate zstd workspace\n");
+ kfree(wksp);
+ return ERR_PTR(-ENOMEM);
+}
+
+
+static void zstd_free(void *strm)
+{
+ struct workspace *wksp = strm;
+
+ if (wksp)
+ vfree(wksp->mem);
+ kfree(wksp);
+}
+
+
+static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
+ struct bio *bio, int offset, int length,
+ struct squashfs_page_actor *output)
+{
+ struct workspace *wksp = strm;
+ zstd_dstream *stream;
+ size_t total_out = 0;
+ int error = 0;
+ zstd_in_buffer in_buf = { NULL, 0, 0 };
+ zstd_out_buffer out_buf = { NULL, 0, 0 };
+ struct bvec_iter_all iter_all = {};
+ struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
+
+ stream = zstd_init_dstream(wksp->window_size, wksp->mem, wksp->mem_size);
+
+ if (!stream) {
+ ERROR("Failed to initialize zstd decompressor\n");
+ return -EIO;
+ }
+
+ out_buf.size = PAGE_SIZE;
+ out_buf.dst = squashfs_first_page(output);
+ if (IS_ERR(out_buf.dst)) {
+ error = PTR_ERR(out_buf.dst);
+ goto finish;
+ }
+
+ for (;;) {
+ size_t zstd_err;
+
+ if (in_buf.pos == in_buf.size) {
+ const void *data;
+ int avail;
+
+ if (!bio_next_segment(bio, &iter_all)) {
+ error = -EIO;
+ break;
+ }
+
+ avail = min(length, ((int)bvec->bv_len) - offset);
+ data = bvec_virt(bvec);
+ length -= avail;
+ in_buf.src = data + offset;
+ in_buf.size = avail;
+ in_buf.pos = 0;
+ offset = 0;
+ }
+
+ if (out_buf.pos == out_buf.size) {
+ out_buf.dst = squashfs_next_page(output);
+ if (IS_ERR(out_buf.dst)) {
+ error = PTR_ERR(out_buf.dst);
+ break;
+ } else if (out_buf.dst == NULL) {
+ /* Shouldn't run out of pages
+ * before stream is done.
+ */
+ error = -EIO;
+ break;
+ }
+ out_buf.pos = 0;
+ out_buf.size = PAGE_SIZE;
+ }
+
+ total_out -= out_buf.pos;
+ zstd_err = zstd_decompress_stream(stream, &out_buf, &in_buf);
+ total_out += out_buf.pos; /* add the additional data produced */
+ if (zstd_err == 0)
+ break;
+
+ if (zstd_is_error(zstd_err)) {
+ ERROR("zstd decompression error: %d\n",
+ (int)zstd_get_error_code(zstd_err));
+ error = -EIO;
+ break;
+ }
+ }
+
+finish:
+
+ squashfs_finish_page(output);
+
+ return error ? error : total_out;
+}
+
+const struct squashfs_decompressor squashfs_zstd_comp_ops = {
+ .init = zstd_init,
+ .free = zstd_free,
+ .decompress = zstd_uncompress,
+ .id = ZSTD_COMPRESSION,
+ .name = "zstd",
+ .alloc_buffer = 1,
+ .supported = 1
+};
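
Editor's note: zstd_uncompress() drives the kernel's zstd_decompress_stream() one page at a time, counting only the bytes each call adds to out_buf.pos. The sketch below shows the same loop shape written against the ordinary user-space libzstd streaming API rather than the in-kernel wrappers; buffer handling and error reporting are illustrative, not a drop-in replacement. Compile with -lzstd and call it with a complete zstd frame in src.

/* Editorial sketch only, not part of the patch. */
#include <zstd.h>

#define PAGE_SIZE 4096

long decompress_stream(const void *src, size_t src_len,
		       void *dst, size_t dst_len)
{
	ZSTD_DStream *stream = ZSTD_createDStream();
	ZSTD_inBuffer in = { src, src_len, 0 };
	long total = 0;

	if (!stream)
		return -1;
	ZSTD_initDStream(stream);

	while ((size_t)total < dst_len) {
		/* one page-sized output chunk per iteration, like the wrapper */
		size_t chunk = dst_len - total < PAGE_SIZE ?
			       dst_len - total : PAGE_SIZE;
		ZSTD_outBuffer out = { (char *)dst + total, chunk, 0 };
		size_t ret = ZSTD_decompressStream(stream, &out, &in);

		total += out.pos;	/* bytes produced by this call only */
		if (ret == 0)		/* whole frame decoded and flushed */
			break;
		if (ZSTD_isError(ret) ||
		    (in.pos == in.size && out.pos < out.size)) {
			total = -1;	/* decode error or truncated input */
			break;
		}
	}

	ZSTD_freeDStream(stream);
	return total;
}
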