author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-15 17:23:08 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-15 17:23:08 +0000
commit    dd76e45c20acc3f352ffe8257208cc617ba33eba (patch)
tree      c50c016a4182a27fd1ece9ec7ba4abf405f19e5f
parent    Initial commit. (diff)
Adding upstream version 1:4.6.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--  .gitattributes  8
-rw-r--r--  ACKNOWLEDGEMENTS  162
-rw-r--r--  ACTIONS-README  774
-rw-r--r--  CHANGES  914
-rw-r--r--  COPYING  339
-rw-r--r--  INSTALL  47
-rw-r--r--  README-4.6.1  340
-rw-r--r--  TECHNICAL-INFO  289
-rw-r--r--  USAGE-4.6  94
-rw-r--r--  USAGE-MKSQUASHFS-4.6  1069
-rw-r--r--  USAGE-SQFSCAT-4.6  74
-rw-r--r--  USAGE-SQFSTAR-4.6  812
-rw-r--r--  USAGE-UNSQUASHFS-4.6  498
-rw-r--r--  examples/pseudo-file.example  74
-rwxr-xr-x  generate-manpages/functions.sh  18
-rwxr-xr-x  generate-manpages/install-manpages.sh  82
-rwxr-xr-x  generate-manpages/mksquashfs-manpage.sh  253
-rw-r--r--  generate-manpages/mksquashfs.h2m  150
-rwxr-xr-x  generate-manpages/sqfscat-manpage.sh  189
-rw-r--r--  generate-manpages/sqfscat.h2m  29
-rwxr-xr-x  generate-manpages/sqfstar-manpage.sh  241
-rw-r--r--  generate-manpages/sqfstar.h2m  84
-rwxr-xr-x  generate-manpages/unsquashfs-manpage.sh  189
-rw-r--r--  generate-manpages/unsquashfs.h2m  89
-rw-r--r--  manpages/.gitattributes  1
-rw-r--r--  manpages/README  13
-rw-r--r--  manpages/mksquashfs.1  554
-rw-r--r--  manpages/sqfscat.1  103
-rw-r--r--  manpages/sqfstar.1  400
-rw-r--r--  manpages/unsquashfs.1  279
-rw-r--r--  squashfs-tools/.gitignore  5
-rwxr-xr-x  squashfs-tools/Makefile  470
-rw-r--r--  squashfs-tools/action.c  3574
-rw-r--r--  squashfs-tools/action.h  351
-rw-r--r--  squashfs-tools/caches-queues-lists.c  647
-rw-r--r--  squashfs-tools/caches-queues-lists.h  200
-rw-r--r--  squashfs-tools/compressor.c  145
-rw-r--r--  squashfs-tools/compressor.h  132
-rw-r--r--  squashfs-tools/date.c  129
-rw-r--r--  squashfs-tools/date.h  28
-rw-r--r--  squashfs-tools/endian_compat.h  34
-rw-r--r--  squashfs-tools/error.h  43
-rw-r--r--  squashfs-tools/fnmatch_compat.h  32
-rw-r--r--  squashfs-tools/gzip_wrapper.c  513
-rw-r--r--  squashfs-tools/gzip_wrapper.h  69
-rw-r--r--  squashfs-tools/info.c  174
-rw-r--r--  squashfs-tools/info.h  30
-rw-r--r--  squashfs-tools/lz4_wrapper.c  307
-rw-r--r--  squashfs-tools/lz4_wrapper.h  55
-rw-r--r--  squashfs-tools/lzma_wrapper.c  127
-rw-r--r--  squashfs-tools/lzma_xz_wrapper.c  169
-rw-r--r--  squashfs-tools/lzo_wrapper.c  450
-rw-r--r--  squashfs-tools/lzo_wrapper.h  72
-rw-r--r--  squashfs-tools/merge_sort.h  116
-rw-r--r--  squashfs-tools/mksquashfs.c  8902
-rw-r--r--  squashfs-tools/mksquashfs.h  285
-rw-r--r--  squashfs-tools/mksquashfs_error.h  74
-rw-r--r--  squashfs-tools/process_fragments.c  373
-rw-r--r--  squashfs-tools/process_fragments.h  28
-rw-r--r--  squashfs-tools/progressbar.c  300
-rw-r--r--  squashfs-tools/progressbar.h  35
-rw-r--r--  squashfs-tools/pseudo.c  1376
-rw-r--r--  squashfs-tools/pseudo.h  107
-rw-r--r--  squashfs-tools/pseudo_xattr.c  176
-rw-r--r--  squashfs-tools/read_fs.c  1090
-rw-r--r--  squashfs-tools/read_fs.h  36
-rw-r--r--  squashfs-tools/read_xattrs.c  454
-rw-r--r--  squashfs-tools/reader.c  715
-rw-r--r--  squashfs-tools/reader.h  39
-rw-r--r--  squashfs-tools/restore.c  168
-rw-r--r--  squashfs-tools/restore.h  28
-rw-r--r--  squashfs-tools/signals.h  54
-rw-r--r--  squashfs-tools/sort.c  373
-rw-r--r--  squashfs-tools/sort.h  37
-rw-r--r--  squashfs-tools/squashfs_compat.h  833
-rw-r--r--  squashfs-tools/squashfs_fs.h  502
-rw-r--r--  squashfs-tools/squashfs_swap.h  425
-rw-r--r--  squashfs-tools/swap.c  117
-rw-r--r--  squashfs-tools/tar.c  1682
-rw-r--r--  squashfs-tools/tar.h  153
-rw-r--r--  squashfs-tools/tar_xattr.c  122
-rw-r--r--  squashfs-tools/unsquash-1.c  582
-rw-r--r--  squashfs-tools/unsquash-12.c  30
-rw-r--r--  squashfs-tools/unsquash-123.c  79
-rw-r--r--  squashfs-tools/unsquash-1234.c  95
-rw-r--r--  squashfs-tools/unsquash-2.c  715
-rw-r--r--  squashfs-tools/unsquash-3.c  824
-rw-r--r--  squashfs-tools/unsquash-34.c  183
-rw-r--r--  squashfs-tools/unsquash-4.c  832
-rw-r--r--  squashfs-tools/unsquashfs.c  4655
-rw-r--r--  squashfs-tools/unsquashfs.h  345
-rw-r--r--  squashfs-tools/unsquashfs_error.h  64
-rw-r--r--  squashfs-tools/unsquashfs_info.c  125
-rw-r--r--  squashfs-tools/unsquashfs_info.h  30
-rw-r--r--  squashfs-tools/unsquashfs_xattr.c  302
-rw-r--r--  squashfs-tools/version.mk  2
-rw-r--r--  squashfs-tools/xattr.c  1322
-rw-r--r--  squashfs-tools/xattr.h  241
-rw-r--r--  squashfs-tools/xz_wrapper.c  551
-rw-r--r--  squashfs-tools/xz_wrapper.h  65
-rw-r--r--  squashfs-tools/zstd_wrapper.c  265
-rw-r--r--  squashfs-tools/zstd_wrapper.h  42
102 files changed, 45873 insertions, 0 deletions
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..bf8e7a0
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,8 @@
+version.mk export-subst
+kernel export-ignore
+README export-ignore
+README-4.5 export-ignore
+README-4.5.1 export-ignore
+README-4.6 export-ignore
+USAGE export-ignore
+RELEASE-READMEs export-ignore
diff --git a/ACKNOWLEDGEMENTS b/ACKNOWLEDGEMENTS
new file mode 100644
index 0000000..2097bb7
--- /dev/null
+++ b/ACKNOWLEDGEMENTS
@@ -0,0 +1,162 @@
+ ACKNOWLEDGEMENTS
+
+Thanks to everyone who has downloaded Squashfs. I appreciate people
+using it, and any feedback you have.
+
+The following have provided useful feedback, which has guided
+some of the extra features in squashfs. This is a randomly ordered
+(roughly in chronological order) list, which is updated when
+I remember...
+
+Acknowledgements for Squashfs 4.3
+---------------------------------
+
+Thanks to Bruno Wolff III and Andy Lutomirski for useful feedback
+during the long development process of Squashfs 4.3.
+
+Acknowledgements for Squashfs 4.2
+---------------------------------
+
+Thanks to Lasse Collin (http://tukaani.org/xz/) for mainlining XZ
+decompression support.
+
+Acknowledgements for Squashfs 4.1
+---------------------------------
+
+Thanks to Chan Jeong <chan.jeong@lge.com> and LG for the patches to support LZO
+compression.
+
+Acknowledgements for Squashfs 4.0
+---------------------------------
+
+Thanks to Tim Bird and CELF (Consumer Electronics Linux Forum) for helping
+fund mainstreaming of Squashfs into the 2.6.29 kernel and the
+changes to the Squashfs tools to support the new 4.0 file system layout.
+
+Acknowledgements for Squashfs-3.3
+------------------------------------
+
+Peter Korsgaard and others sent patches updating Squashfs to changes in the
+VFS interface for 2.6.22/2.6.23/2.6.24-rc1. Peter also sent some small patches
+for the Squashfs kernel code.
+
+Vito Di Leo sent a patch extending Mksquashfs to support regex filters.
+While his patch worked, it unfortunately made it easy to make Mksquashfs
+perform unpredictably with poorly chosen regex expressions. It, however,
+encouraged me to add support for wildcard pattern matching and regex
+filters in a different way.
+
+Acknowledgements for Squashfs-3.2-r2
+------------------------------------
+
+Junjiro Okajima discovered a couple of SMP issues, thanks.
+
+Junjiro Okajima and Tomas Matejicek have produced some good LZMA patches
+for Squashfs.
+
+Acknowledgements for Squashfs-3.2
+---------------------------------
+
+Peter Korsgaard sent a patch updating Squashfs to changes in the VFS interface
+in Linux 2.6.20.
+
+Acknowledgements for Squashfs-3.1
+---------------------------------
+
+Kenneth Duda and Ed Swierk of Arastra Inc. identified numerous bugs with
+Squashfs, and provided patches which were the basis for some of the
+fixes. In particular they identified the fragment rounding bug, the
+NFS bug, the initrd bug, and helped identify the 4K stack overflow bug.
+
+Scott James Remnant (Ubuntu) also identified the fragment rounding bug,
+and he also provided a patch.
+
+Ming Zhang identified the Lseek bug in Mksquashfs. His tests on the
+performance of Mksquashfs on SMP systems encouraged the rewrite of
+Mksquashfs.
+
+Peter Korsgaard, Daniel Olivera and Zilvinas Valinskas noticed
+Squashfs 3.0 didn't compile on Linux-2.6.18-rc[1-4] due to changes
+in the Linux VFS interfaces, and provided patches.
+
+Tomas Matejicek (SLAX) suggested the -force option on Unsquashfs, and noticed
+Unsquashfs didn't return the correct exit status.
+
+Yann Le Doare reported a kernel oops and provided a Qemu image that led
+to the identification of the bug triggered by simultaneously accessing
+multiply mounted Squashfs filesystems.
+
+
+Older acknowledgements
+----------------------
+
+Mark Robson - pointed out early on that initrds didn't work
+
+Adam Warner - pointed out that greater than 2GB filesystems didn't work.
+
+John Sutton - raised the problem that when archiving the entire filesystem
+(/) there was no way to prevent /proc from being archived. This prompted
+exclude files.
+
+Martin Mueller (LinuxTV) - noticed that the filesystem length in the
+superblock doesn't match the output filesystem length. This is due to
+padding to a 4K boundary. This prompted the addition of the -nopad option.
+He also reported a problem where 32K block filesystems hung when used as
+initrds.
+
+Arkadiusz Patyk (Polish Linux Distribution - PLD) reported a problem where 32K
+block filesystems hung when used as a root filesystem mounted as a loopback
+device.
+
+David Fox (Lindows) noticed that the exit codes returned by Mksquashfs were
+wrong. He also noticed that a lot of time was spent in the duplicate scan
+routine.
+
+Cameron Rich complained that Squashfs did not support FIFOs or sockets.
+
+Steve Chadsey and Thomas Weissmuller noticed that files larger than the
+available memory could not be compressed by Mksquashfs.
+
+"Ptwahyu" and "Hoan" (I have no full names and I don't like giving people's
+email addresses), noticed that Mksquashfs 1.3 SEGV'd occasionally. Even though
+I had already noticed this bug, it is useful to be informed by other people.
+
+Don Elwell, Murray Jensen and Cameron Rich, have all sent in patches. Thanks,
+I have not had time to do anything about them yet...
+
+Drew Scott Daniels has been a good advocate for Squashfs.
+
+Erik Andersen has made some nice suggestions, unfortunately, I have
+not had time to implement anything.
+
+Artemiy I. Pavlov has written a useful LDP mini-howto for Squashfs
+(http://linuxdoc.artemio.net/squashfs).
+
+Yves Combe reported the Apple G5 bug, when using Squashfs for
+his PPC Knoppix-mib livecd project.
+
+Jaco Greeff (mklivecd project, and maintainer of the Mandrake
+squashfs-tools package) suggested the new mksquashfs -ef option, and the
+standalone build for mksquashfs.
+
+Mike Schaudies made a donation.
+
+Arkadiusz Patyk from the Polish Linux Distribution reported that Squashfs
+didn't work on amd64 machines. He gave me an account on a PLD amd64 machine
+which allowed me to track down these bugs.
+
+Miles Roper, Peter Kjellerstedt and Willy Tarreau reported that release 2.1 did
+not compile with gcc < 3.x.
+
+Marcel J.E. Mol reported running out of kernel memory when using Squashfs
+on small-memory embedded systems. This prompted the addition of the embedded
+system kernel configuration options.
+
+Era Scarecrow noticed that Mksquashfs had not been updated to reflect that
+smaller than 4K blocks are no longer supported.
+
+Kenichi Shima reported the Kconfig file had not been updated to 2.2.
+
+Aaron Ten Clay made a donation!
+
+Tomas Matejicek (SLAX) made a donation!
diff --git a/ACTIONS-README b/ACTIONS-README
new file mode 100644
index 0000000..ede4f76
--- /dev/null
+++ b/ACTIONS-README
@@ -0,0 +1,774 @@
+ MKSQUASHFS ACTIONS
+ ==================
+
+The new Mksquashfs Actions code allows an "action" to be executed
+on a file if one or more "tests" succeed. If you're familiar
+with the "find" command, then an action is similar to "-print",
+and a test is similar to say "-name" or "-type".
+
+Actions add greater flexibility when building images from sources.
+They can be used to optimise compression and I/O performance, they
+allow more control over which files are excluded from the source, and
+they allow uid/gid and mode to be changed on a per-file basis.
+
+1. Specification
+================
+
+Actions can be specified on the command line with the -action option.
+They can also be put into a file, and added with the -action-file
+option. If put into a file, there is one action per line, but an action
+can be extended over multiple lines with a continuation character (\).
+
+If you want to get a log of what actions were performed, and the values
+returned by the tests for each file, you can use the -log-action option
+for the command line and -log-action-file for action files.
+
+Similarly there are -true-action (-true-action-file) and -false-action
+(-false-action-file) options, which only log the files for which the tests
+evaluated to TRUE, and to FALSE, respectively.
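+
+For example, to apply the actions in an action file and also log, for each
+file, the values returned by the tests, the actions can be given with
+-log-action-file instead of -action-file (the file name is illustrative):
+
+% mksquashfs source image -log-action-file my.actions
+
+where "my.actions" contains one action per line, e.g.
+
+uncompressed@name(*.zip)
+exclude@name(*.tmp)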
+
+2. Syntax
+=========
+
+An action consists of two parts, separated by an "@". The action to
+be executed is placed before the @, and one or more tests are
+placed after the @. If the action or a test has an argument, it is
+given in brackets. Brackets are optional if no argument is needed,
+e.g.
+
+compressed()@name("filename")
+
+compressed@name("filename")
+
+do exactly the same thing.
+
+Arguments can be either numeric or string, depending on the
+action and test.
+
+String arguments can be enclosed in double-quotes ("), to prevent the
+parser from treating characters within them specially. Within double-quotes
+only '\' is treated specially, and only at the end of a line. Special
+characters can also be backslashed (\) to prevent interpretation by the
+parser, e.g. the following are equivalent:
+
+compressed@name(file\ name\ with\ \&&\ and\ spaces)
+
+compressed@name("file name with && and spaces")
+
+Numeric arguments are of the form [range]number[size], where
+[range] is either
+
+ '<' or '-', match on less than number
+ '>' or '+', match on greater than number
+ "" (nothing), match on exactly number
+
+[size] is either:
+ "" (nothing), number
+ 'k' or 'K', number * 2^10
+ 'm' or 'M', number * 2^20
+ 'g' or 'G', number * 2^30
+
+e.g. the following are equivalent:
+
+compressed@filesize(-81920)
+compressed@filesize(<80K)
+
+Both will match on files less than 80K in size.
+
+Characters which are treated specially by the parser are * ( ) && ||
+! , and @, plus whitespace (spaces and tabs).
+
+Note: if the action is typed on the command line, then many special
+characters will be evaluated by the shell, and you should always
+check what is actually being passed to Mksquashfs. If in doubt use
+-action-file, where the additional complexities of shell evaluation are
+avoided.
+
+For example this action line will work in an action file
+
+compressed()@name("file name")
+
+But, if typed on the command line, it will need to be:
+
+% mksquashfs source image -action "compressed()@name(\"file name\")"
+
+
+3. Logical operators
+====================
+
+Tests can be combined with the logical operators && (and), || (or) and
+can be negated with the unary ! (not). Expressions thus formed can also
+be bracketed with "(" and ")", to create nested expressions.
+
+Operators do not have precedence and are evaluated strictly left to
+right. To enforce precedence use brackets, e.g.
+
+test1 && test2 || test3
+
+will be evaluated as
+
+(test1 && test2) || test3
+
+&& and || are short-circuit operators, where the rhs (right hand side)
+is only evaluated if the lhs (left hand side) has been insufficient
+to determine the value. For example in the above, test3 will only be
+evaluated if (test1 && test2) evaluates to FALSE.
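+
+For example, the following action (the patterns are illustrative) excludes
+object files and static libraries, but only if they are regular files:
+
+exclude@type(f) && (name(*.o) || name(*.a))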
+
+4. Test operators
+=================
+
+The following test operators are supported:
+
+4.1 name(pattern)
+-----------------
+
+Returns TRUE if the filename (basename without leading directory components)
+matches pattern. Pattern can have wildcards.
+
+4.2 pathname(pattern)
+---------------------
+
+Returns TRUE if the full pathname of the file matches pattern.
+Pattern can have wildcards.
+
+4.3 subpathname(pattern)
+------------------------
+
+Returns TRUE if the <n> directory components of pattern match the first <n>
+directory components of the pathname.
+
+For example, if pattern has one component:
+
+subpathname(dir1) will match "dir1/somefile", "dir1/dir2/somefile" etc.
+
+If pattern has two components:
+
+subpathname(dir1/dir2) will match "dir1/dir2/somefile" etc.
+
+Pattern can have wildcards.
+
+4.4 filesize(value)
+-------------------
+
+Return TRUE if the size of the file is less than, equal to, or larger than
+<value>, where <value> can be [<-]number, number, [>+]number. Returns FALSE
+on anything not a file.
+
+4.5 dirsize(value)
+------------------
+
+Return TRUE if the size of the directory is less than, equal to, or larger than
+<value>, where <value> can be [<-]number, number, [>+]number. Returns FALSE
+on anything not a directory.
+
+4.6 size(value)
+---------------
+
+Return TRUE if the size of the file is less than, equal to, or larger than
+<value>, where <value> can be [<-]number, number, [>+]number. Works on any
+file type.
+
+4.7 inode(value)
+----------------
+
+Return TRUE if the inode number is less than, equal to, or larger than
+<value>, where <value> can be [<-]number, number, [>+]number.
+
+4.8 nlink(value)
+----------------
+
+Return TRUE if the nlink count is less than, equal to, or larger than
+<value>, where <value> can be [<-]number, number, [>+]number.
+
+4.9 fileblocks(value)
+---------------------
+
+Return TRUE if the size of the file in blocks (512 bytes) is less than, equal
+to, or larger than <value>, where <value> can be [<-]number, number, [>+]number.
+Returns FALSE on anything not a file.
+
+4.10 dirblocks(value)
+---------------------
+
+Return TRUE if the size of the directory in blocks (512 bytes) is less than,
+equal to, or larger than <value>, where <value> can be [<-]number, number,
+[>+]number. Returns FALSE on anything not a directory.
+
+4.11 blocks(value)
+------------------
+
+Return TRUE if the size of the file in blocks (512 bytes) is less than, equal
+to, or larger than <value>, where <value> can be [<-]number, number, [>+]number.
+Works on any file type.
+
+4.12 uid(value)
+---------------
+
+Return TRUE if the uid value is less than, equal to, or larger than
+<value>, where <value> can be [<-]number, number, [>+]number.
+
+4.13 gid(value)
+---------------
+
+Return TRUE if the gid value is less than, equal to, or larger than
+<value>, where <value> can be [<-]number, number, [>+]number.
+
+4.14 user(string)
+-----------------
+
+Return TRUE if the file owner matches <string>.
+
+4.15 group(string)
+------------------
+
+Return TRUE if the file group matches <string>.
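+
+For example, the following action (illustrative) excludes any file that is
+owned neither by user root nor by group root:
+
+exclude@!(user(root) || group(root))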
+
+4.16 depth(value)
+-----------------
+
+Return TRUE if file is at depth less than, equal to, or larger than <value>,
+where <value> can be [<-]number, number, [>+]number. Top level directory is
+depth 1.
+
+4.17 dircount(value)
+--------------------
+
+Return TRUE if the number of files in the directory is less than, equal to, or
+larger than <value>, where <value> can be [<-]number, number, [>+]number.
+Returns FALSE on anything not a directory.
+
+4.18 filesize_range(minimum, maximum)
+-------------------------------------
+
+Return TRUE if the size of the file is within the range [<minimum>, <maximum>]
+inclusive. Returns FALSE on anything not a file.
+
+4.19 dirsize_range(minimum, maximum)
+------------------------------------
+
+Return TRUE if the size of the directory is within the range
+[<minimum>, <maximum>] inclusive. Returns FALSE on anything not a directory.
+
+4.20 size_range(minimum, maximum)
+---------------------------------
+
+Return TRUE if the size of the file is within the range [<minimum>, <maximum>]
+inclusive. Works on any file type.
+
+4.21 inode_range(minimum, maximum)
+----------------------------------
+
+Return TRUE if the inode number is within the range [<minimum>, <maximum>]
+inclusive.
+
+4.22 fileblocks_range(minimum, maximum)
+---------------------------------------
+
+Return TRUE if the size of the file in blocks (512 bytes) is within the range
+[<minimum>, <maximum>] inclusive. Returns FALSE on anything not a file.
+
+4.23 dirblocks_range(minimum, maximum)
+--------------------------------------
+
+Return TRUE if the size of the directory in blocks (512 bytes) is within the
+range [<minimum>, <maximum>] inclusive. Returns FALSE on anything not a
+directory.
+
+4.24 blocks_range(minimum, maximum)
+-----------------------------------
+
+Return TRUE if the size of the file in blocks (512 bytes) is within the range
+[<minimum>, <maximum>] inclusive. Works on any file type.
+
+4.25 uid_range(minimum, maximum)
+--------------------------------
+
+Return TRUE if the file uid is within the range [<minimum>, <maximum>]
+inclusive.
+
+4.26 gid_range(minimum, maximum)
+--------------------------------
+
+Return TRUE if the file gid is within the range [<minimum>, <maximum>]
+inclusive.
+
+4.27 depth_range(minimum, maximum)
+----------------------------------
+
+Return TRUE if file is at depth within the range [<minimum>, <maximum>].
+Top level directory is depth 1.
+
+4.28 dircount_range(minimum, maximum)
+-------------------------------------
+
+Returns TRUE if the number of files in the directory is within the range
+[<minimum>, <maximum>] inclusive. Returns FALSE on anything not a directory.
+
+4.29 type(c)
+------------
+
+Returns TRUE if the file matches type <c>. <c> can be
+ f - regular file
+ d - directory
+ l - symbolic link
+ c - character device
+ b - block device
+ p - Named Pipe / FIFO
+ s - socket
+
+
+4.30 perm(mode)
+---------------
+
+Return TRUE if file permissions match <mode>. <Mode> is the same as
+find's -perm option:
+
+ perm(mode) - TRUE if file's permission bits are exactly <mode>.
+ <mode> can be octal or symbolic.
+
+ perm(-mode) - TRUE if all <mode> permission bits are set for this file.
+ <mode> can be octal or symbolic.
+
+ perm(/mode) - TRUE if any <mode> permission bits are set for this file.
+ <mode> can be octal or symbolic.
+
+ The symbolic mode is of the format [ugoa]*[[+-=]PERMS]+
+ PERMS = [rwxXst]+ or [ugo]
+ and can be repeated separated with commas.
+
+Examples:
+
+perm(0644) match on a file with permissions exactly rw-r--r--.
+perm(u=rw,go=r) as above, but expressed symbolically.
+
+perm(/222) match on a file which is writable for any of user, group, other,
+perm(/u=w,g=w,o=w) as above but expressed symbolically,
+perm(/ugo=w) as above but specified more concisely.
+
+4.31 file(string)
+-----------------
+
+Execute "file command" on file, and return TRUE if the output
+matches the substring <string>, for example
+
+file(ASCII text) will return TRUE if the file is ASCII text.
+
+Note, this is an expensive test, and should only be run if the file
+has matched a number of other short-circuit tests.
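+
+For example, because && short-circuits, the following action (the pattern and
+"file" output string are illustrative) only runs the expensive file() test on
+files which have already matched the cheap name() test:
+
+uncompressed@name(*.dat) && file("compressed data")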
+
+4.32 exists()
+-------------
+
+Test if the file pointed to by the symbolic link exists within the
+output filesystem, that is, whether the symbolic link has a relative
+path and the relative path can be resolved to an entry within the
+output filesystem.
+
+If the file isn't a symbolic link then the test always returns TRUE.
+
+4.33 absolute()
+---------------
+
+Test if the symbolic link is absolute, which by definition means
+it points outside of the output filesystem (unless it is to be mounted
+as root). If the file isn't a symbolic link then the test always returns
+FALSE.
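+
+For example, the following action (illustrative) deletes symbolic links which
+cannot be resolved within the output filesystem. Note that prune() is used
+rather than exclude(), because exists() does not work in stage 1 (see
+sections 5.1.1 and 5.3.1):
+
+prune@type(l) && !exists()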
+
+4.34 readlink(expression)
+-------------------------
+
+Follow or dereference the symbolic link, and evaluate <expression> in
+the context of the file pointed to by the symbolic link. All inode
+attributes, pathname, name and depth refer to the dereferenced
+file.
+
+If the symbolic link cannot be dereferenced because it points to something
+not in the output filesystem (see exists() function above), then FALSE is
+returned. If the file is not a symbolic link, the result is the same as
+<expression>, i.e. readlink(<expression>) == <expression>.
+
+Examples:
+
+readlink("name(*.[ch])") returns TRUE if the file referenced by the symbolic
+link matches *.[ch].
+
+Obviously, expressions created with && || etc. can be specified.
+
+readlink("depth(1) && filesize(<20K)") returns TRUE if the file referenced
+by the symbolic link is a regular file less than 20K in size and in the
+top level directory.
+
+Note: in the above tests the embedded expression to be evaluated is enclosed
+in double-quotes ("), this is to prevent the special characters being
+evaluated by the parser when parsed at the top-level. Readlink causes
+re-evaluation of the embedded string.
+
+4.35 eval(path, expression)
+---------------------------
+
+Follow <path> (arg1), and evaluate the <expression> (arg2) in the
+context of the file discovered by following <path>. All inode attributes,
+pathname, name and depth refer to the file discovered by following
+<path>.
+
+This test operation allows you to add additional context to the evaluation
+of the file being scanned, such as "if the current file is XXX, test if the
+parent is YYY, and then do ...". Often you need or want to test
+a combination of file statuses.
+
+The <path> can be absolute (in which case it is from the root directory of the
+output filesystem), or it can be relative to the current file. Obviously
+relative paths are more useful.
+
+If the file referenced by <path> does not exist in the output filesystem,
+then FALSE is returned.
+
+Examples of usage:
+
+1. If a directory matches pattern, check that it contains a ".git" directory.
+ This allows you to exclude git repositories, with a double check that it is
+ a git repository by checking for the .git subdirectory.
+
+ prune@name(*linux*) && type(d) && eval(.git, "type(d)")
+
+ This action will match on any directory named *linux*, and exclude it if
+ it contains a .git subdirectory.
+
+
+2. If a file matches a pattern, check that the parent directory matches
+ another pattern. This allows you to delete files if and only if they
+ are in a particular directory.
+
+ prune@name(*.[ch]) && eval(.., "name(*linux*)")
+
+ This action will delete *.[ch] files, but, only if they are in a directory
+ matching *linux*.
+
+4.36 false
+----------
+
+Always returns FALSE.
+
+4.37 true
+---------
+
+Always returns TRUE.
+
+5. Actions
+==========
+
+An action is something which is done (or applied) to a file if the expression
+(made up of the above test operators) returns TRUE.
+
+Different actions are applied in separate phases or stages, rather than being
+applied all at once. This is to ensure that you know what the overall
+state of the filesystem is when an action is applied. Or to put it another
+way, if you have an action that depends on another action having already been
+applied (to the entire filesystem), the staging guarantees that is how
+they will be applied.
+
+5.1 Actions applied at source filesystem reading (stage 1)
+----------------------------------------------------------
+
+5.1.1 exclude()
+---------------
+
+This action excludes all files and directories where the expression
+returns TRUE.
+
+Obviously this action allows much greater control over which files are
+excluded than the current name/pathname matching.
+
+Examples:
+
+1. Exclude any files/directories belonging to user phillip
+
+exclude@user(phillip)
+
+2. Exclude any regular files larger than 1M
+
+exclude@filesize(>1M)
+
+3. Only archive files/directories to a depth of 3
+
+exclude@depth(>3)
+
+4. As above but also exclude directories at the depth of 3
+ (which will be empty due to the above exclusion)
+
+exclude@depth(>3) || (depth(3) && type(d))
+
+Which obviously reduces to
+
+exclude@depth(3) && type(d)
+
+Note: the following tests do not work in stage 1, and so they can't be
+used in the exclude() action (see prune() action for explanation and
+alternative).
+
+ dircount()
+ dircount_range()
+ exists()
+ absolute()
+ readlink()
+ eval()
+
+5.2 Actions applied at directory scanning (stage 2)
+---------------------------------------------------
+
+5.2.1 fragment(name)
+--------------------
+
+Place all files matching the expression into a specialised fragment
+named <name>. This can increase compression and/or improve
+I/O by placing similar fragments together.
+
+Examples:
+
+1. fragment(cfiles)@name(*.[ch])
+
+Place all C files into special fragments reserved for them.
+
+2. fragment(phillip)@user(phillip)
+
+Place all files owned by user Phillip into special fragments.
+
+5.2.2 fragments()
+-----------------
+
+Tell Mksquashfs to use fragment packing for the files matching the
+expression.
+
+For obvious reasons this should be used in conjunction with the global
+Mksquashfs option -no-fragments. By default all files are packed into
+fragments if they're less than the block size.
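+
+For example, to disable fragment packing globally, but still pack small text
+files (the pattern is illustrative):
+
+% mksquashfs source image -no-fragments -action "fragments@name(*.txt)"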
+
+5.2.3 no-fragments()
+--------------------
+
+Tell Mksquashfs to not pack the files matching the expression into
+fragments.
+
+This can be used where you want to optimise I/O latency by not packing
+certain files into fragments.
+
+5.2.4 tailend()
+---------------
+
+Tell Mksquashfs to use tail-end packing for the files matching the
+expression. Normally Mksquashfs does not pack tail-ends into fragments,
+as it may affect I/O performance because it may produce more disk head
+seeking.
+
+But tail-end packing can increase compression. Additionally with modern
+solid state media, seeking is not such a major issue anymore.
+
+5.2.5. no-tailend()
+-------------------
+
+Tell Mksquashfs not to use tail-end packing for the files matching the
+expression.
+
+For obvious reasons this should be used in conjunction with the global
+Mksquashfs option -always-use-fragments. By default tail-ends are not
+packed into fragments.
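+
+For example, to enable tail-end packing globally, but disable it for large
+files (the 10 Mbyte threshold is illustrative):
+
+% mksquashfs source image -always-use-fragments -action "no-tailend@filesize(>10M)"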
+
+5.2.6 compressed()
+------------------
+
+Tell Mksquashfs to compress the files matching the expression.
+
+For obvious reasons this should be used in conjunction with the global
+Mksquashfs options -noD and -noF. Files are compressed by default.
+
+5.2.7 uncompressed()
+--------------------
+
+Tell Mksquashfs to not compress the files matching the expression.
+
+This action obviously can be used to avoid compressing already compressed
+files (XZ, GZIP etc.).
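+
+For example, to avoid wasting time recompressing files which are already
+compressed (the extensions are illustrative):
+
+uncompressed@name(*.xz) || name(*.gz) || name(*.zip) || name(*.jpg)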
+
+5.2.8 uid(uid or user)
+----------------------
+
+Set the ownership of the files matching the expression to uid (if arg1
+is a number) or user (if arg1 is a string).
+
+5.2.9 gid(gid or group)
+-----------------------
+
+Set the group of the files matching the expression to gid (if arg1
+is a number) or group (if arg1 is a string).
+
+5.2.10 guid(uid/user, gid/group)
+--------------------------------
+
+Set the uid/user and gid/group of the files matching the expression.
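+
+For example, the following action (the pathname is illustrative) gives the
+top-level "home" directory and everything under it to user and group phillip:
+
+guid(phillip, phillip)@subpathname(home)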
+
+5.2.11 chmod(mode)
+------------------
+
+Mode can be octal, or symbolic.
+
+If Mode is Octal, the permission bits are set to the octal value.
+
+If Mode is Symbolic, permissions can be Set, Added or Removed.
+
+The symbolic mode is of the format [ugoa]*[[+-=]PERMS]+
+ PERMS = [rwxXst]+ or [ugo]
+ and the above sequence can be repeated separated with commas.
+
+A combination of the letters ugoa, specify which permission bits will
+be affected, u means user, g means group, o means other, and a
+means all or ugo.
+
+The next letter is +, - or =. The letter + means add to the existing
+permission bits, - means remove the bits from the existing permission
+bits, and = means set the permission bits.
+
+The permission bits (PERMS) are a combination of [rwxXst] which
+sets/adds/removes those bits for the specified ugoa combination. They
+can alternatively be u, g or o, which takes the permission bits from the
+user, group or other of the file respectively.
+
+Examples:
+
+1. chmod(u+r)
+
+Adds the read permission to user.
+
+2. chmod(ug+rw)
+
+Adds the read and write permissions to both user and group.
+
+3. chmod(u=rw,go=r)
+
+Sets the permissions to rw-r--r--, which is equivalent to
+
+4. chmod(644)
+
+5. chmod(ug=o)
+
+Sets the user and group permissions to the permissions for other.
+
+5.3 Actions applied at second directory scan (stage 3)
+------------------------------------------------------
+
+5.3.1 prune()
+
+The prune() action deletes the file or directory (and everything
+underneath it) that matches the expression. In that respect it is
+identical to the exclude() action, except that it takes place in the
+third stage, rather than the first stage. There are a number of
+reasons to have a prune() action in addition to an exclude()
+action.
+
+1. In the first stage Mksquashfs is building an in-memory representation
+   of the filesystem to be compressed. At that point some of the tests
+   don't work because they rely on the in-memory representation having
+   already been fully built.
+
+ So the following tests don't work in stage 1, and so they can't be
+ used in the exclude() action.
+
+ dircount()
+ dircount_range()
+ exists()
+ absolute()
+ readlink()
+ eval()
+
+ If you want to use these tests, you have to use the prune() action.
+
+2. Many exclusion/pruning operations may only be easily applied after
+ transformation actions have been applied in stages 1 & 2.
+
+ For example, you may change the ownership and permissions of
+ matching files in stage 2, and then want to delete files based on
+ some criteria which relies on this having taken place.
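+
+For example, the following action (the threshold is illustrative) deletes any
+directory containing more than 1000 entries, something which cannot be done
+with exclude() because dircount() does not work in stage 1:
+
+prune@type(d) && dircount(>1000)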
+
+5.4. Actions applied at third directory scan (stage 4)
+------------------------------------------------------
+
+5.4.1 empty(reason)
+
+The empty() action deletes any directory which matches the expression,
+and which is also empty for <reason>. <reason> is one of "excluded",
+"source" and "all". If no argument is given, empty() defaults to "all".
+
+The reason "excluded" means the directory has become empty due to
+the exclude() or prune() actions or by exclusion on the command line.
+The reason "source" means the directory was empty in the source filesystem.
+The reason "all" means it is empty for either one of the above two reasons.
+
+This action is often useful when exclusion has produced an empty
+directory, or a hierarchy of directories each of which is empty except
+for a sub-directory, which is itself empty except for a sub-directory,
+and so on until an empty directory is reached at the bottom.
+
+Example
+
+1. Exclude any file which isn't a directory, and then clean-up
+ any directories which are empty as a result.
+
+ exclude@!type(d)
+ empty(excluded)@true
+
+This will produce an empty filesystem, unless there were some
+directories that were originally empty.
+
+Changing the empty action to
+
+ exclude@!type(d)
+ empty@true
+
+Will produce an empty filesystem.
+
+5.5 Actions performed at filesystem creation (stage 6)
+------------------------------------------------------
+
+5.5.1 xattrs-exclude(regex)
+
+The xattrs-exclude action excludes any xattr names matching <regex>. <regex> is
+a POSIX regular expression, e.g. xattrs-exclude("^user.") excludes xattrs from
+the user namespace.
+
+5.5.2 xattrs-include(regex)
+
+The xattrs-include action includes any xattr names matching <regex>. <regex> is
+a POSIX regular expression, e.g. xattrs-include("^user.") includes xattrs from
+the user namespace.
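+
+For example, the following action (the pattern and user name are illustrative)
+only stores user namespace xattrs for files owned by phillip:
+
+xattrs-include("^user.")@user(phillip)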
+
+5.5.3 xattrs-add(name=val)
+
+The xattrs-add action adds the xattr <name> with contents <val>. If it is a
+user xattr, it can only be added to regular files and directories (see
+man 7 xattr). Otherwise it can be added to all file types.
+
+The extended attribute value by default will be treated as binary (i.e. an
+uninterpreted byte sequence), but it can be prefixed with 0s, where it will be
+treated as base64 encoded, or prefixed with 0x, where it will be treated as
+hexadecimal.
+
+Obviously using base64 or hexadecimal allows values to be used that cannot be
+entered on the command line such as non-printable characters etc. But it
+renders the string non-human readable. To keep readability and to allow
+non-printable characters to be entered, the 0t prefix is supported. This
+encoding is similar to binary encoding, except backslashes are specially
+treated, and a backslash followed by three octal digits can be used to encode
+any ASCII character, which obviously can be used to encode non-printable values.
+
+The following four actions are equivalent
+
+-xattrs-add("user.comment=hello world")
+-xattrs-add("user.comment=0saGVsbG8gd29ybGQ=")
+-xattrs-add("user.comment=0x68656c6c6f20776f726c64")
+-xattrs-add("user.comment=0thello world")
+
+Obviously in the above example there are no non-printable characters and so
+the 0t prefixed string is identical to the first line. The following three
+actions are identical, but where the space has been replaced by the
+non-printable NUL '\0' (null character).
+
+-xattrs-add("user.comment=0thello\000world")
+-xattrs-add("user.comment=0saGVsbG8Ad29ybGQ=")
+-xattrs-add("user.comment=0x68656c6c6f00776f726c64")
diff --git a/CHANGES b/CHANGES
new file mode 100644
index 0000000..d01415e
--- /dev/null
+++ b/CHANGES
@@ -0,0 +1,914 @@
+ SQUASHFS CHANGE LOG
+
+4.6.1 25 MAR 2023 Bug fix release to fix race condition and XATTRs
+ code
+
+ 1. Race condition which can cause corruption of the "fragment table"
+ fixed. This is a regression introduced in August 2022, and it has
+ been seen when tailend packing is used (-tailends option).
+ 2. Fix build failure when the tools are being built without extended
+ attribute (XATTRs) support.
+ 3. Fix XATTR error message when an unrecognised prefix is found
+ (Christian Hesse).
+ 4. Fix incorrect free of pointer when an unrecognised XATTR prefix is
+ found.
+
+
+4.6 17 MAR 2023 Major improvements in extended attribute handling,
+ pseudo file handling, and miscellaneous new options and
+ improvements
+
+ 1. Extended attribute handling improved in Mksquashfs and Sqfstar
+
+	1.1 New -xattrs-exclude option to exclude extended attributes
+ from files using a regular expression.
+ 1.2 New -xattrs-include option to include extended attributes
+ from files using a regular expression.
+ 1.3 New -xattrs-add option to add extended attributes to files.
+ 1.4 New Pseudo file xattr definition to add extended attributes
+ to files.
+ 1.5 New xattrs-add Action to add extended attributes to files
+ (Mksquashfs only).
+
+ 2. Extended attribute handling improved in Unsquashfs
+
+ 2.1 New -xattrs-exclude option to exclude extended attributes
+ from files using a regular expression.
+ 2.2 New -xattrs-include option to include extended attributes
+ from files using a regular expression.
+ 2.3 Extended attributes are now supported in Pseudo file output.
+
+ 3. Other major improvements
+
+ 3.1 Unsquashfs can now output Pseudo files to standard out.
+ 3.2 Mksquashfs can now input Pseudo files from standard in.
+	3.3 Squashfs filesystems can now be converted (different block
+	    size, compression etc.) without unpacking to an intermediate
+ filesystem or mounting, by piping the output of Unsquashfs
+ to Mksquashfs.
+ 3.4 Pseudo files are now supported by Sqfstar.
+ 3.5 "Non-anchored" excludes are now supported by Unsquashfs.
+
+ 4. Mksquashfs minor improvements
+
+ 4.1 A new -max-depth option has been added, which limits
+ the depth Mksquashfs descends when creating the filesystem.
+ 4.2 A new -mem-percent option which allows memory for caches to
+ be specified as a percentage of physical RAM, rather than
+ requiring an absolute value.
+	4.3 A new -percentage option which outputs a percentage rather
+	    than generating the full progress-bar. This can be used
+	    with dialog --gauge etc.
+ 4.4 -mkfs-time, -all-time and -root-time options now take
+ a human date string, in addition to the seconds since
+ the epoch of 1970 00:00 UTC. For example "now",
+ "last week", "Wed Mar 8 05:55:01 GMT 2023" are supported.
+ 4.5 -root-uid, -root-gid, -force-uid and -force-gid options now
+ take a user/group name in addition to the integer uid/gid.
+ 4.6 A new -mem-default option which displays default memory
+ usage for caches in Mbytes.
+ 4.7 A new -no-compression option which produces no compression,
+ and it is a short-cut for -noI, -noD, -noF and -noX.
+ 4.8 A new -pseudo-override option which makes pseudo file uids
+ and gids override -all-root, -force-uid and -force-gid
+ options. Normally these options take precedence.
+
+ 5. Unsquashfs minor improvements
+
+ 5.1 New -all-time option which sets all file timestamps to
+ <time>, rather than the time stored in the filesystem
+ inode. <time> can be an integer indicating seconds since
+ the epoch (1970-01-01) or a human string value such as
+ "now", "last week", or "Wed Feb 15 21:02:39 GMT 2023".
+ 5.2 New -full-precision option which uses full precision when
+ displaying times including seconds. Use with -linfo, -lls,
+ -lln and -llc options.
+	5.3 New -match option, with which Unsquashfs will abort if any
+	    extract file does not match anything, and cannot be
+	    resolved.
+	5.4 New -percentage option which outputs a percentage rather
+	    than generating the full progress-bar. This can be used
+	    with dialog --gauge etc.
+
+ 6. Sqfstar minor improvements
+
+ 6.1 A new -ignore-zeros option added which allows tar files to
+ be concatenated together and fed to Sqfstar. Normally a
+	    tar file ends with two consecutive 512-byte blocks filled
+	    with zeros meaning EOF, and Sqfstar will stop reading after
+	    the first tar file on encountering them. This option makes
+	    Sqfstar ignore the zero-filled blocks.
+ 6.2 A new -mem-percent option which allows memory for caches to
+ be specified as a percentage of physical RAM, rather than
+ requiring an absolute value.
+	6.3 A new -percentage option which outputs a percentage rather
+	    than generating the full progress-bar. This can be used
+	    with dialog --gauge etc.
+ 6.4 -mkfs-time, -all-time and -root-time options now take
+ a human date string, in addition to the seconds since
+ the epoch of 1970 00:00 UTC. For example "now",
+ "last week", "Wed Mar 8 05:55:01 GMT 2023" are supported.
+ 6.5 -root-uid, -root-gid, -force-uid and -force-gid options now
+ take a user/group name in addition to the integer uid/gid.
+ 6.6 A new -mem-default option which displays default memory
+ usage for caches in Mbytes.
+ 6.7 A new -no-compression option which produces no compression,
+ and it is a short-cut for -noI, -noD, -noF and -noX.
+ 6.8 A new -pseudo-override option which makes pseudo file uids
+ and gids override -all-root, -force-uid and -force-gid
+ options. Normally these options take precedence.
+ 6.9 Do not abort if ZERO filled blocks indicating end of the
+ TAR archive are missing.
+
+ 7. Other minor improvements
+
+	7.1 If Mksquashfs/Unsquashfs cannot be executed to generate the
+	    manpages because they have been cross-compiled, fall back
+	    to using the pre-built manpages.
+ 7.2 Add new Makefile configure option USE_PREBUILT_MANPAGES
+ to always use pre-built manpages rather than generating
+ them when "make install" is run.
+
+ 8. Major bug fixes
+
+ 8.1 Following a symlink in Sqfscat or where -follow-symlinks
+ option is given with Unsquashfs, incorrectly triggered the
+ corrupted filesystem loop detection code.
+	8.2 In Unsquashfs, if a file was not writable, extended
+	    attributes could not be added to it.
+ 8.3 Sqfstar would incorrectly reject compressor specific
+ options that have an argument.
+	8.4 Sqfstar would incorrectly strip pathname components in the
+	    PAX header linkpath if it referred to a symbolic link.
+ 8.5 Sqfstar -root-uid, -root-gid and -root-time options were
+ documented but not implemented.
+ 8.6 Mksquashfs -one-file-system option would not create empty
+ mount point directory when filesystem boundary crossed.
+ 8.7 Mksquashfs did not check the close() return result.
+
+
+4.5.1 17 MAR 2022 New Manpages, Fix CVE-2021-41072 and miscellaneous
+ improvements and bug fixes
+
+ 1. Major improvements
+
+ 1.1 This release adds Manpages for Mksquashfs(1), Unsquashfs(1),
+ Sqfstar(1) and Sqfscat(1).
+ 1.2 The -help text output from the utilities has been improved
+ and extended as well (but the Manpages are now more
+ comprehensive).
+	1.3 CVE-2021-41072, a write outside of the destination
+	    exploit, has been fixed.
+
+ 2. Minor improvements
+
+ 2.1 The number of hard-links in the filesystem is now also
+ displayed by Mksquashfs in the output summary.
+ 2.2 The number of hard-links written by Unsquashfs is now
+ also displayed in the output summary.
+ 2.3 Unsquashfs will now write to a pre-existing destination
+ directory, rather than aborting.
+	2.4 Unsquashfs now allows "." to be used as the destination, to
+	    extract to the current directory.
+ 2.5 The Unsquashfs progress bar now tracks empty files and
+ hardlinks, in addition to data blocks.
+ 2.6 -no-hardlinks option has been implemented for Sqfstar.
+ 2.7 More sanity checking for "corrupted" filesystems, including
+ checks for multiply linked directories and directory loops.
+ 2.8 Options that may cause filesystems to be unmountable have
+ been moved into a new "experts" category in the Mksquashfs
+ help text (and Manpage).
+
+ 3. Bug fixes
+
+ 3.1 Maximum cpiostyle filename limited to PATH_MAX. This
+ prevents attempts to overflow the stack, or cause system
+ calls to fail with a too long pathname.
+ 3.2 Don't always use "max open file limit" when calculating
+ length of queues, as a very large file limit can cause
+ Unsquashfs to abort. Instead use the smaller of max open
+ file limit and cache size.
+ 3.3 Fix Mksquashfs silently ignoring Pseudo file definitions
+ when appending.
+ 3.4 Don't abort if no XATTR support has been built in, and
+ there's XATTRs in the filesystem. This is a regression
+ introduced in 2019 in Version 4.4.
+ 3.5 Fix duplicate check when the last file block is sparse.
+
+
+4.5 22 JUL 2021 Major improvements including: Actions, Sqfstar,
+ tar and cpio style reading of input sources, Sqfscat
+
+ 1. Mksquashfs improvements
+
+ 1.1 Mksquashfs now supports "Actions". These are modelled on
+ "find" and allow compression, fragment packing, file
+ exclusion and file attributes to be changed.
+ 1.2 New sqfstar command which will create a Squashfs image from
+ a tar archive.
+ 1.3 Tar style handling of source pathnames in Mksquashfs.
+ 1.4 Cpio style handling of source pathnames in Mksquashfs.
+ 1.5 New option to throttle the amount of CPU and I/O.
+ 1.6 New Pseudo file definitions which support timestamps.
+ 1.7 New Pseudo file definitions to create File references.
+ 1.8 New Pseudo file definitions to create Sockets/Fifos.
+ 1.9 Mksquashfs now allows no source directory to be specified.
+ 1.10 New Pseudo file "R" definition which allows a Regular file
+ to be created with data stored within the Pseudo file.
+
+ 2. Major improvements in Unsquashfs
+
+ 2.1 Sqfscat command which outputs files to stdout.
+ 2.2 Symbolic links are now followed in extract files (using
+ -follow-symlinks or -missing-symlinks).
+ 2.3 Unsquashfs now supports "exclude" files.
+ 2.4 Max depth traversal option added.
+ 2.5 Unsquashfs can now output a "Pseudo file" representing the
+ input Squashfs filesystem.
+
+ 3. Minor improvements and bug fixes
+
+ 3.1 The progress bar is now displayed and updated whilst the
+ input is being scanned.
+ 3.2 New -one-file-system option in Mksquashfs.
+ 3.3 New -no-hardlinks option in Mksquashfs.
+ 3.4 New -help options in Mksquashfs and Unsquashfs which output
+ to standard out.
+ 3.5 New -root-uid option in Mksquashfs.
+ 3.6 New -root-gid option in Mksquashfs.
+ 3.7 New -root-time option in Mksquashfs.
+ 3.8 -no-exit-code option added to Unsquashfs which makes it
+ not output an error exit code.
+ 3.9 Exit code in Unsquashfs changed to distinguish between
+ non-fatal errors (exit 2), and fatal errors (exit 1).
+ 3.10 Mksquashfs when appending, now writes the recovery file to
+ the home directory, rather than the current directory.
+ 3.11 New -recovery-path <name> option.
+ 3.12 Xattr id count added in Unsquashfs "-stat" output.
+ 3.13 Unsquashfs "write outside directory" exploit fixed.
+ 3.14 Error handling in Unsquashfs writer thread fixed.
+ 3.15 Fix failure to truncate destination if appending aborted.
+ 3.16 Prevent Mksquashfs reading the destination file.
+
+4.4 29 AUG 2019 Reproducible builds, new compressors,
+ CVE fixes, security hardening and new options
+ for Mksquashfs/Unsquashfs.
+
+ 1. Overall improvements:
+
+ 1.1 Mksquashfs now generates reproducible images by default.
+ 1.2 Mkfs time and file timestamps can also be specified.
+ 1.3 Support for the Zstandard (ZSTD) compression algorithm.
+ 1.4 CVE-2015-4645 and CVE-2015-4646 have been fixed.
+
+ 2. Mksquashfs improvements and major bug fixes:
+
+ 2.1 Pseudo files now support symbolic links.
+ 2.2 New -mkfs-time option.
+ 2.3 New -all-time option.
+ 2.4 New -root-mode option.
+ 2.5 New -quiet option.
+ 2.6 New -noId option.
+ 2.7 New -offset option.
+ 2.8 Update lz4 wrapper to use new functions introduced
+ in 1.7.0.
+ 2.9 Bug fix, don't allow "/" pseudo filenames.
+ 2.10 Bug fix, allow quoting of pseudo files, to
+ better handle filenames with spaces.
+ 2.11 Fix compilation with glibc 2.25+.
+
+ 3. Unsquashfs improvements and major bug fixes:
+
+ 3.1 CVE-2015-4645 and CVE-2015-4646 have been fixed.
+ 3.2 Unsquashfs has been further hardened against corrupted
+	    filesystems.
+ 3.3 Unsquashfs is now more strict about error handling.
+ 3.4 New -ignore-errors option.
+ 3.5 New -strict-errors option.
+ 3.6 New -lln[umeric] option.
+ 3.7 New -lc option.
+ 3.8 New -llc option.
+ 3.9 New -mkfs-time option.
+ 3.10 New -UTC option.
+ 3.11 New -offset option.
+ 3.12 New -quiet option.
+ 3.13 Update lz4 wrapper to use new functions introduced
+ in 1.7.0.
+ 3.14 Bug fix, fatal and non-fatal errors now set the exit
+ code to 1.
+ 3.15 Bug fix, fix time setting for symlinks.
+ 3.16 Bug fix, try to set sticky-bit when running as a
+ user process.
+ 3.17 Fix compilation with glibc 2.25+.
+
+4.3 12 MAY 2014 New compressor options, new Mksquashfs/Unsquashfs
+ functionality, duplicate checking optimisations,
+ stability improvements (option/file parsing,
+ buffer/memory overflow checks, filesystem hardening
+ on corrupted filesystems), CVE fixes.
+
+	Too many changes to do the traditional custom changelog. But this
+	is now unnecessary, so instead the most significant 15% of commits
+	from the git changelog are listed in chronological order.
+
+ - unsquashfs: add checks for corrupted data in opendir functions
+ - unsquashfs: completely empty filesystems incorrectly generate an error
+ - unsquashfs: fix open file limit
+ - mksquashfs: Use linked list to store directory entries rather
+ - mksquashfs: Remove qsort and add a bottom up linked list merge sort
+ - mksquashfs: optimise lookup_inode2() for dirs
+ - pseudo: fix handling of modify pseudo files
+ - pseudo: fix handling of directory pseudo files
+ - xattr: Fix ERROR() so that it is synchronised with the progress bar
+ - mksquashfs/sort: Fix INFO() so that it is synced with the progress bar
+ - mksquashfs: Add -progress to force progress bar when using -info
+ - error.h: consolidate the various error macros into one header file
+ - mksquashfs: fix stack overflow in write_fragment_table()
+ - mksquashfs: move list allocation from off the stack
+ - unsquashfs: fix oversight in directory permission setting
+ - mksquashfs: dynamically allocate recovery_file
+ - mksquashfs: dynamically allocate buffer in subpathname()
+ - mksquashfs: dynamically allocate buffer in pathname()
+ - unsquashfs: fix CVE-2012-4024
+ - unsquashfs: fix CVE-2012-4025
+ - mksquashfs: fix potential stack overflow in get_component()
+ - mksquashfs: add parse_number() helper for numeric command line options
+ - mksquasfs: check return value of fstat() in reader_read_file()
+ - mksquashfs: dynamically allocate filename in old_add_exclude()
+ - unsquashfs: dynamically allocate pathname in dir_scan()
+ - unsquashfs: dynamically allocate pathname in pre_scan()
+ - sort: dynamically allocate filename in add_sort_list()
+ - mksquashfs: fix dir_scan() exit if lstat of source directory fails
+ - pseudo: fix memory leak in read_pseudo_def() if exec_file() fails
+ - pseudo: dynamically allocate path in dump_pseudo()
+ - mksquashfs: dynamically allocate path in display_path2()
+ - mksquashfs: dynamically allocate b_buffer in getbase()
+ - pseudo: fix potential stack overflow in get_component()
+ - pseudo: avoid buffer overflow in read_pseudo_def() using sscanf()
+ - pseudo: dynamically allocate filename in exec_file()
+ - pseudo: avoid buffer overflow in read_sort_file() using fscanf()
+ - sort: tighten up sort file parsing
+ - unsquashfs: fix name under-allocation in process_extract_files()
+ - unsquashfs: avoid buffer overflow in print_filename() using sprintf()
+ - Fix some limits in the file parsing routines
+ - pseudo: Rewrite pseudo file processing
+ - read_fs: fix small memory leaks in read_filesystem()
+ - mksquashfs: fix fclose leak in reader_read_file() on I/O error
+ - mksquashfs: fix frag struct leak in write_file_{process|blocks|frag}
+ - unsquashfs_xattr: fix memory leak in write_xattr()
+ - read_xattrs: fix xattr free in get_xattr() in error path
+ - unsquashfs: add -user-xattrs option to only extract user.xxx xattrs
+ - unsquashfs: add code to only print "not superuser" error message once
+ - unsquashfs: check for integer overflow in user input
+ - mksquashfs: check for integer overflow in user input
+ - mksquashfs: fix "new" variable leak in dir_scan1()
+ - read_fs: prevent buffer {over|under}flow in read_block() with
+ corrupted filesystems
+ - read_fs: check metadata blocks are expected size in scan_inode_table()
+ - read_fs: check the root inode block is found in scan_inode_table()
+ - read_fs: Further harden scan_inode_table() against corrupted
+ filesystems
+ - unsquashfs: prevent buffer {over|under}flow in read_block() with
+ corrupted filesystems
+ - read_xattrs: harden xattr data reading against corrupted filesystems
+ - unsquash-[23]: harden frag table reading against corrupted filesystems
+ - unsquash-4.c: harden uid/gid & frag table reading against corruption
+ - unsquashfs: harden inode/directory table reading against corruption
+ - mksquashfs: improve out of space in output filesystem handling
+ - mksquashfs: flag lseek error in writer as probable out of space
+ - mksquashfs: flag lseek error in write_destination as probable out of
+ space
+ - mksquashfs: print file being squashed when ^\ (SIGQUIT) typed
+ - mksquashfs: make EXIT_MKSQUASHFS() etc restore via new restore thread
+ - mksquashfs: fix recursive restore failure check
+ - info: dump queue and cache status if ^\ hit twice within one second
+ - mksquashfs: fix rare race condition in "locked fragment" queueing
+ - lz4: add experimental support for lz4 compression
+ - lz4: add support for lz4 "high compression"
+ - lzo_wrapper: new implementation with compression options
+ - gzip_wrapper: add compression options
+ - mksquashfs: redo -comp <compressor> parsing
+ - mksquashfs: display compressor options when -X option isn't recognised
+ - mksquashfs: add -Xhelp option
+ - mksquashfs/unsquashfs: fix mtime signedness
+ - Mksquashfs: optimise duplicate checking when appending
+ - Mksquashfs: introduce additional per CPU fragment process threads
+ - Mksquashfs: significantly optimise fragment duplicate checking
+ - read_fs: scan_inode_table(), fix memory leak on filesystem corruption
+ - pseudo: add_pseudo(), fix use of freed variable
+ - mksquashfs/unsquashfs: exclude/extract/pseudo files, fix handling of
+ leaf name
+ - mksquashfs: rewrite default queue size so it's based on physical mem
+ - mksquashfs: add a new -mem <mbytes> option
+ - mksquashfs: fix limit on the number of dynamic pseudo files
+ - mksquashfs: make -mem take a normal byte value, optionally with a
+ K, M or G
+
+4.2 28 FEB 2011 XZ compression, and compression options support
+
+ 1. Filesystem improvements:
+
+ 1.1 Added XZ compression
+ 1.2 Added compression options support
+
+ 2. Miscellaneous improvements/bug fixes
+
+ 1.1 Add missing NO_XATTR filesystem flag to indicate no-xattrs
+ option was specified and no xattrs should be stored when
+ appending.
+	 1.2 Add support in Unsquashfs -stat option for displaying
+ NO_XATTR flag.
+ 1.3 Remove checkdata entry from Unsquashfs -stat option if a 4.0
+ filesystem - checkdata is no longer supported.
+ 1.4 Fix appending bug when appending to an empty filesystem - this
+ would be incorrectly treated as an error.
+ 1.5 Use glibc sys/xattr.h include rather than using attr/xattr.h
+ which isn't present by default on some distributions.
+ 1.6 Unsquashfs, fix block calculation error with regular files when
+ file size is between 2^32-block_size+1 and 2^32-1.
+ 1.7 Unsquashfs, fix sparse file writing when holes are larger than
+ 2^31-1.
+ 1.8 Add external CFLAGS and LDFLAGS support to Makefile, and allow
+ build options to be specified on command line. Also don't
+ over-write passed in CFLAGS definition.
+
+
+4.1 19 SEPT 2010 Major filesystem and tools improvements
+
+ 1. Filesystem improvements:
+
+ 1.1 Extended attribute support
+ 1.2 New compression framework
+ 1.3 Support for LZO compression
+ 1.4 Support for LZMA compression (not yet in mainline)
+
+ 2. Mksquashfs improvements:
+
+ 1.1 Enhanced pseudo file support
+ 1.2 New options for choosing compression algorithm used
+ 1.3 New options for controlling extended attributes
+ 1.4 Fix misalignment issues with memcpy etc. seen on ARM
+ 1.5 Fix floating point error in progress_bar when max == 0
+ 1.6 Removed use of get_nproc() call unavailable in ulibc
+ 1.7 Reorganised help text
+
+ 3. Unsquashfs improvements:
+
+ 1.1 New options for controlling extended attributes
+ 1.2 Fix misalignment issues with memcpy etc. seen on ARM
+ 1.3 Fix floating point error in progress_bar when max == 0
+ 1.4 Removed use of get_nproc() call unavailable in ulibc
+
+
+4.0 5 APR 2009 Major filesystems improvements
+
+ 1. Kernel code improvements:
+
+ 1.1 Fixed little endian layout adopted. All swapping macros
+ removed, and in-line swapping added for big-endian
+ architectures.
+ 1.2 Kernel code substantially improved and restructured.
+ 1.3 Kernel code split into separate files along functional lines.
+ 1.4 Vmalloc usage removed, and code changed to use separately
+ allocated 4K buffers
+
+ 2. Unsquashfs improvements:
+
+ 2.1 Support for 4.0 filesystems added.
+ 2.2 Swapping macros rewritten.
+ 2.3 Unsquashfs code restructured and split into separate files.
+
+ 3. Mksquashfs improvements:
+
+ 3.1 Swapping macros rewritten. Fixed little-endian layout allows
+ code to be optimised and only added at compile time for
+ big endian systems.
+ 3.2 Support for pseudo files added.
+
+3.4 26 AUG 2008 Performance improvements to Unsquashfs, Mksquashfs
+ and the kernel code. Plus many small bug fixes.
+
+ 1. Kernel code improvements:
+
+ 1.1 Internal Squashfs kernel metadata and fragment cache
+ implementations have been merged and optimised. Spinlocks are
+ now used, locks are held for smaller periods and wakeups have
+ been minimised. Small race condition fixed where if two or
+ more processes tried to read the same cache block
+ simultaneously they would both read and decompress it. 10-20%+
+ speed improvement has been seen on tests.
+ 1.2 NFS export code rewritten following VFS changes in
+ linux-2.6.24.
+ 1.3 New patches for linux-2.6.25, linux-2.6.26, and linux-2.6.27.
+ Fixed patch for linux-2.6.24.
+ 1.4 Fixed small buffer_head leak in squashfs_read_data when
+ handling badly corrupted filesystems.
+ 1.5 Fixed bug in get_dir_index_using_offset.
+
+ 2. Unsquashfs improvements:
+
+ 2.1 Unsquashfs has been parallelised. Filesystem reading, writing
+     and decompression are now multi-threaded. Up to 40% speed
+ improvement seen on tests.
+ 2.2 Unsquashfs now has a progress bar. Use -no-progress to
+ disable it.
+ 2.3 Fixed small bug where unistd.h wasn't being included on
+ some distributions, leading to lseek being used rather than
+ lseek64 - which meant on these distributions Unsquashfs
+ couldn't unsquash filesystems larger than 4GB.
+
+ 3. Mksquashfs improvements:
+
+ 3.1 Removed some small remaining parallelisation bottlenecks.
+ Depending on source filesystem, up to 10%+ speed improvement.
+ 3.2 Progress bar improved, and moved to separate thread.
+ 3.3 Sparse file handling bug in Mksquashfs 3.3 fixed.
+ 3.4 Two rare appending restore bugs fixed (when ^C hit twice).
+
+
+3.3 1 NOV 2007 Increase in block size, sparse file support,
+ Mksquashfs and Unsquashfs extended to use
+ pattern matching in exclude/extract files, plus
+ many more improvements and bug fixes.
+
+ 1. Filesystem improvements:
+
+ 1.1. Maximum block size has been increased to 1Mbyte, and the
+ default block size has been increased to 128 Kbytes.
+ This improves compression.
+
+ 1.2. Sparse files are now supported. Sparse files are files
+ which have large areas of unallocated data commonly called
+ holes. These files are now detected by Squashfs and stored
+ more efficiently. This improves compression and read
+ performance for sparse files.
+
+ 2. Mksquashfs improvements:
+
+ 2.1. Exclude files have been extended to use wildcard pattern
+ matching and regular expressions. Support has also been
+ added for non-anchored excludes, which means it is
+ now possible to specify excludes which match anywhere
+ in the filesystem (i.e. leaf files), rather than always
+ having to specify exclude files starting from the root
+ directory (anchored excludes).
+
+ 2.2. Recovery files are now created when appending to existing
+ Squashfs filesystems. This allows the original filesystem
+ to be recovered if Mksquashfs aborts unexpectedly
+ (i.e. power failure).
+
+ 3. Unsquashfs improvements:
+
+ 3.1. Multiple extract files can now be specified on the
+ command line, and the files/directories to be extracted can
+ now also be given in a file.
+
+ 3.2. Extract files have been extended to use wildcard pattern
+ matching and regular expressions.
+
+ 3.3. Filename printing has been enhanced and Unsquashfs can
+ now display filenames with file attributes
+ ('ls -l' style output).
+
+ 3.4. A -stat option has been added which displays the filesystem
+ superblock information.
+
+ 3.5. Unsquashfs now supports 1.x filesystems.
+
+ 4. Miscellaneous improvements/bug fixes:
+
+ 4.1. Squashfs kernel code improved to use SetPageError in
+ squashfs_readpage() if I/O error occurs.
+
+ 4.2. Fixed Squashfs kernel code bug preventing file
+ seeking beyond 2GB.
+
+ 4.3. Mksquashfs now detects file size changes between
+ first phase directory scan and second phase filesystem create.
+ It also deals better with file I/O errors.
+
+
+3.2-r2 15 JAN 2007 Kernel patch update and progress bar bug fix
+
+ 1. Kernel patches 2.6.19/2.6.20 have been updated to use
+ const structures and mutexes rather than older semaphores.
+ 2. Minor SMP bug fixes.
+ 3. Progress bar broken on x86-64. Fixed.
+
+3.2 2 JAN 2007 NFS support, improvements to the Squashfs-tools, major
+ bug fixes, lots of small improvements/bug fixes, and new
+ kernel patches.
+
+ Improvements:
+
+ 1. Squashfs filesystems can now be exported via NFS.
+ 2. Unsquashfs now supports 2.x filesystems.
+ 3. Mksquashfs now displays a progress bar.
+ 4. Squashfs kernel code has been hardened against accidentally or
+ maliciously corrupted Squashfs filesystems.
+
+ Bug fixes:
+
+ 5. Race condition occurring on S390 in readpage() fixed.
+ 6. Odd behaviour of MIPS memcpy in read_data() routine worked around.
+ 7. Missing cache_flush in Squashfs symlink_readpage() added.
+
+
+3.1-r2 30 AUG 2006 Mksquashfs -sort bug fix
+
+ A code optimisation after testing unfortunately
+ broke sorting in Mksquashfs. This has been fixed.
+
+3.1 19 AUG 2006 This release has some major improvements to
+ the squashfs-tools, a couple of major bug
+ fixes, lots of small improvements/bug fixes,
+ and new kernel patches.
+
+
+ 1. Mksquashfs has been rewritten to be multi-threaded. It
+ has the following improvements
+
+ 1.1. Parallel compression. By default as many compression and
+ fragment compression threads are created as there are available
+ processors. This significantly speeds up performance on SMP
+ systems.
+ 1.2. File input and filesystem output are performed in parallel on
+ separate threads to maximise I/O performance. Even on single
+ processor systems this speeds up performance by at least 10%.
+ 1.3. Appending has been significantly improved, and files within the
+ filesystem being appended to are no longer scanned and
+ checksummed. This significantly improves append time for large
+ filesystems.
+ 1.4. File duplicate checking has been optimised, and split into two
+ separate phases. Only files which are considered possible
+ duplicates after the first phase are checksummed and cached in
+ memory.
+ 1.5 The use of swap memory was found to significantly impact
+ performance. The amount of memory used to cache files is now a
+ command line option, by default this is 512 Mbytes.
+
+ 2. Unsquashfs has the following improvements
+
+ 2.1 Unsquashfs now allows you to specify the filename or the
+ directory within the Squashfs filesystem that is to be
+ extracted, rather than always extracting the entire filesystem.
+ 2.2 A new -force option has been added which forces Unsquashfs to
+ output to the destination directory even if files and directories
+ already exist in the destination directory. This allows you to
+     update an already existing directory tree, or to unsquash to
+ a partially filled directory tree. Without the -force option
+ Unsquashfs will refuse to output.
+
+ 3. The following major bug fixes have been made
+
+ 3.1 A fragment table rounding bug has been fixed in Mksquashfs.
+ Previously if the number of fragments in the filesystem
+ were a multiple of 512, Mksquashfs would generate an
+ incorrect filesystem.
+ 3.2 A rare SMP bug which occurred when simultaneously accessing
+ multiply mounted Squashfs filesystems has been fixed.
+
+ 4. Miscellaneous improvements/bug fixes
+
+ 4.1 Kernel code stack usage has been reduced. This is to ensure
+ Squashfs works with 4K stacks.
+ 4.2 Readdir (Squashfs kernel code) has been fixed to always
+ return 0, rather than the number of directories read. Squashfs
+ should now interact better with NFS.
+ 4.3 Lseek bug in Mksquashfs when appending to larger than 4GB
+ filesystems fixed.
+ 4.4 Squashfs 2.x initrds can now be mounted.
+ 4.5 Unsquashfs exit status fixed.
+ 4.6 New patches for linux-2.6.18 and linux-2.4.33.
+
+
+3.0 15 MAR 2006 Major filesystem improvements
+
+ 1. Filesystems are no longer limited to 4 GB. In
+ theory 2^64 or 4 exabytes is now supported.
+ 2. Files are no longer limited to 4 GB. In theory the maximum
+ file size is 4 exabytes.
+ 3. Metadata (inode table and directory tables) are no longer
+ restricted to 16 Mbytes.
+ 4. Hardlinks are now supported.
+ 5. Nlink counts are now supported.
+ 6. Readdir now returns '.' and '..' entries.
+ 7. Special support for files larger than 256 MB has been added to
+ the Squashfs kernel code for faster read access.
+ 8. Inode numbers are now stored within the inode rather than being
+ computed from inode location on disk (this is not so much an
+ improvement, but a change forced by the previously listed
+ improvements).
+
+2.2-r2 8 SEPT 2005 Second release of 2.2, this release fixes a couple
+ of small bugs, a couple of small documentation
+ mistakes, and adds a patch for kernel 2.6.13.
+
+ 1. Mksquashfs now deletes the output filesystem image file if an
+ error occurs whilst generating the filesystem. Previously on
+ error the image file was left empty or partially written.
+ 2. Updated mksquashfs so that it doesn't allow you to generate
+ filesystems with block sizes smaller than 4K. Squashfs hasn't
+ supported block sizes less than 4K since 2.0-alpha.
+ 3. Mksquashfs now ignores missing files/directories in sort files.
+ This was the original behaviour before 2.2.
+ 4. Fixed small mistake in fs/Kconfig where the version was still
+ listed as 2.0.
+ 5. Updated ACKNOWLEDGEMENTS file.
+
+
+2.2 3 JUL 2005 This release has some small improvements, bug fixes
+ and patches for new kernels.
+
+ 1. Sort routine re-worked and debugged from release 2.1. It now allows
+ you to give Mkisofs style sort files and checks for filenames that
+ don't match anything. Sort priority has also been changed to
+ conform to Mkisofs usage, highest priority files are now placed
+ at the start of the filesystem (this means they will be on the
+ inside of a CD or DVD).
+ 2. New Configure options for embedded systems (memory constrained
+ systems). See INSTALL file for further details.
+ 3. Directory index bug fixed where chars were treated as signed on
+ some architectures. A file would not be found in the rare case
+     that the filename started with a character greater than 127.
+ 4. Bug introduced into the read_data() routine when it was sped up to use data
+ block queueing fixed. If the second or later block resulted in an
+ I/O error this was not checked.
+ 5. Append bug introduced in 2.1 fixed. The code to compute the new
+ compressed and uncompressed directory parts after appending was
+ wrong.
+ 6. Metadata block length read routine altered to not perform a
+ misaligned short read. This was to fix reading on an ARM7 running
+ uCLinux without a misaligned read interrupt handler.
+ 7. Checkdata bug introduced in 2.1 fixed.
+
+
+2.1-r2 15 DEC 2004 Code changed so it can be compiled with gcc 2.x
+
+ 1. In some of the code added for release 2.1 I unknowingly used some
+ gcc extensions only supported by 3.x compilers. I have received
+ a couple of reports that the 2.1 release doesn't build on 2.x and so
+ people are clearly still using gcc 2.x. The code has been
+ rewritten to remove these extensions.
+
+2.1 10 DEC 2004 Significantly improved directory handling plus numerous
+ other smaller improvements
+
+ 1. Fast indexed directories implemented. These speed up directory
+ operations (ls, file lookup etc.) significantly for directories
+ larger than 8 KB.
+ 2. All directories are now sorted in alphabetical order. This again
+ speeds up directory operations, and in some cases it also results in
+ a small compression improvement (greater data similarity between
+ files with alphabetically similar names).
+ 3. Maximum directory size increased from 512 KB to 128 MB.
+ 4. Duplicate fragment checking and appending optimised in mksquashfs,
+ depending on filesystem, this is now up to 25% faster.
+ 5. Mksquashfs help information reformatted and reorganised.
+ 6. The Squashfs version and release date is now printed at kernel
+ boot-time or module insertion. This addition will hopefully help
+ to reduce the growing problem where the Squashfs version supported
+ by a kernel is unknown and the kernel source is unavailable.
+ 7. New PERFORMANCE.README file.
+ 8. New -2.0 mksquashfs option.
+ 9. CHANGES file reorganised.
+ 10. README file reorganised, clarified and updated to include the 2.0
+ mksquashfs options.
+ 11. New patch for Linux 2.6.9.
+ 12. New patch for Linux 2.4.28.
+
+2.0r2 29 AUG 2004 Workaround for kernel bug in kernels 2.6.8 and newer
+ added
+
+ 1. New patch for kernel 2.6.8.1. This includes a workaround for a
+ kernel bug introduced in 2.6.7bk14, which is present in all later
+ versions of the kernel.
+
+ If you're using a 2.6.8 kernel or later then you must use this
+ 2.6.8.1 patch. If you've experienced hangs or oopses using Squashfs
+ with a 2.6.8 or later kernel then you've hit this bug, and this
+ patch will fix it.
+
+ It is worth mentioning that this kernel bug potentially affects
+ other filesystems. If you receive odd results with other
+ filesystems you may be experiencing this bug with that filesystem.
+ I submitted a patch but this has not yet gone into the
+ kernel, hopefully the bug will be fixed in later kernels.
+
+2.0 13 JULY 2004 A couple of new options, and some bug fixes
+
+ 1. New mksquashfs -all-root, -root-owned, -force-uid, and -force-gid
+ options. These allow the uids/gids of files in the generated
+ filesystem to be specified, overriding the uids/gids in the
+ source filesystem.
+ 2. Initrds are now supported for kernels 2.6.x.
+ 3. amd64 bug fixes. If you use an amd64, please read the README-AMD64
+ file.
+ 4. Check-data and gid bug fixes. With 2.0-alpha when mounting 1.x
+ filesystems in certain cases file gids were corrupted.
+ 5. New patch for Linux 2.6.7.
+
+2.0-ALPHA 21 MAY 2004 Filesystem changes and compression improvements
+
+ 1. Squashfs 2.0 has added the concept of fragment blocks.
+ Files smaller than the file block size and optionally the
+ remainder of files that do not fit fully into a block (i.e. the
+ last 32K in a 96K file) are packed into shared fragments and
+ compressed together. This achieves on average 5 - 20% better
+ compression than Squashfs 1.x.
+ 2. The maximum block size has been increased to 64K (in the ALPHA
+ version of Squashfs 2.0).
+ 3. The maximum number of UIDs has been increased to 256 (from 48 in
+ 1.x).
+ 4. The maximum number of GIDs has been increased to 256 (from 15 in
+ 1.x).
+ 5. Removal of sleep_on() function call in 2.6.x patch, to allow Squashfs
+ to work on the Fedora rc2 kernel.
+ 6. Numerous small bug fixes have been made.
+
+1.3r3 18 JAN 2004 Third release of 1.3, this adds a new mksquashfs option,
+ some bug fixes, and extra patches for new kernels
+
+ 1. New mksquashfs -ef exclude option. This option reads the exclude
+ dirs/files from an exclude file, one exclude dir/file per line. This
+ avoids the command line size limit when using the -e exclude option,
+ 2. When appending to existing filesystems, if mksquashfs experiences a
+ fatal error (e.g. out of space when adding to the destination), the
+ original filesystem is restored,
+ 3. Mksquashfs now builds standalone, without the kernel needing to be
+ patched.
+ 4. Bug fix in the kernel squashfs filesystem, where the pages being
+ filled were not kmapped. This seems to only have caused problems
+ on an Apple G5,
+ 5. New patch for Linux 2.4.24,
+
+ 6. New patch for Linux 2.6.1, this replaces the patch for 2.6.0-test7.
+
+1.3r2 14 OCT 2003 Second release of 1.3, bug fixes and extra patches for
+ new kernels
+
+ 1. Bug fix in routine that adds files to the filesystem being
+ generated in mksquashfs. This bug was introduced in 1.3
+ (not enough testing...) when I rewrote it to handle files larger
+ than available memory. This bug caused a SEGV, so if you've ever
+ got that, it is now fixed,
+ 2. Long running bug where ls -s and du reported wrong block size
+ fixed. I'm pretty sure this used to work many kernel versions ago
+ (2.4.7) but it broke somewhere along the line since then,
+ 3. New patch for Linux 2.4.22,
+ 4. New patch for 2.6.0-test7, this replaces the patch for 2.6.0-test1.
+
+1.3 29 JUL 2003 FIFO/Socket support added plus optimisations and
+ improvements
+
+ 1. FIFOs and Socket inodes are now supported,
+ 2. Mksquashfs can now compress files larger than available
+ memory,
+ 3. File duplicate check routine optimised,
+ 4. Exit codes fixed in Mksquashfs,
+ 5. Patch for Linux 2.4.21,
+ 6. Patch for Linux 2.6.0-test1. Hopefully, this will work for
+ the next few releases of 2.6.0-testx, otherwise, I'll be
+ releasing a lot of updates to the 2.6.0 patch...
+
+1.2 13 MAR 2003 Append feature and new mksquashfs options added
+
+ Mksquashfs can now add to existing squashfs filesystems. Three extra
+ options "-noappend", "-keep-as-directory", and "-root-becomes"
+ have been added.
+
+ The append option with file duplicate detection means squashfs can be
+ used as a simple versioning archiving filesystem. A squashfs
+ filesystem can be created with, for example, the linux-2.4.19 source.
+ Appending the linux-2.4.20 source will create a filesystem with the
+ two source trees, but only the changed files will take extra room,
+ the unchanged files will be detected as duplicates.
+
+ See the README file for usage changes.
+
+1.1b 16 JAN 2003 Bug fix release
+
+ Fixed readpage deadlock bug. This was a rare deadlock bug that
+ happened when pushing pages into the page cache when using greater
+ than 4K blocks. I never got this bug when I tested the filesystem,
+ but two people emailed me on the same day about the problem!
+ I fixed it by using a page cache function that wasn't there when
+ I originally did the work, which was nice :-)
+
+1.1 8 JAN 2003 Added features
+
+ 1. Kernel squashfs can now mount different byte order filesystems.
+ 2. Additional features added to mksquashfs. Mksquashfs now supports
+ exclude files and multiple source files/directories can be
+ specified. A nopad option has also been added, which
+ informs mksquashfs not to pad filesystems to a multiple of 4K.
+ See README for mksquashfs usage changes.
+ 3. Greater than 2GB filesystems bug fix. Filesystems greater than 2GB
+ can now be created.
+
+1.0c 14 NOV 2002 Bug fix release
+
+ Fixed bugs with initrds and device nodes
+
+1.0 23 OCT 2002 Initial release
diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..d159169
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,339 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/INSTALL b/INSTALL
new file mode 100644
index 0000000..c28f3b4
--- /dev/null
+++ b/INSTALL
@@ -0,0 +1,47 @@
+ INSTALLING SQUASHFS-TOOLS
+
+1. Kernel support
+-----------------
+
+This release is for 2.6.29 and newer kernels. Kernel patching is not necessary.
+
+Extended attribute support requires 2.6.35 or newer kernels. But
+file systems with extended attributes can be mounted on 2.6.29 and
+newer kernels (the extended attributes will be ignored with a warning).
+
+LZO compression support requires 2.6.36 or newer kernels.
+
+XZ compression support requires 2.6.38 or newer kernels.
+
+LZ4 compression support requires 3.11 or newer kernels.
+
+ZSTD compression support requires 4.14 or newer kernels.
+
+2. Building squashfs tools
+--------------------------
+
+The squashfs-tools directory contains the source code for the Mksquashfs
+and Unsquashfs programs. These can be compiled by typing "make". This
+will also create symbolic links sqfstar and sqfscat.
+
+They can be installed, along with the symbolic links sqfstar and sqfscat,
+to /usr/local/bin by typing "sudo make install".
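+
+For example, building and installing from the top of the source tree:
+
+% cd squashfs-tools
+% make
+% sudo make install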
+
+2.1 Compressors supported
+
+By default the Makefile is configured to build Mksquashfs and Unsquashfs
+with GZIP support. Read the Makefile in squashfs-tools for instructions on
+building LZO, LZ4, XZ and ZSTD compression support.
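+
+As a sketch (the exact build variables and their names are documented in
+the Makefile itself, so check your copy before relying on them), extra
+compressors can typically be enabled on the make command line:
+
+% make XZ_SUPPORT=1 ZSTD_SUPPORT=1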
+
+2.2 Extended attribute support
+
+By default the Makefile is configured to build Mksquashfs and Unsquashfs
+with extended attribute support. Read the Makefile in squashfs-tools for
+instructions on how to disable extended attribute support, if not supported
+by your distribution/C library, or if it is not needed.
+
+2.3 Reproducible builds
+
+By default the Makefile is configured to build Mksquashfs with reproducible
+output by default. Read the Makefile in squashfs-tools for instructions
+on how to disable this default if desired.
diff --git a/README-4.6.1 b/README-4.6.1
new file mode 100644
index 0000000..a259db3
--- /dev/null
+++ b/README-4.6.1
@@ -0,0 +1,340 @@
+ SQUASHFS-TOOLS 4.6.1 - A squashed read-only filesystem for Linux
+
+ Copyright 2002-2023 Phillip Lougher <phillip@squashfs.org.uk>
+
+ Released under the GPL licence (version 2 or later).
+
+Welcome to Squashfs-Tools 4.6.1. This is a bug fix update release to
+Squashfs-Tools 4.6.
+
+Please see the INSTALL file for instructions on installing the tools, and the
+USAGE-4.6 files for documentation on how to use the tools.
+
+This README will describe the improvements, and has the following sections:
+
+1. Bug fixes in 4.6.1
+2. Summary of changes in 4.6
+3. Filtering and adding extended attributes (XATTRs)
+4. Squashfs filesystems conversion (piping Unsquashfs output to Mksquashfs)
+5. Author info
+
+1. Bug fixes in 4.6.1
+---------------------
+
+1. Race condition which can cause corruption of the "fragment table" fixed.
+ This is a regression introduced in August 2022, and it has been seen when
+ tailend packing is used (-tailends option).
+2. Fix build failure when the tools are being built without extended attribute
+ (XATTRs) support.
+3. Fix XATTR error message when an unrecognised prefix is found (Christian Hesse).
+4. Fix incorrect free of pointer when an unrecognised XATTR prefix is found.
+
+2. Summary of changes in 4.6
+----------------------------
+
+1. Extended attribute handling improved in Mksquashfs and Sqfstar
+
+   1.1 New -xattrs-exclude option to exclude extended attributes from files
+ using a regular expression.
+ 1.2 New -xattrs-include option to include extended attributes from files
+ using a regular expression.
+ 1.3 New -xattrs-add option to add extended attributes to files.
+ 1.4 New Pseudo file xattr definition to add extended attributes to
+ files.
+ 1.5 New xattrs-add Action to add extended attributes to files
+ (Mksquashfs only).
+
+2. Extended attribute handling improved in Unsquashfs
+
+ 2.1 New -xattrs-exclude option to exclude extended attributes from files
+ using a regular expression.
+ 2.2 New -xattrs-include option to include extended attributes from files
+ using a regular expression.
+ 2.3 Extended attributes are now supported in Pseudo file output.
+
+3. Other major improvements
+
+ 3.1 Unsquashfs can now output Pseudo files to standard out.
+ 3.2 Mksquashfs can now input Pseudo files from standard in.
+   3.3 Squashfs filesystems can now be converted (different block size,
+       compression etc.) without unpacking to an intermediate filesystem or
+ mounting, by piping the output of Unsquashfs to Mksquashfs.
+ 3.4 Pseudo files are now supported by Sqfstar.
+ 3.5 "Non-anchored" excludes are now supported by Unsquashfs.
+
+4. Mksquashfs minor improvements
+
+ 4.1 A new -max-depth option has been added, which limits the depth
+ Mksquashfs descends when creating the filesystem.
+ 4.2 A new -mem-percent option which allows memory for caches to be
+ specified as a percentage of physical RAM, rather than requiring an
+ absolute value.
+   4.3 A new -percentage option added which outputs a percentage rather
+       than generating the full progress-bar. This can be used with
+       dialog --gauge etc. (see the example after this list).
+ 4.4 -mkfs-time, -all-time and -root-time options now take a human date
+ string, in addition to the seconds since the epoch of 1970 00:00
+ UTC. For example "now", "last week", "Wed Mar 8 05:55:01 GMT 2023"
+ are supported.
+ 4.5 -root-uid, -root-gid, -force-uid and -force-gid options now take a
+ user/group name in addition to the integer uid/gid.
+ 4.6 A new -mem-default option which displays default memory usage for
+ caches in Mbytes.
+ 4.7 A new -no-compression option which produces no compression, and it
+ is a short-cut for -noI, -noD, -noF and -noX.
+ 4.8 A new -pseudo-override option which makes pseudo file uids and gids
+ override -all-root, -force-uid and -force-gid options. Normally
+ these options take precedence.
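+
+   For example, the -percentage option mentioned in 4.3 can drive dialog
+   (a sketch; adjust the dialog text and geometry to taste):
+
+   % mksquashfs dir image.sqsh -percentage | dialog --gauge "Creating image" 6 70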
+
+5. Unsquashfs minor improvements
+
+ 5.1 New -all-time option which sets all file timestamps to <time>,
+ rather than the time stored in the filesystem inode. <time> can be
+ an integer indicating seconds since the epoch (1970-01-01) or a
+ human string value such as "now", "last week", or
+ "Wed Feb 15 21:02:39 GMT 2023".
+ 5.2 New -full-precision option which uses full precision when displaying
+ times including seconds. Use with -linfo, -lls, -lln and -llc
+ options.
+ 5.3 New -match option where Unsquashfs will abort if any extract file
+       does not match on anything, and cannot be resolved.
+   5.4 New -percentage option added which outputs a percentage rather than
+       generating the full progress-bar. This can be used with
+       dialog --gauge etc.
+
+6. Sqfstar minor improvements
+
+ 6.1 New -ignore-zeros option added which allows tar files to be
+ concatenated together and fed to Sqfstar. Normally a tarfile has
+ two consecutive 512 byte blocks filled with zeros which means EOF
+ and Sqfstar will stop reading after the first tar file on
+ encountering them. This option makes Sqfstar ignore the zero filled
+ blocks.
+ 6.2 A new -mem-percent option which allows memory for caches to be
+ specified as a percentage of physical RAM, rather than requiring an
+ absolute value.
+   6.3 A new -percentage option added which outputs a percentage rather than
+       generating the full progress-bar. This can be used with
+       dialog --gauge etc.
+ 6.4 -mkfs-time, -all-time and -root-time options now take a human date
+ string, in addition to the seconds since the epoch of 1970 00:00
+ UTC. For example "now", "last week", "Wed Mar 8 05:55:01 GMT 2023"
+ are supported.
+ 6.5 -root-uid, -root-gid, -force-uid and -force-gid options now take a
+ user/group name in addition to the integer uid/gid.
+ 6.6 A new -mem-default option which displays default memory usage for
+ caches in Mbytes.
+ 6.7 A new -no-compression option which produces no compression, and it
+ is a short-cut for -noI, -noD, -noF and -noX.
+ 6.8 A new -pseudo-override option which makes pseudo file uids and gids
+ override -all-root, -force-uid and -force-gid options. Normally
+ these options take precedence.
+ 6.9 Do not abort if ZERO filled blocks indicating end of the TAR archive
+ are missing.
+
+7. Other minor improvements
+
+   7.1 If Mksquashfs/Unsquashfs fails to execute when generating the manpages
+       (because the tools have been cross-compiled), fall back to using the
+       pre-built manpages.
+ 7.2 Add new Makefile configure option USE_PREBUILT_MANPAGES to always
+ use pre-built manpages rather than generating them when "make
+ install" is run.
+
+8. Major bug fixes
+
+   8.1 Following a symlink in Sqfscat, or in Unsquashfs when the
+       -follow-symlinks option is given, incorrectly triggered the corrupted
+       filesystem loop detection code.
+   8.2 In Unsquashfs, if a file was not writable, extended attributes could
+       not be added to it.
+ 8.3 Sqfstar would incorrectly reject compressor specific options that
+ have an argument.
+   8.4 Sqfstar would incorrectly strip pathname components from the PAX
+       header linkpath if the link was symbolic.
+ 8.5 Sqfstar -root-uid, -root-gid and -root-time options were documented
+ but not implemented.
+ 8.6 Mksquashfs -one-file-system option would not create empty mount
+ point directory when filesystem boundary crossed.
+ 8.7 Mksquashfs did not check the close() return result.
+
+
+3. Filtering and adding extended attributes (XATTRs)
+----------------------------------------------------
+
+Mksquashfs, Unsquashfs and Sqfstar have a number of new options which allow
+extended attributes (xattrs) to be filtered from the source files or added to
+the created Squashfs filesystem.
+
+The -xattrs-exclude option specifies a regular expression (regex), which
+removes any extended attribute that matches the regular expression from all
+files.
+
+The -xattrs-include option instead specifies a regular expression (regex)
+which includes any extended attribute that matches, and removes anything
+that doesn't match.
+
+Examples:
+
+% mksquashfs directory image.sqsh -xattrs-exclude "^user."
+
+Exclude any extended attributes in the User namespace from the Squashfs
+filesystem.
+
+% unsquashfs -xattrs-exclude "^user." image.sqsh
+
+As above, but exclude them when extracting from the Squashfs filesystem.
+
+% mksquashfs directory image.sqsh -xattrs-include "^user."
+
+Only include extended attributes in the User namespace in the Squashfs
+filesystem.
+
+3.1 Adding extended attributes
+------------------------------
+
+Mksquashfs and Sqfstar also allow you to add extended attributes to files in
+the Squashfs filesystem using the -xattrs-add option and the Pseudo file "x"
+definition. Both options take an xattr name and value pair separated by the
+'=' character.
+
+The extended attribute name can be any valid name and can be in the namespaces
+security, system, trusted, or user. User extended attributes are added to files
+and directories (see man 7 xattr for explanation), and the others are added to
+all files.
+
+The extended attribute value by default will be treated as binary (i.e. an
+uninterpreted byte sequence), but it can be prefixed with 0s, where it will be
+treated as base64 encoded, or prefixed with 0x, where it will be treated as
+hexidecimal.
+
+Obviously using base64 or hexadecimal allows values to be used that cannot be
+entered on the command line such as non-printable characters etc. But it
+renders the string non-human readable. To keep readability and to allow
+non-printable characters to be entered, the 0t prefix is supported. This
+encoding is similar to binary encoding, except backslashes are specially
+treated, and a backslash followed by three octal digits can be used to encode
+any ASCII character, which obviously can be used to encode non-printable values.
+The following four command lines are equivalent, and will add the extended
+attribute "user.comment" to all files:
+
+mksquashfs dir image.sqfs -xattrs-add "user.comment=hello world"
+mksquashfs dir image.sqfs -xattrs-add "user.comment=0saGVsbG8gd29ybGQ="
+mksquashfs dir image.sqfs -xattrs-add "user.comment=0x68656c6c6f20776f726c64"
+mksquashfs dir image.sqfs -xattrs-add "user.comment=0thello world"
+
+Obviously in the above example there are no non-printable characters and so
+the 0t prefixed string is identical to the first line. The following three
+command lines are identical, but where the space has been replaced by the
+non-printable NUL '\0' (null character):
+
+mksquashfs dir image.sqfs -xattrs-add "user.comment=0thello\000world"
+mksquashfs dir image.sqfs -xattrs-add "user.comment=0saGVsbG8Ad29ybGQ="
+mksquashfs dir image.sqfs -xattrs-add "user.comment=0x68656c6c6f00776f726c64"
+
+The following will add an extended attribute to the file named "Hello_World"
+using the Pseudo file "x" definition.
+
+% mksquashfs dir image.sqsh -p "Hello_World x user.comment=Hello_World"
+
+Again, as above, the following are equivalent:
+
+% mksquashfs dir image.sqfs -p "Hello_World x user.comment=0thello\000world"
+% mksquashfs dir image.sqfs -p "Hello_World x user.comment=0saGVsbG8Ad29ybGQ="
+% mksquashfs dir image.sqfs -p "Hello_World x user.comment=0x68656c6c6f00776f726c64"
+
+3.2 Filtering and adding extended attributes using Actions
+----------------------------------------------------------
+
+Finally Mksquashfs allows extended attributes to be filtered and added to files
+using the xattrs-exclude, xattrs-include and xattrs-add actions.
+
+xattrs-exclude(regex), this action excludes any extended attributes matching
+the regular expression, for any file matching the action tests.
+
+xattrs-include(regex), this action includes any extended attributes matching
+the regular expression, for any file matching the action tests.
+
+xattrs-add(name=val), this action will add the extended attribute <name>
+with contents <val>, to any file matching the action tests.
+
+Examples:
+
+% mksquashfs dir image.sqfs -action "xattrs-exclude(^user.)@true"
+
+This will exclude all extended attributes in the User namespace for all
+files (true matches on everything). It is directly equivalent to
+-xattrs-exclude "^user." used on the command line.
+
+% mksquashfs dir image.sqfs -action "xattrs-exclude(^user.)@type(f)"
+
+This will exclude all extended attributes in the User namespace but only for
+regular files.
+
+% mksquashfs dir image.sqfs -action "xattrs-include(^user.)@user(phillip)&&type(f)"
+
+Likewise this will include only extended attributes in the User namespace, but
+it will only be applied to regular files owned by user Phillip.
+
+% mksquashfs dir image.sqsh -action "xattrs-add(user.comment=Hello_World)@name(hello*)"
+
+This will add the extended attribute "user.comment" to any file named "hello*".
+
+The following two commands are equivalent:
+
+% mksquashfs dir image.sqsh -action "xattrs-add(user.comment=Hello_World)@pathname(Hello_World)"
+% mksquashfs dir image.sqsh -p "Hello_World x user.comment=Hello_World"
+
+
+4. Squashfs filesystems conversion (piping Unsquashfs output to Mksquashfs)
+---------------------------------------------------------------------------
+
+Sometimes you have an existing Squashfs filesystem which you want to
+regenerate using a different set of compression options, such as compression
+algorithm, block-size, tail-packing etc. Or you want to modify some parts of
+the filesystem such as excluding files, change ownership etc.
+
+Obviously you have always been able to mount the Squashfs filesystem and
+regenerate the filesystem by running Mksquashfs on the mounted directory. But this
+requires root access (which sometimes isn't available). The only other
+alternative until now has been to extract the Squashfs filesystem to an
+intermediate uncompressed directory, and then regenerate the filesystem by
+running Mksquashfs on that. This, however, is slow and requires storage to
+store the uncompressed filesystem.
+
+Unsquashfs can now output a Pseudo file representing the input filesystem
+to stdout, and Mksquashfs can now read a Pseudo file from stdin. This allows
+the output of Unsquashfs to be piped to Mksquashfs.
+
+Some examples follow.
+
+If you had a GZIP filesystem and wanted to convert it to ZSTD, you can do:
+
+% unsquashfs -pf - image.sqsh | mksquashfs - new.sqsh -pf - -comp zstd
+
+If you wanted to change to XZ compression, increase to a 1 Mbyte block size, and
+use -tailend packing, you could do:
+
+% unsquashfs -pf - image.sqsh | mksquashfs - new.sqsh -pf - -comp xz -b 1M -tailends
+
+If you only want the directory "foobar" you can tell Unsquashfs to only
+extract that:
+
+% unsquashfs -pf - image.sqsh foobar | mksquashfs - new.sqsh -pf -
+
+If you had inadvertently stored binary ".o" files, you can remove them by
+using the new "non-anchored" Unsquashfs exclude file functionality:
+
+% unsquashfs -excludes -pf - image.sqsh "... *.o" | mksquashfs - new.sqsh -pf -
+
+If you want to update all the file timestamps to "now", and make every
+file owned by "phillip":
+
+% unsquashfs -pf - image.sqsh | mksquashfs - new.sqsh -all-time now -force-uid phillip -pf -
+
+5. AUTHOR INFO
+--------------
+
+Squashfs was written by Phillip Lougher, email phillip@squashfs.org.uk,
+in Chepstow, Wales, UK. If you like the program, or have any problems,
+then please email me, as it's nice to get feedback!
diff --git a/TECHNICAL-INFO b/TECHNICAL-INFO
new file mode 100644
index 0000000..a5cc491
--- /dev/null
+++ b/TECHNICAL-INFO
@@ -0,0 +1,289 @@
+
+1. REPRODUCIBLE BUILDS (since version 4.4)
+------------------------------------------
+
+Ever since Mksquashfs was parallelised back in 2006, there
+has been a certain randomness in how fragments and multi-block
+files are ordered in the output filesystem even if the input
+remains the same.
+
+This is because the multiple parallel threads can be scheduled
+differently between Mksquashfs runs. For example, the thread
+given fragment 10 to compress may finish before the thread
+given fragment 9 to compress on one run (writing fragment 10
+to the output filesystem before fragment 9), but, on the next
+run it could be vice-versa. There are many different scheduling
+scenarios here, all of which can have a knock on effect causing
+different scheduling and ordering later in the filesystem too.
+
+Mksquashfs doesn't care about the ordering of fragments and
+multi-block files within the filesystem, as this does not
+affect the correctness of the filesystem.
+
+In fact not caring about the ordering, as it doesn't matter, allows
+Mksquashfs to run as fast as possible, maximising CPU and I/O
+performance.
+
+But, in the last couple of years, Squashfs has become used in
+scenarios (cloud etc) where this randomness is causing problems.
+Specifically this appears to be where downloaders, installers etc.
+try to work out the differences between Squashfs filesystem
+updates to minimise the amount of data that needs to be transferred
+to update an image.
+
+Additionally, in the last couple of years the notion of reproducible
+builds has arisen, that is, the same source and build environment
+etc. should be able to (re-)generate identical
+output. This is usually for verification and security, allowing
+binaries/distributions to be checked for malicious activity.
+See https://reproducible-builds.org/ for more information.
+
+Mksquashfs now generates reproducible images by default.
+Images generated by Mksquashfs will be ordered identically to
+previous runs if the same input has been supplied, and the
+same options used.
+
+1.1 Dealing with timestamps
+
+Timestamps embedded in the filesystem will still cause differences.
+Each new run of Mksquashfs will produce a different mkfs (make filesystem)
+timestamp in the super-block. Moreover if any file timestamps have changed
+(even if the content hasn't), this will produce a difference.
+
+To prevent timestamps from producing differences, the following
+new Mksquashfs options have been added.
+
+1.1.1 -mkfs-time <time>
+
+Set mkfs time to <time>. Time can be an integer which is the seconds since
+the epoch (1970-01-01 00:00:00 UTC), or a date string as recognised by the
+"date" command.
+
+1.1.2 -all-time <time>
+
+Set all file timestamps to <time>. Time can be an integer which is the seconds
+since the epoch (1970-01-01 00:00:00 UTC), or a date string as recognised by
+the "date" command.
+
+1.1.3 environment variable SOURCE_DATE_EPOCH
+
+As an alternative to the above command line options, you can
+set the environment variable SOURCE_DATE_EPOCH to a time value.
+
+This value will be used to set the mkfs time. Also any
+file timestamps which are after SOURCE_DATE_EPOCH will be
+clamped to SOURCE_DATE_EPOCH.
+
+See https://reproducible-builds.org/docs/source-date-epoch/
+for more information.
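+
+A minimal sketch of its use (the "source" and "image.sqsh" names are
+arbitrary):
+
+% SOURCE_DATE_EPOCH=1672531200 mksquashfs source image.sqsh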
+
+1.1.4 -not-reproducible
+
+This option tells Mksquashfs that the files do not have to be
+strictly ordered. This will make Mksquashfs behave like version 4.3.
+
+
+2. EXTENDED ATTRIBUTES (XATTRS)
+-------------------------------
+
+Squashfs file systems now have extended attribute support. The
+extended attribute implementation has the following features:
+
+1. Layout can store up to 2^48 bytes of compressed xattr data.
+2. Number of xattrs per inode unlimited.
+3. Total size of xattr data per inode 2^48 bytes of compressed data.
+4. Up to 4 Gbytes of data per xattr value.
+5. Inline and out-of-line xattr values supported for higher performance
+ in xattr scanning (listxattr & getxattr), and to allow xattr value
+ de-duplication.
+6. Both whole inode xattr duplicate detection and individual xattr value
+ duplicate detection supported. These can obviously nest, file C's
+ xattrs can be a complete duplicate of file B, and file B's xattrs
+ can be a partial duplicate of file A.
+7. Xattr name prefix types stored, allowing the redundant "user.", "trusted."
+ etc. characters to be eliminated and more concisely stored.
+8. Support for files, directories, symbolic links, device nodes, fifos
+ and sockets.
+
+Extended attribute support is in 2.6.35 and later kernels. File systems
+with extended attributes can be mounted on 2.6.29 and later kernels, but the
+extended attributes will be ignored with a warning.
+
+
+3. FILESYSTEM LAYOUT
+--------------------
+
+A squashfs filesystem consists of a maximum of nine parts, packed together on a
+byte alignment:
+
+ ---------------
+ | superblock |
+ |---------------|
+ | compression |
+ | options |
+ |---------------|
+ | datablocks |
+ | & fragments |
+ |---------------|
+ | inode table |
+ |---------------|
+ | directory |
+ | table |
+ |---------------|
+ | fragment |
+ | table |
+ |---------------|
+ | export |
+ | table |
+ |---------------|
+ | uid/gid |
+ | lookup table |
+ |---------------|
+ | xattr |
+ | table |
+ ---------------
+
+Compressed data blocks are written to the filesystem as files are read from
+the source directory, and checked for duplicates. Once all file data has been
+written the completed super-block, compression options, inode, directory,
+fragment, export, uid/gid lookup and xattr tables are written.
+
+3.1 Compression options
+-----------------------
+
+Compressors can optionally support compression specific options (e.g.
+dictionary size). If non-default compression options have been used, then
+these are stored here.
+
+3.2 Inodes
+----------
+
+Metadata (inodes and directories) are compressed in 8Kbyte blocks. Each
+compressed block is prefixed by a two byte length, the top bit is set if the
+block is uncompressed. A block will be uncompressed if the -noI option is set,
+or if the compressed block was larger than the uncompressed block.
+
+Inodes are packed into the metadata blocks, and are not aligned to block
+boundaries, therefore inodes overlap compressed blocks. Inodes are identified
+by a 48-bit number which encodes the location of the compressed metadata block
+containing the inode, and the byte offset into that block where the inode is
+placed (<block, offset>).
+
+To maximise compression there are different inodes for each file type
+(regular file, directory, device, etc.), the inode contents and length
+varying with the type.
+
+To further maximise compression, two types of regular file inode and
+directory inode are defined: inodes optimised for frequently occurring
+regular files and directories, and extended types where extra
+information has to be stored.
+
+3.3 Directories
+---------------
+
+Like inodes, directories are packed into compressed metadata blocks, stored
+in a directory table. Directories are accessed using the start address of
+the metablock containing the directory and the offset into the
+decompressed block (<block, offset>).
+
+Directories are organised in a slightly complex way, and are not simply
+a list of file names. The organisation takes advantage of the
+fact that (in most cases) the inodes of the files will be in the same
+compressed metadata block, and therefore, can share the start block.
+Directories are therefore organised in a two level list, a directory
+header containing the shared start block value, and a sequence of directory
+entries, each of which shares that start block. A new directory header
+is written if/when the inode start block changes. The directory
+header/directory entry list is repeated as many times as necessary.
+
+Directories are sorted, and can contain a directory index to speed up
+file lookup. Directory indexes store one entry per metablock, each entry
+storing the index/filename mapping to the first directory header
+in each metadata block. Directories are sorted in alphabetical order,
+and at lookup the index is scanned linearly looking for the first filename
+alphabetically larger than the filename being looked up. At this point the
+location of the metadata block the filename is in has been found.
+The general idea of the index is to ensure that only one metadata block
+needs to be decompressed to do a lookup irrespective of the length of the
+directory.
+This scheme has the advantage that it doesn't require extra memory overhead
+and doesn't require much extra storage on disk.
+
+3.4 File data
+-------------
+
+Regular files consist of a sequence of contiguous compressed blocks, and/or a
+compressed fragment block (tail-end packed block). The compressed size
+of each datablock is stored in a block list contained within the
+file inode.
+
+To speed up access to datablocks when reading 'large' files (256 Mbytes or
+larger), the code implements an index cache that caches the mapping from
+block index to datablock location on disk.
+
+The index cache allows Squashfs to handle large files (up to 1.75 TiB) while
+retaining a simple and space-efficient block list on disk. The cache
+is split into slots, caching up to eight 224 GiB files (128 KiB blocks).
+Larger files use multiple slots, with 1.75 TiB files using all 8 slots.
+The index cache is designed to be memory efficient, and by default uses
+16 KiB.
+
+3.5 Fragment lookup table
+-------------------------
+
+Regular files can contain a fragment index which is mapped to a fragment
+location on disk and compressed size using a fragment lookup table. This
+fragment lookup table is itself stored compressed into metadata blocks.
+A second index table is used to locate these. For speed of access (and
+because it is small), this second index table is read at mount time and
+cached in memory.
+
+3.6 Uid/gid lookup table
+------------------------
+
+For space efficiency regular files store uid and gid indexes, which are
+converted to 32-bit uids/gids using an id look up table. This table is
+stored compressed into metadata blocks. A second index table is used to
+locate these. For speed of access (and because it is small), this second
+index table is read at mount time and cached in memory.
+
+3.7 Export table
+----------------
+
+To enable Squashfs filesystems to be exportable (via NFS etc.) filesystems
+can optionally (disabled with the -no-exports Mksquashfs option) contain
+an inode number to inode disk location lookup table. This is required to
+enable Squashfs to map inode numbers passed in filehandles to the inode
+location on disk, which is necessary when the export code reinstantiates
+expired/flushed inodes.
+
+This table is stored compressed into metadata blocks. A second index table is
+used to locate these. For speed of access (and because it is small), this
+second index table is read at mount time and cached in memory.
+
+3.8 Xattr table
+---------------
+
+The xattr table contains extended attributes for each inode. The xattrs
+for each inode are stored in a list, each list entry containing a type,
+name and value field. The type field encodes the xattr prefix
+("user.", "trusted." etc) and it also encodes how the name/value fields
+should be interpreted. Currently the type indicates whether the value
+is stored inline (in which case the value field contains the xattr value),
+or if it is stored out of line (in which case the value field stores a
+reference to where the actual value is stored). This allows large values
+to be stored out of line, improving scanning and lookup performance, and it
+also allows values to be de-duplicated, the value being stored once, and
+all other occurrences holding an out-of-line reference to that value.
+
+The xattr lists are packed into compressed 8K metadata blocks.
+To reduce overhead in inodes, rather than storing the on-disk
+location of the xattr list inside each inode, a 32-bit xattr id
+is stored. This xattr id is mapped into the location of the xattr
+list using a second xattr id lookup table.
+
+4. AUTHOR INFO
+--------------
+
+Squashfs was written by Phillip Lougher, email phillip@squashfs.org.uk,
+in Chepstow, Wales, UK. If you like the program, or have any problems,
+then please email me, as it's nice to get feedback!
diff --git a/USAGE-4.6 b/USAGE-4.6
new file mode 100644
index 0000000..fcc28dc
--- /dev/null
+++ b/USAGE-4.6
@@ -0,0 +1,94 @@
+ SQUASHFS - A squashed read-only filesystem for Linux
+
+ Copyright 2002-2023 Phillip Lougher <phillip@squashfs.org.uk>
+
+ Released under the GPL licence (version 2 or later).
+
+Welcome to Squashfs-tools. Please read the CHANGES and README-4.6 files for the
+added features and improvements.
+
+Squashfs is a highly compressed read-only filesystem for Linux.
+It uses gzip, xz, lzo, lz4 or zstd compression to compress files, inodes
+and directories. Inodes in the system are very small and all blocks are
+packed to minimise data overhead. Block sizes greater than 4K are supported
+up to a maximum of 1 Mbyte (default block size 128K).
+
+Squashfs is intended for general read-only filesystem use, for archival
+use (i.e. in cases where a .tar.gz file may be used), and in constrained
+block device/memory systems (e.g. embedded systems) where low overhead is
+needed.
+
+1. SQUASHFS AND TOOLS OVERVIEW
+------------------------------
+
+1. Data, inodes and directories can be compressed.
+
+2. Squashfs stores full uid/gids (32 bits), and file creation time.
+
+3. In theory files up to 2^64 bytes are supported. In theory filesystems can
+ be up to 2^64 bytes.
+
+4. Inode and directory data are highly compacted, and packed on byte
+ boundaries. Each compressed inode is on average 8 bytes in length
+   (the exact length varies with file type, i.e. regular file, directory,
+ symbolic link, and block/char device inodes have different sizes).
+
+5. Squashfs can use block sizes up to 1Mbyte (the default size is 128K).
+ Using 128K blocks achieves greater compression ratios than the normal
+ 4K block size.
+
+6. File duplicates are detected and removed.
+
+7. Filesystems can be compressed with gzip, xz (lzma2), lzo, lz4
+ or zstd compression algorithms.
+
+8. Pseudo files are supported by Mksquashfs/Sqfstar which allow files which
+ don't exist in the source filesystem to be created on-demand.
+
+9. Creation of Squashfs filesystems from TAR archives is supported using
+ Sqfstar or the -tar option of Mksquashfs.
+
+10. Extended attributes are supported, and there is a full range of options
+    to filter attributes on filesystem creation and extraction, and additional
+    extended attributes can be created which don't exist in the source
+    filesystem.
+
+
+2. SQUASHFS TOOLS AVAILABLE
+---------------------------
+
+The Squashfs-tools package consists of four programs.
+
+1. MKSQUASHFS
+
+This tool creates Squashfs filesystems from a set of source files and
+directories. The file USAGE-MKSQUASHFS-4.6 describes how to use this
+program. The mksquashfs manpage (once installed) contains more concise
+information and a lot of usage examples.
+
+2. UNSQUASHFS
+
+This tool allows you to extract and list the contents of Squashfs
+filesystems without mounting. The file USAGE-UNSQUASHFS-4.6 describes
+how to use this program. The unsquashfs manpage (once installed) contains
+more concise information and a lot of usage examples.
+
+3. SQFSTAR
+
+This tool creates Squashfs filesystems from a TAR archive. The file
+USAGE-SQFSTAR-4.6 describes how to use this program. The sqfstar manpage
+(once installed) contains more concise information and a lot of usage examples.
+
+4. SQFSCAT
+
+This program allows you to "cat" files to STDOUT from a Squashfs filesystem.
+USAGE-SQFSCAT-4.6 describes how to use this program. The sqfscat manpage
+(once installed) contains more concise information and a lot of usage examples.
+
+3. SUMMARY OF USAGE FILES
+-------------------------
+
+For MKSQUASHFS see USAGE-MKSQUASHFS-4.6
+For UNSQUASHFS see USAGE-UNSQUASHFS-4.6
+For SQFSTAR see USAGE-SQFSTAR-4.6
+For SQFSCAT see USAGE-SQFSCAT-4.6
diff --git a/USAGE-MKSQUASHFS-4.6 b/USAGE-MKSQUASHFS-4.6
new file mode 100644
index 0000000..37dab44
--- /dev/null
+++ b/USAGE-MKSQUASHFS-4.6
@@ -0,0 +1,1069 @@
+ MKSQUASHFS - a tool to create Squashfs filesystems
+
+As Squashfs is a read-only filesystem, the Mksquashfs program must be used to
+create populated squashfs filesystems.
+
+SYNTAX: mksquashfs source1 source2 ... FILESYSTEM [OPTIONS] [-e list of
+exclude dirs/files]
+
+Filesystem compression options:
+-b <block_size> set data block to <block_size>. Default 128 Kbytes.
+ Optionally a suffix of K or M can be given to specify
+ Kbytes or Mbytes respectively
+-comp <comp> select <comp> compression
+ Compressors available:
+ gzip (default)
+ lzo
+ lz4
+ xz
+ zstd
+-noI do not compress inode table
+-noId do not compress the uid/gid table (implied by -noI)
+-noD do not compress data blocks
+-noF do not compress fragment blocks
+-noX do not compress extended attributes
+-no-compression do not compress any of the data or metadata. This is
+ equivalent to specifying -noI -noD -noF and -noX
+
+Filesystem build options:
+-tar read uncompressed tar file from standard in (stdin)
+-no-strip act like tar, and do not strip leading directories
+ from source files
+-tarstyle alternative name for -no-strip
+-cpiostyle act like cpio, and read file pathnames from standard in
+ (stdin)
+-cpiostyle0 like -cpiostyle, but filenames are null terminated. Can
+ be used with find -print0 action
+-reproducible build filesystems that are reproducible (default)
+-not-reproducible build filesystems that are not reproducible
+-mkfs-time <time> set filesystem creation timestamp to <time>. <time> can
+ be an unsigned 32-bit int indicating seconds since the
+ epoch (1970-01-01) or a string value which is passed to
+ the "date" command to parse. Any string value which the
+ date command recognises can be used such as "now",
+ "last week", or "Wed Feb 15 21:02:39 GMT 2023"
+-all-time <time> set all file timestamps to <time>. <time> can be an
+ unsigned 32-bit int indicating seconds since the epoch
+ (1970-01-01) or a string value which is passed to the
+ "date" command to parse. Any string value which the date
+ command recognises can be used such as "now", "last
+ week", or "Wed Feb 15 21:02:39 GMT 2023"
+-root-time <time> set root directory time to <time>. <time> can be an
+ unsigned 32-bit int indicating seconds since the epoch
+ (1970-01-01) or a string value which is passed to the
+ "date" command to parse. Any string value which the date
+ command recognises can be used such as "now", "last
+ week", or "Wed Feb 15 21:02:39 GMT 2023"
+-root-mode <mode> set root directory permissions to octal <mode>
+-root-uid <value> set root directory owner to specified <value>,
+ <value> can be either an integer uid or user name
+-root-gid <value> set root directory group to specified <value>,
+ <value> can be either an integer gid or group name
+-all-root make all files owned by root
+-force-uid <value> set all file uids to specified <value>, <value> can be
+ either an integer uid or user name
+-force-gid <value> set all file gids to specified <value>, <value> can be
+ either an integer gid or group name
+-pseudo-override make pseudo file uids and gids override -all-root,
+ -force-uid and -force-gid options
+-no-exports do not make filesystem exportable via NFS (-tar default)
+-exports make filesystem exportable via NFS (default)
+-no-sparse do not detect sparse files
+-no-tailends do not pack tail ends into fragments (default)
+-tailends pack tail ends into fragments
+-no-fragments do not use fragments
+-no-duplicates do not perform duplicate checking
+-no-hardlinks do not hardlink files, instead store duplicates
+-keep-as-directory if one source directory is specified, create a root
+ directory containing that directory, rather than the
+ contents of the directory
+
+Filesystem filter options:
+-p <pseudo-definition> add pseudo file definition. The definition should
+ be quoted
+-pf <pseudo-file> add list of pseudo file definitions from <pseudo-file>,
+ use - for stdin. Pseudo file definitions should not be
+ quoted
+-sort <sort_file> sort files according to priorities in <sort_file>. One
+ file or dir with priority per line. Priority -32768 to
+ 32767, default priority 0
+-ef <exclude_file> list of exclude dirs/files. One per line
+-wildcards allow extended shell wildcards (globbing) to be used in
+ exclude dirs/files
+-regex allow POSIX regular expressions to be used in exclude
+ dirs/files
+-max-depth <levels> descend at most <levels> of directories when scanning
+ filesystem
+-one-file-system do not cross filesystem boundaries. If a directory
+ crosses the boundary, create an empty directory for
+ each mount point. If a file crosses the boundary
+ ignore it
+-one-file-system-x do not cross filesystem boundaries. Like
+ -one-file-system option except directories are also
+ ignored if they cross the boundary
+
+Filesystem extended attribute (xattrs) options:
+-no-xattrs do not store extended attributes
+-xattrs store extended attributes (default)
+-xattrs-exclude <regex> exclude any xattr names matching <regex>. <regex> is a
+ POSIX regular expression, e.g. -xattrs-exclude '^user.'
+ excludes xattrs from the user namespace
+-xattrs-include <regex> include any xattr names matching <regex>. <regex> is a
+ POSIX regular expression, e.g. -xattrs-include '^user.'
+ includes xattrs from the user namespace
+-xattrs-add <name=val>  add the xattr <name> with <val> to files. If a
+ user xattr it will be added to regular files and
+ directories (see man 7 xattr). Otherwise it will be
+ added to all files. <val> by default will be treated as
+ binary (i.e. an uninterpreted byte sequence), but it can
+ be prefixed with 0s, where it will be treated as base64
+ encoded, or prefixed with 0x, where val will be treated
+                        as hexadecimal. Additionally it can be prefixed with
+ 0t where this encoding is similar to binary encoding,
+ except backslashes are specially treated, and a
+ backslash followed by 3 octal digits can be used to
+ encode any ASCII character, which obviously can be used
+ to encode control codes. The option can be repeated
+ multiple times to add multiple xattrs
+
+Mksquashfs runtime options:
+-version print version, licence and copyright message
+-exit-on-error treat normally ignored errors as fatal
+-quiet no verbose output
+-info print files written to filesystem
+-no-progress do not display the progress bar
+-progress display progress bar when using the -info option
+-percentage display a percentage rather than the full progress bar.
+ Can be used with dialog --gauge etc.
+-throttle <percentage> throttle the I/O input rate by the given percentage.
+ This can be used to reduce the I/O and CPU consumption
+ of Mksquashfs
+-limit <percentage> limit the I/O input rate to the given percentage.
+ This can be used to reduce the I/O and CPU consumption
+ of Mksquashfs (alternative to -throttle)
+-processors <number> use <number> processors. By default will use number of
+ processors available
+-mem <size> use <size> physical memory for caches. Use K, M or G to
+ specify Kbytes, Mbytes or Gbytes respectively
+-mem-percent <percent> use <percent> physical memory for caches. Default 25%
+-mem-default print default memory usage in Mbytes
+
+Filesystem append options:
+-noappend do not append to existing filesystem
+-root-becomes <name> when appending source files/directories, make the
+ original root become a subdirectory in the new root
+ called <name>, rather than adding the new source items
+ to the original root
+-no-recovery do not generate a recovery file
+-recovery-path <name> use <name> as the directory to store the recovery file
+-recover <name> recover filesystem data using recovery file <name>
+
+Filesystem actions options:
+-action <action@expr> evaluate <expr> on every file, and execute <action>
+ if it returns TRUE
+-log-action <act@expr> as above, but log expression evaluation results and
+ actions performed
+-true-action <act@expr> as above, but only log expressions which return TRUE
+-false-action <act@exp> as above, but only log expressions which return FALSE
+-action-file <file> as action, but read actions from <file>
+-log-action-file <file> as -log-action, but read actions from <file>
+-true-action-file <f> as -true-action, but read actions from <f>
+-false-action-file <f> as -false-action, but read actions from <f>
+
+Tar file only options:
+-default-mode <mode> tar files often do not store permissions for
+ intermediate directories. This option sets the default
+ directory permissions to octal <mode>, rather than 0755.
+ This also sets the root inode mode
+-default-uid <uid> tar files often do not store uids for intermediate
+ directories. This option sets the default directory
+ owner to <uid>, rather than the user running Mksquashfs.
+ This also sets the root inode uid
+-default-gid <gid> tar files often do not store gids for intermediate
+ directories. This option sets the default directory
+ group to <gid>, rather than the group of the user
+ running Mksquashfs. This also sets the root inode gid
+-ignore-zeros allow tar files to be concatenated together and fed to
+ Mksquashfs. Normally a tarfile has two consecutive 512
+ byte blocks filled with zeros which means EOF and
+ Mksquashfs will stop reading after the first tar file on
+ encountering them. This option makes Mksquashfs ignore
+ the zero filled blocks
+
+Expert options (these may make the filesystem unmountable):
+-nopad do not pad filesystem to a multiple of 4K
+-offset <offset> skip <offset> bytes at the beginning of FILESYSTEM.
+ Optionally a suffix of K, M or G can be given to specify
+ Kbytes, Mbytes or Gbytes respectively.
+ Default 0 bytes
+-o <offset> synonym for -offset
+
+Miscellaneous options:
+-fstime <time> alternative name for -mkfs-time
+-always-use-fragments alternative name for -tailends
+-root-owned alternative name for -all-root
+-noInodeCompression alternative name for -noI
+-noIdTableCompression alternative name for -noId
+-noDataCompression alternative name for -noD
+-noFragmentCompression alternative name for -noF
+-noXattrCompression alternative name for -noX
+
+-help output this options text to stdout
+-h output this options text to stdout
+
+-Xhelp print compressor options for selected compressor
+
+Pseudo file definition format:
+"filename d mode uid gid" create a directory
+"filename m mode uid gid" modify filename
+"filename b mode uid gid major minor" create a block device
+"filename c mode uid gid major minor" create a character device
+"filename f mode uid gid command" create file from stdout of command
+"filename s mode uid gid symlink" create a symbolic link
+"filename i mode uid gid [s|f]" create a socket (s) or FIFO (f)
+"filename x name=val" create an extended attribute
+"filename l linkname" create a hard-link to linkname
+"filename L pseudo_filename" same, but link to pseudo file
+"filename D time mode uid gid" create a directory with timestamp time
+"filename M time mode uid gid" modify a file with timestamp time
+"filename B time mode uid gid major minor"
+ create block device with timestamp time
+"filename C time mode uid gid major minor"
+ create char device with timestamp time
+"filename F time mode uid gid command" create file with timestamp time
+"filename S time mode uid gid symlink" create symlink with timestamp time
+"filename I time mode uid gid [s|f]" create socket/fifo with timestamp time
+
+
+Source1 source2 ... are the source directories/files containing the
+files/directories that will form the squashfs filesystem. If a single
+directory is specified (i.e. mksquashfs source image.sqfs) the squashfs
+filesystem will consist of that directory, with the top-level root
+directory containing the contents.
+
+If multiple source directories or files are specified, Mksquashfs will merge
+the specified sources into a single filesystem, with the root directory
+containing each of the source files/directories. The name of each directory
+entry will be the basename of the source path. If more than one source
+entry maps to the same name, the conflicts are named xxx_1, xxx_2, etc. where
+xxx is the original name.
+
+To make this clear, take two example directories. Source directory
+"/home/phillip/test" contains "file1", "file2" and "dir1".
+Source directory "goodies" contains "goodies1", "goodies2" and "goodies3".
+
+usage example 1:
+
+% mksquashfs /home/phillip/test image.sqfs
+
+This will generate a squashfs filesystem with root entries
+"file1", "file2" and "dir1".
+
+example 2:
+
+% mksquashfs /home/phillip/test goodies image.sqfs
+
+This will create a squashfs filesystem with the root containing
+entries "test" and "goodies" corresponding to the source
+directories "/home/phillip/test" and "goodies".
+
+example 3:
+
+% mksquashfs /home/phillip/test goodies test image.sqfs
+
+This is the same as the previous example, except a third
+source directory "test" has been specified. This conflicts
+with the first directory named "test" and will be renamed "test_1".
+
+Multiple sources allow filesystems to be generated without needing to
+copy all source files into a common directory. This simplifies creating
+filesystems.
+
+The -keep-as-directory option can be used when only one source directory
+is specified, and you wish the root to contain that directory, rather than
+the contents of the directory. For example:
+
+example 4:
+
+% mksquashfs /home/phillip/test image.sqfs -keep-as-directory
+
+This is the same as example 1, except for -keep-as-directory.
+This will generate a root directory containing directory "test",
+rather than the "test" directory contents "file1", "file2" and "dir1".
+
+If you want the full path to be retained (like TAR behaviour), you can specify
+the -no-strip option.
+
+example 5:
+
+% mksquashfs /home/phillip/test image.sqfs -no-strip
+
+This will make a filesystem with "home", "home/phillip" and "home/phillip/test"
+directories, see section 4 for more details.
+
+The FILESYSTEM argument is the destination where the squashfs filesystem will be
+written. This can either be a conventional file or a block device. If the file
+doesn't exist it will be created, if it does exist and a squashfs
+filesystem exists on it, Mksquashfs will append. The -noappend option will
+write a new filesystem irrespective of whether an existing filesystem is
+present. See sections 11 and 12 for more details about appending.
+
+1. CHANGING COMPRESSION ALGORITHM AND COMPRESSION SPECIFIC OPTIONS
+------------------------------------------------------------------
+
+By default Mksquashfs will compress using the GZIP compression algorithm. This
+algorithm offers a good trade-off between compression ratio, and memory and time
+taken to decompress.
+
+Squashfs also supports LZ4, LZO, XZ and ZSTD compression. LZO offers worse
+compression ratio than GZIP, but is faster to decompress. XZ offers better
+compression ratio than GZIP, but at the expense of greater memory and time
+to decompress (and significantly more time to compress). LZ4 is similar
+to LZO. ZSTD has been developed by Facebook, and aims to compress well and
+be fast to decompress. You should experiment with the compressors to
+see which is best for you.
+
+If you're not building the squashfs-tools and kernel from source, then the tools
+and kernel may or may not have been built with support for LZ4, LZO, XZ or ZSTD
+compression. The compression algorithms supported by the build of Mksquashfs
+can be found by typing mksquashfs -help.
+
+The full list of compressors available and their compression specific options
+are:
+
+Compressors available and compressor specific options:
+ gzip (default)
+ -Xcompression-level <compression-level>
+ <compression-level> should be 1 .. 9 (default 9)
+ -Xwindow-size <window-size>
+ <window-size> should be 8 .. 15 (default 15)
+ -Xstrategy strategy1,strategy2,...,strategyN
+ Compress using strategy1,strategy2,...,strategyN in turn
+ and choose the best compression.
+ Available strategies: default, filtered, huffman_only,
+ run_length_encoded and fixed
+ lzo
+ -Xalgorithm <algorithm>
+ Where <algorithm> is one of:
+ lzo1x_1
+ lzo1x_1_11
+ lzo1x_1_12
+ lzo1x_1_15
+ lzo1x_999 (default)
+ -Xcompression-level <compression-level>
+ <compression-level> should be 1 .. 9 (default 8)
+ Only applies to lzo1x_999 algorithm
+ lz4
+ -Xhc
+ Compress using LZ4 High Compression
+ xz
+ -Xbcj filter1,filter2,...,filterN
+ Compress using filter1,filter2,...,filterN in turn
+ (in addition to no filter), and choose the best compression.
+ Available filters: x86, arm, armthumb, powerpc, sparc, ia64
+ -Xdict-size <dict-size>
+ Use <dict-size> as the XZ dictionary size. The dictionary size
+ can be specified as a percentage of the block size, or as an
+ absolute value. The dictionary size must be less than or equal
+ to the block size and 8192 bytes or larger. It must also be
+ storable in the xz header as either 2^n or as 2^n+2^(n+1).
+ Example dict-sizes are 75%, 50%, 37.5%, 25%, or 32K, 16K, 8K
+ etc.
+ zstd
+ -Xcompression-level <compression-level>
+ <compression-level> should be 1 .. 22 (default 15)
+
+If the compressor offers compression specific options (all the compressors now
+have compression specific options except the deprecated lzma1 compressor)
+then these options are also displayed (i.e. in the above XZ is shown with two
+compression specific options). The compression specific options are, obviously,
+specific to the compressor in question, and the compressor documentation and
+web sites should be consulted to understand their behaviour. In general
+the Mksquashfs compression defaults for each compressor are optimised to
+give the best performance for each compressor, where what constitutes
+best depends on the compressor. For GZIP/XZ best means highest compression,
+for LZO/LZ4 best means a tradeoff between compression and (de)-compression
+overhead (LZO/LZ4 by definition are intended for weaker processors).
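+
+As an illustrative sketch (the "source" and "image.sqfs" names are arbitrary),
+a filesystem could be built with XZ compression, the x86 BCJ filter and a
+dictionary size of 50% of the block size as follows:
+
+% mksquashfs source image.sqfs -comp xz -Xbcj x86 -Xdict-size 50%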
+
+
+2. REDUCING CPU AND I/O USAGE
+-----------------------------
+
+By default Mksquashfs will use all the CPUs available to compress and create the
+filesystem, and will read from the source files and write to the output
+filesystem as fast as possible. This maximises both CPU usage and I/O.
+
+Sometimes you don't want Mksquashfs to use all CPU and I/O bandwidth. For those
+cases Mksquashfs supports two complementary options, -processors and -throttle.
+
+The -processors option can be used to reduce the number of parallel compression
+threads used by Mksquashfs. Reducing this to 1 will create the minimum number
+of threads, and this will reduce CPU usage, and that in turn will reduce I/O
+(because CPUs are normally the bottleneck).
+
+The -throttle option reduces the speed Mksquashfs reads from the source files.
+The value is a percentage (obviously from 1 - 100), and 50 will reduce the
+read rate by half (the read thread will spend half its time idling), and 75
+by three quarters. Reducing the speed of I/O will also reduce the CPU
+usage as there is insufficient data rate to use all cores.
+
+Which option should you use? Both will effectively reduce CPU and I/O in
+normal cases where intensive use is being made of both I/O and CPUs. But
+in edge cases there can be an imbalance where reducing one has no effect, or
+it can't be reduced any further. For instance when there is only 1 or 2 cores
+available, setting -processors to the minimum of 1 may still use too much
+CPU. Additionally if your input source is slow Mksquashfs may still max it out
+with -processors set to the minimum of 1. In this case you can use -throttle
+in addition to -processors or on its own.
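+
+For example (the "source" and "image.sqfs" names are arbitrary), to restrict
+Mksquashfs to a single compression thread and halve the read rate:
+
+% mksquashfs source image.sqfs -processors 1 -throttle 50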
+
+
+3. CHANGING GLOBAL COMPRESSION DEFAULTS USED IN MKSQUASHFS
+----------------------------------------------------------
+
+There are a large number of options that can be used to control the
+compression in Mksquashfs. By and large the defaults are the optimum
+settings and should rarely need to be changed.
+
+Note, this does not apply to the block size: increasing the block size from the
+default of 128 Kbytes will increase compression (especially for the XZ and ZSTD
+compressors) and should increase I/O performance too. However, a block size of
+greater than 128 Kbytes may increase latency in certain cases (where the
+filesystem contains lots of fragments, and no locality of reference is
+observed). For this reason the block size default is configured to the less
+optimal 128 Kbytes. Users should experiment with 256 Kbyte sizes or above.
+
+The -b option allows the block size to be selected, both "K" and "M" postfixes
+are supported, this can be either 4K, 8K, 16K, 32K, 64K, 128K, 256K, 512K or
+1M bytes.
+
+The -noI, -noD, -noF and -noX options can be used to force Mksquashfs to not
+compress inodes/directories, data, fragments and extended attributes
+respectively. Giving all options generates an uncompressed filesystem.
+
+The -no-fragments option tells Mksquashfs to not generate fragment blocks. A
+fragment block consists of multiple small files (all smaller than the block
+size) packed and compressed together. This produces much better compression
+than storing and compressing these files separately. It also typically
+improves I/O as multiple files in the same directory are read at the same time.
+You don't want to enable this option unless you fully understand the effects.
+
+The -tailends option tells Mksquashfs to always generate fragments for files
+irrespective of the file length. By default only small files less than the data
+block size are packed into fragment blocks. The tail ends of files which do not
+fit fully into a block are NOT by default packed into fragments. This is a
+legacy setting from when hard disks were mechanical and had slow seek times. In
+general setting this option will gain a little more compression, without
+affecting I/O performance.
+
+The -no-duplicates option tells Mksquashfs to not check the files being added to
+the filesystem for duplicates. This can result in quicker filesystem
+generation, although obviously compression will suffer badly if there are a lot
+of duplicates.
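+
+As a sketch (the "source" and "image.sqfs" names are arbitrary), a larger
+block size and tail-end packing can be tried with:
+
+% mksquashfs source image.sqfs -b 256K -tailends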
+
+
+4. TAR STYLE HANDLING OF SOURCE PATHNAMES IN MKSQUASHFS
+-------------------------------------------------------
+
+Mksquashfs has always stripped the leading directories of any source pathnames
+given on the command line.
+
+For example given the command line
+
+% mksquashfs dir-a/dir-b/dir-c/file1 dir-A/dir-B/file2 sqfs
+
+Mksquashfs will strip the leading directories, and put file1 and file2 into
+the same root directory. If file1 and file2 are directories it will place the
+directories into the same root directory, but, if only one directory is
+specified, it will put the contents of that directory into the root directory.
+Obviously, for a lot of the time this is what you want. But, if it isn't what
+you want it can be quite difficult to get Mksquashfs to do something different.
+
+A lot of people don't like this, and would prefer Mksquashfs acted like "tar",
+which does not strip leading directories. This allows you to create a directory
+hierarchy from the pathnames of the supplied files. In the above example, the
+tar archive would contain the pathnames "dir-a/dir-b/dir-c/file1" and
+"dir-A/dir-B/file2".
+
+Mksquashfs will act like tar if given the option -no-strip, or -tarstyle.
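+
+Taking the earlier example command line, adding -no-strip retains the full
+"dir-a/dir-b/dir-c/file1" and "dir-A/dir-B/file2" paths in the filesystem:
+
+% mksquashfs dir-a/dir-b/dir-c/file1 dir-A/dir-B/file2 sqfs -no-strip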
+
+
+5. CPIO STYLE HANDLING OF SOURCE PATHNAMES IN MKSQUASHFS
+--------------------------------------------------------
+
+Mksquashfs allows you to pipe the set of files to be added to the filesystem to
+standard in (stdin), if the -cpiostyle option is given.
+
+As with cpio, directories are not recursively scanned and their contents added.
+Every file to be added to the filesystem must be explicitly specified.
+
+Typically the list of files to be added will be produced via find, or a similar
+utility.
+
+For example
+
+% find /home/phillip/squashfs-tools | mksquashfs - img.sqfs -cpiostyle
+
+Will create an image containing everything in squashfs-tools and its
+sub-directories. Note, "-" is given as the source pathname in Mksquashfs, and
+indicates no command line sources.
+
+The following will add just the files ending in .c, .h and .o.
+
+% find /home/phillip/squashfs-tools -name "*.[cho]" | mksquashfs - img.sqfs -cpiostyle
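+
+If filenames may contain newlines, the null terminated variant can be used
+instead (a sketch using the -cpiostyle0 option described above):
+
+% find /home/phillip/squashfs-tools -name "*.[cho]" -print0 | mksquashfs - img.sqfs -cpiostyle0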
+
+
+6. SPECIFYING THE UIDs/GIDs USED IN THE FILESYSTEM
+--------------------------------------------------
+
+By default files in the generated filesystem inherit the UID and GID ownership
+of the original file. However, Mksquashfs provides a number of options which
+can be used to override the ownership.
+
+The -all-root option forces all file uids/gids in the generated Squashfs
+filesystem to be root. This allows root owned filesystems to be built without
+root access on the host machine.
+
+The "-force-uid uid" option forces all files in the generated Squashfs
+filesystem to be owned by the specified uid. The uid can be specified either by
+name (i.e. "root") or by number.
+
+The "-force-gid gid" option forces all files in the generated Squashfs
+filesystem to be group owned by the specified gid. The gid can be specified
+either by name (i.e. "root") or by number.
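+
+For example (the user and group name "phillip" and the "source" and
+"image.sqfs" names are arbitrary), to make every file owned by that user and
+group:
+
+% mksquashfs source image.sqfs -force-uid phillip -force-gid phillip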
+
+
+7. EXCLUDING FILES FROM THE FILESYSTEM
+--------------------------------------
+
+The -e and -ef options allow files/directories to be specified which are
+excluded from the output filesystem. The -e option takes the exclude
+files/directories from the command line, the -ef option takes the
+exclude files/directories from the specified exclude file, one file/directory
+per line.
+
+Two styles of exclude file matching are supported: basic exclude matching, and
+extended wildcard matching. Basic exclude matching is a legacy feature
+retained for backwards compatibility with earlier versions of Mksquashfs.
+Extended wildcard matching should be used in preference.
+
+7.1 BASIC EXCLUDE MATCHING
+--------------------------
+
+Each exclude file is treated as an exact match of a file/directory in
+the source directories. If an exclude file/directory is absolute (i.e.
+prefixed with /, ../, or ./) the entry is treated as absolute, however, if an
+exclude file/directory is relative, it is treated as being relative to each of
+the sources in turn, i.e.
+
+% mksquashfs /tmp/source1 source2 output_fs -e ex1 /tmp/source1/ex2 out/ex3
+
+Will generate exclude files /tmp/source1/ex2, /tmp/source1/ex1, source2/ex1,
+/tmp/source1/out/ex3 and source2/out/ex3.
+
+7.2 EXTENDED EXCLUDE FILE HANDLING
+----------------------------------
+
+Extended exclude file matching treats each exclude file as a wildcard or
+regex expression. To enable wildcard matching specify the -wildcards
+option, and to enable regex matching specify the -regex option. In most
+cases the -wildcards option should be used rather than -regex because wildcard
+matching behaviour is significantly easier to understand!
+
+In addition to wildcards/regex expressions, exclude files can be "anchored" or
+"non-anchored". An anchored exclude is one which matches from the root of the
+directory and nowhere else, a non-anchored exclude matches anywhere. For
+example given the directory hierarchy "a/b/c/a/b", the anchored exclude
+"a/b" will match "a/b" at the root of the directory hierarchy, but
+it will not match the "/a/b" sub-directory within directory "c", whereas a
+non-anchored exclude would.
+
+A couple of examples should make this clearer.
+
+Anchored excludes
+
+ 1. mksquashfs example image.sqsh -wildcards -e 'test/*.gz'
+
+ Exclude all files matching "*.gz" in the top level directory "test".
+
+ 2. mksquashfs example image.sqsh -wildcards -e '*/[Tt]est/example*'
+
+ Exclude all files beginning with "example" inside directories called
+ "Test" or "test", that occur inside any top level directory.
+
+ Using extended wildcards, negative matching is also possible.
+
+ 3. mksquashfs example image.sqsh -wildcards -e 'test/!(*data*).gz'
+
+ Exclude all files matching "*.gz" in top level directory "test",
+ except those with "data" in the name.
+
+Non-anchored excludes
+
+ By default excludes match from the top level directory, but it is
+ often useful to exclude a file matching anywhere in the source directories.
+    For this, non-anchored excludes can be used, specified by prefixing the
+ exclude with "...".
+
+ Examples:
+
+ 1. mksquashfs example image.sqsh -wildcards -e '... *.gz'
+
+ Exclude files matching "*.gz" anywhere in the source directories.
+ For example this will match "example.gz", "test/example.gz", and
+ "test/test/example.gz".
+
+ 2. mksquashfs example image.sqsh -wildcards -e '... [Tt]est/*.gz'
+
+ Exclude files matching "*.gz" inside directories called "Test" or
+ "test" that occur anywhere in the source directories.
+
+ Again, using extended wildcards, negative matching is also possible.
+
+ 3. mksquashfs example image.sqsh -wildcards -e '... !(*data*).gz'
+
+ Exclude all files matching "*.gz" anywhere in the source directories,
+ except those with "data" in the name.
+
+
+8. FILTERING AND ADDING EXTENDED ATTRIBUTES (XATTRs)
+----------------------------------------------------
+
+Mksquashfs has a number of options which allow extended attributes (xattrs) to
+be filtered from the source files or added to the created Squashfs filesystem.
+
+The -no-xattrs option removes any extended attributes which may exist in the
+source files, and creates a filesystem without any extended attributes.
+
+The -xattrs-exclude option specifies a regular expression (regex), which
+removes any extended attribute that matches the regular expression from all
+files. For example the regex '^user.' will remove all User extended attributes.
+
+The -xattrs-include option instead specifies a regular expression (regex)
+which includes any extended attribute that matches, and removes anything
+that doesn't match. For example the regex '^user.' will only keep User
+extended attributes and anything else will be removed.
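+
+As a sketch (the "source" and "image.sqfs" names are arbitrary), a filesystem
+keeping only User extended attributes could be created with:
+
+% mksquashfs source image.sqfs -xattrs-include '^user.'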
+
+Mksquashfs also allows you to add extended attributes to files in the Squashfs
+filesystem using the -xattrs-add option. This option takes an xattr name and
+value pair separated by the '=' character.
+
+The extended attribute name can be any valid name and can be in the namespaces
+security, system, trusted, or user. User extended attributes are added to files
+and directories (see man 7 xattr for explanation), and the others are added to
+all files.
+
+The extended attribute value by default will be treated as binary (i.e. an
+uninterpreted byte sequence), but it can be prefixed with 0s, where it will be
+treated as base64 encoded, or prefixed with 0x, where it will be treated as
+hexadecimal.
+
+Obviously using base64 or hexadecimal allows values to be used that cannot be
+entered on the command line such as non-printable characters etc. But it
+renders the string non-human readable. To keep readability and to allow
+non-printable characters to be entered, the 0t prefix is supported. This
+encoding is similar to binary encoding, except backslashes are specially
+treated, and a backslash followed by three octal digits can be used to encode
+any ASCII character, which obviously can be used to encode non-printable values.
+The following four command lines are equivalent
+
+mksquashfs dir image.sqfs -xattrs-add "user.comment=hello world"
+mksquashfs dir image.sqfs -xattrs-add "user.comment=0saGVsbG8gd29ybGQ="
+mksquashfs dir image.sqfs -xattrs-add "user.comment=0x68656c6c6f20776f726c64"
+mksquashfs dir image.sqfs -xattrs-add "user.comment=0thello world"
+
+Obviously in the above example there are no non-printable characters and so
+the 0t prefixed string is identical to the first line. The following three
+command lines are identical, but where the space has been replaced by the
+non-printable NUL '\0' (null character).
+
+mksquashfs dir image.sqfs -xattrs-add "user.comment=0thello\000world"
+mksquashfs dir image.sqfs -xattrs-add "user.comment=0saGVsbG8Ad29ybGQ="
+mksquashfs dir image.sqfs -xattrs-add "user.comment=0x68656c6c6f00776f726c64"
+
+9. PSEUDO FILE SUPPORT
+----------------------
+
+Mksquashfs supports pseudo files; these allow files, directories, character
+devices, block devices, fifos, symbolic links, hard links and extended
+attributes to be specified and added to the Squashfs filesystem being built,
+rather than requiring them to be present in the source files. This, for
+example, allows device nodes to be added to the filesystem without requiring
+root access.
+
+Pseudo files also support "dynamic pseudo files" and a modify operation.
+Dynamic pseudo files allow files to be dynamically created when Mksquashfs
+is run, their contents being the result of running a command or piece of
+shell script. The modify operation allows the mode/uid/gid of an existing
+file in the source filesystem to be modified.
+
+Two Mksquashfs options are supported, -p allows one pseudo file to be specified
+on the command line, and -pf allows a pseudo file to be specified containing a
+list of pseudo definitions, one per line.
+
+9.1 CREATING A DYNAMIC FILE
+---------------------------
+
+Pseudo definition
+
+Filename f mode uid gid command
+
+mode is the octal mode specifier, similar to that expected by chmod.
+
+uid and gid can be either specified as a decimal number, or by name.
+
+command can be an executable or a piece of shell script, and it is executed
+by running "/bin/sh -c command". The stdout becomes the contents of
+"Filename".
+
+Examples:
+
+Running a basic command
+-----------------------
+
+/somedir/dmesg f 444 root root dmesg
+
+creates a file "/somedir/dmesg" containing the output from dmesg.
+
+Executing shell script
+----------------------
+
+RELEASE f 444 root root \
+ if [ ! -e /tmp/ver ]; then \
+ echo 0 > /tmp/ver; \
+ fi; \
+ ver=`cat /tmp/ver`; \
+ ver=$((ver +1)); \
+ echo $ver > /tmp/ver; \
+ echo -n `cat /tmp/release`; \
+ echo "-dev #"$ver `date` "Build host" `hostname`
+
+Creates a file RELEASE containing the release name, date, build host, and
+an incrementing version number. The incrementing version is a side-effect
+of executing the shell script, and ensures every time Mksquashfs is run a
+new version number is used without requiring any other shell scripting.
+
+The above example also shows that commands can be split across multiple lines
+using "\". Obviously as the script will be presented to the shell as a single
+line, a semicolon is needed to separate individual shell commands within the
+shell script.
+
+Reading from a device (or fifo/named socket)
+--------------------------------------------
+
+input f 444 root root dd if=/dev/sda1 bs=1024 count=10
+
+Copies 10K from the device /dev/sda1 into the file input. Ordinarily Mksquashfs
+given a device, fifo, or named socket will place that special file within the
+Squashfs filesystem, the above allows input from these special files to be
+captured and placed in the Squashfs filesystem.
+
+9.2 CREATING A BLOCK OR CHARACTER DEVICE
+----------------------------------------
+
+Pseudo definition
+
+Filename type mode uid gid major minor
+
+Where type is either
+ b - for block devices, and
+ c - for character devices
+
+mode is the octal mode specifier, similar to that expected by chmod.
+
+uid and gid can be either specified as a decimal number, or by name.
+
+For example:
+
+/dev/chr_dev c 666 root root 100 1
+/dev/blk_dev b 666 0 0 200 200
+
+creates a character device "/dev/chr_dev" with major:minor 100:1 and
+a block device "/dev/blk_dev" with major:minor 200:200, both with root
+uid/gid and a mode of rw-rw-rw.
+
+9.3 CREATING A DIRECTORY
+-------------------------
+
+Pseudo definition
+
+Filename d mode uid gid
+
+mode is the octal mode specifier, similar to that expected by chmod.
+
+uid and gid can be either specified as a decimal number, or by name.
+
+For example:
+
+/pseudo_dir d 666 root root
+
+creates a directory "/pseudo_dir" with root uid/gid and mode of rw-rw-rw.
+
+9.4 CREATING A SYMBOLIC LINK
+-----------------------------
+
+Pseudo definition
+
+Filename s mode uid gid symlink
+
+uid and gid can be either specified as a decimal number, or by name.
+
+Note mode is ignored, as symlinks always have "rwxrwxrwx" permissions.
+
+For example:
+
+symlink s 0 root root example
+
+creates a symlink "symlink" to file "example" with root uid/gid.
+
+9.5 CREATING HARD LINKS (FILE REFERENCES)
+-----------------------------------------
+
+The "f" Pseudo definition allows a regular file to be created from the output of
+a command (or shell). Often this is used to reference a file outside the source
+directories by executing "cat", e.g.
+
+README f 0555 0 0 cat /home/phillip/latest-version/README
+
+Because this is a quite frequent use of the definition, an alternative faster
+"File reference" or Hard Link Pseudo definition exists:
+
+README l /home/phillip/latest-version/README
+
+Will create a reference to "/home/phillip/latest-version/README",
+and obviously the timestamp/mode and ownership will be used.
+
+The definition can also be used to create additional references to
+files within the source directories. For instance if "phillip/latest/README"
+was a file being added to the filesystem, then
+
+README l phillip/latest/README
+
+Will create a Hard Link (and increment the nlink count on the inode).
+
+In both cases, the path to the file being referenced is the system
+filesystem path, and can be absolute (prefixed with /), or relative
+to the current working directory.
+
+There is an additional 'L' Pseudo definition, which closes a loophole in
+the above 'l' definition. The 'l' Pseudo definition cannot create references
+or Hard Links to files created by Pseudo definitions, because by
+definition they do not exist in the system filesystem.
+
+With 'L' the referenced file is expected to be a Pseudo file, and in this case
+the path is taken to be from the root of the Squashfs filesystem being created,
+e.g.
+
+char-dev c 0555 0 0 1 2
+
+link L char-dev
+
+Will create a Hard Link named "link" to the character device called "char-dev"
+created by the previous Pseudo definition.
+
+9.6 CREATING SOCKETS/FIFOS
+---------------------------
+
+Pseudo definition
+
+filename i mode uid gid [s|f]
+
+To create a Unix domain socket, 's' should be used, i.e.
+
+filename i 0777 root root s
+
+and to create a FIFO, 'f' should be used, i.e.
+
+filename i 0777 root root f
+
+9.7 ADDING EXTENDED ATTRIBUTES TO FILES
+---------------------------------------
+
+Pseudo definition
+
+filename x name=val
+
+Will add the extended attribute <name> to <filename> with <val> contents. See
+Section 8 for a description of the <val> formats supported.
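+
+For example (the "README" filename and the attribute value here are
+arbitrary), the following definition adds a comment xattr to the file "README"
+in the filesystem:
+
+README x user.comment=hello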
+
+9.8 MODIFYING ATTRIBUTES OF AN EXISTING FILE
+--------------------------------------------
+
+Pseudo definition
+
+Filename m mode uid gid
+
+mode is the octal mode specifier, similar to that expected by chmod.
+
+uid and gid can be either specified as a decimal number, or by name.
+
+For example:
+
+dmesg m 666 root root
+
+Changes the attributes of the file "dmesg" in the filesystem to have
+root uid/gid and a mode of rw-rw-rw, overriding the attributes obtained
+from the source filesystem.
+
+10. EXTENDED PSEUDO FILE DEFINITIONS WITH TIMESTAMPS
+----------------------------------------------------
+
+The Pseudo file definitions described above do not allow the timestamp
+of the created file to be specified, and the files will be timestamped
+with the current time.
+
+Extended versions of the Pseudo file definitions are supported which
+take a <time> timestamp. These are distinguished from the previous
+definitions by using an upper-case type character. For example the "D"
+definition is identical to the "d" definition, but it takes a <time>
+timestamp.
+
+The list of extended definitions are:
+
+ filename F time mode uid gid command
+ filename D time mode uid gid
+ filename B time mode uid gid major minor
+ filename C time mode uid gid major minor
+ filename S time mode uid gid symlink
+ filename I time mode uid gid [s|f]
+ filename M time mode uid gid
+
+<time> can be either an unsigned decimal integer (which represents the
+seconds since the epoch of 1970-01-01 00:00 UTC), or a "date string"
+which is parsed and converted into an integer since the epoch, by calling
+the "date" command.
+
+Because most date strings have spaces, they will need to be quoted, and if
+entered on the command line, these quotes will need to be protected from the
+shell by backslashes, i.e.
+
+% mksquashfs dir image.sqsh -p "file D \"1 jan 1980\" 0777 phillip phillip"
+
+Obviously anything "date" accepts as a valid string can be used, such as
+"yesterday", "last week" etc.
+
+
+11. APPENDING TO SQUASHFS FILESYSTEMS
+-------------------------------------
+
+Running Mksquashfs with the destination directory containing an existing
+filesystem will add the source items to the existing filesystem. By default,
+the source items are added to the existing root directory.
+
+To make this clear... An existing filesystem "image" contains root entries
+"old1", and "old2". Source directory "/home/phillip/test" contains "file1",
+"file2" and "dir1".
+
+example 1:
+
+% mksquashfs /home/phillip/test image
+
+Will create a new "image" with root entries "old1", "old2", "file1", "file2" and
+"dir1"
+
+example 2:
+
+% mksquashfs /home/phillip/test image -keep-as-directory
+
+Will create a new "image" with root entries "old1", "old2", and "test".
+As shown in the previous section, for single source directories
+'-keep-as-directory' adds the source directory rather than the
+contents of the directory.
+
+example 3:
+
+% mksquashfs /home/phillip/test image -keep-as-directory -root-becomes
+original-root
+
+Will create a new "image" with root entries "original-root", and "test". The
+'-root-becomes' option specifies that the original root becomes a subdirectory
+in the new root, with the specified name.
+
+The append option, combined with file duplicate detection, means Squashfs can
+be used as a simple versioning archive filesystem. A Squashfs filesystem can
+be created with, for example, the linux-2.4.19 source. Appending the linux-2.4.20
+source will create a filesystem with the two source trees, but only the
+changed files will take extra room; the unchanged files will be detected as
+duplicates.
+
+12. APPENDING RECOVERY FILE FEATURE
+-----------------------------------
+
+Recovery files are created when appending to existing Squashfs
+filesystems. This allows the original filesystem to be recovered
+if Mksquashfs aborts unexpectedly (i.e. power failure).
+
+The recovery files are called squashfs_recovery_xxx_yyy, where
+"xxx" is the name of the filesystem being appended to, and "yyy" is a
+number to guarantee filename uniqueness (the PID of the parent Mksquashfs
+process).
+
+Normally if Mksquashfs exits correctly the recovery file is deleted to
+avoid cluttering the filesystem. If Mksquashfs aborts, the "-recover"
+option can be used to recover the filesystem, giving the previously
+created recovery file as a parameter, i.e.
+
+mksquashfs dummy image.sqsh -recover squashfs_recovery_image.sqsh_1234
+
+The writing of the recovery file can be disabled by specifying the
+"-no-recovery" option.
+
+
+13. MKSQUASHFS ACTIONS INTRODUCTION
+-----------------------------------
+
+The Mksquashfs Actions code allows an "action" to be executed on a file if one
+or more "tests" succeed. If you're familiar with the "find" command, then an
+action is similar to "-print", and a test is similar to say "-name" or "-type".
+
+To illustrate this it is useful to give two concrete examples.
+
+example 1: the fragment action
+
+% mksquashfs /home/phillip/github github.sqsh -action "fragment(cfiles) @ name(*.[ch])" -action "fragment(ofiles) @ name(*.o)"
+
+This example defines two "fragment actions" which control the packing of files
+within fragments. Specifically, it creates a specialised fragment called
+"cfiles" which packs files matching the wildcard name "*.[ch]".
+
+It also creates another specialised fragment called "ofiles" which packs files
+matching the wildcard name "*.o".
+
+Producing specialised fragments which only pack files matching a range of
+tests can produce better compression and/or I/O performance as it can optimise
+similarity or access patterns. But it can also produce worse compression, and
+so you should always test the effect.
+
+Additionally, you should be able to see that an action definition is split into
+an action function before the "@", and one or more test functions after the @.
+Quoting is necessary here to protect it from interpretation by the shell. Also
+the spacing before and after the "@" isn't necessary and is used here for added
+readability.
+
+example 2: the uncompressed action
+
+% mksquashfs /home/phillip backup.sqsh -action "uncompressed @ ( name(*.jpg) || name(*.mpg) ) || ( name(*.img) && filesize(+1G) )"
+
+This is a more complex example. It tells Mksquashfs to not try and compress any
+file which matches the wildcard names "*.jpg" or "*.mpg". But it also tells
+Mksquashfs not to try and compress files which match the wildcard name "*.img"
+and are also 1 Gigabyte in size or larger.
+
+This example introduces the fact that tests can be combined using the logical
+operators && (and), || (or) and ! (not), and can be bracketed.
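+
+As an illustrative sketch of the ! operator (the file pattern is assumed), the
+following stores uncompressed every file that does not match "*.txt":
+
+% mksquashfs dir image.sqsh -action "uncompressed @ ! name(*.txt)"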
+
+Please see the ACTIONS-README file for syntax and extra information.
+
+
+14. MISCELLANEOUS OPTIONS
+------------------------
+
+The -info option displays the files/directories as they are compressed and
+added to the filesystem. The original uncompressed size of each file
+is printed, along with DUPLICATE if the file is a duplicate of a
+file in the filesystem.
+
+The -nopad option informs Mksquashfs to not pad the filesystem to a 4K multiple.
+This is performed by default to enable the output filesystem file to be mounted
+by loopback, which requires files to be a 4K multiple. If the filesystem is
+being written to a block device, or is to be stored in a bootimage, the extra
+pad bytes are not needed.
diff --git a/USAGE-SQFSCAT-4.6 b/USAGE-SQFSCAT-4.6
new file mode 100644
index 0000000..772f78e
--- /dev/null
+++ b/USAGE-SQFSCAT-4.6
@@ -0,0 +1,74 @@
+ SQFSCAT - A tool to output files to stdout
+
+Sqfscat allows you to "cat" files to STDOUT from a Squashfs filesystem
+without mounting it. It can read all official Squashfs filesystems.
+
+The Sqfscat usage info is:
+
+SYNTAX: sqfscat [OPTIONS] FILESYSTEM [list of files to cat to stdout]
+ -v[ersion] print version, licence and copyright information
+ -p[rocessors] <number> use <number> processors. By default will use
+ the number of processors available
+ -o[ffset] <bytes> skip <bytes> at start of FILESYSTEM.
+ Optionally a suffix of K, M or G can be given to
+ specify Kbytes, Mbytes or Gbytes respectively
+ (default 0 bytes).
+ -ig[nore-errors] treat errors writing files to stdout as
+ non-fatal
+ -st[rict-errors] treat all errors as fatal
+ -no-exit[-code] don't set exit code (to nonzero) on non-fatal
+ errors
+ -da[ta-queue] <size> set data queue to <size> Mbytes. Default 256
+ Mbytes
+ -fr[ag-queue] <size> set fragment queue to <size> Mbytes. Default
+ 256 Mbytes
+ -no-wild[cards] do not use wildcard matching in filenames
+ -r[egex] treat filenames as POSIX regular expressions
+ rather than use the default shell wildcard
+ expansion (globbing)
+ -h[elp] output options text to stdout
+
+
+The pathnames of the files to be output, like cat, can contain symbolic links,
+and "." and ".." elements.
+
+Sqfscat is a short-cut to using the equivalent Unsquashfs -cat option, i.e.
+the following will behave the same:
+
+% sqfscat image.sqfs file
+
+% unsquashfs -cat image.sqfs file
+
+If any of the files given on the command line does not resolve to a regular
+file, Sqfscat will print an error, but will continue to output the remaining
+files on the command line. This follows the behaviour of "cat", for example:
+
+phillip@phoenix:/tmp$ sqfscat image.sqfs dir Hello_World
+cat: /dir is a directory
+Hello World!
+
+phillip@phoenix:/tmp$ cat dir Hello_World
+cat: dir: Is a directory
+Hello World!
+
+Sqfscat supports wildcards and it will output the contents of any file that
+matches, e.g.
+
+% sqfscat image.sqfs "*.[ch]"
+
+Will output the contents of all the files in the root directory that match the
+wildcard *.[ch], to stdout, for example hello.c, hello.h, world.c, world.h.
+
+Note: when passing wildcarded names to Sqfscat, they should be quoted (as in
+the above example), to ensure that they are not processed by the shell.
+
+ERRORS and EXIT CODE
+--------------------
+
+If Sqfscat encounters fatal errors such as an I/O error or filesystem corruption,
+it will abort immediately, and return an exit code of 1.
+
+If it skipped a file because it wasn't a regular file, or it wasn't in the
+filesystem, it will return an exit code of 2.
+
+Otherwise a success exit code of 0 will be returned.
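+
+A minimal sketch of checking the exit code in shell (filenames assumed):
+
+% sqfscat image.sqfs file1 file2 > output; echo $?
+
+An exit code of 2 here would mean one or more of the named files was skipped,
+while any remaining regular files were still written to "output".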
diff --git a/USAGE-SQFSTAR-4.6 b/USAGE-SQFSTAR-4.6
new file mode 100644
index 0000000..9b5f0e8
--- /dev/null
+++ b/USAGE-SQFSTAR-4.6
@@ -0,0 +1,812 @@
+ SQFSTAR - A tool to create a Squashfs filesystem from a TAR archive
+
+Sqfstar will read an uncompressed TAR archive from standard input, and it will
+create a Squashfs filesystem from it. If the TAR archive is compressed, it
+should be piped through a decompressor utility and then fed into Sqfstar.
+
+Sqfstar supports V7, ustar, bsdtar (libarchive), GNU tar and PAX extensions.
+Sparse file extensions are supported, including the "old GNU format, type S",
+and PAX formats, Versions 0.0, 0.1 and the current 1.0.
+
+Sqfstar supports extended attributes, and recognises the SCHILY xattr
+PAX extension (used by GNU tar), and the LIBARCHIVE xattr PAX extension
+(used by bsdtar).
+
+The Sqfstar usage info is:
+
+SYNTAX: sqfstar [OPTIONS] FILESYSTEM [list of exclude dirs/files]
+
+Filesystem compression options:
+-b <block_size> set data block to <block_size>. Default 128 Kbytes.
+ Optionally a suffix of K or M can be given to specify
+ Kbytes or Mbytes respectively
+-comp <comp> select <comp> compression
+ Compressors available:
+ gzip (default)
+ lzo
+ lz4
+ xz
+ zstd
+-noI do not compress inode table
+-noId do not compress the uid/gid table (implied by -noI)
+-noD do not compress data blocks
+-noF do not compress fragment blocks
+-noX do not compress extended attributes
+-no-compression do not compress any of the data or metadata. This is
+ equivalent to specifying -noI -noD -noF and -noX
+
+Filesystem build options:
+-reproducible build filesystems that are reproducible (default)
+-not-reproducible build filesystems that are not reproducible
+-mkfs-time <time> set filesystem creation timestamp to <time>. <time> can
+ be an unsigned 32-bit int indicating seconds since the
+ epoch (1970-01-01) or a string value which is passed to
+ the "date" command to parse. Any string value which the
+ date command recognises can be used such as "now",
+ "last week", or "Wed Feb 15 21:02:39 GMT 2023"
+-all-time <time> set all file timestamps to <time>. <time> can be an
+ unsigned 32-bit int indicating seconds since the epoch
+ (1970-01-01) or a string value which is passed to the
+ "date" command to parse. Any string value which the date
+ command recognises can be used such as "now", "last
+ week", or "Wed Feb 15 21:02:39 GMT 2023"
+-root-time <time> set root directory time to <time>. <time> can be an
+ unsigned 32-bit int indicating seconds since the epoch
+ (1970-01-01) or a string value which is passed to the
+ "date" command to parse. Any string value which the date
+ command recognises can be used such as "now", "last
+ week", or "Wed Feb 15 21:02:39 GMT 2023"
+-root-mode <mode> set root directory permissions to octal <mode>
+-root-uid <value> set root directory owner to specified <value>,
+ <value> can be either an integer uid or user name
+-root-gid <value> set root directory group to specified <value>,
+ <value> can be either an integer gid or group name
+-all-root make all files owned by root
+-force-uid <value> set all file uids to specified <value>, <value> can be
+ either an integer uid or user name
+-force-gid <value> set all file gids to specified <value>, <value> can be
+ either an integer gid or group name
+-default-mode <mode> tar files often do not store permissions for
+ intermediate directories. This option sets the default
+ directory permissions to octal <mode>, rather than 0755.
+ This also sets the root inode mode
+-default-uid <uid> tar files often do not store uids for intermediate
+ directories. This option sets the default directory
+ owner to <uid>, rather than the user running Sqfstar.
+ This also sets the root inode uid
+-default-gid <gid> tar files often do not store gids for intermediate
+ directories. This option sets the default directory
+ group to <gid>, rather than the group of the user
+ running Sqfstar. This also sets the root inode gid
+-pseudo-override make pseudo file uids and gids override -all-root,
+ -force-uid and -force-gid options
+-exports make the filesystem exportable via NFS
+-no-sparse do not detect sparse files
+-no-fragments do not use fragments
+-no-tailends do not pack tail ends into fragments
+-no-duplicates do not perform duplicate checking
+-no-hardlinks do not hardlink files, instead store duplicates
+
+Filesystem filter options:
+-p <pseudo-definition> add pseudo file definition. The definition should
+ be quoted
+-pf <pseudo-file> add list of pseudo file definitions. Pseudo file
+ definitions in pseudo-files should not be quoted
+-ef <exclude_file> list of exclude dirs/files. One per line
+-regex allow POSIX regular expressions to be used in exclude
+ dirs/files
+-ignore-zeros allow tar files to be concatenated together and fed to
+ Sqfstar. Normally a tarfile has two consecutive 512
+ byte blocks filled with zeros which means EOF and
+ Sqfstar will stop reading after the first tar file on
+ encountering them. This option makes Sqfstar ignore the
+ zero filled blocks
+
+Filesystem extended attribute (xattrs) options:
+-no-xattrs do not store extended attributes
+-xattrs store extended attributes (default)
+-xattrs-exclude <regex> exclude any xattr names matching <regex>. <regex> is a
+ POSIX regular expression, e.g. -xattrs-exclude '^user.'
+ excludes xattrs from the user namespace
+-xattrs-include <regex> include any xattr names matching <regex>. <regex> is a
+ POSIX regular expression, e.g. -xattrs-include '^user.'
+ includes xattrs from the user namespace
+-xattrs-add <name=val> add the xattr <name> with <val> to files. If a
+ user xattr it will be added to regular files and
+ directories (see man 7 xattr). Otherwise it will be
+ added to all files. <val> by default will be treated as
+ binary (i.e. an uninterpreted byte sequence), but it can
+ be prefixed with 0s, where it will be treated as base64
+ encoded, or prefixed with 0x, where val will be treated
+ as hexadecimal. Additionally it can be prefixed with
+ 0t where this encoding is similar to binary encoding,
+ except backslashes are specially treated, and a
+ backslash followed by 3 octal digits can be used to
+ encode any ASCII character, which obviously can be used
+ to encode control codes. The option can be repeated
+ multiple times to add multiple xattrs
+
+Sqfstar runtime options:
+-version print version, licence and copyright message
+-force force Sqfstar to write to block device or file
+-exit-on-error treat normally ignored errors as fatal
+-quiet no verbose output
+-info print files written to filesystem
+-no-progress do not display the progress bar
+-progress display progress bar when using the -info option
+-percentage display a percentage rather than the full progress bar.
+ Can be used with dialog --gauge etc.
+-throttle <percentage> throttle the I/O input rate by the given percentage.
+ This can be used to reduce the I/O and CPU consumption
+ of Sqfstar
+-limit <percentage> limit the I/O input rate to the given percentage.
+ This can be used to reduce the I/O and CPU consumption
+ of Sqfstar (alternative to -throttle)
+-processors <number> use <number> processors. By default will use number of
+ processors available
+-mem <size> use <size> physical memory for caches. Use K, M or G to
+ specify Kbytes, Mbytes or Gbytes respectively
+-mem-percent <percent> use <percent> physical memory for caches. Default 25%
+-mem-default print default memory usage in Mbytes
+
+Expert options (these may make the filesystem unmountable):
+-nopad do not pad filesystem to a multiple of 4K
+-offset <offset> skip <offset> bytes at the beginning of FILESYSTEM.
+ Optionally a suffix of K, M or G can be given to specify
+ Kbytes, Mbytes or Gbytes respectively.
+ Default 0 bytes
+-o <offset> synonym for -offset
+
+Miscellaneous options:
+-fstime <time> alternative name for mkfs-time
+-root-owned alternative name for -all-root
+-noInodeCompression alternative name for -noI
+-noIdTableCompression alternative name for -noId
+-noDataCompression alternative name for -noD
+-noFragmentCompression alternative name for -noF
+-noXattrCompression alternative name for -noX
+
+-help output this options text to stdout
+-h output this options text to stdout
+
+-Xhelp print compressor options for selected compressor
+
+Pseudo file definition format:
+"filename d mode uid gid" create a directory
+"filename m mode uid gid" modify filename
+"filename b mode uid gid major minor" create a block device
+"filename c mode uid gid major minor" create a character device
+"filename f mode uid gid command" create file from stdout of command
+"filename s mode uid gid symlink" create a symbolic link
+"filename i mode uid gid [s|f]" create a socket (s) or FIFO (f)
+"filename x name=val" create an extended attribute
+"filename l linkname" create a hard-link to linkname
+"filename L pseudo_filename" same, but link to pseudo file
+"filename D time mode uid gid" create a directory with timestamp time
+"filename M time mode uid gid" modify a file with timestamp time
+"filename B time mode uid gid major minor"
+ create block device with timestamp time
+"filename C time mode uid gid major minor"
+ create char device with timestamp time
+"filename F time mode uid gid command" create file with timestamp time
+"filename S time mode uid gid symlink" create symlink with timestamp time
+"filename I time mode uid gid [s|f]" create socket/fifo with timestamp time
+
+
+Files and directories in the TAR archive may be excluded, and both anchored
+and non-anchored exclude files are supported (see section 6 and examples later).
+Wildcards (globbing) are supported by default.
+
+Sqfstar can also be invoked by running "mksquashfs - image.sqfs -tar", if
+the Sqfstar link isn't available.
+
+1. USAGE EXAMPLES
+-----------------
+
+% sqfstar image.sqfs < archive.tar
+
+ Create a Squashfs image from archive.tar, using defaults (gzip compression,
+ 128K blocks).
+
+% sqfstar -comp xz -b 1M image.sqfs < archive.tar
+
+ As previous, but use XZ compression and 1Mbyte block sizes.
+
+% zcat archive.tgz | sqfstar image.sqfs
+
+ Create a Squashfs image from a gzip compressed tar archive.
+
+% sqfstar -root-uid 0 -root-gid 0 image.sqfs < archive.tar
+
+ Tar files do not supply a definition for the root directory, and the
+ default is to make the directory owned/group owned by the user running
+ Sqfstar. The above command sets the ownership/group ownership to root.
+
+% sqfstar -root-mode 0755 image.sqfs < archive.tar
+
+ The default permissions for the root directory are 0777 (rwxrwxrwx). The
+ above command sets the permissions to 0755 (rwxr-xr-x).
+
+% sqfstar image.sqsh dir1/file1 dir2/file2 < archive.tar
+
+ Create a Squashfs image but exclude the files "file1" and "file2".
+
+% sqfstar image.sqsh "... *.[ch]" < archive.tar
+
+ Create a Squashfs image but exclude any file matching "*.[ch]" anywhere
+ in the archive.
+
+
+2. CHANGING COMPRESSION ALGORITHM AND COMPRESSION SPECIFIC OPTIONS
+------------------------------------------------------------------
+
+By default Sqfstar will compress using the GZIP compression algorithm. This
+algorithm offers a good trade-off between compression ratio, and memory and time
+taken to decompress.
+
+Squashfs also supports LZ4, LZO, XZ and ZSTD compression. LZO offers worse
+compression ratio than GZIP, but is faster to decompress. XZ offers better
+compression ratio than GZIP, but at the expense of greater memory and time
+to decompress (and significantly more time to compress). LZ4 is similar
+to LZO. ZSTD has been developed by Facebook, and aims to compress well and
+be fast to decompress. You should experiment with the compressors to
+see which is best for you.
+
+If you're not building the squashfs-tools and kernel from source, then
+the tools and kernel may or may not have been built with support for LZ4, LZO,
+XZ or ZSTD compression. The compression algorithms supported by the build of
+Sqfstar can be found by typing sqfstar -help.
+
+The full list of compressors available and their compression specific options
+are:
+
+Compressors available and compressor specific options:
+ gzip (default)
+ -Xcompression-level <compression-level>
+ <compression-level> should be 1 .. 9 (default 9)
+ -Xwindow-size <window-size>
+ <window-size> should be 8 .. 15 (default 15)
+ -Xstrategy strategy1,strategy2,...,strategyN
+ Compress using strategy1,strategy2,...,strategyN in turn
+ and choose the best compression.
+ Available strategies: default, filtered, huffman_only,
+ run_length_encoded and fixed
+ lzo
+ -Xalgorithm <algorithm>
+ Where <algorithm> is one of:
+ lzo1x_1
+ lzo1x_1_11
+ lzo1x_1_12
+ lzo1x_1_15
+ lzo1x_999 (default)
+ -Xcompression-level <compression-level>
+ <compression-level> should be 1 .. 9 (default 8)
+ Only applies to lzo1x_999 algorithm
+ lz4
+ -Xhc
+ Compress using LZ4 High Compression
+ xz
+ -Xbcj filter1,filter2,...,filterN
+ Compress using filter1,filter2,...,filterN in turn
+ (in addition to no filter), and choose the best compression.
+ Available filters: x86, arm, armthumb, powerpc, sparc, ia64
+ -Xdict-size <dict-size>
+ Use <dict-size> as the XZ dictionary size. The dictionary size
+ can be specified as a percentage of the block size, or as an
+ absolute value. The dictionary size must be less than or equal
+ to the block size and 8192 bytes or larger. It must also be
+ storable in the xz header as either 2^n or as 2^n+2^(n+1).
+ Example dict-sizes are 75%, 50%, 37.5%, 25%, or 32K, 16K, 8K
+ etc.
+ zstd
+ -Xcompression-level <compression-level>
+ <compression-level> should be 1 .. 22 (default 15)
+
+If the compressor offers compression specific options (all the compressors now
+have compression specific options except the deprecated lzma1 compressor)
+then these options are also displayed (i.e. in the above XZ is shown with two
+compression specific options). The compression specific options are, obviously,
+specific to the compressor in question, and the compressor documentation and
+web sites should be consulted to understand their behaviour. In general
+the Sqfstar compression defaults for each compressor are optimised to
+give the best performance for each compressor, where what constitutes
+best depends on the compressor. For GZIP/XZ best means highest compression,
+for LZO/LZ4 best means a tradeoff between compression and (de)-compression
+overhead (LZO/LZ4 by definition are intended for weaker processors).
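+
+As a sketch (the level is an assumed value), a compression specific option is
+given after selecting the compressor, for example raising the ZSTD level from
+the default of 15:
+
+% sqfstar -comp zstd -Xcompression-level 19 image.sqfs < archive.tar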
+
+3. REDUCING CPU AND I/O USAGE
+-----------------------------
+
+By default Sqfstar will use all the CPUs available to compress and create the
+filesystem, and will read from the TAR archive and write to the output
+filesystem as fast as possible. This maximises both CPU usage and I/O.
+
+Sometimes you don't want Sqfstar to use all CPU and I/O bandwidth. For those
+cases Sqfstar supports two complementary options, -processors and -throttle.
+
+The -processors option can be used to reduce the number of parallel compression
+threads used by Sqfstar. Reducing this to 1 will create the minimum number of
+threads, and this will reduce CPU usage, and that in turn will reduce I/O
+(because CPUs are normally the bottleneck).
+
+The -throttle option reduces the speed Sqfstar reads from the TAR archive.
+The value is a percentage (obviously from 1 - 100), and 50 will reduce the
+read rate by half (the read thread will spend half its time idling), and 75
+by three quarters. Reducing the speed of I/O will also reduce the CPU
+usage as there is insufficient data rate to use all cores.
+
+Which option should you use? Both will effectively reduce CPU and I/O in
+normal cases where intensive use is being made of both I/O and CPUs. But
+in edge cases there can be an imbalance where reducing one has no effect, or
+it can't be reduced any further. For instance when there is only 1 or 2 cores
+available, setting -processors to the minimum of 1 may still use too much
+CPU. Additionally if your input source is slow Sqfstar may still max it out
+with -processors set to the minimum of 1. In this case you can use -throttle
+in addition to -processors or on its own.
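+
+For example, a sketch which uses a single compression thread and halves the
+read rate:
+
+% sqfstar -processors 1 -throttle 50 image.sqfs < archive.tar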
+
+4. CHANGING GLOBAL COMPRESSION DEFAULTS USED IN SQFSTAR
+-------------------------------------------------------
+
+There are a large number of options that can be used to control the compression
+in Sqfstar. By and large the defaults are the optimum settings and should
+rarely need to be changed.
+
+Note, this does not apply to the block size: increasing the block size from
+the default of 128 Kbytes will increase compression (especially for the XZ and
+ZSTD compressors) and should increase I/O performance too. However, a block
+size of greater than 128 Kbytes may increase latency in certain cases (where the
+filesystem contains lots of fragments, and no locality of reference is
+observed). For this reason the block size default is configured to the less
+optimal 128 Kbytes. Users should experiment with 256 Kbyte sizes or above.
+
+The -b option allows the block size to be selected; both "K" and "M" postfixes
+are supported. The block size can be either 4K, 8K, 16K, 32K, 64K, 128K, 256K,
+512K or 1M bytes.
+
+The -noI, -noD, -noF and -noX options can be used to force Sqfstar to not
+compress inodes/directories, data, fragments and extended attributes
+respectively. Giving all options generates an uncompressed filesystem.
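+
+For example, the following two command lines should produce equivalent
+uncompressed filesystems:
+
+% sqfstar -noI -noD -noF -noX image.sqfs < archive.tar
+% sqfstar -no-compression image.sqfs < archive.tar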
+
+The -no-fragments option tells Sqfstar to not generate fragment blocks. A
+fragment block consists of multiple small files (all smaller than the block
+size) packed and compressed together. This produces much better compression
+than storing and compressing these files separately. It also typically
+improves I/O as multiple files in the same directory are read at the same time.
+You don't want to enable this option unless you fully understand the effects.
+
+The -no-tailends option tells Sqfstar to not pack file tailends into fragment
+blocks. Normally a file will not be a multiple of the block size, and so
+there will always be a tail which doesn't fit fully into a data block. This
+tailend is by default packed into fragment blocks. Enabling this option will
+reduce compression.
+
+The -no-duplicates option tells Sqfstar to not check the files being added to
+the filesystem for duplicates. This can result in quicker filesystem generation
+although obviously compression will suffer badly if there are a lot of duplicate
+files.
+
+5. SPECIFYING THE UIDs/GIDs USED IN THE FILESYSTEM
+--------------------------------------------------
+
+By default, files in the generated filesystem use the ownership of the file
+stored in the TAR archive. Depending on the TAR format, archives store
+ownership in two ways. The early V7 format only stored a numeric UID and GID,
+and if Sqfstar is reading a V7 archive these are used. Later ustar and PAX
+archives can also store a user name and group name as strings. If these are
+present and are recognised by the system (i.e. can be mapped to a UID and GID),
+that UID and GID are used. If a user name or group name is not recognised
+by the system, the numeric UID or GID is used.
+
+The -all-root option forces all file UIDs/GIDs in the generated Squashfs
+filesystem to be root. This allows root owned filesystems to be built without
+root access on the host machine.
+
+The "-force-uid value" option forces all files in the generated Squashfs
+filesystem to be owned by value, where value can either be a user name or a
+numeric UID.
+
+The "-force-gid value" option forces all files in the generated Squashfs
+filesystem to be group owned by value, where value can be either a group name or
+a numeric GID.
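+
+A sketch combining both options (the user and group names are assumed to exist
+on the build host):
+
+% sqfstar -force-uid phillip -force-gid users image.sqfs < archive.tar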
+
+6. EXCLUDING FILES FROM THE FILESYSTEM
+--------------------------------------
+
+Sqfstar can exclude files from the TAR archive, so that they don't appear in
+the Squashfs filesystem. Exclude files can be directly specified on
+the command line (immediately after the FILESYSTEM argument), or the -ef
+option can be used to specify an exclude file, containing one exclude per line.
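+
+As a sketch, an exclude file (here given the hypothetical name "exclude-list")
+simply contains one exclude per line:
+
+% cat exclude-list
+test/*.gz
+... *.o
+% sqfstar -ef exclude-list image.sqfs < archive.tar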
+
+Exclude files by default use wildcard matching (globbing) and can match on
+more than one file (if wildcards are used). Regular expression matching
+can be used instead by specifying the -regex option. In most cases wildcards
+should be used rather than regular expressions because wildcard matching
+behaviour is significantly easier to understand!
+
+In addition to wildcards/regex expressions, exclude files can be "anchored" or
+"non-anchored". An anchored exclude is one which matches from the root of the
+directory and nowhere else, a non-anchored exclude matches anywhere. For
+example given the directory hierarchy "a/b/c/a/b", the anchored exclude "a/b"
+will match "a/b" at the root of the directory hierarchy, but it will not match
+the "/a/b" sub-directory within directory "c", whereas a non-anchored exclude
+would.
+
+A couple of examples should make this clearer.
+
+Anchored excludes
+
+ 1. sqfstar image.sqsh 'test/*.gz' < archive.tar
+
+ Exclude all files matching "*.gz" in the top level directory "test".
+
+ 2. sqfstar image.sqsh '*/[Tt]est/example*' < archive.tar
+
+ Exclude all files beginning with "example" inside directories called
+ "Test" or "test", that occur inside any top level directory.
+
+ Using extended wildcards, negative matching is also possible.
+
+ 3. sqfstar image.sqsh 'test/!(*data*).gz' < archive.tar
+
+ Exclude all files matching "*.gz" in top level directory "test",
+ except those with "data" in the name.
+
+Non-anchored excludes
+
+ By default excludes match from the top level directory, but it is
+ often useful to exclude a file matching anywhere in the source directories.
+ For this non-anchored excludes can be used, specified by pre-fixing the
+ exclude with "...".
+
+ Examples:
+
+ 1. sqfstar image.sqsh '... *.gz' < archive.tar
+
+ Exclude files matching "*.gz" anywhere in the source directories.
+ For example this will match "example.gz", "test/example.gz", and
+ "test/test/example.gz".
+
+ 2. sqfstar image.sqsh '... [Tt]est/*.gz' < archive.tar
+
+ Exclude files matching "*.gz" inside directories called "Test" or
+ "test" that occur anywhere in the source directories.
+
+ Again, using extended wildcards, negative matching is also possible.
+
+ 3. sqfstar image.sqsh '... !(*data*).gz' < archive.tar
+
+ Exclude all files matching "*.gz" anywhere in the source directories,
+ except those with "data" in the name.
+
+
+7. FILTERING AND ADDING EXTENDED ATTRIBUTES (XATTRs)
+----------------------------------------------------
+
+Sqfstar has a number of options which allow extended attributes (xattrs) to be
+filtered from the TAR archive or added to the created Squashfs filesystem.
+
+The -no-xattrs option removes any extended attributes which may exist in the
+TAR archive, and creates a filesystem without any extended attributes.
+
+The -xattrs-exclude option specifies a regular expression (regex), which
+removes any extended attribute that matches the regular expression from all
+files. For example the regex '^user.' will remove all User extended attributes.
+
+The -xattrs-include option instead specifies a regular expression (regex)
+which includes any extended attribute that matches, and removes anything
+that doesn't match. For example the regex '^user.' will only keep User
+extended attributes and anything else will be removed.
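+
+For example, the following sketch keeps only xattrs from the user namespace:
+
+% sqfstar -xattrs-include '^user.' image.sqfs < archive.tar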
+
+Sqfstar also allows you to add extended attributes to files in the Squashfs
+filesystem using the -xattrs-add option. This option takes an xattr name and
+value pair separated by the '=' character.
+
+The extended attribute name can be any valid name and can be in the namespaces
+security, system, trusted, or user. User extended attributes are added to files
+and directories (see man 7 xattr for explanation), and the others are added to
+all files.
+
+The extended attribute value by default will be treated as binary (i.e. an
+uninterpreted byte sequence), but it can be prefixed with 0s, where it will be
+treated as base64 encoded, or prefixed with 0x, where it will be treated as
+hexadecimal.
+
+Obviously using base64 or hexadecimal allows values to be used that cannot be
+entered on the command line such as non-printable characters etc. But it
+renders the string non-human readable. To keep readability and to allow
+non-printable characters to be entered, the 0t prefix is supported. This
+encoding is similar to binary encoding, except backslashes are specially
+treated, and a backslash followed by three octal digits can be used to encode
+any ASCII character, which obviously can be used to encode non-printable values.
+
+The following four command lines are equivalent:
+
+sqfstar -xattrs-add "user.comment=hello world" image.sqfs
+sqfstar -xattrs-add "user.comment=0saGVsbG8gd29ybGQ=" image.sqfs
+sqfstar -xattrs-add "user.comment=0x68656c6c6f20776f726c64"
+sqfstar -xattrs-add "user.comment=0thello world" image.sqfs
+
+Obviously in the above example there are no non-printable characters and so
+the 0t prefixed string is identical to the first line. The following three
+command lines are equivalent, but with the space replaced by the
+non-printable NUL '\0' (null character):
+
+sqfstar -xattrs-add "user.comment=0thello\000world" image.sqfs
+sqfstar -xattrs-add "user.comment=0saGVsbG8Ad29ybGQ=" image.sqfs
+sqfstar -xattrs-add "user.comment=0x68656c6c6f00776f726c64" image.sqsh
+
+8. PSEUDO FILE SUPPORT
+----------------------
+
+Sqfstar supports pseudo files. These allow files, directories, character
+devices, block devices, fifos, symbolic links, hard links and extended
+attributes to be specified and added to the Squashfs filesystem being built,
+rather than requiring them to be present in the TAR archive. This, for example,
+allows device nodes to be added to the filesystem without requiring root access.
+
+Pseudo files also support "dynamic pseudo files" and a modify operation.
+Dynamic pseudo files allow files to be dynamically created when Sqfstar
+is run, their contents being the result of running a command or piece of
+shell script. The modify operation allows the mode/uid/gid of an existing
+file in the source filesystem to be modified.
+
+Two Sqfstar options are supported, -p allows one pseudo file to be specified
+on the command line, and -pf allows a pseudo file to be specified containing a
+list of pseudo definitions, one per line.
+
+8.1 CREATING A DYNAMIC FILE
+---------------------------
+
+Pseudo definition
+
+Filename f mode uid gid command
+
+mode is the octal mode specifier, similar to that expected by chmod.
+
+uid and gid can be either specified as a decimal number, or by name.
+
+command can be an executable or a piece of shell script, and it is executed
+by running "/bin/sh -c command". The stdout becomes the contents of
+"Filename".
+
+Examples:
+
+Running a basic command
+-----------------------
+
+/somedir/dmesg f 444 root root dmesg
+
+creates a file "/somedir/dmesg" containing the output from dmesg.
+
+Executing shell script
+----------------------
+
+RELEASE f 444 root root \
+ if [ ! -e /tmp/ver ]; then \
+ echo 0 > /tmp/ver; \
+ fi; \
+ ver=`cat /tmp/ver`; \
+ ver=$((ver +1)); \
+ echo $ver > /tmp/ver; \
+ echo -n `cat /tmp/release`; \
+ echo "-dev #"$ver `date` "Build host" `hostname`
+
+Creates a file RELEASE containing the release name, date, build host, and
+an incrementing version number. The incrementing version is a side-effect
+of executing the shell script, and ensures every time Sqfstar is run a
+new version number is used without requiring any other shell scripting.
+
+The above example also shows that commands can be split across multiple lines
+using "\". Obviously as the script will be presented to the shell as a single
+line, a semicolon is needed to separate individual shell commands within the
+shell script.
+
+Reading from a device (or fifo/named socket)
+--------------------------------------------
+
+input f 444 root root dd if=/dev/sda1 bs=1024 count=10
+
+Copies 10K from the device /dev/sda1 into the file input. Ordinarily, given a
+device, fifo, or named socket, Sqfstar will place that special file within the
+Squashfs filesystem; the above allows input from these special files to be
+captured and placed in the Squashfs filesystem.
+
+8.2 CREATING A BLOCK OR CHARACTER DEVICE
+----------------------------------------
+
+Pseudo definition
+
+Filename type mode uid gid major minor
+
+Where type is either
+ b - for block devices, and
+ c - for character devices
+
+mode is the octal mode specifier, similar to that expected by chmod.
+
+uid and gid can be either specified as a decimal number, or by name.
+
+For example:
+
+/dev/chr_dev c 666 root root 100 1
+/dev/blk_dev b 666 0 0 200 200
+
+creates a character device "/dev/chr_dev" with major:minor 100:1 and
+a block device "/dev/blk_dev" with major:minor 200:200, both with root
+uid/gid and a mode of rw-rw-rw.
+
+8.3 CREATING A DIRECTORY
+------------------------
+
+Pseudo definition
+
+Filename d mode uid gid
+
+mode is the octal mode specifier, similar to that expected by chmod.
+
+uid and gid can be either specified as a decimal number, or by name.
+
+For example:
+
+/pseudo_dir d 666 root root
+
+creates a directory "/pseudo_dir" with root uid/gid and mode of rw-rw-rw.
+
+8.4 CREATING A SYMBOLIC LINK
+----------------------------
+
+Pseudo definition
+
+Filename s mode uid gid symlink
+
+uid and gid can be either specified as a decimal number, or by name.
+
+Note mode is ignored, as symlinks always have "rwxrwxrwx" permissions.
+
+For example:
+
+symlink s 0 root root example
+
+Creates a symlink "symlink" to file "example" with root uid/gid.
+
+8.5 CREATING HARD LINKS (FILE REFERENCES)
+-----------------------------------------
+
+The "f" Pseudo definition allows a regular file to be created from the output of
+a command (or shell). Often this is used to reference a file outside the source
+directories by executing "cat", e.g.
+
+README f 0555 0 0 cat /home/phillip/latest-version/README
+
+Because this is a quite frequent use of the definition, an alternative faster
+"File reference" or Hard Link Pseudo definition exists:
+
+README l /home/phillip/latest-version/README
+
+Will create a reference to "/home/phillip/latest-version/README",
+and obviously the timestamp/mode and ownership of that file will be used.
+
+The definition also can be used to create additional references to files
+within the source directories. For instance if "phillip/latest/README" was a
+file being added to the filesystem, then
+
+README l phillip/latest/README
+
+Will create a Hard Link (and increment the nlink count on the inode).
+
+In both cases, the path to the file being referenced is the system
+filesystem path, and can be absolute (prefixed with /), or relative
+to the current working directory.
+
+There is an additional 'L' Pseudo definition, which closes a loophole in
+the above 'l' definition. The 'l' Pseudo definition cannot create references
+or Hard Links to files created by Pseudo definitions, because by
+definition they do not exist in the system filesystem.
+
+With 'L' the referenced file is expected to be a Pseudo file, and in this case
+the path is taken to be from the root of the Squashfs filesystem being created,
+e.g.
+
+char-dev c 0555 0 0 1 2
+
+link L char-dev
+
+Will create a Hard Link named "link" to the character device called "char-dev"
+created by the previous Pseudo definition.
+
+8.6 CREATING SOCKETS/FIFOS
+--------------------------
+
+Pseudo definition
+
+filename i mode uid gid [s|f]
+
+To create a Unix domain socket, 's' should be used, i.e.
+
+filename i 0777 root root s
+
+and to create a FIFO, 'f' should be used, i.e.
+
+filename i 0777 root root f
+
+8.7 ADDING EXTENDED ATTRIBUTES TO FILES
+---------------------------------------
+
+Pseudo definition
+
+filename x name=val
+
+Will add the extended attribute <name> to <filename> with the value <val>. See
+Section 7 for a description of the <val> formats supported.
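+
+As an illustrative sketch (the filename "etc/motd" is hypothetical and assumed
+to be present in the archive), an xattr can be added with the -p option:
+
+% sqfstar -p "etc/motd x user.comment=hello" image.sqfs < archive.tar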
+
+8.8 MODIFYING ATTRIBUTES OF AN EXISTING FILE
+--------------------------------------------
+
+Pseudo definition
+
+Filename m mode uid gid
+
+mode is the octal mode specifier, similar to that expected by chmod.
+
+uid and gid can be either specified as a decimal number, or by name.
+
+For example:
+
+dmesg m 666 root root
+
+Changes the attributes of the file "dmesg" in the filesystem to have
+root uid/gid and a mode of rw-rw-rw, overriding the attributes obtained
+from the TAR archive.
+
+
+9. EXTENDED PSEUDO FILE DEFINITIONS WITH TIMESTAMPS
+---------------------------------------------------
+
+The Pseudo file definitions described above do not allow the timestamp
+of the created file to be specified, and the files will be timestamped
+with the current time.
+
+Extended versions of the Pseudo file definitions are supported which
+take a <time> timestamp. These are distinguished from the previous
+definitions by using an upper-case type character. For example the "D"
+definition is identical to the "d" definition, but it takes a <time>
+timestamp.
+
+The list of extended definitions is:
+
+ filename F time mode uid gid command
+ filename D time mode uid gid
+ filename B time mode uid gid major minor
+ filename C time mode uid gid major minor
+ filename S time mode uid gid symlink
+ filename I time mode uid gid [s|f]
+ filename M time mode uid gid
+
+<time> can be either an unsigned decimal integer (which represents the
+seconds since the epoch of 1970-01-01 00:00 UTC), or a "date string"
+which is parsed and converted into an integer since the epoch, by calling
+the "date" command.
+
+Because most date strings have spaces, they will need to be quoted, and if
+entered on the command line, these quotes will need to be protected from the
+shell by backslashes, i.e.
+
+% sqfstar img.sqfs -p "file D \"1 jan 1980\" 0777 phillip phillip" < archive.tar
+
+Obviously anything "date" accepts as a valid string can be used, such as
+"yesterday", "last week" etc.
+
+
+10. MISCELLANEOUS OPTIONS
+------------------------
+
+The -info option displays the files/directories as they are compressed and
+added to the filesystem. The original uncompressed size of each file
+is printed, along with DUPLICATE if the file is a duplicate of a
+file in the filesystem.
+
+The -nopad option informs Sqfstar to not pad the filesystem to a 4K multiple.
+This is performed by default to enable the output filesystem file to be mounted
+by loopback, which requires files to be a 4K multiple. If the filesystem is
+being written to a block device, or is to be stored in a bootimage, the extra
+pad bytes are not needed.
diff --git a/USAGE-UNSQUASHFS-4.6 b/USAGE-UNSQUASHFS-4.6
new file mode 100644
index 0000000..0f7dc0e
--- /dev/null
+++ b/USAGE-UNSQUASHFS-4.6
@@ -0,0 +1,498 @@
+ UNSQUASHFS - a tool to extract and list Squashfs filesystems
+
+Unsquashfs allows you to decompress and extract a Squashfs filesystem without
+mounting it. It can extract the entire filesystem, or a specific
+file or directory.
+
+Unsquashfs can decompress all official Squashfs filesystem versions.
+
+The Unsquashfs usage info is:
+
+SYNTAX: unsquashfs [OPTIONS] FILESYSTEM [files to extract or exclude (with -excludes) or cat (with -cat)]
+
+Filesystem extraction (filtering) options:
+ -d[est] <pathname> extract to <pathname>, default "squashfs-root".
+ This option also sets the prefix used when
+ listing the filesystem
+ -max[-depth] <levels> descend at most <levels> of directories when
+ extracting
+ -excludes treat files on command line as exclude files
+ -ex[clude-list] list of files to be excluded, terminated
+ with ; e.g. file1 file2 ;
+ -extract-file <file> list of directories or files to extract.
+ One per line
+ -exclude-file <file> list of directories or files to exclude.
+ One per line
+ -match abort if any extract file does not match on
+ anything, and can not be resolved. Implies
+ -missing-symlinks and -no-wildcards
+ -follow[-symlinks] follow symlinks in extract files, and add all
+ files/symlinks needed to resolve extract file.
+ Implies -no-wildcards
+ -missing[-symlinks] Unsquashfs will abort if any symlink can't be
+ resolved in -follow-symlinks
+ -no-wild[cards] do not use wildcard matching in extract and
+ exclude names
+ -r[egex] treat extract names as POSIX regular expressions
+ rather than use the default shell wildcard
+ expansion (globbing)
+ -all[-time] <time> set all file timestamps to <time>, rather than
+ the time stored in the filesystem inode. <time>
+ can be an unsigned 32-bit int indicating
+ seconds since the epoch (1970-01-01) or a string
+ value which is passed to the "date" command to
+ parse. Any string value which the date command
+ recognises can be used such as "now", "last
+ week", or "Wed Feb 15 21:02:39 GMT 2023"
+ -cat cat the files on the command line to stdout
+ -f[orce] if file already exists then overwrite
+ -pf <file> output a pseudo file equivalent of the input
+ Squashfs filesystem, use - for stdout
+
+Filesystem information and listing options:
+ -s[tat] display filesystem superblock information
+ -max[-depth] <levels> descend at most <levels> of directories when
+ listing
+ -i[nfo] print files as they are extracted
+ -li[nfo] print files as they are extracted with file
+ attributes (like ls -l output)
+ -l[s] list filesystem, but do not extract files
+ -ll[s] list filesystem with file attributes (like
+ ls -l output), but do not extract files
+ -lln[umeric] same as -lls but with numeric uids and gids
+ -lc list filesystem concisely, displaying only files
+ and empty directories. Do not extract files
+ -llc list filesystem concisely with file attributes,
+ displaying only files and empty directories.
+ Do not extract files
+ -full[-precision] use full precision when displaying times
+ including seconds. Use with -linfo, -lls, -lln
+ and -llc
+ -UTC use UTC rather than local time zone when
+ displaying time
+ -mkfs-time display filesystem superblock time, which is an
+ unsigned 32-bit int representing the time in
+ seconds since the epoch (1970-01-01)
+
+Filesystem extended attribute (xattrs) options:
+ -no[-xattrs] do not extract xattrs in file system
+ -x[attrs] extract xattrs in file system (default)
+ -xattrs-exclude <regex> exclude any xattr names matching <regex>.
+ <regex> is a POSIX regular expression, e.g.
+ -xattrs-exclude '^user.' excludes xattrs from
+ the user namespace
+ -xattrs-include <regex> include any xattr names matching <regex>.
+ <regex> is a POSIX regular expression, e.g.
+ -xattrs-include '^user.' includes xattrs from
+ the user namespace
+
+Unsquashfs runtime options:
+ -v[ersion] print version, licence and copyright information
+ -p[rocessors] <number> use <number> processors. By default will use
+ the number of processors available
+ -q[uiet] no verbose output
+ -n[o-progress] do not display the progress bar
+ -percentage display a percentage rather than the full
+ progress bar. Can be used with dialog --gauge
+ etc.
+ -ig[nore-errors] treat errors writing files to output as
+ non-fatal
+ -st[rict-errors] treat all errors as fatal
+ -no-exit[-code] do not set exit code (to nonzero) on non-fatal
+ errors
+ -da[ta-queue] <size> set data queue to <size> Mbytes. Default 256
+ Mbytes
+ -fr[ag-queue] <size> set fragment queue to <size> Mbytes. Default
+ 256 Mbytes
+
+Miscellaneous options:
+ -h[elp] output this options text to stdout
+ -o[ffset] <bytes> skip <bytes> at start of FILESYSTEM. Optionally
+ a suffix of K, M or G can be given to specify
+ Kbytes, Mbytes or Gbytes respectively (default
+ 0 bytes).
+ -fstime synonym for -mkfs-time
+ -e[f] <extract file> synonym for -extract-file
+ -exc[f] <exclude file> synonym for -exclude-file
+ -L synonym for -follow-symlinks
+ -pseudo-file <file> alternative name for -pf
+
+
+By default Unsquashfs will extract all the files in the Squashfs filesystem
+into the directory "squashfs-root", placed in the current working directory.
+The location and name of the directory can be changed with the -dest option.
+
+Unsquashfs can also extract only part of the filesystem, with both extract
+filenames and exclude filenames supported. These can be combined to specify
+a set of directories to be extracted, and then a set of files or directories
+to be excluded within them.
+
+Section 2 describes using extract files with Unsquashfs, and Section 3 describes
+using exclude files with Unsquashfs.
+
+1. FREQUENTLY USED UNSQUASHFS OPTIONS
+-------------------------------------
+
+The "-dest" option specifies the directory that is used to decompress
+the filesystem data. If this option is not given then the filesystem is
+decompressed to the directory "squashfs-root" in the current working directory.
+The filename "." can used to specify the current directory.
+
+The "-ls" option can be used to list the contents of a filesystem without
+decompressing the filesystem data itself. The "-lls" option is similar
+but it also displays file attributes (ls -l style output). The "-lln"
+option is the same but displays uids and gids numerically.
+
+The "-lc" option is similar to the -ls option except it only displays files
+and empty directories. The -llc option displays file attributes.
+
+The "-info" option forces Unsquashfs to print each file as it is decompressed.
+The -"linfo" is similar but it also displays file attributes.
+
+The "max-depth" option limits extraction and listing of the filesystem to
+at most <level> directories.
+
+The "-force" option forces Unsquashfs to output to the destination
+directory even if files or directories already exist. This allows you
+to update an existing directory tree, or to extract into a partially
+filled directory. Without the "-force" option, Unsquashfs will
+refuse to overwrite any existing files, or to create any directories if they
+already exist. This is done to protect data in case of mistakes, and
+so the "-force" option should be used with caution.
+
+The "-stat" option displays filesystem superblock information. This is
+useful to discover the filesystem version, byte ordering, whether it has a NFS
+export table, and what options were used to compress the filesystem, etc.
+
+The -mkfs-time option displays the make filesystem time contained
+in the super-block. This is displayed as the number of seconds since
+the epoch of 1970-01-01 00:00:00 UTC.
+
+The -UTC option makes Unsquashfs display all times in the UTC time zone
+rather than using the default local time zone.
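+
+A sketch combining the listing and time options described above:
+
+% unsquashfs -lls -full-precision -UTC image.sqfs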
+
+2. USING "EXTRACT" FILES
+------------------------
+
+To extract a subset of the filesystem, the filenames or directory
+trees that are to be extracted can be specified on the command line. The
+files/directories should be specified using the full path to the
+files/directories as they appear within the Squashfs filesystem. The
+files/directories will also be extracted to those positions within the
+specified destination directory.
+
+The extract files can also be given in a file using the "-e[f]" option.
+
+Similarly to Mksquashfs, wildcard matching is performed on the extract
+files. Wildcard matching is enabled by default.
+
+Examples:
+
+ 1. unsquashfs image.sqsh 'test/*.gz'
+
+ Extract all files matching "*.gz" in the top level directory "test".
+
+ 2. unsquashfs image.sqsh '[Tt]est/example*'
+
+ Extract all files beginning with "example" inside top level directories
+ called "Test" or "test".
+
+ Using extended wildcards, negative matching is also possible.
+
+ 3. unsquashfs image.sqsh 'test/!(*data*).gz'
+
+ Extract all files matching "*.gz" in top level directory "test",
+ except those with "data" in the name.
+
+
+3. USING "EXCLUDE" FILES IN ADDITION TO "EXTRACT" FILES
+-------------------------------------------------------
+
+Unsquashfs allows exclude files to be specified, either on their own, or in
+addition to extract files.
+
+An exclude file is, obviously, the opposite of an extract file. Whereas an
+extract file limits the output of Unsquashfs to the files/directories matched by
+the extract file(s), exclude file(s) cause the entire filesystem to be output,
+with the sub-set of files matched by the exclude file(s) excluded.
+
+Often you want to output the filesystem where you're only interested in some
+files, which is where extract files are useful. But equally, you often want to
+output the entire filesystem, but, with some unwanted files removed. An example
+of this, perhaps, is where you've inadvertently archived binaries (.o files
+etc.) and you're only interested in extracting the source code.
+
+In the above example, trying to remove the binaries (.o etc) when you've only
+got "extract" files capability becomes extremely messy. It is a lot easier to
+do that with "exclude" files.
+
+Unsquashfs supports two ways of specifying exclude files, and it supports
+"anchored" and "non-anchored" excludes. The two ways of specifying exclude
+files are described first.
+
+The most straightforward way is to tell Unsquashfs to treat extract files as
+exclude files. That is, extract files are specified on the command line as a
+list of files after the options and filesystem image. Giving the -excludes
+option tells Unsquashfs to treat them as exclude files.
+
+To make this clearer,
+
+% unsquashfs img file1 file2 file3
+
+Tells Unsquashfs to extract file1, file2 and file3. But,
+
+% unsquashfs -excludes img file1 file2 file3
+
+Tells Unsquashfs to exclude file1, file2, and file3.
+
+The perhaps obvious problem with this is that it allows you to choose between
+"extract" files or "exclude" files, but it doesn't allow you to have both
+"extract" files and "exclude" files on the command line.
+
+To get around this problem Unsquashfs supports another way of specifying exclude
+files. That is to use the option -exclude-list. This option allows a list of
+exclude files to be specified, terminated by a ";". The necessity of using ";"
+to terminate the list is because this is a normal option; without it, any
+further entries on the command line would be interpreted as being part of the
+list.
+
+For example, the following are equivalent:
+
+% unsquashfs -excludes img file1 file2 file3
+
+% unsquashfs -exclude-list file1 file2 file3 \; img
+
+Note the back-slashing of ";" to prevent it from being interpreted by the shell
+as a special character.
+
+Obviously, where the -exclude-list option comes into its own is when it is mixed
+with extract files, for example:
+
+% unsquashfs -exclude-list dir1/file1 dir2/file2 \; img dir1 dir2
+
+This tells Unsquashfs to extract directories "dir1" and "dir2", and then to
+exclude the files "dir1/file1" and "dir2/file2".
+
+From this it should be clear that the precedence is extract files and then
+exclude files, because it doesn't make any sense otherwise. Extract files
+define the set of directories/files to be extracted, and exclude files remove
+directories/files from that sub-set.
+
+Now the concepts of "anchored" and "non-anchored" exclude files can be
+explained. An anchored exclude is one which matches from the root of the
+directory and nowhere else, a non-anchored exclude matches anywhere. For
+example given the directory hierarchy "a/b/c/a/b", the anchored exclude
+"a/b" will match "a/b" at the root of the directory hierarchy, but
+it will not match the "/a/b" sub-directory within directory "c", whereas a
+non-anchored exclude would.
+
+A couple of examples should make this clearer.
+
+Anchored excludes
+
+ 1. unsquashfs -excludes image.sqsh 'test/*.gz'
+
+ Exclude all files matching "*.gz" in the top level directory "test".
+
+ 2. unsquashfs -excludes image.sqsh '*/[Tt]est/example*'
+
+ Exclude all files beginning with "example" inside directories called
+ "Test" or "test", that occur inside any top level directory.
+
+ Using extended wildcards, negative matching is also possible.
+
+ 3. unsquashfs -excludes image.sqsh 'test/!(*data*).gz'
+
+ Exclude all files matching "*.gz" in top level directory "test",
+ except those with "data" in the name.
+
+Non-anchored excludes
+
+ By default excludes match from the top level directory, but it is
+ often useful to exclude a file matching anywhere in the Squashfs filesystem.
+ For this non-anchored excludes can be used, specified by pre-fixing the
+ exclude with "...".
+
+ Examples:
+
+ 1. unsquashfs -excludes image.sqsh '... *.gz'
+
+ Exclude files matching "*.gz" anywhere in the Squashfs filesystem.
+ For example this will match "example.gz", "test/example.gz", and
+ "test/test/example.gz".
+
+ 2. unsquashfs -excludes image.sqsh '... [Tt]est/*.gz'
+
+ Exclude files matching "*.gz" inside directories called "Test" or
+ "test" that occur anywhere in the Squashfs filesystem.
+
+ Again, using extended wildcards, negative matching is also possible.
+
+ 3. unsquashfs -excludes image.sqsh '... !(*data*).gz'
+
+ Exclude all files matching "*.gz" anywhere in the Squashfs filesystem,
+ except those with "data" in the name.
+
+
+4. FOLLOWING SYMBOLIC LINKS IN EXTRACT FILES
+--------------------------------------------
+
+Unsquashfs walks the extract file paths as it recursively descends
+the Squashfs filesystem from top to bottom. During that recursive
+extraction symbolic links are obviously not followed (see below).
+
+The consequence of this is if an extract file ends in a symbolic link
+(leaf component) it is extracted and left as a dangling symlink,
+unless the real file it links to has also been specified as an extract file.
+
+Additionally, if the extract file pathname traverses symbolic links
+while walking the pathname (i.e. the extract file pathname has
+embedded symbolic links), the extraction will stop at that point.
+
+One way of solving this problem is by following (or dereferencing) the
+symbolic link(s), and replacing them with what they actually link to,
+in a similar way to "cp -L". But this is dangerous, and can cause
+Unsquashfs to produce output which does not match the input Squashfs
+filesystem, which is something Unsquashfs should never do.
+
+The reason for this is that whereas "cp -L" is dereferencing the *input*,
+Unsquashfs is dereferencing the *output*. If a filesystem has a real
+filename, say
+
+ a/b/c/hello_world
+
+and two symlinks
+
+ a/symlink1 ---> b/c/hello_world
+ a/symlink2 ---> b/c
+
+There are multiple different ways (or paths) to the *single* hello_world file.
+
+ a/b/c/hello_world
+ a/symlink1
+ a/symlink2/hello_world
+
+If Unsquashfs was given all three paths as extract files, and Unsquashfs
+dereferenced them on output, you would get *three* copies of the hello_world
+file, and two copies of directory "c".
+
+Superficially the output may look the same, but, it may not work the same,
+and obviously edits to the one hello_world file will not get reflected in
+the other copies. Any option that can be accidentally or maliciously used
+to produce such an output is too dangerous to be added.
+
+Unsquashfs solves the problem in an equivalent way, but one which does not alter
+the output, and so it is a completely safe option.
+
+If the -follow-symlinks option is specified, Unsquashfs will canonicalise
+the extract files to produce the canonical pathname (that is the
+"real" pathname without any symbolic links). It will then add all
+the symbolic links necessary to ensure that the extract file can be
+resolved.
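+
+For example, given the image layout above, a hypothetical invocation such as
+
+ unsquashfs -follow-symlinks image.sqsh a/symlink2/hello_world
+
+would extract the canonical file "a/b/c/hello_world", together with the
+"a/symlink2" symbolic link needed to resolve the requested pathname.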
+
+The -missing-symlinks option is similar to -follow-symlinks except it
+will cause Unsquashfs to abort if any symbolic link cannot be resolved.
+
+Note: as a side effect of the canonicalisation, with the above options
+enabled, extract filenames can also now have "." and ".." elements within
+their pathnames.
+
+5. DEALING WITH ERRORS
+----------------------
+
+Unsquashfs splits errors into two categories: fatal errors and non-fatal
+errors.
+
+Fatal errors are those which cause Unsquashfs to abort instantly.
+These are generally due to failure to read the filesystem (corruption),
+and/or failure to write files to the output filesystem, due to an I/O error
+or lack of space. Generally, anything which is unexpected is a fatal error.
+
+Non-fatal errors are generally where support is lacking in the
+output filesystem, and they can be considered to be expected failures.
+This includes the inability to write extended attributes (xattrs) to
+a filesystem that doesn't support them, the inability to create certain
+file types on a filesystem that doesn't support them (e.g. symbolic links
+on VFAT), and the inability to execute privileged operations as a user process.
+
+The user may well know the filesystem cannot support certain operations
+and would prefer Unsquashfs to ignore them without aborting.
+
+In the past Unsquashfs was much more tolerant of errors; now a significant
+number of errors that were previously non-fatal have been hardened to fatal.
+
+-ignore-errors
+
+This makes Unsquashfs behave like previous versions, and treats more
+errors as non-fatal.
+
+-strict-errors
+
+This makes Unsquashfs treat every error as fatal, and it will abort
+instantly.
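+
+For example, when unpacking to a destination filesystem such as VFAT (which
+cannot hold symbolic links or xattrs), something like
+
+ unsquashfs -ignore-errors -d output image.sqsh
+
+should carry on past those expected failures, whereas using -strict-errors
+instead would abort on the first one.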
+
+
+6. UNSQUASHFS PSEUDO FILE OUTPUT
+--------------------------------
+
+If the Pseudo file (-pf) option is given, Unsquashfs will output a Pseudo file
+representation of the input filesystem. This pseudo file can be used as input
+to Mksquashfs to reproduce the Squashfs filesystem without having to unpack the
+input filesystem image.
+
+The Pseudo file output is designed to be editable, and the pseudo file entries
+can be altered (name, date, ownership etc.) or added/deleted before any new
+filesystem is rebuilt.
+
+The format of the pseudo file is, obviously, the same as the pseudo file
+definitions supported by Mksquashfs.
+
+Regular files (with data) are supported with the "R" pseudo definition, which is
+
+filename R time mode uid gid length offset sparse
+
+<length> specifies the size of the file, and <offset> is a byte offset into the
+pseudo file where the data is stored. This offset is taken from the start of
+the data section (see below), rather than from the start of the file (this is to
+allow the pseudo file entries to be edited without altering the data offsets).
+Data is deliberately stored out of line (i.e. unlike tar), to make the file
+easier to edit manually.
+
+<sparse> is a special boolean parameter. It controls whether the file is
+presented to user-space as a "sparse" file when the filesystem is mounted,
+or is extracted as a "sparse" file by Unsquashfs. This flag is important
+because Mksquashfs will always convert a file to a "sparse" file in the
+filesystem if it has block sized sequences of zeros. These zeros will not be
+stored, and will be replaced with a hole. But Mksquashfs is careful to
+preserve the semantics of the files it stores: if a file was originally
+non-sparse it will flag that, so it doesn't appear as sparse to user-space
+or get copied as sparse by Unsquashfs.
+
+An example pseudo file output for a filesystem consisting of a directory, two
+regular files, and a symbolic link might be
+
+ / D 1625536033 1777 0 0
+ test D 1625536969 755 1000 100
+ test/hello_world R 1625535696 644 1000 100 12 0 0
+ test/regfile R 1625536928 644 1000 100 37 12 0
+ test/symlink S 1625535712 777 1000 100 hello_world
+ #
+ # START OF DATA - DO NOT MODIFY
+ #
+ Hello World
+ This is the data contents of regfile
+
+Here you should be able to see the most important aspects of the layout: the
+pseudo file entries appear at the start of the file, and the data is stored at
+the end. The two sections are separated by 3 special marker lines, containing
+the words "START OF DATA - DO NOT MODIFY".
+
+The following is a small example of how someone might edit and rebuild a
+Squashfs image.
+
+$ unsquashfs -pf pseudo test.sqsh
+$ sed -i "0,/# START OF DATA/s/\([^ ]* . [0-9]* [0-7]* \)[0-9]*/\11234/g" pseudo
+$ sed -i "0,/# START OF DATA/s/hello_world/hello/g" pseudo
+$ mksquashfs - test2.sqsh -pf pseudo
+
+This will change the ownership to uid 1234, and change the name of "hello_world"
+to "hello".
diff --git a/examples/pseudo-file.example b/examples/pseudo-file.example
new file mode 100644
index 0000000..f866d90
--- /dev/null
+++ b/examples/pseudo-file.example
@@ -0,0 +1,74 @@
+# Pseudo file example
+
+# Mksquashfs supports pseudo files; these allow fake files, directories,
+# character and block devices to be specified and added to the Squashfs
+# filesystem being built, rather than requiring them to be present in the
+# source directories.
+#
+# This, for example, allows device nodes to be added to the filesystem without
+# requiring root access.
+
+# Mksquashfs 4.1 adds support for "dynamic pseudo files" and a modify operation.
+# Dynamic pseudo files allow files to be dynamically created when Mksquashfs
+# is run, their contents being the result of running a command or piece of
+# shell script. The modify operation allows the mode/uid/gid of an existing
+# file in the source filesystem to be modified.
+
+# Two Mksquashfs options are supported: -p allows one pseudo definition to be
+# specified on the command line, and -pf allows a pseudo file to be specified
+# containing a list of pseudo definitions, one per line.
+
+# Pseudo file examples
+# Run mksquashfs . /tmp/img -pf pseudo-file.example
+# to see their effect
+
+# Creating dynamic file examples
+
+# Create a file "dmesg" containing the output from dmesg.
+dmesg f 444 root root dmesg
+
+
+# Create a file RELEASE containing the release name, date, build host, and
+# an incrementing version number. The incrementing version is a side-effect
+# of executing the shell script, and ensures every time Mksquashfs is run a
+# new version number is used without requiring any other shell scripting.
+RELEASE f 444 root root \
+ if [ ! -e /tmp/ver ]; then \
+ echo 0 > /tmp/ver; \
+ fi; \
+ ver=`cat /tmp/ver`; \
+ ver=$((ver +1)); \
+ echo $ver > /tmp/ver; \
+ echo -n "release x.x"; \
+ echo "-dev #"$ver `date` "Build host" `hostname`
+
+
+# Copy 10K from the device /dev/sda1 into the file input. Ordinarily
+# Mksquashfs given a device, fifo, or named socket will place that special file
+# within the Squashfs filesystem; this allows input from these special
+# files to be captured and placed in the Squashfs filesystem.
+input f 444 root root dd if=/dev/sda1 bs=1024 count=10
+
+
+# Creating a block or character device examples
+
+# Create a character device "chr_dev" with major:minor 100:1 and
+# a block device "blk_dev" with major:minor 200:200, both with root
+# uid/gid and a mode of rw-rw-rw.
+chr_dev c 666 root root 100 1
+blk_dev b 666 0 0 200 200
+
+
+# Creating a directory example
+
+# create a directory "pseudo_dir" with root uid/gid and mode of r--r--r--.
+pseudo_dir d 444 root root
+
+
+# Modifying attributes of an existing file example
+
+# Change the attributes of the file "INSTALL" in the filesystem to have
+# root uid/gid and a mode of rw-rw-rw, overriding the attributes obtained
+# from the source filesystem.
+INSTALL m 666 root root
+
diff --git a/generate-manpages/functions.sh b/generate-manpages/functions.sh
new file mode 100755
index 0000000..ded09f2
--- /dev/null
+++ b/generate-manpages/functions.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+# Sanity check, check that the non-builtin echo exists and is in PATH
+if ! which echo > /dev/null 2>&1; then
+ echo "$0: This script needs the non-builtin echo, which is not in your PATH." >&2
+ echo "$0: Fix PATH or install before running this script!" >&2
+ exit 1
+fi
+
+ECHO=$(which echo)
+
+print() {
+ ${ECHO} "$@"
+}
+
+error() {
+ ${ECHO} "$@" >&2
+}
diff --git a/generate-manpages/install-manpages.sh b/generate-manpages/install-manpages.sh
new file mode 100755
index 0000000..23ab6a2
--- /dev/null
+++ b/generate-manpages/install-manpages.sh
@@ -0,0 +1,82 @@
+#!/bin/sh
+
+
+if [ $# -lt 3 ]; then
+ echo "$0: Insufficient arguments." >&2
+ echo "$0: <path to git-root/source-root> <path to install manpages> <use prebuilt manpages=y/n>" >&2
+ exit 1;
+fi
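+
+# Example invocation (the install path here is only illustrative):
+#   ./install-manpages.sh /path/to/squashfs-tools-source /usr/local/man/man1 n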
+
+if [ ! -f $1/generate-manpages/functions.sh ]; then
+ echo "$0: <arg1> doesn't seem to contain the path to the git-root/source-root" >&2
+ exit 1
+fi
+
+. $1/generate-manpages/functions.sh
+
+if [ -z "$2" ]; then
+ error "$0: Install path for manpages empty. Skipping manpage install"
+ exit 0
+fi
+
+# Sanity check, check that the utilities this script depends on, are in PATH
+for i in gzip; do
+ if ! which $i > /dev/null 2>&1; then
+ error "$0: This script needs $i, which is not in your PATH."
+ error "$0: Fix PATH or install before running this script!"
+ exit 1
+ fi
+done
+cd $1/generate-manpages
+
+# We must have help2man to generate "custom" manpages for the
+# built squashfs-tools, incorporating build choices (the
+# compressors built, default compressors, XATTR support etc).
+#
+# If help2man doesn't exist, or the manpage generation fails, use
+# the pre-built manpages.
+
+if [ $3 = "y" ]; then
+ print "$0: Using pre-built manpages"
+elif which help2man > /dev/null 2>&1; then
+ for i in mksquashfs unsquashfs sqfstar sqfscat; do
+ if ! ./$i-manpage.sh ../squashfs-tools ../squashfs-tools/$i.1; then
+ error "$0: Failed to generate manpage. Falling back to using pre-built manpages"
+ failed="y"
+ break
+ fi
+ done
+
+ [ -z "$failed" ] && source=../squashfs-tools
+else
+ error "$0: ERROR - No help2man in PATH. Cannot generate manpages."
+ failed="y"
+fi
+
+if [ -z "$source" ]; then
+ if [ "$failed" = "y" ]; then
+ error "$0: WARNING: Installing pre-built manpages."
+ error "$0: WARNING: These pages are built with the Makefile defaults, and all"
+ error "$0: WARNING: the compressors configured (except the deprecated lzma). This may not"
+ error "$0: WARNING: match your build configuration."
+ error -e "\n$0: Set USE_PREBUILT_MANPAGES to "y" in Makefile, to avoid these errors/warnings"
+ fi
+ source=../manpages
+fi
+
+if ! mkdir -p $2; then
+ error "$0: Creating manpage install directory failed. Aborting"
+ exit 1
+fi
+
+for i in mksquashfs unsquashfs sqfstar sqfscat; do
+ if ! cp $source/$i.1 $2/$i.1; then
+ error "$0: Copying manpage to install directory failed. Aborting"
+ exit 1
+ fi
+
+ if ! gzip -n -f9 $2/$i.1; then
+ error "$0: Compressing installed manpage failed. Aborting"
+ exit 1
+ fi
+done
diff --git a/generate-manpages/mksquashfs-manpage.sh b/generate-manpages/mksquashfs-manpage.sh
new file mode 100755
index 0000000..09658f5
--- /dev/null
+++ b/generate-manpages/mksquashfs-manpage.sh
@@ -0,0 +1,253 @@
+#!/bin/sh
+
+# This script generates a manpage from the mksquashfs -help and -version
+# output, using help2man. The script does various modifications to the
+# output from -help and -version, before passing it to help2man, to allow
+# it to be successfully processed into a manpage by help2man.
+
+if [ ! -f functions.sh ]; then
+ echo "$0: this script should be run in the <git-root/source-root>/generate-manpages directory" >&2
+ exit 1
+fi
+
+. ./functions.sh
+
+if [ $# -lt 2 ]; then
+ error "$0: Insufficient arguments"
+ error "$0: <path to mksquashfs> <output file>"
+ exit 1
+fi
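+
+# For example, this is how install-manpages.sh invokes this script from the
+# generate-manpages directory:
+#   ./mksquashfs-manpage.sh ../squashfs-tools ../squashfs-tools/mksquashfs.1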
+
+# Sanity check, ensure $1 points to a directory with a runnable Mksquashfs
+if [ ! -x $1/mksquashfs ]; then
+ error "$0: <arg1> doesn't point to a directory with Mksquashfs in it!"
+ error "$0: <arg1> should point to the directory with the Mksquashfs" \
+ "you want to generate a manpage for."
+ exit 1
+fi
+
+# Sanity check, check that the utilities this script depends on, are in PATH
+for i in expand sed help2man; do
+ if ! which $i > /dev/null 2>&1; then
+ error "$0: This script needs $i, which is not in your PATH."
+ error "$0: Fix PATH or install before running this script!"
+ exit 1
+ fi
+done
+
+tmp=$(mktemp -d)
+
+# Run mksquashfs -help, expand TABS to spaces, and output the help text to
+# $tmp/mksquashfs.help. This is to allow it to be modified before
+# passing to help2man.
+
+if ! $1/mksquashfs -help > $tmp/mksquashfs.help2 2>&1; then
+ error "$0: Running Mksquashfs failed. Cross-compiled or incompatible binary?"
+ exit 1
+fi
+
+expand $tmp/mksquashfs.help2 > $tmp/mksquashfs.help
+
+# Run mksquashfs -version, and output the version text to
+# $tmp/mksquashfs.version. This is to allow it to be modified before
+# passing to help2man.
+
+$1/mksquashfs -version > $tmp/mksquashfs.version
+
+# Create a dummy executable in $tmp, which outputs $tmp/mksquashfs.help
+# and $tmp/mksquashfs.version. This gets around the fact help2man wants
+# to pass --help and --version directly to mksquashfs, rather than take the
+# (modified) output from $tmp/mksquashfs.help and $tmp/mksquashfs.version
+
+print "#!/bin/sh
+if [ \$1 = \"--help\" ]; then
+ cat $tmp/mksquashfs.help
+else
+ cat $tmp/mksquashfs.version
+fi" > $tmp/mksquashfs.sh
+
+chmod u+x $tmp/mksquashfs.sh
+
+# help2man gets confused by the version date returned by -version,
+# and includes it in the version string
+
+sed -i "s/ (.*)$//" $tmp/mksquashfs.version
+
+# help2man expects copyright to have an upper-case C ...
+
+sed -i "s/^copyright/Copyright/" $tmp/mksquashfs.version
+
+# help2man doesn't pick up the author from the version. Easiest to add
+# it here.
+
+print >> $tmp/mksquashfs.version
+print "Written by Phillip Lougher <phillip@squashfs.org.uk>" >> $tmp/mksquashfs.version
+
+# help2man expects "Usage: ", and so rename "SYNTAX:" to "Usage: "
+
+sed -i "s/^SYNTAX:/Usage: /" $tmp/mksquashfs.help
+
+# The Usage text expands over two lines, and that confuses help2man.
+# So concatenate the lines if the second isn't empty
+
+sed -i "/^Usage/ {
+N
+/\n$/b
+s/\n/ /
+}" $tmp/mksquashfs.help
+
+# Man pages expect the options to be in the "Options" section. So insert
+# Options section after Usage
+
+sed -i "/^Usage/a *OPTIONS*" $tmp/mksquashfs.help
+
+# help2man expects options to start in the 2nd column
+
+sed -i "s/^-/ -/" $tmp/mksquashfs.help
+sed -i "s/^ *-X/ -X/" $tmp/mksquashfs.help
+
+# help2man expects the options usage to be separated from the
+# option and operands text by at least 2 spaces.
+
+sed -i -e "s/expr> as/expr> as/" -e "s/exp> as/exp> as/" -e "s/file> as/file> as/" -e "s/regex> exclude/regex> exclude/" -e "s/regex> include/regex> include/" $tmp/mksquashfs.help
+
+# Expand certain operands truncated in help text due to lack of space
+
+sed -i -e "s/act@/action@/g" -e "s/expr>/expression>/g" -e "s/exp>/expression>/" -e "s/<f>/<file>/g" $tmp/mksquashfs.help
+
+# Uppercase the options operands (between < and > ) to make it conform
+# more to man page standards
+
+sed -i "s/<[^>]*>/\U&/g" $tmp/mksquashfs.help
+
+# Remove the "<" and ">" around options operands to make it conform
+# more to man page standards
+
+sed -i -e "s/<//g" -e "s/>//g" $tmp/mksquashfs.help
+
+# help2man doesn't deal well with the list of supported compressors.
+# So concatenate them onto one line with commas
+
+sed -i "/^ -comp/ {
+N
+s/\n */. /
+s/:/: /
+
+N
+s/\n *\([^ ]*$\)/\1/
+s/\n *\([^ ]* (default)$\)/\1/
+
+: again
+N
+/\n -noI/b
+
+s/\n *\([^ ]*$\)/, \1/
+s/\n *\([^ ]* (default)$\)/, \1/
+b again
+}" $tmp/mksquashfs.help
+
+# help2man doesn't deal well with the list of lzo1* algorithms.
+# So concatenate them onto one line with commas
+
+sed -i "/^ *lzo1x_1/ {
+s/\n *\([^ ]*$\)/\1/
+s/\n *\([^ ]* (default)$\)/\1/
+
+: again
+N
+/\n *lzo/!b
+
+s/\n *\([^ ]*$\)/, \1/
+s/\n *\([^ ]* (default)$\)/, \1/
+b again
+}" $tmp/mksquashfs.help
+
+# Make the pseudo file definitions into "options" so they're handled
+# properly by help2man
+
+sed -i "s/^\"filename/ -p \"filename/" $tmp/mksquashfs.help
+
+# Make each compressor entry in the compressors available section, a subsection
+# First, have to deal with the deprecated lzma compressor separately, because
+# it doesn't have any options (i.e. text prefixed with -).
+
+sed -i "/^ *lzma/ {
+s/^ *\(lzma.*$\)/\1:/
+n
+s/^ */ /
+} " $tmp/mksquashfs.help
+
+# Now deal with the others
+
+sed -i -e "s/^ *\(gzip.*$\)/\1:/" -e "s/^ *\(lzo$\)/\1:/" \
+ -e "s/^ *\(lzo (default)$\)/\1:/" -e "s/^ *\(lz4.*$\)/\1:/" \
+ -e "s/^ *\(xz.*$\)/\1:/" -e "s/^ *\(zstd.*$\)/\1:/" \
+ $tmp/mksquashfs.help
+
+# Concatenate the options text (normal options, pseudo file definitions and
+# compressor options) on to one line. Add a full stop to the end of the
+# options text
+
+sed -i "/^ -/ {
+:option
+s/^ *-/ -/
+
+/ -.* /!s/.$/& /
+
+:again
+N
+/\n$/b print
+/\n[^ ]/b print
+/\n -/b print
+s/\n */ /
+b again
+
+:print
+s/ \n/.\n/
+s/\([^.]\)\n/\1.\n/
+P
+s/^.*\n//
+/^ *-/b option
+}" $tmp/mksquashfs.help
+
+# Concatenate the SOURCE_DATE_EPOCH text on to one line. Indent the line by
+# two and add a full stop to the end of the line
+
+sed -i " /SOURCE_DATE_EPOCH/ {
+s/SOURCE_DATE_EPOCH/ SOURCE_DATE_EPOCH/
+
+:again
+N
+/\n$/b print
+s/\n */ /
+b again
+
+:print
+s/\([^.]\)\n/\1.\n/
+}" $tmp/mksquashfs.help
+
+# Make Compressors available header into a manpage section
+
+sed -i "s/\(Compressors available and compressor specific options\):/*\1*/" $tmp/mksquashfs.help
+
+# Make pseudo definition format header into a manpage section
+
+sed -i "s/\(Pseudo file definition format\):/*\1*/" $tmp/mksquashfs.help
+
+# Add reference to manpages for other squashfs-tools programs
+sed -i "s/See also:/See also:\nunsquashfs(1), sqfstar(1), sqfscat(1)\n/" $tmp/mksquashfs.help
+
+# Make See also header into a manpage section
+
+sed -i "s/\(See also\):/*\1*/" $tmp/mksquashfs.help
+
+# Make Environment header into a manpage section
+
+sed -i "s/\(Environment\):/*\1*/" $tmp/mksquashfs.help
+
+if ! help2man -Ni mksquashfs.h2m -o $2 $tmp/mksquashfs.sh; then
+ error "$0: help2man returned error. Aborting"
+ exit 1
+fi
+
+rm -rf $tmp
diff --git a/generate-manpages/mksquashfs.h2m b/generate-manpages/mksquashfs.h2m
new file mode 100644
index 0000000..8f9acaa
--- /dev/null
+++ b/generate-manpages/mksquashfs.h2m
@@ -0,0 +1,150 @@
+[Name]
+mksquashfs - tool to create and append to squashfs filesystems
+
+[Description]
+Squashfs is a highly compressed read-only filesystem for Linux.
+It uses gzip/xz/lzo/lz4/zstd compression to compress files, inodes
+and directories. Inodes in the system are very small and all blocks are
+packed to minimise data overhead. Block sizes greater than 4K are supported
+up to a maximum of 1Mbytes (default block size 128K).
+
+Squashfs is intended for general read-only filesystem use, for archival
+use (i.e. in cases where a .tar.gz file may be used), and in constrained
+block device/memory systems (e.g. embedded systems) where low overhead is
+needed.
+
+[Examples]
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS
+Create a Squashfs filesystem from the contents of DIRECTORY, writing the output
+to IMAGE.SQFS. Mksquashfs will use the default compressor (normally gzip), and
+block size of 128 Kbytes.
+.TP
+mksquashfs DIRECTORY FILE1 FILE2 IMAGE.SQFS
+Create a Squashfs filesystem containing DIRECTORY and FILE1 and FILE2. If
+multiple sources are specified on the command line they will be combined into
+a single directory.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -b 1M -comp zstd
+Use a block size of 1 Mbyte and Zstandard compression to create the filesystem.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -e file1 file2
+Exclude file1 and file2 from DIRECTORY when creating filesystem. No wildcard
+matching of files.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -wildcards -e "*.gz"
+Exclude anything in DIRECTORY which matches the wildcard pattern "*.gz".
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -wildcards -e "... *.gz"
+Exclude files which match the wildcard pattern "*.gz" anywhere within DIRECTORY
+and its sub-directories. The initial "..." indicates the wildcard pattern is
+"non-anchored" and will match anywhere.
+.PP
+Note: when passing wildcarded names to Mksquashfs, they should be quoted (as in
+the above examples), to ensure that they are not processed by the shell.
+
+.SS Using pseudo file definitions
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -p "build_dir d 0644 0 0"
+Create a directory called "build_dir" in the output filesystem.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -p "version.txt l /tmp/build/version"
+Create a reference called "version.txt" to a file outside DIRECTORY, which acts
+as if the file "/tmp/build/version" was copied or hard-linked into DIRECTORY
+before calling Mksquashfs.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -p "date.txt f 0644 0 0 date"
+Create a file called "date.txt" which holds the output (stdout) from running
+the "date" command.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -p "\\"hello world\\" f 0644 0 0 date"
+As above, but showing that filenames can have spaces, if they are quoted.
+The quotes need to be backslashed to protect them from the shell.
+.TP
+mksquashfs - IMAGE.SQFS -p "input f 0644 root root dd if=/dev/sda1 bs=1024" -p "/ d 0644 0 0"
+Create a file containing the contents of the partition /dev/sda1. Ordinarily,
+Mksquashfs, given a device, fifo, or named socket, will place that special file
+within the Squashfs filesystem; the above allows input from these special files
+to be captured and placed in the Squashfs filesystem. Note there are no other
+sources than the pseudo file, and so the command line source is "-". If there
+are no other sources than pseudo files, the root (/) directory must be defined
+too, as seen in this example.
+.TP
+unsquashfs -pf - IMAGE.SQFS | mksquashfs - NEW.SQFS -pf -
+Transcode IMAGE.SQFS to NEW.SQFS by piping the pseudo file output from
+Unsquashfs to Mksquashfs using stdout and stdin. This can convert from
+earlier Squashfs filesystems or change compression algorithm, block size etc.
+without needing to unpack into an intermediate directory or file.
+.PP
+Note: pseudo file definitions should be quoted (as in the above examples), to
+ensure that they are passed to Mksquashfs as a single argument, and to ensure
+that they are not processed by the shell.
+
+.SS Using extended attribute options
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -no-xattrs
+Do not store any extended attributes in the Squashfs filesystem. Any extended
+attributes in the source files will be ignored.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -xattrs-include "^user."
+Filter the extended attributes in the source files, and only store extended
+attributes in the user namespace in the Squashfs filesystem.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -xattrs-exclude "^user."
+Filter the extended attributes in the source files, and don't store any
+extended attributes in the user namespace in the Squashfs filesystem.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -xattrs-add "user.comment=hello world"
+Add the extended attribute called "user.comment" with the content "hello world"
+to all files and directories in the Squashfs filesystem.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -xattrs-add "user.comment=0thello world\\012"
+Add the extended attribute called "user.comment" to all files and directories,
+but in this case the contents of the extended attribute will be "hello world"
+with a trailing newline character (012 octal).
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -xattrs-add "user.comment=0saGVsbG8gd29ybGQ="
+Add the extended attribute called "user.comment" to all files and directories,
+where the value is given in base64 encoding, representing "hello world".
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -action "-xattrs-include(^user.) @ type(f)"
+Filter the extended attributes but only in regular files (type f), and only
+store extended attributes in the user namespace.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -p "hello_world x user.comment=0tsalve mundi\\012"
+Add the extended attribute called "user.comment" to the file called
+"hello_world", with the contents of the extended attribute being "salve mundi"
+with a trailing newline character (012 octal).
+
+.SS Using Actions to not compress, change attributes etc.
+.TP
+mksquashfs DIRECTORY IMAGE.SQSH -action "uncompressed @ (name(*.jpg) || name(*.mpg) ) || (name(*.img) && filesize(+1G))"
+Specify that any files matching the wildcards "*.jpg" and "*.mpg" should not be
+compressed. Additionally, it also specifies any files matching the wildcard
+"*.img" and are larger than 1 Gigabyte should be uncompressed too. This shows
+test operators can be combined with logical expressions.
+.TP
+mksquashfs DIRECTORY IMAGE.SQSH -action "chmod(o+r)@! perm(o+r)"
+If any files within DIRECTORY are not readable by "others", then make them
+readable by others in the Squashfs filesystem.
+.TP
+mksquashfs DIRECTORY IMAGE.SQSH -action "uid(phillip)@! perm(o+r)"
+As previous, match on any files which are not readable by "others", but, in
+this case change the owner of the file to "phillip" in the Squashfs filesystem.
+.TP
+mksquashfs DIRECTORY IMAGE.SQSH -action "prune @ type(l) && ! exists"
+Delete any symbolic link within DIRECTORY which points outside of DIRECTORY,
+i.e. will be unresolvable in the Squashfs filesystem.
+.TP
+mksquashfs DIRECTORY IMAGE.SQSH -action "exclude @ depth(3)"
+Create a Squashfs filesystem containing the two top most levels (contents of
+DIRECTORY and immediate sub-directories), and exclude anything at level 3 or
+below.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -action "-xattrs-include(^user.) @ type(f)"
+Filter the extended attributes but only in regular files (type f), and only
+store extended attributes in the user namespace.
+.PP
+Note: actions should be quoted (as in the above examples), to ensure that they
+are passed to Mksquashfs as a single argument, and to ensure that they are not
+processed by the shell.
diff --git a/generate-manpages/sqfscat-manpage.sh b/generate-manpages/sqfscat-manpage.sh
new file mode 100755
index 0000000..305c2a9
--- /dev/null
+++ b/generate-manpages/sqfscat-manpage.sh
@@ -0,0 +1,189 @@
+#!/bin/sh
+
+# This script generates a manpage from the sqfscat -help and -version
+# output, using help2man. The script does various modifications to the
+# output from -help and -version, before passing it to help2man, to allow
+# it to be successfully processed into a manpage by help2man.
+
+if [ ! -f functions.sh ]; then
+ echo "$0: this script should be run in the <git-root/source-root>/generate-manpages directory" >&2
+ exit 1
+fi
+
+. ./functions.sh
+
+if [ $# -lt 2 ]; then
+ error "$0: Insufficient arguments"
+ error "$0: <path to sqfscat> <output file>"
+ exit 1
+fi
+
+# Sanity check, ensure $1 points to a directory with a runnable Sqfscat
+if [ ! -x $1/sqfscat ]; then
+ error "$0: <arg1> doesn't point to a directory with Sqfscat in it!"
+ error "$0: <arg1> should point to the directory with the Sqfscat" \
+ "you want to generate a manpage for."
+ exit 1
+fi
+
+# Sanity check, check that the utilities this script depends on, are in PATH
+for i in sed help2man; do
+ if ! which $i > /dev/null 2>&1; then
+ error "$0: This script needs $i, which is not in your PATH."
+ error "$0: Fix PATH or install before running this script!"
+ exit 1
+ fi
+done
+
+tmp=$(mktemp -d)
+
+# Run sqfscat -help, and output the help text to
+# $tmp/sqfscat.help. This is to allow it to be modified before
+# passing to help2man.
+
+if ! $1/sqfscat -help > $tmp/sqfscat.help; then
+ error "$0: Running Sqfscat failed. Cross-compiled or incompatible binary?"
+ exit 1
+fi
+
+# Run sqfscat -version, and output the version text to
+# $tmp/sqfscat.version. This is to allow it to be modified before
+# passing to help2man.
+
+$1/sqfscat -version > $tmp/sqfscat.version
+
+# Create a dummy executable in $tmp, which outputs $tmp/sqfscat.help
+# and $tmp/sqfscat.version. This gets around the fact help2man wants
+# to pass --help and --version directly to sqfscat, rather than take the
+# (modified) output from $tmp/sqfscat.help and $tmp/sqfscat.version
+
+print "#!/bin/sh
+if [ \$1 = \"--help\" ]; then
+ cat $tmp/sqfscat.help
+else
+ cat $tmp/sqfscat.version
+fi" > $tmp/sqfscat.sh
+
+chmod u+x $tmp/sqfscat.sh
+
+# help2man gets confused by the version date returned by -version,
+# and includes it in the version string
+
+sed -i "s/ (.*)$//" $tmp/sqfscat.version
+
+# help2man expects copyright to have an upper-case C ...
+
+sed -i "s/^copyright/Copyright/" $tmp/sqfscat.version
+
+# help2man doesn't pick up the author from the version. Easiest to add
+# it here.
+
+print >> $tmp/sqfscat.version
+print "Written by Phillip Lougher <phillip@squashfs.org.uk>" >> $tmp/sqfscat.version
+
+# help2man expects "Usage: ", and so rename "SYNTAX:" to "Usage: "
+
+sed -i "s/^SYNTAX:/Usage: /" $tmp/sqfscat.help
+
+# Man pages expect the options to be in the "Options" section. So insert
+# Options section after Usage
+
+sed -i "/^Usage/a *OPTIONS*" $tmp/sqfscat.help
+
+# help2man expects options to start in the 2nd column
+
+sed -i "s/^\t-/ -/" $tmp/sqfscat.help
+
+# Split combined short-form/long-form options into separate short-form,
+# and long form, i.e.
+# -da[ta-queue] <size> becomes
+# -da <size>, -data-queue <size>
+
+sed -i "s/\([^ ][^ \[]*\)\[\([a-z-]*\)\] \(<[a-z-]*>\)/\1 \3, \1\2 \3/" $tmp/sqfscat.help
+sed -i "s/\([^ ][^ \[]*\)\[\([a-z-]*\)\]/\1, \1\2/" $tmp/sqfscat.help
+
+# help2man expects the options usage to be separated from the
+# option and operands text by at least 2 spaces.
+
+sed -i "s/\t/ /g" $tmp/sqfscat.help
+
+# Uppercase the options operands (between < and > ) to make it conform
+# more to man page standards
+
+sed -i "s/<[^>]*>/\U&/g" $tmp/sqfscat.help
+
+# Remove the "<" and ">" around options operands to make it conform
+# more to man page standards
+
+sed -i -e "s/<//g" -e "s/>//g" $tmp/sqfscat.help
+
+# help2man doesn't deal well with the list of supported compressors.
+# So concatenate them onto one line with commas
+
+sed -i "/^Decompressors available:/ {
+n
+s/^ //
+
+: again
+N
+/\n$/b
+
+s/\n */, /
+b again
+}" $tmp/sqfscat.help
+
+# Concatenate the options text on to one line. Add a full stop to the end of the
+# options text
+
+sed -i "/^ -/ {
+:again
+N
+/\n$/b print
+/\n -/b print
+s/\n */ /
+b again
+
+:print
+s/\([^.]\)\n/\1.\n/
+P
+s/^.*\n//
+/^ -/b again
+}" $tmp/sqfscat.help
+
+# Concatenate the exit status text on to one line.
+
+sed -i "/^ [012]/ {
+:again
+N
+/\n$/b print
+/\n [012]/b print
+s/\n */ /
+b again
+
+:print
+P
+s/^.*\n//
+/^ [012]/b again
+}" $tmp/sqfscat.help
+
+# Make Decompressors available header into a manpage section
+
+sed -i "s/\(Decompressors available\):/*\1*/" $tmp/sqfscat.help
+
+# Make Exit status header into a manpage section
+
+sed -i "s/\(Exit status\):/*\1*/" $tmp/sqfscat.help
+
+# Add reference to manpages for other squashfs-tools programs
+sed -i "s/See also:/See also:\nmksquashfs(1), unsquashfs(1), sqfstar(1)\n/" $tmp/sqfscat.help
+
+# Make See also header into a manpage section
+
+sed -i "s/\(See also\):/*\1*/" $tmp/sqfscat.help
+
+if ! help2man -Ni sqfscat.h2m -o $2 $tmp/sqfscat.sh; then
+ error "$0: help2man returned error. Aborting"
+ exit 1
+fi
+
+rm -rf $tmp
diff --git a/generate-manpages/sqfscat.h2m b/generate-manpages/sqfscat.h2m
new file mode 100644
index 0000000..b6a8acc
--- /dev/null
+++ b/generate-manpages/sqfscat.h2m
@@ -0,0 +1,29 @@
+[Name]
+sqfscat - tool to cat files from a squashfs filesystem to stdout
+
+[Description]
+Squashfs is a highly compressed read-only filesystem for Linux.
+It uses gzip/xz/lzo/lz4/zstd compression to compress files, inodes
+and directories. Inodes in the system are very small and all blocks are
+packed to minimise data overhead. Block sizes greater than 4K are supported
+up to a maximum of 1Mbytes (default block size 128K).
+
+Squashfs is intended for general read-only filesystem use, for archival
+use (i.e. in cases where a .tar.gz file may be used), and in constrained
+block device/memory systems (e.g. embedded systems) where low overhead is
+needed.
+
+[Examples]
+.TP
+sqfscat IMAGE.SQFS hello
+Output the contents of "hello" to stdout.
+.TP
+sqfscat IMAGE.SQFS hello world
+Output the contents of "hello" and then "world" to stdout.
+.TP
+sqfscat IMAGE.SQFS "*.[ch]"
+Output the contents of all the files in the root directory that match the
+wildcard *.[ch], to stdout, e.g. hello.c, hello.h, world.c, world.h.
+.PP
+Note: when passing wildcarded names to Sqfscat, they should be quoted (as in
+the above examples), to ensure that they are not processed by the shell.
diff --git a/generate-manpages/sqfstar-manpage.sh b/generate-manpages/sqfstar-manpage.sh
new file mode 100755
index 0000000..c37f695
--- /dev/null
+++ b/generate-manpages/sqfstar-manpage.sh
@@ -0,0 +1,241 @@
+#!/bin/sh
+
+# This script generates a manpage from the sqfstar -help and -version
+# output, using help2man. The script does various modifications to the
+# output from -help and -version, before passing it to help2man, to allow
+# it to be successfully processed into a manpage by help2man.
+
+if [ ! -f functions.sh ]; then
+ echo "$0: this script should be run in the <git-root/source-root>/generate-manpages directory" >&2
+ exit 1
+fi
+
+. ./functions.sh
+
+if [ $# -lt 2 ]; then
+ error "$0: Insufficient arguments"
+ error "$0: <path to sqfstar> <output file>"
+ exit 1
+fi
+
+# Sanity check, ensure $1 points to a directory with a runnable Sqfstar
+if [ ! -x $1/sqfstar ]; then
+ error "$0: <arg1> doesn't point to a directory with Sqfstar in it!"
+ error "$0: <arg1> should point to the directory with the Sqfstar" \
+ "you want to generate a manpage for."
+ exit 1
+fi
+
+# Sanity check, check that the utilities this script depends on, are in PATH
+for i in expand sed help2man; do
+ if ! which $i > /dev/null 2>&1; then
+ error "$0: This script needs $i, which is not in your PATH."
+ error "$0: Fix PATH or install before running this script!"
+ exit 1
+ fi
+done
+
+tmp=$(mktemp -d)
+
+# Run sqfstar -help, expand TABS to spaces, and output the help text to
+# $tmp/sqfstar.help. This is to allow it to be modified before
+# passing to help2man.
+
+if ! $1/sqfstar -help > $tmp/sqfstar.help2; then
+ error "$0: Running Sqfstar failed. Cross-compiled or incompatible binary?"
+ exit 1
+fi
+
+expand $tmp/sqfstar.help2 > $tmp/sqfstar.help
+
+
+# Run sqfstar -version, and output the version text to
+# $tmp/sqfstar.version. This is to allow it to be modified before
+# passing to help2man.
+
+$1/sqfstar -version > $tmp/sqfstar.version
+
+# Create a dummy executable in $tmp, which outputs $tmp/sqfstar.help
+# and $tmp/sqfstar.version. This gets around the fact help2man wants
+# to pass --help and --version directly to sqfstar, rather than take the
+# (modified) output from $tmp/sqfstar.help and $tmp/sqfstar.version
+
+print "#!/bin/sh
+if [ \$1 = \"--help\" ]; then
+ cat $tmp/sqfstar.help
+else
+ cat $tmp/sqfstar.version
+fi" > $tmp/sqfstar.sh
+
+chmod u+x $tmp/sqfstar.sh
+
+# help2man gets confused by the version date returned by -version,
+# and includes it in the version string
+
+sed -i "s/ (.*)$//" $tmp/sqfstar.version
+
+# help2man expects copyright to have an upper-case C ...
+
+sed -i "s/^copyright/Copyright/" $tmp/sqfstar.version
+
+# help2man doesn't pick up the author from the version. Easiest to add
+# it here.
+
+print >> $tmp/sqfstar.version
+print "Written by Phillip Lougher <phillip@squashfs.org.uk>" >> $tmp/sqfstar.version
+
+# Man pages expect the options to be in the "Options" section. So insert
+# Options section after first line
+
+sed -i "1a *OPTIONS*" $tmp/sqfstar.help
+
+# Delete the first line, as this is being replaced by a section included
+# from sqfstar.h2m
+
+sed -i "1d" $tmp/sqfstar.help
+
+# help2man expects options to start in the 2nd column
+
+sed -i "s/^-/ -/" $tmp/sqfstar.help
+sed -i "s/^ *-X/ -X/" $tmp/sqfstar.help
+
+# help2man expects the options usage to be separated from the
+# option and operands text by at least 2 spaces.
+
+sed -i -e "s/regex> exclude/regex> exclude/" -e "s/regex> include/regex> include/" $tmp/sqfstar.help
+
+# Uppercase the options operands (between < and > ) to make it conform
+# more to man page standards
+
+sed -i "s/<[^>]*>/\U&/g" $tmp/sqfstar.help
+
+# Remove the "<" and ">" around options operands to make it conform
+# more to man page standards
+
+sed -i -e "s/<//g" -e "s/>//g" $tmp/sqfstar.help
+
+# help2man doesn't deal well with the list of supported compressors.
+# So concatenate them onto one line with commas
+
+sed -i "/^ -comp/ {
+N
+s/\n */. /
+s/:/: /
+
+N
+s/\n *\([^ ]*$\)/\1/
+s/\n *\([^ ]* (default)$\)/\1/
+
+: again
+N
+/\n -noI/b
+
+s/\n *\([^ ]*$\)/, \1/
+s/\n *\([^ ]* (default)$\)/, \1/
+b again
+}" $tmp/sqfstar.help
+
+# help2man doesn't deal well with the list of lzo1* algorithms.
+# So concatenate them onto one line with commas
+
+sed -i "/^ *lzo1x_1/ {
+s/\n *\([^ ]*$\)/\1/
+s/\n *\([^ ]* (default)$\)/\1/
+
+: again
+N
+/\n *lzo/!b
+
+s/\n *\([^ ]*$\)/, \1/
+s/\n *\([^ ]* (default)$\)/, \1/
+b again
+}" $tmp/sqfstar.help
+
+# Make the pseudo file definitions into "options" so they're handled
+# properly by help2man
+
+sed -i "s/^\"filename/ -p \"filename/" $tmp/sqfstar.help
+
+# Make each compressor entry in the compressors available section, a subsection
+# First, have to deal with the deprecated lzma compressor separately, because
+# it doesn't have any options (i.e. text prefixed with -).
+
+sed -i "/^ *lzma/ {
+s/^ *\(lzma.*$\)/\1:/
+n
+s/^ */ /
+} " $tmp/sqfstar.help
+
+# Now deal with the others
+
+sed -i -e "s/^ *\(gzip.*$\)/\1:/" -e "s/^ *\(lzo$\)/\1:/" \
+ -e "s/^ *\(lzo (default)$\)/\1:/" -e "s/^ *\(lz4.*$\)/\1:/" \
+ -e "s/^ *\(xz.*$\)/\1:/" -e "s/^ *\(zstd.*$\)/\1:/" \
+ $tmp/sqfstar.help
+
+# Concatenate the options text (normal options and compressor options) on to one
+# line. Add a full stop to the end of the options text
+
+sed -i "/^ -/ {
+:option
+s/^ *-/ -/
+
+/ -.* /!s/.$/& /
+
+:again
+N
+/\n$/b print
+/\n[^ ]/b print
+/\n -/b print
+s/\n */ /
+b again
+
+:print
+s/ \n/.\n/
+s/\([^.]\)\n/\1.\n/
+P
+s/^.*\n//
+/^ *-/b option
+}" $tmp/sqfstar.help
+
+# Concatenate the SOURCE_DATE_EPOCH text on to one line. Indent the line by
+# two and add a full stop to the end of the line
+
+sed -i " /SOURCE_DATE_EPOCH/ {
+s/SOURCE_DATE_EPOCH/ SOURCE_DATE_EPOCH/
+
+:again
+N
+/\n$/b print
+s/\n */ /
+b again
+
+:print
+s/\([^.]\)\n/\1.\n/
+}" $tmp/sqfstar.help
+
+# Make Compressors available header into a manpage section
+
+sed -i "s/\(Compressors available and compressor specific options\):/*\1*/" $tmp/sqfstar.help
+
+# Make pseudo definition format header into a manpage section
+
+sed -i "s/\(Pseudo file definition format\):/*\1*/" $tmp/sqfstar.help
+
+# Add reference to manpages for other squashfs-tools programs
+sed -i "s/See also:/See also:\nmksquashfs(1), unsquashfs(1), sqfscat(1)\n/" $tmp/sqfstar.help
+
+# Make See also header into a manpage section
+
+sed -i "s/\(See also\):/*\1*/" $tmp/sqfstar.help
+
+# Make Environment header into a manpage section
+
+sed -i "s/\(Environment\):/*\1*/" $tmp/sqfstar.help
+
+if ! help2man -Ni sqfstar.h2m -o $2 $tmp/sqfstar.sh; then
+ error "$0: help2man returned error. Aborting"
+ exit 1
+fi
+
+rm -rf $tmp
diff --git a/generate-manpages/sqfstar.h2m b/generate-manpages/sqfstar.h2m
new file mode 100644
index 0000000..b1d1c3e
--- /dev/null
+++ b/generate-manpages/sqfstar.h2m
@@ -0,0 +1,84 @@
+[Name]
+sqfstar - tool to create a squashfs filesystem from a tar archive
+
+[Synopsis]
+ cat xxx.tar | sqfstar [OPTIONS] FILESYSTEM [exclude files]
+ zcat xxx.tgz | sqfstar [OPTIONS] FILESYSTEM [exclude files]
+ xzcat xxx.tar.xz | sqfstar [OPTIONS] FILESYSTEM [exclude files]
+ zstdcat xxx.tar.zst | sqfstar [OPTIONS] FILESYSTEM [exclude files]
+
+[Description]
+Squashfs is a highly compressed read-only filesystem for Linux.
+It uses gzip/xz/lzo/lz4/zstd compression to compress files, inodes
+and directories. Inodes in the system are very small and all blocks are
+packed to minimise data overhead. Block sizes greater than 4K are supported
+up to a maximum of 1Mbytes (default block size 128K).
+
+Squashfs is intended for general read-only filesystem use, for archival
+use (i.e. in cases where a .tar.gz file may be used), and in constrained
+block device/memory systems (e.g. embedded systems) where low overhead is
+needed.
+
+[Examples]
+.TP
+sqfstar IMAGE.SQFS < archive.tar
+Create a Squashfs filesystem from the uncompressed tar file "archive.tar".
+Sqfstar will use the default compressor (normally gzip), and block size of 128
+Kbytes.
+.TP
+zcat archive.tgz | sqfstar IMAGE.SQFS
+Create a Squashfs filesystem from the compressed tar file "archive.tgz". Sqfstar
+will use the default compressor (normally gzip), and block size of 128 Kbytes.
+.TP
+sqfstar -b 1M -comp zstd IMAGE.SQFS < archive.tar
+Use a block size of 1 Mbyte and Zstandard compression to create the filesystem.
+.TP
+sqfstar -root-uid 0 -root-gid 0 IMAGE.SQFS < archive.tar
+Tar files do not supply a definition for the root directory, and the default is
+to make the directory owned/group owned by the user running Sqfstar. The above
+command sets the ownership/group ownership to root.
+.TP
+sqfstar -root-mode 0755 IMAGE.SQFS < archive.tar
+The default permissions for the root directory are 0777 (rwxrwxrwx). The above
+command sets the permissions to 0755 (rwxr-xr-x).
+.TP
+sqfstar IMAGE.SQFS file1 file2 < archive.tar
+Exclude file1 and file2 from the tar file when creating filesystem.
+.TP
+sqfstar IMAGE.SQFS "*.gz" < archive.tar
+Exclude any files in the top level directory which matches the wildcard pattern
+"*.gz".
+.TP
+sqfstar IMAGE.SQFS "... *.gz" < archive.tar
+Exclude any file which matches the wildcard pattern "*.gz" anywhere within the
+tar file. The initial "..." indicates the wildcard pattern is "non-anchored"
+and will match anywhere.
+.PP
+Note: when passing wildcarded names to Sqfstar, they should be quoted (as in
+the above examples), to ensure that they are not processed by the shell.
+
+.SS Using pseudo file definitions
+.TP
+sqfstar -p "build_dir d 0644 0 0" IMAGE.SQFS < archive.tar
+Create a directory called "build_dir" in the output filesystem.
+.TP
+sqfstar -p "version.txt l /tmp/build/version" IMAGE.SQFS < archive.tar
+Create a reference called "version.txt" to a file not in the tar archive,
+which acts as if that file was in the tar archive.
+.TP
+sqfstar -p "date.txt f 0644 0 0 date" IMAGE.SQFS < archive.tar
+Create a file called "date.txt" which holds the output (stdout) from running
+the "date" command.
+.TP
+sqfstar -p "\\"hello world\\" f 0644 0 0 date" IMAGE.SQFS < archive.tar
+As above, but showing that filenames can have spaces, if they are quoted.
+The quotes need to be backslashed to protect them from the shell.
+.TP
+sqfstar -p "input f 0644 root root dd if=/dev/sda1 bs=1024" IMAGE.SQFS < archive.tar
+Create a file containing the contents of the partition /dev/sda1. The above allows
+input from these special files to be captured and placed in the Squashfs
+filesystem.
+.PP
+Note: pseudo file definitions should be quoted (as in the above examples), to
+ensure that they are passed to Sqfstar as a single argument, and to ensure
+that they are not processed by the shell.
diff --git a/generate-manpages/unsquashfs-manpage.sh b/generate-manpages/unsquashfs-manpage.sh
new file mode 100755
index 0000000..ea8609b
--- /dev/null
+++ b/generate-manpages/unsquashfs-manpage.sh
@@ -0,0 +1,189 @@
+#!/bin/sh
+
+# This script generates a manpage from the unsquashfs -help and -version
+# output, using help2man. The script does various modifications to the
+# output from -help and -version, before passing it to help2man, to allow
+# it to be successfully processed into a manpage by help2man.
+
+if [ ! -f functions.sh ]; then
+ echo "$0: this script should be run in the <git-root/source-root>/generate-manpages directory" >&2
+ exit 1
+fi
+
+. ./functions.sh
+
+if [ $# -lt 2 ]; then
+ error "$0: Insufficient arguments"
+ error "$0: <path to unsquashfs> <output file>"
+ exit 1
+fi
+
+# Sanity check, ensure $1 points to a directory with a runnable Unsquashfs
+if [ ! -x $1/unsquashfs ]; then
+ error "$0: <arg1> doesn't point to a directory with Unsquashfs in it!"
+ error "$0: <arg1> should point to the directory with the Unsquashfs" \
+ "you want to generate a manpage for."
+ exit 1
+fi
+
+# Sanity check, check that the utilities this script depends on, are in PATH
+for i in sed help2man; do
+ if ! which $i > /dev/null 2>&1; then
+ error "$0: This script needs $i, which is not in your PATH."
+ error "$0: Fix PATH or install before running this script!"
+ exit 1
+ fi
+done
+
+tmp=$(mktemp -d)
+
+# Run unsquashfs -help, and output the help text to
+# $tmp/unsquashfs.help. This is to allow it to be modified before
+# passing to help2man.
+
+if ! $1/unsquashfs -help > $tmp/unsquashfs.help; then
+ error "$0: Running Unsquashfs failed. Cross-compiled or incompatible binary?"
+ exit 1
+fi
+
+# Run unsquashfs -version, and output the version text to
+# $tmp/unsquashfs.version. This is to allow it to be modified before
+# passing to help2man.
+
+$1/unsquashfs -version > $tmp/unsquashfs.version
+
+# Create a dummy executable in $tmp, which outputs $tmp/unsquashfs.help
+# and $tmp/unsquashfs.version. This gets around the fact help2man wants
+# to pass --help and --version directly to unsquashfs, rather than take the
+# (modified) output from $tmp/unsquashfs.help and $tmp/unsquashfs.version
+
+print "#!/bin/sh
+if [ \$1 = \"--help\" ]; then
+ cat $tmp/unsquashfs.help
+else
+ cat $tmp/unsquashfs.version
+fi" > $tmp/unsquashfs.sh
+
+chmod u+x $tmp/unsquashfs.sh
+
+# help2man gets confused by the version date returned by -version,
+# and includes it in the version string
+
+sed -i "s/ (.*)$//" $tmp/unsquashfs.version
+
+# help2man expects copyright to have an upper-case C ...
+
+sed -i "s/^copyright/Copyright/" $tmp/unsquashfs.version
+
+# help2man doesn't pick up the author from the version. Easiest to add
+# it here.
+
+print >> $tmp/unsquashfs.version
+print "Written by Phillip Lougher <phillip@squashfs.org.uk>" >> $tmp/unsquashfs.version
+
+# help2man expects "Usage: ", and so rename "SYNTAX:" to "Usage: "
+
+sed -i "s/^SYNTAX:/Usage: /" $tmp/unsquashfs.help
+
+# Man pages expect the options to be in the "Options" section. So insert
+# Options section after Usage
+
+sed -i "/^Usage/a *OPTIONS*" $tmp/unsquashfs.help
+
+# help2man expects options to start in the 2nd column
+
+sed -i "s/^\t-/ -/" $tmp/unsquashfs.help
+
+# Split combined short-form/long-form options into separate short-form,
+# and long form, i.e.
+# -da[ta-queue] <size> becomes
+# -da <size>, -data-queue <size>
+
+sed -i "s/\([^ ][^ \[]*\)\[\([a-z-]*\)\] \(<[a-z-]*>\)/\1 \3, \1\2 \3/" $tmp/unsquashfs.help
+sed -i "s/\([^ ][^ \[]*\)\[\([a-z-]*\)\]/\1, \1\2/" $tmp/unsquashfs.help
+
+# help2man expects the options usage to be separated from the
+# option and operands text by at least 2 spaces.
+
+sed -i "s/\t/ /g" $tmp/unsquashfs.help
+
+# Uppercase the options operands (between < and > ) to make it conform
+# more to man page standards
+
+sed -i "s/<[^>]*>/\U&/g" $tmp/unsquashfs.help
+
+# Remove the "<" and ">" around options operands to make it conform
+# more to man page standards
+
+sed -i -e "s/<//g" -e "s/>//g" $tmp/unsquashfs.help
+
+# help2man doesn't deal well with the list of supported compressors.
+# So concatenate them onto one line with commas
+
+sed -i "/^Decompressors available:/ {
+n
+s/^ //
+
+: again
+N
+/\n$/b
+
+s/\n */, /
+b again
+}" $tmp/unsquashfs.help
+
+# Concatenate the options text on to one line. Add a full stop to the end of
+# the options text
+
+sed -i "/^ -/ {
+:again
+N
+/\n$/b print
+/\n -/b print
+s/\n */ /
+b again
+
+:print
+s/\([^.]\)\n/\1.\n/
+P
+s/^.*\n//
+/^ -/b again
+}" $tmp/unsquashfs.help
+
+# Concatenate the exit status text on to one line.
+
+sed -i "/^ [012]/ {
+:again
+N
+/\n$/b print
+/\n [012]/b print
+s/\n */ /
+b again
+
+:print
+P
+s/^.*\n//
+/^ [012]/b again
+}" $tmp/unsquashfs.help
+
+# Make Decompressors available header into a manpage section
+
+sed -i "s/\(Decompressors available\):/*\1*/" $tmp/unsquashfs.help
+
+# Make Exit status header into a manpage section
+
+sed -i "s/\(Exit status\):/*\1*/" $tmp/unsquashfs.help
+
+# Add reference to manpages for other squashfs-tools programs
+sed -i "s/See also:/See also:\nmksquashfs(1), sqfstar(1), sqfscat(1)\n/" $tmp/unsquashfs.help
+
+# Make See also header into a manpage section
+
+sed -i "s/\(See also\):/*\1*/" $tmp/unsquashfs.help
+
+if ! help2man -Ni unsquashfs.h2m -o $2 $tmp/unsquashfs.sh; then
+ error "$0: help2man returned error. Aborting"
+ exit 1
+fi
+
+rm -rf $tmp
diff --git a/generate-manpages/unsquashfs.h2m b/generate-manpages/unsquashfs.h2m
new file mode 100644
index 0000000..edbf165
--- /dev/null
+++ b/generate-manpages/unsquashfs.h2m
@@ -0,0 +1,89 @@
+[Name]
+unsquashfs - tool to uncompress, extract and list squashfs filesystems
+
+[Description]
+Squashfs is a highly compressed read-only filesystem for Linux.
+It uses gzip/xz/lzo/lz4/zstd compression to compress files, inodes
+and directories. Inodes in the system are very small and all blocks are
+packed to minimise data overhead. Block sizes greater than 4K are supported
+up to a maximum of 1Mbytes (default block size 128K).
+
+Squashfs is intended for general read-only filesystem use, for archival
+use (i.e. in cases where a .tar.gz file may be used), and in constrained
+block device/memory systems (e.g. embedded systems) where low overhead is
+needed.
+
+[Examples]
+.TP
+unsquashfs IMAGE.SQFS
+Extract IMAGE.SQFS to "squashfs-root" in the current working directory.
+.TP
+unsquashfs -d output IMAGE.SQFS
+Extract IMAGE.SQFS to "output" in the current working directory.
+.TP
+unsquashfs -d . IMAGE.SQFS
+Extract IMAGE.SQFS to current working directory.
+.TP
+unsquashfs -linfo IMAGE.SQFS
+Output a listing of IMAGE.SQFS with file attributes to stdout, while extracting
+the filesystem to "squashfs-root".
+.TP
+unsquashfs -lls IMAGE.SQFS
+Output a listing of IMAGE.SQFS with file attributes to stdout, but do not
+extract the filesystem. The listing will be prefixed with "squashfs-root".
+.TP
+unsquashfs -d "" -lls IMAGE.SQFS
+Output a listing of IMAGE.SQFS with file attributes to stdout, but do not
+extract the filesystem. The listing will not be prefixed with "squashfs-root".
+.TP
+unsquashfs IMAGE.SQFS fs/squashfs
+Extract only the "fs/squashfs" directory.
+.TP
+unsquashfs IMAGE.SQFS "[Tt]est/example*"
+Extract all files beginning with "example" inside top level directories
+called "Test" or "test".
+.TP
+unsquashfs -excludes IMAGE.SQFS "test/*data*.gz"
+This will extract everything except for files that match *data*.gz in the
+test directory. The -excludes option tells Unsquashfs to exclude the files
+on the command line rather than extract them.
+.TP
+unsquashfs -excludes IMAGE.SQFS "... *.gz"
+This will extract everything except for files that match *.gz anywhere
+in the image. The "..." means this is a non-anchored exclude which
+matches anywhere.
+.TP
+unsquashfs -ex "test/*data*.gz" \; IMAGE.SQFS test
+This uses both extract and exclude options, to tell Unsquashfs to only
+extract the "test" directory, and to exclude any files within it that
+match *data*.gz.
+.TP
+unsquashfs -ex "... *.gz" IMAGE.SQFS test
+This uses both extract and exclude options, to tell Unsquashfs to only
+extract the "test" directory, and to exclude files which match "*.gz"
+anywhere within "test" directory or sub-directories.
+.TP
+unsquashfs -dest output -max-depth 2 IMAGE.SQFS
+Extract only the top two levels of IMAGE.SQFS to "output" directory.
+.TP
+unsquashfs -max-depth 2 IMAGE.SQFS "test/*.gz"
+Only extract the gzipped files in the test directory.
+.TP
+unsquashfs -llc -max-depth 2 IMAGE.SQFS "test/*.gz"
+Output a listing of the gzipped files in the test directory to stdout,
+but do not extract them.
+.TP
+unsquashfs -no-xattrs IMAGE.SQFS
+Do not extract any extended attributes. Any extended attributes in the
+filesystem will be ignored.
+.TP
+unsquashfs -xattrs-include "^user." IMAGE.SQFS
+Filter the extended attributes and only extract extended attributes in the
+user namespace from the Squashfs filesystem.
+.TP
+unsquashfs -xattrs-exclude "^user." IMAGE.SQFS
+Filter the extended attributes and do not extract any extended attributes in
+the user namespace from the Squashfs filesystem.
+.PP
+Note: when passing wildcarded names to Unsquashfs, they should be quoted (as in
+the above examples), to ensure that they are not processed by the shell.
diff --git a/manpages/.gitattributes b/manpages/.gitattributes
new file mode 100644
index 0000000..9a546c2
--- /dev/null
+++ b/manpages/.gitattributes
@@ -0,0 +1 @@
+README !export-ignore
diff --git a/manpages/README b/manpages/README
new file mode 100644
index 0000000..a80b917
--- /dev/null
+++ b/manpages/README
@@ -0,0 +1,13 @@
+These pre-generated manpages are built using the Squashfs-tools Makefile
+defaults with the gzip, lzo, lz4, xz, and zstd compression algorithms
+selected.
+
+The manpages are intended to be used by the installer if help2man isn't
+available on the system.
+
+But, they can obviously also be viewed by anyone who doesn't want to build
+or install Squashfs-tools, by running man -l, i.e.
+
+% man -l mksquashfs.1
+
+will display the manpage for Mksquashfs.
diff --git a/manpages/mksquashfs.1 b/manpages/mksquashfs.1
new file mode 100644
index 0000000..47377f4
--- /dev/null
+++ b/manpages/mksquashfs.1
@@ -0,0 +1,554 @@
+.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.48.5.
+.TH MKSQUASHFS "1" "March 2023" "mksquashfs version 4.6.1" "User Commands"
+.SH NAME
+mksquashfs - tool to create and append to squashfs filesystems
+.SH SYNOPSIS
+.B mksquashfs
+\fI\,source1 source2 \/\fR... \fI\,FILESYSTEM \/\fR[\fI\,OPTIONS\/\fR] [\fI\,-e list of exclude dirs/files\/\fR]
+.SH DESCRIPTION
+Squashfs is a highly compressed read-only filesystem for Linux.
+It uses either gzip/xz/lzo/lz4/zstd compression to compress both files, inodes
+and directories. Inodes in the system are very small and all blocks are
+packed to minimise data overhead. Block sizes greater than 4K are supported
+up to a maximum of 1Mbytes (default block size 128K).
+
+Squashfs is intended for general read-only filesystem use, for archival
+use (i.e. in cases where a .tar.gz file may be used), and in constrained
+block device/memory systems (e.g. embedded systems) where low overhead is
+needed.
+.SH OPTIONS
+.SS "Filesystem compression options:"
+.TP
+\fB\-b\fR BLOCK_SIZE
+set data block to BLOCK_SIZE. Default 128 Kbytes. Optionally a suffix of K or M can be given to specify Kbytes or Mbytes respectively.
+.TP
+\fB\-comp\fR COMP
+select COMP compression. Compressors available: gzip (default), lzo, lz4, xz, zstd.
+.TP
+\fB\-noI\fR
+do not compress inode table.
+.TP
+\fB\-noId\fR
+do not compress the uid/gid table (implied by \fB\-noI\fR).
+.TP
+\fB\-noD\fR
+do not compress data blocks.
+.TP
+\fB\-noF\fR
+do not compress fragment blocks.
+.TP
+\fB\-noX\fR
+do not compress extended attributes.
+.TP
+\fB\-no\-compression\fR
+do not compress any of the data or metadata. This is equivalent to specifying \fB\-noI\fR \fB\-noD\fR \fB\-noF\fR and \fB\-noX\fR.
+.SS "Filesystem build options:"
+.TP
+\fB\-tar\fR
+read uncompressed tar file from standard in (stdin).
+.TP
+\fB\-no\-strip\fR
+act like tar, and do not strip leading directories from source files.
+.TP
+\fB\-tarstyle\fR
+alternative name for \fB\-no\-strip\fR.
+.TP
+\fB\-cpiostyle\fR
+act like cpio, and read file pathnames from standard in (stdin).
+.TP
+\fB\-cpiostyle0\fR
+like \fB\-cpiostyle\fR, but filenames are null terminated. Can be used with find \fB\-print0\fR action.
+.TP
+\fB\-reproducible\fR
+build filesystems that are reproducible (default).
+.TP
+\fB\-not\-reproducible\fR
+build filesystems that are not reproducible.
+.TP
+\fB\-mkfs\-time\fR TIME
+set filesystem creation timestamp to TIME. TIME can be an unsigned 32\-bit int indicating seconds since the epoch (1970\-01\-01) or a string value which is passed to the "date" command to parse. Any string value which the date command recognises can be used such as "now", "last week", or "Wed Feb 15 21:02:39 GMT 2023".
+.TP
+\fB\-all\-time\fR TIME
+set all file timestamps to TIME. TIME can be an unsigned 32\-bit int indicating seconds since the epoch (1970\-01\-01) or a string value which is passed to the "date" command to parse. Any string value which the date command recognises can be used such as "now", "last week", or "Wed Feb 15 21:02:39 GMT 2023".
+.TP
+\fB\-root\-time\fR TIME
+set root directory time to TIME. TIME can be an unsigned 32\-bit int indicating seconds since the epoch (1970\-01\-01) or a string value which is passed to the "date" command to parse. Any string value which the date command recognises can be used such as "now", "last week", or "Wed Feb 15 21:02:39 GMT 2023".
+.TP
+\fB\-root\-mode\fR MODE
+set root directory permissions to octal MODE.
+.TP
+\fB\-root\-uid\fR VALUE
+set root directory owner to specified VALUE, VALUE can be either an integer uid or user name.
+.TP
+\fB\-root\-gid\fR VALUE
+set root directory group to specified VALUE, VALUE can be either an integer gid or group name.
+.TP
+\fB\-all\-root\fR
+make all files owned by root.
+.TP
+\fB\-force\-uid\fR VALUE
+set all file uids to specified VALUE, VALUE can be either an integer uid or user name.
+.TP
+\fB\-force\-gid\fR VALUE
+set all file gids to specified VALUE, VALUE can be either an integer gid or group name.
+.TP
+\fB\-pseudo\-override\fR
+make pseudo file uids and gids override \fB\-all\-root\fR, \fB\-force\-uid\fR and \fB\-force\-gid\fR options.
+.TP
+\fB\-no\-exports\fR
+do not make filesystem exportable via NFS (\fB\-tar\fR default).
+.TP
+\fB\-exports\fR
+make filesystem exportable via NFS (default).
+.TP
+\fB\-no\-sparse\fR
+do not detect sparse files.
+.TP
+\fB\-no\-tailends\fR
+do not pack tail ends into fragments (default).
+.TP
+\fB\-tailends\fR
+pack tail ends into fragments.
+.TP
+\fB\-no\-fragments\fR
+do not use fragments.
+.TP
+\fB\-no\-duplicates\fR
+do not perform duplicate checking.
+.TP
+\fB\-no\-hardlinks\fR
+do not hardlink files, instead store duplicates.
+.TP
+\fB\-keep\-as\-directory\fR
+if one source directory is specified, create a root directory containing that directory, rather than the contents of the directory.
+.SS "Filesystem filter options:"
+.TP
+\fB\-p\fR PSEUDO\-DEFINITION
+add pseudo file definition. The definition should be quoted.
+.TP
+\fB\-pf\fR PSEUDO\-FILE
+add list of pseudo file definitions from PSEUDO\-FILE, use \- for stdin. Pseudo file definitions should not be quoted.
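+For example (an illustrative sketch; the filenames are hypothetical), a PSEUDO\-FILE could contain the unquoted definitions:
+ build_dir d 0755 0 0
+ build_dir/date.txt f 0644 0 0 date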
+.TP
+\fB\-sort\fR SORT_FILE
+sort files according to priorities in SORT_FILE. One file or dir with priority per line. Priority \fB\-32768\fR to 32767, default priority 0.
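+For example (an illustrative sketch; the filenames and priorities are hypothetical), a SORT_FILE could contain:
+ boot/vmlinuz 32000
+ boot/initrd.img 32000
+ usr/share/doc -100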
+.TP
+\fB\-ef\fR EXCLUDE_FILE
+list of exclude dirs/files. One per line.
+.TP
+\fB\-wildcards\fR
+allow extended shell wildcards (globbing) to be used in exclude dirs/files.
+.TP
+\fB\-regex\fR
+allow POSIX regular expressions to be used in exclude dirs/files.
+.TP
+\fB\-max\-depth\fR LEVELS
+descend at most LEVELS of directories when scanning filesystem.
+.TP
+\fB\-one\-file\-system\fR
+do not cross filesystem boundaries. If a directory crosses the boundary, create an empty directory for each mount point. If a file crosses the boundary ignore it.
+.TP
+\fB\-one\-file\-system\-x\fR
+do not cross filesystem boundaries. Like \fB\-one\-file\-system\fR option except directories are also ignored if they cross the boundary.
+.SS "Filesystem extended attribute (xattrs) options:"
+.TP
+\fB\-no\-xattrs\fR
+do not store extended attributes.
+.TP
+\fB\-xattrs\fR
+store extended attributes (default).
+.TP
+\fB\-xattrs\-exclude\fR REGEX
+exclude any xattr names matching REGEX. REGEX is a POSIX regular expression, e.g. \fB\-xattrs\-exclude\fR '^user.' excludes xattrs from the user namespace.
+.TP
+\fB\-xattrs\-include\fR REGEX
+include any xattr names matching REGEX. REGEX is a POSIX regular expression, e.g. \fB\-xattrs\-include\fR '^user.' includes xattrs from the user namespace.
+.TP
+\fB\-xattrs\-add\fR NAME=VAL
+add the xattr NAME with VAL to files. If it is a user xattr, it will be added to regular files and directories (see man 7 xattr); otherwise it will be added to all files. By default VAL is treated as binary (i.e. an uninterpreted byte sequence), but it can be prefixed with 0s, in which case it is treated as base64 encoded, or prefixed with 0x, in which case it is treated as hexadecimal. Additionally it can be prefixed with 0t, which is similar to binary encoding except that backslashes are treated specially: a backslash followed by 3 octal digits can be used to encode any ASCII character, including control codes. The option can be repeated multiple times to add multiple xattrs.
+.SS "Mksquashfs runtime options:"
+.TP
+\fB\-version\fR
+print version, licence and copyright message.
+.TP
+\fB\-exit\-on\-error\fR
+treat normally ignored errors as fatal.
+.TP
+\fB\-quiet\fR
+no verbose output.
+.TP
+\fB\-info\fR
+print files written to filesystem.
+.TP
+\fB\-no\-progress\fR
+do not display the progress bar.
+.TP
+\fB\-progress\fR
+display progress bar when using the \fB\-info\fR option.
+.TP
+\fB\-percentage\fR
+display a percentage rather than the full progress bar. Can be used with dialog \fB\-\-gauge\fR etc.
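+For instance (an illustrative pipeline), the percentage output can be fed straight into a dialog gauge box:
+ mksquashfs DIRECTORY IMAGE.SQFS -percentage | dialog --gauge "Creating filesystem" 6 70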
+.TP
+\fB\-throttle\fR PERCENTAGE
+throttle the I/O input rate by the given percentage. This can be used to reduce the I/O and CPU consumption of Mksquashfs.
+.TP
+\fB\-limit\fR PERCENTAGE
+limit the I/O input rate to the given percentage. This can be used to reduce the I/O and CPU consumption of Mksquashfs (alternative to \fB\-throttle\fR).
+.TP
+\fB\-processors\fR NUMBER
+use NUMBER processors. By default will use number of processors available.
+.TP
+\fB\-mem\fR SIZE
+use SIZE physical memory for caches. Use K, M or G to specify Kbytes, Mbytes or Gbytes respectively.
+.TP
+\fB\-mem\-percent\fR PERCENT
+use PERCENT physical memory for caches. Default 25%.
+.TP
+\fB\-mem\-default\fR
+print default memory usage in Mbytes.
+.SS "Filesystem append options:"
+.TP
+\fB\-noappend\fR
+do not append to existing filesystem.
+.TP
+\fB\-root\-becomes\fR NAME
+when appending source files/directories, make the original root become a subdirectory in the new root called NAME, rather than adding the new source items to the original root.
+.TP
+\fB\-no\-recovery\fR
+do not generate a recovery file.
+.TP
+\fB\-recovery\-path\fR NAME
+use NAME as the directory to store the recovery file.
+.TP
+\fB\-recover\fR NAME
+recover filesystem data using recovery file NAME.
+.SS "Filesystem actions options:"
+.TP
+\fB\-action\fR ACTION@EXPRESSION
+evaluate EXPRESSION on every file, and execute ACTION if it returns TRUE.
+.TP
+\fB\-log\-action\fR ACTION@EXPRESSION
+as above, but log expression evaluation results and actions performed.
+.TP
+\fB\-true\-action\fR ACTION@EXPRESSION
+as above, but only log expressions which return TRUE.
+.TP
+\fB\-false\-action\fR ACTION@EXPRESSION
+as above, but only log expressions which return FALSE.
+.TP
+\fB\-action\-file\fR FILE
+as action, but read actions from FILE.
+.TP
+\fB\-log\-action\-file\fR FILE
+as \fB\-log\-action\fR, but read actions from FILE.
+.TP
+\fB\-true\-action\-file\fR FILE
+as \fB\-true\-action\fR, but read actions from FILE.
+.TP
+\fB\-false\-action\-file\fR FILE
+as \fB\-false\-action\fR, but read actions from FILE.
+.SS "Tar file only options:"
+.TP
+\fB\-default\-mode\fR MODE
+tar files often do not store permissions for intermediate directories. This option sets the default directory permissions to octal MODE, rather than 0755. This also sets the root inode mode.
+.TP
+\fB\-default\-uid\fR UID
+tar files often do not store uids for intermediate directories. This option sets the default directory owner to UID, rather than the user running Mksquashfs. This also sets the root inode uid.
+.TP
+\fB\-default\-gid\fR GID
+tar files often do not store gids for intermediate directories. This option sets the default directory group to GID, rather than the group of the user running Mksquashfs. This also sets the root inode gid.
+.TP
+\fB\-ignore\-zeros\fR
+allow tar files to be concatenated together and fed to Mksquashfs. Normally a tar file has two consecutive 512 byte blocks filled with zeros which signal EOF, and Mksquashfs will stop reading after the first tar file on encountering them. This option makes Mksquashfs ignore the zero filled blocks.
+.SS "Expert options (these may make the filesystem unmountable):"
+.TP
+\fB\-nopad\fR
+do not pad filesystem to a multiple of 4K.
+.TP
+\fB\-offset\fR OFFSET
+skip OFFSET bytes at the beginning of FILESYSTEM. Optionally a suffix of K, M or G can be given to specify Kbytes, Mbytes or Gbytes respectively. Default 0 bytes.
+.TP
+\fB\-o\fR OFFSET
+synonym for \fB\-offset\fR.
+.SS "Miscellaneous options:"
+.TP
+\fB\-fstime\fR TIME
+alternative name for \fB\-mkfs\-time\fR.
+.TP
+\fB\-always\-use\-fragments\fR
+alternative name for \fB\-tailends\fR.
+.TP
+\fB\-root\-owned\fR
+alternative name for \fB\-all\-root\fR.
+.TP
+\fB\-noInodeCompression\fR
+alternative name for \fB\-noI\fR.
+.TP
+\fB\-noIdTableCompression\fR
+alternative name for \fB\-noId\fR.
+.TP
+\fB\-noDataCompression\fR
+alternative name for \fB\-noD\fR.
+.TP
+\fB\-noFragmentCompression\fR
+alternative name for \fB\-noF\fR.
+.TP
+\fB\-noXattrCompression\fR
+alternative name for \fB\-noX\fR.
+.TP
+\fB\-help\fR
+output this options text to stdout.
+.TP
+\fB\-h\fR
+output this options text to stdout.
+.TP
+\fB\-Xhelp\fR
+print compressor options for selected compressor.
+.SH "PSEUDO FILE DEFINITION FORMAT"
+.TP
+\fB\-p\fR "filename d mode uid gid"
+create a directory.
+.TP
+\fB\-p\fR "filename m mode uid gid"
+modify filename.
+.TP
+\fB\-p\fR "filename b mode uid gid major minor"
+create a block device.
+.TP
+\fB\-p\fR "filename c mode uid gid major minor"
+create a character device.
+.TP
+\fB\-p\fR "filename f mode uid gid command"
+create file from stdout of command.
+.TP
+\fB\-p\fR "filename s mode uid gid symlink"
+create a symbolic link.
+.TP
+\fB\-p\fR "filename i mode uid gid [s|f]"
+create a socket (s) or FIFO (f).
+.TP
+\fB\-p\fR "filename x name=val"
+create an extended attribute.
+.TP
+\fB\-p\fR "filename l linkname"
+create a hard\-link to linkname.
+.TP
+\fB\-p\fR "filename L pseudo_filename"
+same, but link to pseudo file.
+.TP
+\fB\-p\fR "filename D time mode uid gid"
+create a directory with timestamp time.
+.TP
+\fB\-p\fR "filename M time mode uid gid"
+modify a file with timestamp time.
+.TP
+\fB\-p\fR "filename B time mode uid gid major minor"
+create block device with timestamp time.
+.TP
+\fB\-p\fR "filename C time mode uid gid major minor"
+create char device with timestamp time.
+.TP
+\fB\-p\fR "filename F time mode uid gid command"
+create file with timestamp time.
+.TP
+\fB\-p\fR "filename S time mode uid gid symlink"
+create symlink with timestamp time.
+.TP
+\fB\-p\fR "filename I time mode uid gid [s|f]"
+create socket/fifo with timestamp time.
+.SH "COMPRESSORS AVAILABLE AND COMPRESSOR SPECIFIC OPTIONS"
+.SS "gzip (default):"
+.TP
+\fB\-Xcompression\-level\fR COMPRESSION\-LEVEL
+COMPRESSION\-LEVEL should be 1 .. 9 (default 9).
+.TP
+\fB\-Xwindow\-size\fR WINDOW\-SIZE
+WINDOW\-SIZE should be 8 .. 15 (default 15).
+.TP
+\fB\-Xstrategy\fR strategy1,strategy2,...,strategyN
+Compress using strategy1,strategy2,...,strategyN in turn and choose the best compression. Available strategies: default, filtered, huffman_only, run_length_encoded and fixed.
+.SS "lzo:"
+.TP
+\fB\-Xalgorithm\fR ALGORITHM
+Where ALGORITHM is one of: lzo1x_1, lzo1x_1_11, lzo1x_1_12, lzo1x_1_15, lzo1x_999 (default).
+.TP
+\fB\-Xcompression\-level\fR COMPRESSION\-LEVEL
+COMPRESSION\-LEVEL should be 1 .. 9 (default 8). Only applies to the lzo1x_999 algorithm.
+.SS "lz4:"
+.TP
+\fB\-Xhc\fR
+Compress using LZ4 High Compression.
+.SS "xz:"
+.TP
+\fB\-Xbcj\fR filter1,filter2,...,filterN
+Compress using filter1,filter2,...,filterN in turn (in addition to no filter), and choose the best compression. Available filters: x86, arm, armthumb, powerpc, sparc, ia64.
+.TP
+\fB\-Xdict\-size\fR DICT\-SIZE
+Use DICT\-SIZE as the XZ dictionary size. The dictionary size can be specified as a percentage of the block size, or as an absolute value. The dictionary size must be less than or equal to the block size and 8192 bytes or larger. It must also be storable in the xz header as either 2^n or as 2^n+2^(n+1). Example dict\-sizes are 75%, 50%, 37.5%, 25%, or 32K, 16K, 8K etc.
+.SS "zstd:"
+.TP
+\fB\-Xcompression\-level\fR COMPRESSION\-LEVEL
+COMPRESSION\-LEVEL should be 1 .. 22 (default 15).
+.SH ENVIRONMENT
+.TP
+SOURCE_DATE_EPOCH
+If set, this is used as the filesystem creation timestamp. Also any file timestamps which are after SOURCE_DATE_EPOCH will be clamped to SOURCE_DATE_EPOCH. See https://reproducible\-builds.org/docs/source\-date\-epoch/ for more information.
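+As an illustrative sketch (assuming a POSIX shell), the variable can be set for a single Mksquashfs invocation to pin the filesystem timestamps, e.g. to 2023\-01\-01 00:00:00 UTC:
+ SOURCE_DATE_EPOCH=1672531200 mksquashfs DIRECTORY IMAGE.SQFS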
+.SH EXAMPLES
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS
+Create a Squashfs filesystem from the contents of DIRECTORY, writing the output
+to IMAGE.SQFS. Mksquashfs will use the default compressor (normally gzip), and a
+block size of 128 Kbytes.
+.TP
+mksquashfs DIRECTORY FILE1 FILE2 IMAGE.SQFS
+Create a Squashfs filesystem containing DIRECTORY and FILE1 and FILE2. If
+multiple sources are specified on the command line they will be combined into
+a single directory.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -b 1M -comp zstd
+Use a block size of 1 Mbyte and Zstandard compression to create the filesystem.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -e file1 file2
+Exclude file1 and file2 from DIRECTORY when creating filesystem. No wildcard
+matching of files.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -wildcards -e "*.gz"
+Exclude anything in DIRECTORY which matches the wildcard pattern "*.gz".
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -wildcards -e "... *.gz"
+Exclude files which match the wildcard pattern "*.gz" anywhere within DIRECTORY
+and its sub-directories. The initial "..." indicates the wildcard pattern is
+"non-anchored" and will match anywhere.
+.PP
+Note: when passing wildcarded names to Mksquashfs, they should be quoted (as in
+the above examples), to ensure that they are not processed by the shell.
+
+.SS Using pseudo file definitions
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -p "build_dir d 0644 0 0"
+Create a directory called "build_dir" in the output filesystem.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -p "version.txt l /tmp/build/version"
+Create a reference called "version.txt" to a file outside DIRECTORY, which acts
+as if the file "/tmp/build/version" was copied or hard-linked into DIRECTORY
+before calling Mksquashfs.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -p "date.txt f 0644 0 0 date"
+Create a file called "date.txt" which holds the output (stdout) from running
+the "date" command.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -p "\\"hello world\\" f 0644 0 0 date"
+As above, but showing that filenames can have spaces if they are quoted.
+The quotes need to be backslashed to protect them from the shell.
+.TP
+mksquashfs - IMAGE.SQFS -p "input f 0644 root root dd if=/dev/sda1 bs=1024" -p "/ d 0644 0 0"
+Create a file containing the contents of the partition /dev/sda1. Ordinarily,
+when Mksquashfs is given a device, fifo, or named socket it will place that
+special file within the Squashfs filesystem; the above instead allows the input
+from these special files to be captured and placed in the Squashfs filesystem.
+Note there are no sources other than the pseudo file, and so the command line
+source is "-". If there are no sources other than pseudo files, the root (/)
+directory must also be defined, as seen in this example.
+.TP
+unsquashfs -pf - IMAGE.SQFS | mksquashfs - NEW.SQFS -pf -
+Transcode IMAGE.SQFS to NEW.SQFS by piping the pseudo file output from
+Unsquashfs to Mksquashfs using stdout and stdin. This can convert from
+earlier Squashfs filesystems or change compression algorithm, block size etc.
+without needing to unpack into an intermediate directory or file.
+.PP
+Note: pseudo file definitions should be quoted (as in the above examples), to
+ensure that they are passed to Mksquashfs as a single argument, and to ensure
+that they are not processed by the shell.
+
+.SS Using extended attribute options
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -no-xattrs
+Do not store any extended attributes in the Squashfs filesystem. Any extended
+attributes in the source files will be ignored.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -xattrs-include "^user."
+Filter the extended attributes in the source files, and only store extended
+attributes in the user namespace in the Squashfs filesystem.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -xattrs-exclude "^user."
+Filter the extended attributes in the source files, and don't store any
+extended attributes in the user namespace in the Squashfs filesystem.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -xattrs-add "user.comment=hello world"
+Add the extended attribute called "user.comment" with the content "hello world"
+to all files and directories in the Squashfs filesystem.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -xattrs-add "user.comment=0thello world\\012"
+Add the extended attribute called "user.comment" to all files and directories,
+but in this case the contents of the extended attribute will be "hello world"
+with a trailing newline character (012 octal).
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -xattrs-add "user.comment=0saGVsbG8gd29ybGQ="
+Add the extended attribute called "user.comment" to all files and directories,
+where the value is given in base64 encoding, representing "hello world".
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -action "-xattrs-include(^user.) @ type(f)"
+Filter the extended attributes but only in regular files (type f), and only
+store extended attributes in the user namespace.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -p "hello_world x user.comment=0tsalve mundi\\012"
+Add the extended attribute called "user.comment" to the file called
+"hello_world", with the contents of the extended attribute being "salve mundi"
+with a trailing newline character (012 octal).
+
+.SS Using Actions to not compress, change attributes etc.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -action "uncompressed @ (name(*.jpg) || name(*.mpg) ) || (name(*.img) && filesize(+1G))"
+Specify that any files matching the wildcards "*.jpg" and "*.mpg" should not be
+compressed. Additionally, it specifies that any files which match the wildcard
+"*.img" and are larger than 1 Gigabyte should also be uncompressed. This shows
+that test operators can be combined with logical expressions.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -action "chmod(o+r)@! perm(o+r)"
+If any files within DIRECTORY are not readable by "others", then make them
+readable by others in the Squashfs filesystem.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -action "uid(phillip)@! perm(o+r)"
+As above, match any files which are not readable by "others", but in this
+case change the owner of the file to "phillip" in the Squashfs filesystem.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -action "prune @ type(l) && ! exists"
+Delete any symbolic link within DIRECTORY which points outside of DIRECTORY,
+i.e. which would be unresolvable in the Squashfs filesystem.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -action "exclude @ depth(3)"
+Create a Squashfs filesystem containing the two topmost levels (the contents of
+DIRECTORY and its immediate sub-directories), and exclude anything at level 3
+or below.
+.TP
+mksquashfs DIRECTORY IMAGE.SQFS -action "-xattrs-include(^user.) @ type(f)"
+Filter the extended attributes but only in regular files (type f), and only
+store extended attributes in the user namespace.
+.PP
+Note: actions should be quoted (as in the above examples), to ensure that they
+are passed to Mksquashfs as a single argument, and to ensure that they are not
+processed by the shell.
+.SH AUTHOR
+Written by Phillip Lougher <phillip@squashfs.org.uk>
+.SH COPYRIGHT
+Copyright \(co 2023 Phillip Lougher <phillip@squashfs.org.uk>
+.PP
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation; either version 2,
+or (at your option) any later version.
+.PP
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+.SH "SEE ALSO"
+unsquashfs(1), sqfstar(1), sqfscat(1)
+.PP
+The README for the Squashfs\-tools 4.6.1 release, describing the new features can be
+read here https://github.com/plougher/squashfs\-tools/blob/master/README\-4.6.1
+.PP
+The Squashfs\-tools USAGE guide can be read here
+https://github.com/plougher/squashfs\-tools/blob/master/USAGE\-4.6
+.PP
+The ACTIONS\-README file describing how to use the new actions feature can be
+read here https://github.com/plougher/squashfs\-tools/blob/master/ACTIONS\-README
diff --git a/manpages/sqfscat.1 b/manpages/sqfscat.1
new file mode 100644
index 0000000..9770739
--- /dev/null
+++ b/manpages/sqfscat.1
@@ -0,0 +1,103 @@
+.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.48.5.
+.TH SQFSCAT "1" "March 2023" "sqfscat version 4.6.1" "User Commands"
+.SH NAME
+sqfscat - tool to cat files from a squashfs filesystem to stdout
+.SH SYNOPSIS
+.B sqfscat
+[\fI\,OPTIONS\/\fR] \fI\,FILESYSTEM \/\fR[\fI\,list of files to cat to stdout\/\fR]
+.SH DESCRIPTION
+Squashfs is a highly compressed read-only filesystem for Linux.
+It uses gzip/xz/lzo/lz4/zstd compression to compress files, inodes and
+directories. Inodes in the system are very small and all blocks are
+packed to minimise data overhead. Block sizes greater than 4K are supported
+up to a maximum of 1 Mbyte (default block size 128K).
+
+Squashfs is intended for general read-only filesystem use, for archival
+use (i.e. in cases where a .tar.gz file may be used), and in constrained
+block device/memory systems (e.g. embedded systems) where low overhead is
+needed.
+.SH OPTIONS
+.TP
+\fB\-v\fR, \fB\-version\fR
+print version, licence and copyright information.
+.TP
+\fB\-p\fR NUMBER, \fB\-processors\fR NUMBER
+use NUMBER processors. By default will use the number of processors available.
+.TP
+\fB\-o\fR BYTES, \fB\-offset\fR BYTES
+skip BYTES at start of FILESYSTEM. Optionally a suffix of K, M or G can be given to specify Kbytes, Mbytes or Gbytes respectively (default 0 bytes).
+.TP
+\fB\-ig\fR, \fB\-ignore\-errors\fR
+treat errors writing files to stdout as non\-fatal.
+.TP
+\fB\-st\fR, \fB\-strict\-errors\fR
+treat all errors as fatal.
+.TP
+\fB\-no\-exit\fR, \fB\-no\-exit\-code\fR
+don't set exit code (to nonzero) on non\-fatal errors.
+.TP
+\fB\-da\fR SIZE, \fB\-data\-queue\fR SIZE
+set data queue to SIZE Mbytes. Default 256 Mbytes.
+.TP
+\fB\-fr\fR SIZE, \fB\-frag\-queue\fR SIZE
+set fragment queue to SIZE Mbytes. Default 256 Mbytes.
+.TP
+\fB\-no\-wild\fR, \fB\-no\-wildcards\fR
+do not use wildcard matching in filenames.
+.TP
+\fB\-r\fR, \fB\-regex\fR
+treat filenames as POSIX regular expressions rather than use the default shell wildcard expansion (globbing).
+.TP
+\fB\-h\fR, \fB\-help\fR
+output options text to stdout.
+.SH "DECOMPRESSORS AVAILABLE"
+gzip, lzo, lz4, xz, zstd
+.SH "EXIT STATUS"
+.TP
+0
+The file or files were output to stdout OK.
+.TP
+1
+FATAL errors occurred, e.g. filesystem corruption, I/O errors. Sqfscat did not continue and aborted.
+.TP
+2
+Non\-fatal errors occurred, e.g. not a regular file, or failed to resolve pathname. Sqfscat continued and did not abort.
+.PP
+See \fB\-ignore\-errors\fR, \fB\-strict\-errors\fR and \fB\-no\-exit\-code\fR options for how they affect
+the exit status.
+.SH EXAMPLES
+.TP
+sqfscat IMAGE.SQFS hello
+Output the contents of "hello" to stdout.
+.TP
+sqfscat IMAGE.SQFS hello world
+Output the contents of "hello" and then "world" to stdout.
+.TP
+sqfscat IMAGE.SQFS "*.[ch]"
+Output the contents of all the files in the root directory that match the
+wildcard *.[ch], to stdout, e.g. hello.c, hello.h, world.c, world.h.
+.PP
+Note: when passing wildcarded names to Sqfscat, they should be quoted (as in
+the above examples), to ensure that they are not processed by the shell.
+.SH AUTHOR
+Written by Phillip Lougher <phillip@squashfs.org.uk>
+.SH COPYRIGHT
+Copyright \(co 2023 Phillip Lougher <phillip@squashfs.org.uk>
+.PP
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation; either version 2,
+or (at your option) any later version.
+.PP
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+.SH "SEE ALSO"
+mksquashfs(1), unsquashfs(1), sqfstar(1)
+.PP
+The README for the Squashfs\-tools 4.6.1 release, describing the new features can be
+read here https://github.com/plougher/squashfs\-tools/blob/master/README\-4.6.1
+.PP
+The Squashfs\-tools USAGE guide can be read here
+https://github.com/plougher/squashfs\-tools/blob/master/USAGE\-4.6
diff --git a/manpages/sqfstar.1 b/manpages/sqfstar.1
new file mode 100644
index 0000000..7e32732
--- /dev/null
+++ b/manpages/sqfstar.1
@@ -0,0 +1,400 @@
+.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.48.5.
+.TH SQFSTAR "1" "March 2023" "sqfstar version 4.6.1" "User Commands"
+.SH NAME
+sqfstar - tool to create a squashfs filesystem from a tar archive
+.SH SYNOPSIS
+ cat xxx.tar | sqfstar [OPTIONS] FILESYSTEM [exclude files]
+ zcat xxx.tgz | sqfstar [OPTIONS] FILESYSTEM [exclude files]
+ xzcat xxx.tar.xz | sqfstar [OPTIONS] FILESYSTEM [exclude files]
+ zstdcat xxx.tar.zst | sqfstar [OPTIONS] FILESYSTEM [exclude files]
+.SH DESCRIPTION
+Squashfs is a highly compressed read-only filesystem for Linux.
+It uses gzip/xz/lzo/lz4/zstd compression to compress files, inodes and
+directories. Inodes in the system are very small and all blocks are
+packed to minimise data overhead. Block sizes greater than 4K are supported
+up to a maximum of 1 Mbyte (default block size 128K).
+
+Squashfs is intended for general read-only filesystem use, for archival
+use (i.e. in cases where a .tar.gz file may be used), and in constrained
+block device/memory systems (e.g. embedded systems) where low overhead is
+needed.
+.SH OPTIONS
+.SS "Filesystem compression options:"
+.TP
+\fB\-b\fR BLOCK_SIZE
+set data block to BLOCK_SIZE. Default 128 Kbytes. Optionally a suffix of K or M can be given to specify Kbytes or Mbytes respectively.
+.TP
+\fB\-comp\fR COMP
+select COMP compression. Compressors available: gzip (default), lzo, lz4, xz, zstd.
+.TP
+\fB\-noI\fR
+do not compress inode table.
+.TP
+\fB\-noId\fR
+do not compress the uid/gid table (implied by \fB\-noI\fR).
+.TP
+\fB\-noD\fR
+do not compress data blocks.
+.TP
+\fB\-noF\fR
+do not compress fragment blocks.
+.TP
+\fB\-noX\fR
+do not compress extended attributes.
+.TP
+\fB\-no\-compression\fR
+do not compress any of the data or metadata. This is equivalent to specifying \fB\-noI\fR \fB\-noD\fR \fB\-noF\fR and \fB\-noX\fR.
+.SS "Filesystem build options:"
+.TP
+\fB\-reproducible\fR
+build filesystems that are reproducible (default).
+.TP
+\fB\-not\-reproducible\fR
+build filesystems that are not reproducible.
+.TP
+\fB\-mkfs\-time\fR TIME
+set filesystem creation timestamp to TIME. TIME can be an unsigned 32\-bit int indicating seconds since the epoch (1970\-01\-01) or a string value which is passed to the "date" command to parse. Any string value which the date command recognises can be used such as "now", "last week", or "Wed Feb 15 21:02:39 GMT 2023".
+.TP
+\fB\-all\-time\fR TIME
+set all file timestamps to TIME. TIME can be an unsigned 32\-bit int indicating seconds since the epoch (1970\-01\-01) or a string value which is passed to the "date" command to parse. Any string value which the date command recognises can be used such as "now", "last week", or "Wed Feb 15 21:02:39 GMT 2023".
+.TP
+\fB\-root\-time\fR TIME
+set root directory time to TIME. TIME can be an unsigned 32\-bit int indicating seconds since the epoch (1970\-01\-01) or a string value which is passed to the "date" command to parse. Any string value which the date command recognises can be used such as "now", "last week", or "Wed Feb 15 21:02:39 GMT 2023".
+.TP
+\fB\-root\-mode\fR MODE
+set root directory permissions to octal MODE.
+.TP
+\fB\-root\-uid\fR VALUE
+set root directory owner to specified VALUE, VALUE can be either an integer uid or user name.
+.TP
+\fB\-root\-gid\fR VALUE
+set root directory group to specified VALUE, VALUE can be either an integer gid or group name.
+.TP
+\fB\-all\-root\fR
+make all files owned by root.
+.TP
+\fB\-force\-uid\fR VALUE
+set all file uids to specified VALUE, VALUE can be either an integer uid or user name.
+.TP
+\fB\-force\-gid\fR VALUE
+set all file gids to specified VALUE, VALUE can be either an integer gid or group name.
+.TP
+\fB\-default\-mode\fR MODE
+tar files often do not store permissions for intermediate directories. This option sets the default directory permissions to octal MODE, rather than 0755. This also sets the root inode mode.
+.TP
+\fB\-default\-uid\fR UID
+tar files often do not store uids for intermediate directories. This option sets the default directory owner to UID, rather than the user running Sqfstar. This also sets the root inode uid.
+.TP
+\fB\-default\-gid\fR GID
+tar files often do not store gids for intermediate directories. This option sets the default directory group to GID, rather than the group of the user running Sqfstar. This also sets the root inode gid.
+.TP
+\fB\-pseudo\-override\fR
+make pseudo file uids and gids override \fB\-all\-root\fR, \fB\-force\-uid\fR and \fB\-force\-gid\fR options.
+.TP
+\fB\-exports\fR
+make the filesystem exportable via NFS.
+.TP
+\fB\-no\-sparse\fR
+do not detect sparse files.
+.TP
+\fB\-no\-fragments\fR
+do not use fragments.
+.TP
+\fB\-no\-tailends\fR
+do not pack tail ends into fragments.
+.TP
+\fB\-no\-duplicates\fR
+do not perform duplicate checking.
+.TP
+\fB\-no\-hardlinks\fR
+do not hardlink files, instead store duplicates.
+.SS "Filesystem filter options:"
+.TP
+\fB\-p\fR PSEUDO\-DEFINITION
+add pseudo file definition. The definition should be quoted.
+.TP
+\fB\-pf\fR PSEUDO\-FILE
+add list of pseudo file definitions. Pseudo file definitions in pseudo\-files should not be quoted.
+.TP
+\fB\-ef\fR EXCLUDE_FILE
+list of exclude dirs/files. One per line.
+.TP
+\fB\-regex\fR
+allow POSIX regular expressions to be used in exclude dirs/files.
+.TP
+\fB\-ignore\-zeros\fR
+allow tar files to be concatenated together and fed to Sqfstar. Normally a tar file has two consecutive 512 byte blocks filled with zeros which signal EOF, and Sqfstar will stop reading after the first tar file on encountering them. This option makes Sqfstar ignore the zero filled blocks.
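+For example (illustrative), two concatenated tar files can be packed into a single filesystem:
+ cat part1.tar part2.tar | sqfstar -ignore-zeros IMAGE.SQFS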
+.SS "Filesystem extended attribute (xattrs) options:"
+.TP
+\fB\-no\-xattrs\fR
+do not store extended attributes.
+.TP
+\fB\-xattrs\fR
+store extended attributes (default).
+.TP
+\fB\-xattrs\-exclude\fR REGEX
+exclude any xattr names matching REGEX. REGEX is a POSIX regular expression, e.g. \fB\-xattrs\-exclude\fR '^user.' excludes xattrs from the user namespace.
+.TP
+\fB\-xattrs\-include\fR REGEX
+include any xattr names matching REGEX. REGEX is a POSIX regular expression, e.g. \fB\-xattrs\-include\fR '^user.' includes xattrs from the user namespace.
+.TP
+\fB\-xattrs\-add\fR NAME=VAL
+add the xattr NAME with VAL to files. If it is a user xattr, it will be added to regular files and directories (see man 7 xattr); otherwise it will be added to all files. By default VAL is treated as binary (i.e. an uninterpreted byte sequence), but it can be prefixed with 0s, in which case it is treated as base64 encoded, or prefixed with 0x, in which case it is treated as hexadecimal. Additionally it can be prefixed with 0t, which is similar to binary encoding except that backslashes are treated specially: a backslash followed by 3 octal digits can be used to encode any ASCII character, including control codes. The option can be repeated multiple times to add multiple xattrs.
+.SS "Sqfstar runtime options:"
+.TP
+\fB\-version\fR
+print version, licence and copyright message.
+.TP
+\fB\-force\fR
+force Sqfstar to write to block device or file.
+.TP
+\fB\-exit\-on\-error\fR
+treat normally ignored errors as fatal.
+.TP
+\fB\-quiet\fR
+no verbose output.
+.TP
+\fB\-info\fR
+print files written to filesystem.
+.TP
+\fB\-no\-progress\fR
+do not display the progress bar.
+.TP
+\fB\-progress\fR
+display progress bar when using the \fB\-info\fR option.
+.TP
+\fB\-percentage\fR
+display a percentage rather than the full progress bar. Can be used with dialog \fB\-\-gauge\fR etc.
+.TP
+\fB\-throttle\fR PERCENTAGE
+throttle the I/O input rate by the given percentage. This can be used to reduce the I/O and CPU consumption of Sqfstar.
+.TP
+\fB\-limit\fR PERCENTAGE
+limit the I/O input rate to the given percentage. This can be used to reduce the I/O and CPU consumption of Sqfstar (alternative to \fB\-throttle\fR).
+.TP
+\fB\-processors\fR NUMBER
+use NUMBER processors. By default will use number of processors available.
+.TP
+\fB\-mem\fR SIZE
+use SIZE physical memory for caches. Use K, M or G to specify Kbytes, Mbytes or Gbytes respectively.
+.TP
+\fB\-mem\-percent\fR PERCENT
+use PERCENT physical memory for caches. Default 25%.
+.TP
+\fB\-mem\-default\fR
+print default memory usage in Mbytes.
+.SS "Expert options (these may make the filesystem unmountable):"
+.TP
+\fB\-nopad\fR
+do not pad filesystem to a multiple of 4K.
+.TP
+\fB\-offset\fR OFFSET
+skip OFFSET bytes at the beginning of FILESYSTEM. Optionally a suffix of K, M or G can be given to specify Kbytes, Mbytes or Gbytes respectively. Default 0 bytes.
+.TP
+\fB\-o\fR OFFSET
+synonym for \fB\-offset\fR.
+.SS "Miscellaneous options:"
+.TP
+\fB\-fstime\fR TIME
+alternative name for \fB\-mkfs\-time\fR.
+.TP
+\fB\-root\-owned\fR
+alternative name for \fB\-all\-root\fR.
+.TP
+\fB\-noInodeCompression\fR
+alternative name for \fB\-noI\fR.
+.TP
+\fB\-noIdTableCompression\fR
+alternative name for \fB\-noId\fR.
+.TP
+\fB\-noDataCompression\fR
+alternative name for \fB\-noD\fR.
+.TP
+\fB\-noFragmentCompression\fR
+alternative name for \fB\-noF\fR.
+.TP
+\fB\-noXattrCompression\fR
+alternative name for \fB\-noX\fR.
+.TP
+\fB\-help\fR
+output this options text to stdout.
+.TP
+\fB\-h\fR
+output this options text to stdout.
+.TP
+\fB\-Xhelp\fR
+print compressor options for selected compressor.
+.SH "PSEUDO FILE DEFINITION FORMAT"
+.TP
+\fB\-p\fR "filename d mode uid gid"
+create a directory.
+.TP
+\fB\-p\fR "filename m mode uid gid"
+modify filename.
+.TP
+\fB\-p\fR "filename b mode uid gid major minor"
+create a block device.
+.TP
+\fB\-p\fR "filename c mode uid gid major minor"
+create a character device.
+.TP
+\fB\-p\fR "filename f mode uid gid command"
+create file from stdout of command.
+.TP
+\fB\-p\fR "filename s mode uid gid symlink"
+create a symbolic link.
+.TP
+\fB\-p\fR "filename i mode uid gid [s|f]"
+create a socket (s) or FIFO (f).
+.TP
+\fB\-p\fR "filename x name=val"
+create an extended attribute.
+.TP
+\fB\-p\fR "filename l linkname"
+create a hard\-link to linkname.
+.TP
+\fB\-p\fR "filename L pseudo_filename"
+same, but link to pseudo file.
+.TP
+\fB\-p\fR "filename D time mode uid gid"
+create a directory with timestamp time.
+.TP
+\fB\-p\fR "filename M time mode uid gid"
+modify a file with timestamp time.
+.TP
+\fB\-p\fR "filename B time mode uid gid major minor"
+create block device with timestamp time.
+.TP
+\fB\-p\fR "filename C time mode uid gid major minor"
+create char device with timestamp time.
+.TP
+\fB\-p\fR "filename F time mode uid gid command"
+create file with timestamp time.
+.TP
+\fB\-p\fR "filename S time mode uid gid symlink"
+create symlink with timestamp time.
+.TP
+\fB\-p\fR "filename I time mode uid gid [s|f]"
+create socket/fifo with timestamp time.
+.SH "COMPRESSORS AVAILABLE AND COMPRESSOR SPECIFIC OPTIONS"
+.SS "gzip (default):"
+.TP
+\fB\-Xcompression\-level\fR COMPRESSION\-LEVEL
+COMPRESSION\-LEVEL should be 1 .. 9 (default 9).
+.TP
+\fB\-Xwindow\-size\fR WINDOW\-SIZE
+WINDOW\-SIZE should be 8 .. 15 (default 15).
+.TP
+\fB\-Xstrategy\fR strategy1,strategy2,...,strategyN
+Compress using strategy1,strategy2,...,strategyN in turn and choose the best compression. Available strategies: default, filtered, huffman_only, run_length_encoded and fixed.
+.SS "lzo:"
+.TP
+\fB\-Xalgorithm\fR ALGORITHM
+Where ALGORITHM is one of: lzo1x_1, lzo1x_1_11, lzo1x_1_12, lzo1x_1_15, lzo1x_999 (default).
+.TP
+\fB\-Xcompression\-level\fR COMPRESSION\-LEVEL
+COMPRESSION\-LEVEL should be 1 .. 9 (default 8). Only applies to the lzo1x_999 algorithm.
+.SS "lz4:"
+.TP
+\fB\-Xhc\fR
+Compress using LZ4 High Compression.
+.SS "xz:"
+.TP
+\fB\-Xbcj\fR filter1,filter2,...,filterN
+Compress using filter1,filter2,...,filterN in turn (in addition to no filter), and choose the best compression. Available filters: x86, arm, armthumb, powerpc, sparc, ia64.
+.TP
+\fB\-Xdict\-size\fR DICT\-SIZE
+Use DICT\-SIZE as the XZ dictionary size. The dictionary size can be specified as a percentage of the block size, or as an absolute value. The dictionary size must be less than or equal to the block size and 8192 bytes or larger. It must also be storable in the xz header as either 2^n or as 2^n+2^(n+1). Example dict\-sizes are 75%, 50%, 37.5%, 25%, or 32K, 16K, 8K etc.
+.SS "zstd:"
+.TP
+\fB\-Xcompression\-level\fR COMPRESSION\-LEVEL
+COMPRESSION\-LEVEL should be 1 .. 22 (default 15).
+.SH ENVIRONMENT
+.TP
+SOURCE_DATE_EPOCH
+If set, this is used as the filesystem creation timestamp. Also any file timestamps which are after SOURCE_DATE_EPOCH will be clamped to SOURCE_DATE_EPOCH. See https://reproducible\-builds.org/docs/source\-date\-epoch/ for more information.
+.SH EXAMPLES
+.TP
+sqfstar IMAGE.SQFS < archive.tar
+Create a Squashfs filesystem from the uncompressed tar file "archive.tar".
+Sqfstar will use the default compressor (normally gzip), and block size of 128
+Kbytes.
+.TP
+zcat archive.tgz | sqfstar IMAGE.SQFS
+Create a Squashfs filesystem from the compressed tar file "archive.tgz". Sqfstar
+will use the default compressor (normally gzip), and block size of 128 Kbytes.
+.TP
+sqfstar -b 1M -comp zstd IMAGE.SQFS < archive.tar
+Use a block size of 1 Mbyte and Zstandard compression to create the filesystem.
+.TP
+sqfstar -root-uid 0 -root-gid 0 IMAGE.SQFS < archive.tar
+Tar files do not supply a definition for the root directory, and the default is
+to make the directory owned/group owned by the user running Sqfstar. The above
+command sets the ownership/group ownership to root.
+.TP
+sqfstar -root-mode 0755 IMAGE.SQFS < archive.tar
+The default permissions for the root directory are 0777 (rwxrwxrwx). The above
+command sets the permissions to 0755 (rwxr-xr-x).
+.TP
+sqfstar IMAGE.SQFS file1 file2 < archive.tar
+Exclude file1 and file2 from the tar file when creating filesystem.
+.TP
+sqfstar IMAGE.SQFS "*.gz" < archive.tar
+Exclude any files in the top level directory which matches the wildcard pattern
+"*.gz".
+.TP
+sqfstar IMAGE.SQFS "... *.gz" < archive.tar
+Exclude any file which matches the wildcard pattern "*.gz" anywhere within the
+tar file. The initial "..." indicates the wildcard pattern is "non-anchored"
+and will match anywhere.
+.PP
+Note: when passing wildcarded names to Sqfstar, they should be quoted (as in
+the above examples), to ensure that they are not processed by the shell.
+
+.SS Using pseudo file definitions
+.TP
+sqfstar -p "build_dir d 0644 0 0" IMAGE.SQFS < archive.tar
+Create a directory called "build_dir" in the output filesystem.
+.TP
+sqfstar -p "version.txt l /tmp/build/version" IMAGE.SQFS < archive.tar
+Create a reference called "version.txt" to a file not in the tar archive,
+which acts as if that file was in the tar archive.
+.TP
+sqfstar -p "date.txt f 0644 0 0 date" IMAGE.SQFS < archive.tar
+Create a file called "date.txt" which holds the output (stdout) from running
+the "date" command.
+.TP
+sqfstar -p "\\"hello world\\" f 0644 0 0 date" IMAGE.SQFS < archive.tar
+As above, but showing that filenames can have spaces if they are quoted.
+The quotes need to be backslashed to protect them from the shell.
+.TP
+sqfstar -p "input f 0644 root root dd if=/dev/sda1 bs=1024" IMAGE.SQFS < archive.tar
+Create a file containing the contents of the partition /dev/sda1. This allows
+input from devices, fifos and named sockets to be captured and placed in the
+Squashfs filesystem.
+.PP
+Note: pseudo file definitions should be quoted (as in the above examples), to
+ensure that they are passed to Sqfstar as a single argument, and to ensure
+that they are not processed by the shell.
+.SH AUTHOR
+Written by Phillip Lougher <phillip@squashfs.org.uk>
+.SH COPYRIGHT
+Copyright \(co 2023 Phillip Lougher <phillip@squashfs.org.uk>
+.PP
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation; either version 2,
+or (at your option) any later version.
+.PP
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+.SH "SEE ALSO"
+mksquashfs(1), unsquashfs(1), sqfscat(1)
+.PP
+The README for the Squashfs\-tools 4.6.1 release, describing the new features can be
+read here https://github.com/plougher/squashfs\-tools/blob/master/README\-4.6.1
+.PP
+The Squashfs\-tools USAGE guide can be read here
+https://github.com/plougher/squashfs\-tools/blob/master/USAGE\-4.6
diff --git a/manpages/unsquashfs.1 b/manpages/unsquashfs.1
new file mode 100644
index 0000000..2d45f87
--- /dev/null
+++ b/manpages/unsquashfs.1
@@ -0,0 +1,279 @@
+.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.48.5.
+.TH UNSQUASHFS "1" "March 2023" "unsquashfs version 4.6.1" "User Commands"
+.SH NAME
+unsquashfs - tool to uncompress, extract and list squashfs filesystems
+.SH SYNOPSIS
+.B unsquashfs
+[\fI\,OPTIONS\/\fR] \fI\,FILESYSTEM \/\fR[\fI\,files to extract or exclude (with -excludes) or cat (with -cat)\/\fR]
+.SH DESCRIPTION
+Squashfs is a highly compressed read-only filesystem for Linux.
+It uses gzip/xz/lzo/lz4/zstd compression to compress files, inodes and
+directories. Inodes in the system are very small and all blocks are
+packed to minimise data overhead. Block sizes greater than 4K are supported
+up to a maximum of 1 Mbyte (default block size 128K).
+
+Squashfs is intended for general read-only filesystem use, for archival
+use (i.e. in cases where a .tar.gz file may be used), and in constrained
+block device/memory systems (e.g. embedded systems) where low overhead is
+needed.
+.SH OPTIONS
+.SS "Filesystem extraction (filtering) options:"
+.TP
+\fB\-d\fR PATHNAME, \fB\-dest\fR PATHNAME
+extract to PATHNAME, default "squashfs\-root". This option also sets the prefix used when listing the filesystem.
+.TP
+\fB\-max\fR LEVELS, \fB\-max\-depth\fR LEVELS
+descend at most LEVELS of directories when extracting.
+.TP
+\fB\-excludes\fR
+treat files on command line as exclude files.
+.TP
+\fB\-ex\fR, \fB\-exclude\-list\fR
+list of files to be excluded, terminated with ; e.g. file1 file2 ;.
+.TP
+\fB\-extract\-file\fR FILE
+list of directories or files to extract. One per line.
+.TP
+\fB\-exclude\-file\fR FILE
+list of directories or files to exclude. One per line.
+.TP
+\fB\-match\fR
+abort if any extract file does not match on anything, and cannot be resolved. Implies \fB\-missing\-symlinks\fR and \fB\-no\-wildcards\fR.
+.TP
+\fB\-follow\fR, \fB\-follow\-symlinks\fR
+follow symlinks in extract files, and add all files/symlinks needed to resolve extract file. Implies \fB\-no\-wildcards\fR.
+.TP
+\fB\-missing\fR, \fB\-missing\-symlinks\fR
+Unsquashfs will abort if any symlink can't be resolved in \fB\-follow\-symlinks\fR.
+.TP
+\fB\-no\-wild\fR, \fB\-no\-wildcards\fR
+do not use wildcard matching in extract and exclude names.
+.TP
+\fB\-r\fR, \fB\-regex\fR
+treat extract names as POSIX regular expressions rather than use the default shell wildcard expansion (globbing).
+.TP
+\fB\-all\fR TIME, \fB\-all\-time\fR TIME
+set all file timestamps to TIME, rather than the time stored in the filesystem inode. TIME can be an unsigned 32\-bit int indicating seconds since the epoch (1970\-01\-01) or a string value which is passed to the "date" command to parse. Any string value which the date command recognises can be used such as "now", "last week", or "Wed Feb 15 21:02:39 GMT 2023".
+.TP
+\fB\-cat\fR
+cat the files on the command line to stdout.
+.TP
+\fB\-f\fR, \fB\-force\fR
+if file already exists then overwrite.
+.TP
+\fB\-pf\fR FILE
+output a pseudo file equivalent of the input Squashfs filesystem, use \- for stdout.
+.SS "Filesystem information and listing options:"
+.TP
+\fB\-s\fR, \fB\-stat\fR
+display filesystem superblock information.
+.TP
+\fB\-max\fR LEVELS, \fB\-max\-depth\fR LEVELS
+descend at most LEVELS of directories when listing.
+.TP
+\fB\-i\fR, \fB\-info\fR
+print files as they are extracted.
+.TP
+\fB\-li\fR, \fB\-linfo\fR
+print files as they are extracted with file attributes (like ls \fB\-l\fR output).
+.TP
+\fB\-l\fR, \fB\-ls\fR
+list filesystem, but do not extract files.
+.TP
+\fB\-ll\fR, \fB\-lls\fR
+list filesystem with file attributes (like ls \fB\-l\fR output), but do not extract files.
+.TP
+\fB\-lln\fR, \fB\-llnumeric\fR
+same as \fB\-lls\fR but with numeric uids and gids.
+.TP
+\fB\-lc\fR
+list filesystem concisely, displaying only files and empty directories. Do not extract files.
+.TP
+\fB\-llc\fR
+list filesystem concisely with file attributes, displaying only files and empty directories. Do not extract files.
+.TP
+\fB\-full\fR, \fB\-full\-precision\fR
+use full precision when displaying times including seconds. Use with \fB\-linfo\fR, \fB\-lls\fR, \fB\-lln\fR and \fB\-llc\fR.
+.TP
+\fB\-UTC\fR
+use UTC rather than local time zone when displaying time.
+.TP
+\fB\-mkfs\-time\fR
+display filesystem superblock time, which is an unsigned 32\-bit int representing the time in seconds since the epoch (1970\-01\-01).
+.SS "Filesystem extended attribute (xattrs) options:"
+.TP
+\fB\-no\fR, \fB\-no\-xattrs\fR
+do not extract xattrs in file system.
+.TP
+\fB\-x\fR, \fB\-xattrs\fR
+extract xattrs in file system (default).
+.TP
+\fB\-xattrs\-exclude\fR REGEX
+exclude any xattr names matching REGEX. REGEX is a POSIX regular expression, e.g. \fB\-xattrs\-exclude\fR '^user.' excludes xattrs from the user namespace.
+.TP
+\fB\-xattrs\-include\fR REGEX
+include any xattr names matching REGEX. REGEX is a POSIX regular expression, e.g. \fB\-xattrs\-include\fR '^user.' includes xattrs from the user namespace.
+.SS "Unsquashfs runtime options:"
+.TP
+\fB\-v\fR, \fB\-version\fR
+print version, licence and copyright information.
+.TP
+\fB\-p\fR NUMBER, \fB\-processors\fR NUMBER
+use NUMBER processors. By default will use the number of processors available.
+.TP
+\fB\-q\fR, \fB\-quiet\fR
+no verbose output.
+.TP
+\fB\-n\fR, \fB\-no\-progress\fR
+do not display the progress bar.
+.TP
+\fB\-percentage\fR
+display a percentage rather than the full progress bar. Can be used with dialog \fB\-\-gauge\fR etc.
+.TP
+\fB\-ig\fR, \fB\-ignore\-errors\fR
+treat errors writing files to output as non\-fatal.
+.TP
+\fB\-st\fR, \fB\-strict\-errors\fR
+treat all errors as fatal.
+.TP
+\fB\-no\-exit\fR, \fB\-no\-exit\-code\fR
+do not set exit code (to nonzero) on non\-fatal errors.
+.TP
+\fB\-da\fR SIZE, \fB\-data\-queue\fR SIZE
+set data queue to SIZE Mbytes. Default 256 Mbytes.
+.TP
+\fB\-fr\fR SIZE, \fB\-frag\-queue\fR SIZE
+set fragment queue to SIZE Mbytes. Default 256 Mbytes.
+.SS "Miscellaneous options:"
+.TP
+\fB\-h\fR, \fB\-help\fR
+output this options text to stdout.
+.TP
+\fB\-o\fR BYTES, \fB\-offset\fR BYTES
+skip BYTES at start of FILESYSTEM. Optionally a suffix of K, M or G can be given to specify Kbytes, Mbytes or Gbytes respectively (default 0 bytes).
+.TP
+\fB\-fstime\fR
+synonym for \fB\-mkfs\-time\fR.
+.TP
+\fB\-e\fR, \fB\-ef\fR EXTRACT FILE
+synonym for \fB\-extract\-file\fR.
+.TP
+\fB\-exc\fR, \fB\-excf\fR EXCLUDE FILE
+synonym for \fB\-exclude\-file\fR.
+.TP
+\fB\-L\fR
+synonym for \fB\-follow\-symlinks\fR.
+.TP
+\fB\-pseudo\-file\fR FILE
+alternative name for \fB\-pf\fR.
+.SH "DECOMPRESSORS AVAILABLE"
+gzip, lzo, lz4, xz, zstd
+.SH "EXIT STATUS"
+.TP
+0
+The filesystem listed or extracted OK.
+.TP
+1
+FATAL errors occurred, e.g. filesystem corruption, I/O errors. Unsquashfs did not continue and aborted.
+.TP
+2
+Non\-fatal errors occurred, e.g. no support for XATTRs or symbolic links in the output filesystem, or permissions could not be written to the output filesystem. Unsquashfs continued and did not abort.
+.PP
+See \fB\-ignore\-errors\fR, \fB\-strict\-errors\fR and \fB\-no\-exit\-code\fR options for how they affect
+the exit status.
+.SH EXAMPLES
+.TP
+unsquashfs IMAGE.SQFS
+Extract IMAGE.SQFS to "squashfs-root" in the current working directory.
+.TP
+unsquashfs -d output IMAGE.SQFS
+Extract IMAGE.SQFS to "output" in the current working directory.
+.TP
+unsquashfs -d . IMAGE.SQFS
+Extract IMAGE.SQFS to current working directory.
+.TP
+unsquashfs -linfo IMAGE.SQFS
+Output a listing of IMAGE.SQFS with file attributes to stdout, while extracting
+the filesystem to "squashfs-root".
+.TP
+unsquashfs -lls IMAGE.SQFS
+Output a listing of IMAGE.SQFS with file attributes to stdout, but do not
+extract the filesystem. The listing will be prefixed with "squashfs-root".
+.TP
+unsquashfs -d "" -lls IMAGE.SQFS
+Output a listing of IMAGE.SQFS with file attributes to stdout, but do not
+extract the filesystem. The listing will not be prefixed with "squashfs-root".
+.TP
+unsquashfs IMAGE.SQFS fs/squashfs
+Extract only the "fs/squashfs" directory.
+.TP
+unsquashfs IMAGE.SQFS "[Tt]est/example*"
+Extract all files beginning with "example" inside top level directories
+called "Test" or "test".
+.TP
+unsquashfs -excludes IMAGE.SQFS "test/*data*.gz"
+This will extract everything except for files that match *data*.gz in the
+test directory. The -excludes option tells Unsquashfs to exclude the files
+on the command line rather than extract them.
+.TP
+unsquashfs -excludes IMAGE.SQFS "... *.gz"
+This will extract everything except for files that match *.gz anywhere
+in the image. The "..." means this is a non-anchored exclude which
+matches anywhere.
+.TP
+unsquashfs -ex "test/*data*.gz" \; IMAGE.SQFS test
+This uses both extract and exclude options, to tell Unsquashfs to only
+extract the "test" directory, and to exclude any files within it that
+match *data*.gz.
+.TP
+unsquashfs -ex "... *.gz" IMAGE.SQFS test
+This uses both extract and exclude options, to tell Unsquashfs to only
+extract the "test" directory, and to exclude files which match "*.gz"
+anywhere within "test" directory or sub-directories.
+.TP
+unsquashfs -dest output -max-depth 2 IMAGE.SQFS
+Extract only the top two levels of IMAGE.SQFS to "output" directory.
+.TP
+unsquashfs -max-depth 2 IMAGE.SQFS "test/*.gz"
+Only extract the gzipped files in the test directory.
+.TP
+unsquashfs -llc -max-depth 2 IMAGE.SQFS "test/*.gz"
+Output a listing of the gzipped files in the test directory to stdout,
+but do not extract them.
+.TP
+unsquashfs -no-xattrs IMAGE.SQFS
+Do not extract any extended attributes. Any extended attributes in the
+filesystem will be ignored.
+.TP
+unsquashfs -xattrs-include "^user." IMAGE.SQFS
+Filter the extended attributes and only extract extended attributes in the
+user namespace from the Squashfs filesystem.
+.TP
+unsquashfs -xattrs-exclude "^user." IMAGE.SQFS
+Filter the extended attributes and do not extract any extended attributes in
+the user namespace from the Squashfs filesystem.
+.PP
+Note: when passing wildcarded names to Unsquashfs, they should be quoted (as in
+the above examples), to ensure that they are not processed by the shell.
+.SH AUTHOR
+Written by Phillip Lougher <phillip@squashfs.org.uk>
+.SH COPYRIGHT
+Copyright \(co 2023 Phillip Lougher <phillip@squashfs.org.uk>
+.PP
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation; either version 2,
+or (at your option) any later version.
+.PP
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+.SH "SEE ALSO"
+mksquashfs(1), sqfstar(1), sqfscat(1)
+.PP
+The README for the Squashfs\-tools 4.6.1 release, describing the new features can be
+read here https://github.com/plougher/squashfs\-tools/blob/master/README\-4.6.1
+.PP
+The Squashfs\-tools USAGE guide can be read here
+https://github.com/plougher/squashfs\-tools/blob/master/USAGE\-4.6
diff --git a/squashfs-tools/.gitignore b/squashfs-tools/.gitignore
new file mode 100644
index 0000000..a2c80e8
--- /dev/null
+++ b/squashfs-tools/.gitignore
@@ -0,0 +1,5 @@
+*.o
+mksquashfs
+sqfscat
+sqfstar
+unsquashfs
diff --git a/squashfs-tools/Makefile b/squashfs-tools/Makefile
new file mode 100755
index 0000000..9aa4381
--- /dev/null
+++ b/squashfs-tools/Makefile
@@ -0,0 +1,470 @@
+# Squashfs-tools 4.6.1 release
+RELEASE_VERSION = 4.6.1
+RELEASE_DATE = 2023/03/25
+###############################################
+# Build options #
+###############################################
+#
+# Edit the following definitions to customise the build, or run
+# "CONFIG=1 make" with the build options below to customise the build on
+# the command line.
+#
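+# For example (an illustrative invocation; enable only the compressors whose
+# development libraries are installed):
+#
+#     CONFIG=1 XZ_SUPPORT=1 ZSTD_SUPPORT=1 COMP_DEFAULT=zstd make
+#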
+ifndef CONFIG
+############# Building gzip support ###########
+#
+# Gzip support is by default enabled, and the compression type default
+# (COMP_DEFAULT) is gzip.
+#
+# If you don't want/need gzip support then comment out the GZIP SUPPORT line
+# below, and change COMP_DEFAULT to one of the compression types you have
+# selected.
+#
+# Obviously, you must select at least one of the available gzip, xz, lzo,
+# lz4, zstd or lzma (deprecated) compression types.
+#
+GZIP_SUPPORT = 1
+
+########### Building XZ support #############
+#
+# LZMA2 compression.
+#
+# XZ Utils liblzma (http://tukaani.org/xz/) is supported
+#
+# Development packages (libraries and header files) should be
+# supported by most modern distributions. Please refer to
+# your distribution package manager.
+#
+# To build, install the library and uncomment
+# the XZ_SUPPORT line below.
+#
+#XZ_SUPPORT = 1
+
+
+############ Building LZO support ##############
+#
+# The LZO library (http://www.oberhumer.com/opensource/lzo/) is supported.
+#
+# Development packages (libraries and header files) should be
+# supported by most modern distributions. Please refer to
+# your distribution package manager.
+#
+# To build, install the library and uncomment
+# the LZO_SUPPORT line below.
+#
+#LZO_SUPPORT = 1
+
+
+########### Building LZ4 support #############
+#
+# Yann Collet's LZ4 tools are supported
+# LZ4 homepage: https://lz4.github.io/lz4/
+# LZ4 source repository: https://github.com/lz4/lz4/
+#
+# Development packages (libraries and header files) should be
+# supported by most modern distributions. Please refer to
+# your distribution package manager.
+#
+# To build, install the library and uncomment
+# the LZ4_SUPPORT line below.
+#
+#LZ4_SUPPORT = 1
+
+
+########### Building ZSTD support ############
+#
+# The ZSTD library is supported
+# ZSTD homepage: https://facebook.github.io/zstd/
+# ZSTD source repository: https://github.com/facebook/zstd
+#
+# Development packages (libraries and header files) should be
+# supported by most modern distributions. Please refer to
+# your distribution package manager.
+#
+# To build, install the library and uncomment
+# the ZSTD_SUPPORT line below.
+#
+#ZSTD_SUPPORT = 1
+
+
+######## Specifying default compression ########
+#
+# The next line specifies which compression algorithm is used by default
+# in Mksquashfs. Obviously the compression algorithm must have been
+# selected to be built
+#
+COMP_DEFAULT = gzip
+
+
+###############################################
+# Extended attribute (XATTRs) build options #
+###############################################
+#
+# Building XATTR support for Mksquashfs and Unsquashfs
+#
+# If your C library or build/target environment doesn't support XATTRs then
+# comment out the next line to build Mksquashfs and Unsquashfs without XATTR
+# support
+XATTR_SUPPORT = 1
+
+# Select whether you wish xattrs to be stored by Mksquashfs and extracted
+# by Unsquashfs by default. If selected, users can disable xattr support by
+# using the -no-xattrs option.
+#
+# If unselected, Mksquashfs/Unsquashfs won't store and extract xattrs by
+# default. Users can enable xattrs by using the -xattrs option.
+XATTR_DEFAULT = 1
+
+
+###############################################
+# Reproducible Image options #
+###############################################
+#
+# Select whether you wish reproducible builds by default. If selected, users
+# can disable reproducible builds using the -not-reproducible option.
+# If not selected, users can enable reproducible builds using the
+# -reproducible option.
+REPRODUCIBLE_DEFAULT = 1
+
+###############################################
+# Manpage generation #
+###############################################
+#
+# If help2man is available on the system, on installation, the Makefile
+# will try to generate "custom" manpages from the built squashfs-tools.
+#
+# If the squashfs-tools have been cross-compiled, or for any other
+# reason they're not executable, this will generate errors at
+# installation and the install script will fall back to using
+# pre-built manpages.
+#
+# Change next variable to "y" to use the pre-built manpages by default,
+# and not attempt to generate "custom" manpages. This will eliminate
+# errors and warnings at install time.
+USE_PREBUILT_MANPAGES=n
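+
+# For example (an illustrative invocation, not part of this Makefile):
+# because make command line assignments override the assignment above,
+# the pre-built manpages can also be selected for a single install with:
+#
+#	make USE_PREBUILT_MANPAGES=y install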
+
+###############################################
+# INSTALL PATHS #
+###############################################
+#
+# Alter INSTALL_* to install binaries and manpages
+# elsewhere.
+#
+# To skip building and installing manpages,
+# unset INSTALL_MANPAGES_DIR or set to ""
+#
+INSTALL_PREFIX = /usr/local
+INSTALL_DIR = $(INSTALL_PREFIX)/bin
+INSTALL_MANPAGES_DIR = $(INSTALL_PREFIX)/man/man1
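+
+# For example (illustrative, not a shipped default): to install the
+# binaries, symlinks and manpages under /opt/squashfs-tools instead of
+# /usr/local, override the prefix at install time:
+#
+#	make INSTALL_PREFIX=/opt/squashfs-tools install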
+
+###############################################
+# Obsolete options #
+###############################################
+
+########### Building LZMA support #############
+#
+# LZMA1 compression.
+#
+# LZMA1 compression is obsolete, and the newer and better XZ (LZMA2)
+# compression should be used in preference.
+#
+# Both XZ Utils liblzma (http://tukaani.org/xz/) and LZMA SDK
+# (http://www.7-zip.org/sdk.html) are supported
+#
+# To build using XZ Utils liblzma - install the library and uncomment
+# the LZMA_XZ_SUPPORT line below.
+#
+# To build using the LZMA SDK (4.65 used in development, other versions may
+# work) - download and unpack it, uncomment and set LZMA_DIR to unpacked source,
+# and uncomment the LZMA_SUPPORT line below.
+#
+#LZMA_XZ_SUPPORT = 1
+#LZMA_SUPPORT = 1
+#LZMA_DIR = ../../../../LZMA/lzma465
+else
+GZIP_SUPPORT ?= 1
+XZ_SUPPORT ?= 0
+LZO_SUPPORT ?= 0
+LZ4_SUPPORT ?= 0
+ZSTD_SUPPORT ?= 0
+COMP_DEFAULT ?= gzip
+XATTR_SUPPORT ?= 1
+XATTR_DEFAULT ?= 1
+REPRODUCIBLE_DEFAULT ?= 1
+USE_PREBUILT_MANPAGES ?= n
+INSTALL_PREFIX ?= /usr/local
+INSTALL_DIR ?= $(INSTALL_PREFIX)/bin
+INSTALL_MANPAGES_DIR ?= $(INSTALL_PREFIX)/man/man1
+LZMA_XZ_SUPPORT ?= 0
+LZMA_SUPPORT ?= 0
+LZMA_DIR ?= ../../../../LZMA/lzma465
+endif
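+
+# Example (hypothetical invocation, shown for illustration only): when the
+# "?=" defaults above are in effect, they can be overridden from the
+# environment or the make command line without editing this Makefile, e.g.
+#
+#	make XZ_SUPPORT=1 COMP_DEFAULT=xz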
+
+
+###############################################
+# End of BUILD options section #
+###############################################
+#
+INCLUDEDIR = -I.
+
+MKSQUASHFS_OBJS = mksquashfs.o read_fs.o action.o swap.o pseudo.o compressor.o \
+ sort.o progressbar.o info.o restore.o process_fragments.o \
+ caches-queues-lists.o reader.o tar.o date.o
+
+UNSQUASHFS_OBJS = unsquashfs.o unsquash-1.o unsquash-2.o unsquash-3.o \
+ unsquash-4.o unsquash-123.o unsquash-34.o unsquash-1234.o unsquash-12.o \
+ swap.o compressor.o unsquashfs_info.o date.o
+
+CFLAGS ?= -O2
+CFLAGS += $(EXTRA_CFLAGS) $(INCLUDEDIR) -D_FILE_OFFSET_BITS=64 \
+ -D_LARGEFILE_SOURCE -D_GNU_SOURCE -DCOMP_DEFAULT=\"$(COMP_DEFAULT)\" \
+ -Wall
+
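+# Example (illustrative): EXTRA_CFLAGS and EXTRA_LDFLAGS, used above and in
+# the link rules below, are hooks for passing extra compiler and linker
+# flags without editing this Makefile, e.g. for a debug build:
+#
+#	make EXTRA_CFLAGS="-g -O0" EXTRA_LDFLAGS="-g"
+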
+LIBS = -lpthread -lm
+ifeq ($(GZIP_SUPPORT),1)
+CFLAGS += -DGZIP_SUPPORT
+MKSQUASHFS_OBJS += gzip_wrapper.o
+UNSQUASHFS_OBJS += gzip_wrapper.o
+LIBS += -lz
+COMPRESSORS += gzip
+endif
+
+ifeq ($(LZMA_SUPPORT),1)
+LZMA_OBJS = $(LZMA_DIR)/C/Alloc.o $(LZMA_DIR)/C/LzFind.o \
+ $(LZMA_DIR)/C/LzmaDec.o $(LZMA_DIR)/C/LzmaEnc.o $(LZMA_DIR)/C/LzmaLib.o
+INCLUDEDIR += -I$(LZMA_DIR)/C
+CFLAGS += -DLZMA_SUPPORT
+MKSQUASHFS_OBJS += lzma_wrapper.o $(LZMA_OBJS)
+UNSQUASHFS_OBJS += lzma_wrapper.o $(LZMA_OBJS)
+COMPRESSORS += lzma
+endif
+
+ifeq ($(LZMA_XZ_SUPPORT),1)
+CFLAGS += -DLZMA_SUPPORT
+MKSQUASHFS_OBJS += lzma_xz_wrapper.o
+UNSQUASHFS_OBJS += lzma_xz_wrapper.o
+LIBS += -llzma
+COMPRESSORS += lzma
+endif
+
+ifeq ($(XZ_SUPPORT),1)
+CFLAGS += -DXZ_SUPPORT
+MKSQUASHFS_OBJS += xz_wrapper.o
+UNSQUASHFS_OBJS += xz_wrapper.o
+LIBS += -llzma
+COMPRESSORS += xz
+endif
+
+ifeq ($(LZO_SUPPORT),1)
+CFLAGS += -DLZO_SUPPORT
+MKSQUASHFS_OBJS += lzo_wrapper.o
+UNSQUASHFS_OBJS += lzo_wrapper.o
+LIBS += -llzo2
+COMPRESSORS += lzo
+endif
+
+ifeq ($(LZ4_SUPPORT),1)
+CFLAGS += -DLZ4_SUPPORT
+MKSQUASHFS_OBJS += lz4_wrapper.o
+UNSQUASHFS_OBJS += lz4_wrapper.o
+LIBS += -llz4
+COMPRESSORS += lz4
+endif
+
+ifeq ($(ZSTD_SUPPORT),1)
+CFLAGS += -DZSTD_SUPPORT
+MKSQUASHFS_OBJS += zstd_wrapper.o
+UNSQUASHFS_OBJS += zstd_wrapper.o
+LIBS += -lzstd
+COMPRESSORS += zstd
+endif
+
+ifeq ($(XATTR_SUPPORT),1)
+ifeq ($(XATTR_DEFAULT),1)
+CFLAGS += -DXATTR_SUPPORT -DXATTR_DEFAULT
+else
+CFLAGS += -DXATTR_SUPPORT
+endif
+MKSQUASHFS_OBJS += xattr.o read_xattrs.o tar_xattr.o pseudo_xattr.o
+UNSQUASHFS_OBJS += read_xattrs.o unsquashfs_xattr.o
+endif
+
+ifeq ($(REPRODUCIBLE_DEFAULT),1)
+CFLAGS += -DREPRODUCIBLE_DEFAULT
+endif
+
+#
+# If LZMA_SUPPORT is specified then LZMA_DIR must be specified too
+#
+ifeq ($(LZMA_SUPPORT),1)
+ifndef LZMA_DIR
+$(error "LZMA_SUPPORT requires LZMA_DIR to be also defined")
+endif
+endif
+
+#
+# Both LZMA_XZ_SUPPORT and LZMA_SUPPORT cannot be specified
+#
+ifeq ($(LZMA_XZ_SUPPORT),1)
+ifeq ($(LZMA_SUPPORT),1)
+$(error "Both LZMA_XZ_SUPPORT and LZMA_SUPPORT cannot be specified")
+endif
+endif
+
+#
+# At least one compressor must have been selected
+#
+ifndef COMPRESSORS
+$(error "No compressor selected! Select one or more of GZIP, LZMA, XZ, LZO, \
+ LZ4 or ZSTD!")
+endif
+
+#
+# COMP_DEFAULT should be defined
+#
+ifndef COMP_DEFAULT
+$(error "COMP_DEFAULT must be set to a compressor!")
+endif
+
+#
+# COMP_DEFAULT must be a selected compressor
+#
+ifeq (, $(findstring $(COMP_DEFAULT), $(COMPRESSORS)))
+$(error "COMP_DEFAULT is set to ${COMP_DEFAULT}, which isn't selected to be \
+ built!")
+endif
+
+#
+# Get VERSION and DATE for Mksquashfs/Unsquashfs version strings.
+#
+# If RELEASE_VERSION/RELEASE_DATE set, use them.
+#
+# If not set, this is a development version, therefore
+#
+# If this source has been exported by "git archive" use automatically
+# expanded strings.
+#
+# Otherwise ask git for the details from current HEAD.
+#
+include version.mk
+
+ifdef RELEASE_VERSION
+VERSION := $(RELEASE_VERSION)
+DATE := $(RELEASE_DATE)
+else
+ifeq ($(HASH),$Format:%h$)
+VERSION := 4.6.1-$(shell git show -s --pretty=format:%h)
+DATE := $(firstword $(subst -,/,$(shell git show -s --pretty=format:%ci)))
+else
+VERSION := 4.6.1-$(HASH)
+DATE := $(firstword $(FULLDATE))
+endif
+endif
+
+YEAR := $(firstword $(subst /, , $(DATE)))
+
+CFLAGS += -DVERSION=\"$(VERSION)\" -DDATE=\"$(DATE)\" -DYEAR=\"$(YEAR)\"
+
+.PHONY: all
+all: mksquashfs unsquashfs
+
+mksquashfs: $(MKSQUASHFS_OBJS)
+ $(CC) $(LDFLAGS) $(EXTRA_LDFLAGS) $(MKSQUASHFS_OBJS) $(LIBS) -o $@
+ ln -sf mksquashfs sqfstar
+
+mksquashfs.o: Makefile mksquashfs.c squashfs_fs.h squashfs_swap.h mksquashfs.h \
+ sort.h pseudo.h compressor.h xattr.h action.h mksquashfs_error.h progressbar.h \
+ info.h caches-queues-lists.h read_fs.h restore.h process_fragments.h
+
+reader.o: squashfs_fs.h mksquashfs.h caches-queues-lists.h progressbar.h \
+ mksquashfs_error.h pseudo.h sort.h
+
+read_fs.o: read_fs.c squashfs_fs.h squashfs_swap.h compressor.h xattr.h \
+ mksquashfs_error.h mksquashfs.h
+
+sort.o: sort.c squashfs_fs.h mksquashfs.h sort.h mksquashfs_error.h progressbar.h
+
+swap.o: swap.c
+
+pseudo.o: pseudo.c pseudo.h mksquashfs_error.h progressbar.h
+
+pseudo_xattr.o: pseudo_xattr.c xattr.h mksquashfs_error.h progressbar.h
+
+compressor.o: Makefile compressor.c compressor.h squashfs_fs.h
+
+xattr.o: xattr.c squashfs_fs.h squashfs_swap.h mksquashfs.h xattr.h mksquashfs_error.h \
+ progressbar.h
+
+read_xattrs.o: read_xattrs.c squashfs_fs.h squashfs_swap.h xattr.h error.h
+
+action.o: action.c squashfs_fs.h mksquashfs.h action.h mksquashfs_error.h
+
+progressbar.o: progressbar.c mksquashfs_error.h
+
+info.o: info.c squashfs_fs.h mksquashfs.h mksquashfs_error.h progressbar.h \
+ caches-queues-lists.h
+
+restore.o: restore.c caches-queues-lists.h squashfs_fs.h mksquashfs.h mksquashfs_error.h \
+ progressbar.h info.h
+
+process_fragments.o: process_fragments.c process_fragments.h
+
+caches-queues-lists.o: caches-queues-lists.c mksquashfs_error.h caches-queues-lists.h
+
+tar.o: tar.h
+
+tar_xattr.o: tar.h xattr.h
+
+date.o: date.h error.h
+
+gzip_wrapper.o: gzip_wrapper.c squashfs_fs.h gzip_wrapper.h compressor.h
+
+lzma_wrapper.o: lzma_wrapper.c compressor.h squashfs_fs.h
+
+lzma_xz_wrapper.o: lzma_xz_wrapper.c compressor.h squashfs_fs.h
+
+lzo_wrapper.o: lzo_wrapper.c squashfs_fs.h lzo_wrapper.h compressor.h
+
+lz4_wrapper.o: lz4_wrapper.c squashfs_fs.h lz4_wrapper.h compressor.h
+
+xz_wrapper.o: xz_wrapper.c squashfs_fs.h xz_wrapper.h compressor.h
+
+unsquashfs: $(UNSQUASHFS_OBJS)
+ $(CC) $(LDFLAGS) $(EXTRA_LDFLAGS) $(UNSQUASHFS_OBJS) $(LIBS) -o $@
+ ln -sf unsquashfs sqfscat
+
+unsquashfs.o: unsquashfs.h unsquashfs.c squashfs_fs.h squashfs_swap.h \
+ squashfs_compat.h xattr.h read_fs.h compressor.h unsquashfs_error.h
+
+unsquash-1.o: unsquashfs.h unsquash-1.c squashfs_fs.h squashfs_compat.h unsquashfs_error.h
+
+unsquash-2.o: unsquashfs.h unsquash-2.c squashfs_fs.h squashfs_compat.h unsquashfs_error.h
+
+unsquash-3.o: unsquashfs.h unsquash-3.c squashfs_fs.h squashfs_compat.h unsquashfs_error.h
+
+unsquash-4.o: unsquashfs.h unsquash-4.c squashfs_fs.h squashfs_swap.h \
+ read_fs.h unsquashfs_error.h
+
+unsquash-123.o: unsquashfs.h unsquash-123.c squashfs_fs.h squashfs_compat.h unsquashfs_error.h
+
+unsquash-34.o: unsquashfs.h unsquash-34.c unsquashfs_error.h
+
+unsquash-1234.o: unsquash-1234.c unsquashfs_error.h
+
+unsquash-12.o: unsquash-12.c unsquashfs.h
+
+unsquashfs_xattr.o: unsquashfs_xattr.c unsquashfs.h squashfs_fs.h xattr.h unsquashfs_error.h
+
+unsquashfs_info.o: unsquashfs.h squashfs_fs.h unsquashfs_error.h
+
+.PHONY: clean
+clean:
+ -rm -f *.o mksquashfs unsquashfs sqfstar sqfscat *.1
+
+.PHONY: install
+install: mksquashfs unsquashfs
+ mkdir -p $(INSTALL_DIR)
+ cp mksquashfs $(INSTALL_DIR)
+ cp unsquashfs $(INSTALL_DIR)
+ ln -fs unsquashfs $(INSTALL_DIR)/sqfscat
+ ln -fs mksquashfs $(INSTALL_DIR)/sqfstar
+ ../generate-manpages/install-manpages.sh $(shell pwd)/.. "$(INSTALL_MANPAGES_DIR)" "$(USE_PREBUILT_MANPAGES)"
diff --git a/squashfs-tools/action.c b/squashfs-tools/action.c
new file mode 100644
index 0000000..182487b
--- /dev/null
+++ b/squashfs-tools/action.c
@@ -0,0 +1,3574 @@
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2011, 2012, 2013, 2014, 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * action.c
+ */
+
+#include <fcntl.h>
+#include <dirent.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <pwd.h>
+#include <grp.h>
+#include <sys/wait.h>
+#include <regex.h>
+#include <limits.h>
+#include <errno.h>
+#include <ctype.h>
+
+#include "squashfs_fs.h"
+#include "mksquashfs.h"
+#include "action.h"
+#include "mksquashfs_error.h"
+#include "fnmatch_compat.h"
+#include "xattr.h"
+
+#define TRUE 1
+#define FALSE 0
+#define MAX_LINE 16384
+
+/*
+ * code to parse actions
+ */
+
+static char *cur_ptr, *source;
+static struct action *fragment_spec = NULL;
+static struct action *exclude_spec = NULL;
+static struct action *empty_spec = NULL;
+static struct action *move_spec = NULL;
+static struct action *prune_spec = NULL;
+static struct action *xattr_exc_spec = NULL;
+static struct action *xattr_inc_spec = NULL;
+static struct action *xattr_add_spec = NULL;
+static struct action *other_spec = NULL;
+static int fragment_count = 0;
+static int exclude_count = 0;
+static int empty_count = 0;
+static int move_count = 0;
+static int prune_count = 0;
+static int xattr_exc_count = 0;
+static int xattr_inc_count = 0;
+static int xattr_add_count = 0;
+static int other_count = 0;
+static struct action_entry *parsing_action;
+
+static struct file_buffer *def_fragment = NULL;
+static struct file_buffer *tail_fragment = NULL;
+
+static struct token_entry token_table[] = {
+ { "(", TOK_OPEN_BRACKET, 1, },
+ { ")", TOK_CLOSE_BRACKET, 1 },
+ { "&&", TOK_AND, 2 },
+ { "||", TOK_OR, 2 },
+ { "!", TOK_NOT, 1 },
+ { ",", TOK_COMMA, 1 },
+ { "@", TOK_AT, 1},
+ { " ", TOK_WHITE_SPACE, 1 },
+ { "\t ", TOK_WHITE_SPACE, 1 },
+ { "", -1, 0 }
+};
+
+
+static struct test_entry test_table[];
+
+static struct action_entry action_table[];
+
+static struct expr *parse_expr(int subexp);
+
+extern char *pathname(struct dir_ent *);
+
+/*
+ * Read a file, passing each line to parse_line() for
+ * parsing.
+ *
+ * Lines can be split across multiple lines using "\".
+ *
+ * Blank lines and comment lines indicated by # are supported.
+ */
+static int read_file(char *filename, char *type, int (parse_line)(char *))
+{
+ FILE *fd;
+ char *def, *err, *line = NULL;
+ int res, size = 0;
+
+ fd = fopen(filename, "r");
+ if(fd == NULL) {
+		ERROR("Could not open %s file \"%s\" because %s\n",
+ type, filename, strerror(errno));
+ return FALSE;
+ }
+
+ while(1) {
+ int total = 0;
+
+ while(1) {
+ int len;
+
+ if(total + (MAX_LINE + 1) > size) {
+ line = realloc(line, size += (MAX_LINE + 1));
+ if(line == NULL)
+ MEM_ERROR();
+ }
+
+ err = fgets(line + total, MAX_LINE + 1, fd);
+ if(err == NULL)
+ break;
+
+ len = strlen(line + total);
+ total += len;
+
+ if(len == MAX_LINE && line[total - 1] != '\n') {
+ /* line too large */
+ ERROR("Line too long when reading "
+ "%s file \"%s\", larger than "
+ "%d bytes\n", type, filename, MAX_LINE);
+ goto failed;
+ }
+
+ /*
+ * Remove '\n' terminator if it exists (the last line
+ * in the file may not be '\n' terminated)
+ */
+ if(len && line[total - 1] == '\n') {
+ line[-- total] = '\0';
+ len --;
+ }
+
+ /*
+ * If no line continuation then jump out to
+ * process line. Note, we have to be careful to
+ * check for "\\" (backslashed backslash) and to
+ * ensure we don't look at the previous line
+ */
+ if(len == 0 || line[total - 1] != '\\' || (len >= 2 &&
+ strcmp(line + total - 2, "\\\\") == 0))
+ break;
+ else
+ total --;
+ }
+
+ if(err == NULL) {
+ if(ferror(fd)) {
+ ERROR("Reading %s file \"%s\" failed "
+ "because %s\n", type, filename,
+ strerror(errno));
+ goto failed;
+ }
+
+ /*
+			 * At EOF, normally we'll be finished, but we have to
+			 * check for the special case where we had "\" line
+			 * continuation and then hit EOF immediately afterwards
+ */
+ if(total == 0)
+ break;
+ else
+ line[total] = '\0';
+ }
+
+ /* Skip any leading whitespace */
+ for(def = line; isspace(*def); def ++);
+
+ /* if line is now empty after skipping characters, skip it */
+ if(*def == '\0')
+ continue;
+
+ /* if comment line, skip */
+ if(*def == '#')
+ continue;
+
+ res = parse_line(def);
+ if(res == FALSE)
+ goto failed;
+ }
+
+ fclose(fd);
+ free(line);
+ return TRUE;
+
+failed:
+ fclose(fd);
+ free(line);
+ return FALSE;
+}
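+
+/*
+ * Illustrative example (not shipped with the source) of input accepted by
+ * read_file() above:
+ *
+ *	# comment lines and blank lines are skipped
+ *
+ *	first-half \
+ *	second-half
+ *
+ * The two continued lines are passed to parse_line() as a single line,
+ * with the trailing "\" removed and the next line appended in its place.
+ */
+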
+/*
+ * Lexical analyser
+ */
+#define STR_SIZE 256
+
+static int get_token(char **string)
+{
+ /* string buffer */
+ static char *str = NULL;
+ static int size = 0;
+
+ char *str_ptr;
+ int cur_size, i, quoted;
+
+ while (1) {
+ if (*cur_ptr == '\0')
+ return TOK_EOF;
+ for (i = 0; token_table[i].token != -1; i++)
+ if (strncmp(cur_ptr, token_table[i].string,
+ token_table[i].size) == 0)
+ break;
+ if (token_table[i].token != TOK_WHITE_SPACE)
+ break;
+ cur_ptr ++;
+ }
+
+ if (token_table[i].token != -1) {
+ cur_ptr += token_table[i].size;
+ return token_table[i].token;
+ }
+
+ /* string */
+ if(str == NULL) {
+ str = malloc(STR_SIZE);
+ if(str == NULL)
+ MEM_ERROR();
+ size = STR_SIZE;
+ }
+
+ /* Initialise string being read */
+ str_ptr = str;
+ cur_size = 0;
+ quoted = 0;
+
+ while(1) {
+ while(*cur_ptr == '"') {
+ cur_ptr ++;
+ quoted = !quoted;
+ }
+
+ if(*cur_ptr == '\0') {
+ /* inside quoted string EOF, otherwise end of string */
+ if(quoted)
+ return TOK_EOF;
+ else
+ break;
+ }
+
+ if(!quoted) {
+ for(i = 0; token_table[i].token != -1; i++)
+ if (strncmp(cur_ptr, token_table[i].string,
+ token_table[i].size) == 0)
+ break;
+ if (token_table[i].token != -1)
+ break;
+ }
+
+ if(*cur_ptr == '\\') {
+ cur_ptr ++;
+ if(*cur_ptr == '\0')
+ return TOK_EOF;
+ }
+
+ if(cur_size + 2 > size) {
+ char *tmp;
+ int offset = str_ptr - str;
+
+ size = (cur_size + 1 + STR_SIZE) & ~(STR_SIZE - 1);
+
+ tmp = realloc(str, size);
+ if(tmp == NULL)
+ MEM_ERROR();
+
+ str_ptr = tmp + offset;
+ str = tmp;
+ }
+
+ *str_ptr ++ = *cur_ptr ++;
+ cur_size ++;
+ }
+
+ *str_ptr = '\0';
+ *string = str;
+ return TOK_STRING;
+}
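+
+/*
+ * Illustrative example (assumed input): given the action string
+ *
+ *	uid(0)@name(*.sh)
+ *
+ * successive get_token() calls return TOK_STRING "uid", TOK_OPEN_BRACKET,
+ * TOK_STRING "0", TOK_CLOSE_BRACKET, TOK_AT, TOK_STRING "name",
+ * TOK_OPEN_BRACKET, TOK_STRING "*.sh", TOK_CLOSE_BRACKET, then TOK_EOF.
+ */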
+
+
+static int peek_token(char **string)
+{
+ char *saved = cur_ptr;
+ int token = get_token(string);
+
+ cur_ptr = saved;
+
+ return token;
+}
+
+
+/*
+ * Expression parser
+ */
+static void free_parse_tree(struct expr *expr)
+{
+ if(expr->type == ATOM_TYPE) {
+ int i;
+
+ for(i = 0; i < expr->atom.test->args; i++)
+ free(expr->atom.argv[i]);
+
+ free(expr->atom.argv);
+ } else if (expr->type == UNARY_TYPE)
+ free_parse_tree(expr->unary_op.expr);
+ else {
+ free_parse_tree(expr->expr_op.lhs);
+ free_parse_tree(expr->expr_op.rhs);
+ }
+
+ free(expr);
+}
+
+
+static struct expr *create_expr(struct expr *lhs, int op, struct expr *rhs)
+{
+ struct expr *expr;
+
+ if (rhs == NULL) {
+ free_parse_tree(lhs);
+ return NULL;
+ }
+
+ expr = malloc(sizeof(*expr));
+ if (expr == NULL)
+ MEM_ERROR();
+
+ expr->type = OP_TYPE;
+ expr->expr_op.lhs = lhs;
+ expr->expr_op.rhs = rhs;
+ expr->expr_op.op = op;
+
+ return expr;
+}
+
+
+static struct expr *create_unary_op(struct expr *lhs, int op)
+{
+ struct expr *expr;
+
+ if (lhs == NULL)
+ return NULL;
+
+ expr = malloc(sizeof(*expr));
+ if (expr == NULL)
+ MEM_ERROR();
+
+ expr->type = UNARY_TYPE;
+ expr->unary_op.expr = lhs;
+ expr->unary_op.op = op;
+
+ return expr;
+}
+
+
+static struct expr *parse_test(char *name)
+{
+ char *string, **argv = NULL;
+ int token, args = 0;
+ int i;
+ struct test_entry *test;
+ struct expr *expr;
+
+ for (i = 0; test_table[i].args != -1; i++)
+ if (strcmp(name, test_table[i].name) == 0)
+ break;
+
+ test = &test_table[i];
+
+ if (test->args == -1) {
+ SYNTAX_ERROR("Non-existent test \"%s\"\n", name);
+ return NULL;
+ }
+
+ if(parsing_action->type == EXCLUDE_ACTION && !test->exclude_ok) {
+ fprintf(stderr, "Failed to parse action \"%s\"\n", source);
+ fprintf(stderr, "Test \"%s\" cannot be used in exclude "
+ "actions\n", name);
+ fprintf(stderr, "Use prune action instead ...\n");
+ return NULL;
+ }
+
+ expr = malloc(sizeof(*expr));
+ if (expr == NULL)
+ MEM_ERROR();
+
+ expr->type = ATOM_TYPE;
+
+ expr->atom.test = test;
+ expr->atom.data = NULL;
+
+ /*
+	 * If the test has no arguments, then go straight to checking whether
+	 * the expected number of arguments was given
+ */
+ token = peek_token(&string);
+
+ if (token != TOK_OPEN_BRACKET)
+ goto skip_args;
+
+ get_token(&string);
+
+ /*
+ * speculatively read all the arguments, and then see if the
+ * number of arguments read is the number expected, this handles
+ * tests with a variable number of arguments
+ */
+ token = get_token(&string);
+ if (token == TOK_CLOSE_BRACKET)
+ goto skip_args;
+
+ while(1) {
+ if (token != TOK_STRING) {
+ SYNTAX_ERROR("Unexpected token \"%s\", expected "
+ "argument\n", TOK_TO_STR(token, string));
+ goto failed;
+ }
+
+ argv = realloc(argv, (args + 1) * sizeof(char *));
+ if (argv == NULL)
+ MEM_ERROR();
+
+ argv[args ++ ] = strdup(string);
+
+ token = get_token(&string);
+
+ if (token == TOK_CLOSE_BRACKET)
+ break;
+
+ if (token != TOK_COMMA) {
+ SYNTAX_ERROR("Unexpected token \"%s\", expected "
+ "\",\" or \")\"\n", TOK_TO_STR(token, string));
+ goto failed;
+ }
+ token = get_token(&string);
+ }
+
+skip_args:
+ /*
+ * expected number of arguments?
+ */
+ if(test->args != -2 && args != test->args) {
+ SYNTAX_ERROR("Unexpected number of arguments, expected %d, "
+ "got %d\n", test->args, args);
+ goto failed;
+ }
+
+ expr->atom.args = args;
+ expr->atom.argv = argv;
+
+ if (test->parse_args) {
+ int res = test->parse_args(test, &expr->atom);
+
+ if (res == 0)
+ goto failed;
+ }
+
+ return expr;
+
+failed:
+ free(argv);
+ free(expr);
+ return NULL;
+}
+
+
+static struct expr *get_atom()
+{
+ char *string;
+ int token = get_token(&string);
+
+ switch(token) {
+ case TOK_NOT:
+ return create_unary_op(get_atom(), token);
+ case TOK_OPEN_BRACKET:
+ return parse_expr(1);
+ case TOK_STRING:
+ return parse_test(string);
+ default:
+ SYNTAX_ERROR("Unexpected token \"%s\", expected test "
+ "operation, \"!\", or \"(\"\n",
+ TOK_TO_STR(token, string));
+ return NULL;
+ }
+}
+
+
+static struct expr *parse_expr(int subexp)
+{
+ struct expr *expr = get_atom();
+
+ while (expr) {
+ char *string;
+ int op = get_token(&string);
+
+ if (op == TOK_EOF) {
+ if (subexp) {
+ free_parse_tree(expr);
+ SYNTAX_ERROR("Expected \"&&\", \"||\" or "
+ "\")\", got EOF\n");
+ return NULL;
+ }
+ break;
+ }
+
+ if (op == TOK_CLOSE_BRACKET) {
+ if (!subexp) {
+ free_parse_tree(expr);
+ SYNTAX_ERROR("Unexpected \")\", expected "
+					"\"&&\", \"||\" or EOF\n");
+ return NULL;
+ }
+ break;
+ }
+
+ if (op != TOK_AND && op != TOK_OR) {
+ free_parse_tree(expr);
+ SYNTAX_ERROR("Unexpected token \"%s\", expected "
+ "\"&&\" or \"||\"\n", TOK_TO_STR(op, string));
+ return NULL;
+ }
+
+ expr = create_expr(expr, op, get_atom());
+ }
+
+ return expr;
+}
+
+
+/*
+ * Action parser
+ */
+int parse_action(char *s, int verbose)
+{
+ char *string, **argv = NULL;
+ int i, token, args = 0;
+ struct expr *expr;
+ struct action_entry *action;
+ void *data = NULL;
+ struct action **spec_list;
+ int spec_count;
+
+ cur_ptr = source = s;
+ token = get_token(&string);
+
+ if (token != TOK_STRING) {
+ SYNTAX_ERROR("Unexpected token \"%s\", expected name\n",
+ TOK_TO_STR(token, string));
+ return 0;
+ }
+
+ for (i = 0; action_table[i].args != -1; i++)
+ if (strcmp(string, action_table[i].name) == 0)
+ break;
+
+ if (action_table[i].args == -1) {
+ SYNTAX_ERROR("Non-existent action \"%s\"\n", string);
+ return 0;
+ }
+
+ action = &action_table[i];
+
+ token = get_token(&string);
+
+ if (token == TOK_AT)
+ goto skip_args;
+
+ if (token != TOK_OPEN_BRACKET) {
+ SYNTAX_ERROR("Unexpected token \"%s\", expected \"(\"\n",
+ TOK_TO_STR(token, string));
+ goto failed;
+ }
+
+ /*
+ * speculatively read all the arguments, and then see if the
+ * number of arguments read is the number expected, this handles
+ * actions with a variable number of arguments
+ */
+ token = get_token(&string);
+ if (token == TOK_CLOSE_BRACKET)
+ goto skip_args;
+
+ while (1) {
+ if (token != TOK_STRING) {
+ SYNTAX_ERROR("Unexpected token \"%s\", expected "
+ "argument\n", TOK_TO_STR(token, string));
+ goto failed;
+ }
+
+ argv = realloc(argv, (args + 1) * sizeof(char *));
+ if (argv == NULL)
+ MEM_ERROR();
+
+ argv[args ++] = strdup(string);
+
+ token = get_token(&string);
+
+ if (token == TOK_CLOSE_BRACKET)
+ break;
+
+ if (token != TOK_COMMA) {
+ SYNTAX_ERROR("Unexpected token \"%s\", expected "
+ "\",\" or \")\"\n", TOK_TO_STR(token, string));
+ goto failed;
+ }
+ token = get_token(&string);
+ }
+
+skip_args:
+ /*
+ * expected number of arguments?
+ */
+ if(action->args != -2 && args != action->args) {
+ SYNTAX_ERROR("Unexpected number of arguments, expected %d, "
+ "got %d\n", action->args, args);
+ goto failed;
+ }
+
+ if (action->parse_args) {
+ int res = action->parse_args(action, args, argv, &data);
+
+ if (res == 0)
+ goto failed;
+ }
+
+ if (token == TOK_CLOSE_BRACKET)
+ token = get_token(&string);
+
+ if (token != TOK_AT) {
+ SYNTAX_ERROR("Unexpected token \"%s\", expected \"@\"\n",
+ TOK_TO_STR(token, string));
+ goto failed;
+ }
+
+ parsing_action = action;
+ expr = parse_expr(0);
+
+ if (expr == NULL)
+ goto failed;
+
+ /*
+ * choose action list and increment action counter
+ */
+ switch(action->type) {
+ case FRAGMENT_ACTION:
+ spec_count = fragment_count ++;
+ spec_list = &fragment_spec;
+ break;
+ case EXCLUDE_ACTION:
+ spec_count = exclude_count ++;
+ spec_list = &exclude_spec;
+ break;
+ case EMPTY_ACTION:
+ spec_count = empty_count ++;
+ spec_list = &empty_spec;
+ break;
+ case MOVE_ACTION:
+ spec_count = move_count ++;
+ spec_list = &move_spec;
+ break;
+ case PRUNE_ACTION:
+ spec_count = prune_count ++;
+ spec_list = &prune_spec;
+ break;
+ case XATTR_EXC_ACTION:
+ spec_count = xattr_exc_count ++;
+ spec_list = &xattr_exc_spec;
+ break;
+ case XATTR_INC_ACTION:
+ spec_count = xattr_inc_count ++;
+ spec_list = &xattr_inc_spec;
+ break;
+ case XATTR_ADD_ACTION:
+ spec_count = xattr_add_count ++;
+ spec_list = &xattr_add_spec;
+ break;
+ default:
+ spec_count = other_count ++;
+ spec_list = &other_spec;
+ }
+
+ *spec_list = realloc(*spec_list, (spec_count + 1) *
+ sizeof(struct action));
+ if (*spec_list == NULL)
+ MEM_ERROR();
+
+ (*spec_list)[spec_count].type = action->type;
+ (*spec_list)[spec_count].action = action;
+ (*spec_list)[spec_count].args = args;
+ (*spec_list)[spec_count].argv = argv;
+ (*spec_list)[spec_count].expr = expr;
+ (*spec_list)[spec_count].data = data;
+ (*spec_list)[spec_count].verbose = verbose;
+
+ return 1;
+
+failed:
+ free(argv);
+ return 0;
+}
+
+
+/*
+ * Evaluate expressions
+ */
+
+#define ALLOC_SZ 128
+
+#define LOG_ENABLE 0
+#define LOG_DISABLE 1
+#define LOG_PRINT 2
+#define LOG_ENABLED 3
+
+static char *_expr_log(char *string, int cmnd)
+{
+ static char *expr_msg = NULL;
+ static int cur_size = 0, alloc_size = 0;
+ int size;
+
+ switch(cmnd) {
+ case LOG_ENABLE:
+ expr_msg = malloc(ALLOC_SZ);
+ alloc_size = ALLOC_SZ;
+ cur_size = 0;
+ return expr_msg;
+ case LOG_DISABLE:
+ free(expr_msg);
+ alloc_size = cur_size = 0;
+ return expr_msg = NULL;
+ case LOG_ENABLED:
+ return expr_msg;
+ default:
+ if(expr_msg == NULL)
+ return NULL;
+ break;
+ }
+
+ /* if string is empty append '\0' */
+ size = strlen(string) ? : 1;
+
+ if(alloc_size - cur_size < size) {
+ /* buffer too small, expand */
+ alloc_size = (cur_size + size + ALLOC_SZ - 1) & ~(ALLOC_SZ - 1);
+
+ expr_msg = realloc(expr_msg, alloc_size);
+ if(expr_msg == NULL)
+ MEM_ERROR();
+ }
+
+ memcpy(expr_msg + cur_size, string, size);
+ cur_size += size;
+
+ return expr_msg;
+}
+
+
+static char *expr_log_cmnd(int cmnd)
+{
+ return _expr_log(NULL, cmnd);
+}
+
+
+static char *expr_log(char *string)
+{
+ return _expr_log(string, LOG_PRINT);
+}
+
+
+static void expr_log_atom(struct atom *atom)
+{
+ int i;
+
+ if(atom->test->handle_logging)
+ return;
+
+ expr_log(atom->test->name);
+
+ if(atom->args) {
+ expr_log("(");
+ for(i = 0; i < atom->args; i++) {
+ expr_log(atom->argv[i]);
+ if (i + 1 < atom->args)
+ expr_log(",");
+ }
+ expr_log(")");
+ }
+}
+
+
+static void expr_log_match(int match)
+{
+ if(match)
+ expr_log("=True");
+ else
+ expr_log("=False");
+}
+
+
+static int eval_expr_log(struct expr *expr, struct action_data *action_data)
+{
+ int match;
+
+ switch (expr->type) {
+ case ATOM_TYPE:
+ expr_log_atom(&expr->atom);
+ match = expr->atom.test->fn(&expr->atom, action_data);
+ expr_log_match(match);
+ break;
+ case UNARY_TYPE:
+ expr_log("!");
+ match = !eval_expr_log(expr->unary_op.expr, action_data);
+ break;
+ default:
+ expr_log("(");
+ match = eval_expr_log(expr->expr_op.lhs, action_data);
+
+ if ((expr->expr_op.op == TOK_AND && match) ||
+ (expr->expr_op.op == TOK_OR && !match)) {
+ expr_log(token_table[expr->expr_op.op].string);
+ match = eval_expr_log(expr->expr_op.rhs, action_data);
+ }
+ expr_log(")");
+ break;
+ }
+
+ return match;
+}
+
+
+static int eval_expr(struct expr *expr, struct action_data *action_data)
+{
+ int match;
+
+ switch (expr->type) {
+ case ATOM_TYPE:
+ match = expr->atom.test->fn(&expr->atom, action_data);
+ break;
+ case UNARY_TYPE:
+ match = !eval_expr(expr->unary_op.expr, action_data);
+ break;
+ default:
+ match = eval_expr(expr->expr_op.lhs, action_data);
+
+ if ((expr->expr_op.op == TOK_AND && match) ||
+ (expr->expr_op.op == TOK_OR && !match))
+ match = eval_expr(expr->expr_op.rhs, action_data);
+ break;
+ }
+
+ return match;
+}
+
+
+static int eval_expr_top(struct action *action, struct action_data *action_data)
+{
+ if(action->verbose) {
+ int match, n;
+
+ expr_log_cmnd(LOG_ENABLE);
+
+ if(action_data->subpath)
+ expr_log(action_data->subpath);
+
+ expr_log("=");
+ expr_log(action->action->name);
+
+ if(action->args) {
+ expr_log("(");
+ for (n = 0; n < action->args; n++) {
+ expr_log(action->argv[n]);
+ if(n + 1 < action->args)
+ expr_log(",");
+ }
+ expr_log(")");
+ }
+
+ expr_log("@");
+
+ match = eval_expr_log(action->expr, action_data);
+
+ /*
+ * Print the evaluated expression log, if the
+ * result matches the logging specified
+ */
+ if((match && (action->verbose & ACTION_LOG_TRUE)) || (!match
+ && (action->verbose & ACTION_LOG_FALSE)))
+ progressbar_info("%s\n", expr_log(""));
+
+ expr_log_cmnd(LOG_DISABLE);
+
+ return match;
+ } else
+ return eval_expr(action->expr, action_data);
+}
+
+
+/*
+ * Read action file, passing each line to parse_action() for
+ * parsing.
+ *
+ * One action per line, of the form
+ * action(arg1,arg2)@expr(arg1,arg2)....
+ *
+ * Actions can be split across multiple lines using "\".
+ *
+ * Blank lines and comment lines indicated by # are supported.
+ */
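+
+/*
+ * Illustrative example of an action file (the action and test names shown
+ * are assumptions; the authoritative sets are the action_table[] and
+ * test_table[] definitions in this file):
+ *
+ *	# make shell scripts owned by root
+ *	uid(0) @ name(*.sh)
+ *
+ *	# actions may be continued onto the next line
+ *	uid(0) @ \
+ *		name(*.cgi)
+ */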
+static int parse_action_true(char *s)
+{
+ return parse_action(s, ACTION_LOG_TRUE);
+}
+
+
+static int parse_action_false(char *s)
+{
+ return parse_action(s, ACTION_LOG_FALSE);
+}
+
+
+static int parse_action_verbose(char *s)
+{
+ return parse_action(s, ACTION_LOG_VERBOSE);
+}
+
+
+static int parse_action_nonverbose(char *s)
+{
+ return parse_action(s, ACTION_LOG_NONE);
+}
+
+
+int read_action_file(char *filename, int verbose)
+{
+ switch(verbose) {
+ case ACTION_LOG_TRUE:
+ return read_file(filename, "action", parse_action_true);
+ case ACTION_LOG_FALSE:
+ return read_file(filename, "action", parse_action_false);
+ case ACTION_LOG_VERBOSE:
+ return read_file(filename, "action", parse_action_verbose);
+ default:
+ return read_file(filename, "action", parse_action_nonverbose);
+ }
+}
+
+
+/*
+ * helper to evaluate whether action/test acts on this file type
+ */
+static int file_type_match(int st_mode, int type)
+{
+ switch(type) {
+ case ACTION_DIR:
+ return S_ISDIR(st_mode);
+ case ACTION_REG:
+ return S_ISREG(st_mode);
+ case ACTION_ALL:
+ return S_ISREG(st_mode) || S_ISDIR(st_mode) ||
+ S_ISCHR(st_mode) || S_ISBLK(st_mode) ||
+ S_ISFIFO(st_mode) || S_ISSOCK(st_mode);
+ case ACTION_LNK:
+ return S_ISLNK(st_mode);
+ case ACTION_ALL_LNK:
+ default:
+ return 1;
+ }
+}
+
+
+/*
+ * General action evaluation code
+ */
+int any_actions()
+{
+ return fragment_count + exclude_count + empty_count +
+ move_count + prune_count + other_count;
+}
+
+
+int actions()
+{
+ return other_count;
+}
+
+
+void eval_actions(struct dir_info *root, struct dir_ent *dir_ent)
+{
+ int i, match;
+ struct action_data action_data;
+ int st_mode = dir_ent->inode->buf.st_mode;
+
+ action_data.name = dir_ent->name;
+ action_data.pathname = strdup(pathname(dir_ent));
+ action_data.subpath = strdup(subpathname(dir_ent));
+ action_data.buf = &dir_ent->inode->buf;
+ action_data.depth = dir_ent->our_dir->depth;
+ action_data.dir_ent = dir_ent;
+ action_data.root = root;
+
+ for (i = 0; i < other_count; i++) {
+ struct action *action = &other_spec[i];
+
+ if (!file_type_match(st_mode, action->action->file_types))
+ /* action does not operate on this file type */
+ continue;
+
+ match = eval_expr_top(action, &action_data);
+
+ if (match)
+ action->action->run_action(action, dir_ent);
+ }
+
+ free(action_data.pathname);
+ free(action_data.subpath);
+}
+
+
+/*
+ * Fragment specific action code
+ */
+void *eval_frag_actions(struct dir_info *root, struct dir_ent *dir_ent, int tail)
+{
+ int i, match;
+ struct action_data action_data;
+
+ action_data.name = dir_ent->name;
+ action_data.pathname = strdup(pathname(dir_ent));
+ action_data.subpath = strdup(subpathname(dir_ent));
+ action_data.buf = &dir_ent->inode->buf;
+ action_data.depth = dir_ent->our_dir->depth;
+ action_data.dir_ent = dir_ent;
+ action_data.root = root;
+
+ for (i = 0; i < fragment_count; i++) {
+ match = eval_expr_top(&fragment_spec[i], &action_data);
+ if (match) {
+ free(action_data.pathname);
+ free(action_data.subpath);
+ return &fragment_spec[i].data;
+ }
+ }
+
+ free(action_data.pathname);
+ free(action_data.subpath);
+
+ if(tail)
+ return &tail_fragment;
+ else
+ return &def_fragment;
+}
+
+
+void *get_frag_action(void *fragment)
+{
+ struct action *spec_list_end = &fragment_spec[fragment_count];
+ struct action *action;
+
+ if (fragment == NULL)
+ return &def_fragment;
+
+ if(fragment == &def_fragment)
+ return &tail_fragment;
+
+ if (fragment_count == 0)
+ return NULL;
+
+ if (fragment == &tail_fragment)
+ action = &fragment_spec[0] - 1;
+ else
+ action = fragment - offsetof(struct action, data);
+
+ if (++action == spec_list_end)
+ return NULL;
+
+ return &action->data;
+}
+
+
+/*
+ * Exclude specific action code
+ */
+int exclude_actions()
+{
+ return exclude_count;
+}
+
+
+int eval_exclude_actions(char *name, char *pathname, char *subpath,
+ struct stat *buf, unsigned int depth, struct dir_ent *dir_ent)
+{
+ int i, match = 0;
+ struct action_data action_data;
+
+ action_data.name = name;
+ action_data.pathname = pathname;
+ action_data.subpath = subpath;
+ action_data.buf = buf;
+ action_data.depth = depth;
+ action_data.dir_ent = dir_ent;
+
+ for (i = 0; i < exclude_count && !match; i++)
+ match = eval_expr_top(&exclude_spec[i], &action_data);
+
+ return match;
+}
+
+
+/*
+ * Fragment specific action code
+ */
+static void frag_action(struct action *action, struct dir_ent *dir_ent)
+{
+ struct inode_info *inode = dir_ent->inode;
+
+ inode->no_fragments = 0;
+}
+
+static void no_frag_action(struct action *action, struct dir_ent *dir_ent)
+{
+ struct inode_info *inode = dir_ent->inode;
+
+ inode->no_fragments = 1;
+}
+
+static void always_frag_action(struct action *action, struct dir_ent *dir_ent)
+{
+ struct inode_info *inode = dir_ent->inode;
+
+ inode->always_use_fragments = 1;
+}
+
+static void no_always_frag_action(struct action *action, struct dir_ent *dir_ent)
+{
+ struct inode_info *inode = dir_ent->inode;
+
+ inode->always_use_fragments = 0;
+}
+
+
+/*
+ * Compression specific action code
+ */
+static void comp_action(struct action *action, struct dir_ent *dir_ent)
+{
+ struct inode_info *inode = dir_ent->inode;
+
+ inode->noD = inode->noF = 0;
+}
+
+static void uncomp_action(struct action *action, struct dir_ent *dir_ent)
+{
+ struct inode_info *inode = dir_ent->inode;
+
+ inode->noD = inode->noF = 1;
+}
+
+
+/*
+ * Uid/gid specific action code
+ */
+static long long parse_uid(char *arg) {
+ char *b;
+ long long uid = strtoll(arg, &b, 10);
+
+ if (*b == '\0') {
+ if (uid < 0 || uid >= (1LL << 32)) {
+ SYNTAX_ERROR("Uid out of range\n");
+ return -1;
+ }
+ } else {
+ struct passwd *passwd = getpwnam(arg);
+
+ if (passwd)
+ uid = passwd->pw_uid;
+ else {
+ SYNTAX_ERROR("Invalid uid or unknown user\n");
+ return -1;
+ }
+ }
+
+ return uid;
+}
+
+
+static long long parse_gid(char *arg) {
+ char *b;
+ long long gid = strtoll(arg, &b, 10);
+
+ if (*b == '\0') {
+ if (gid < 0 || gid >= (1LL << 32)) {
+ SYNTAX_ERROR("Gid out of range\n");
+ return -1;
+ }
+ } else {
+ struct group *group = getgrnam(arg);
+
+ if (group)
+ gid = group->gr_gid;
+ else {
+ SYNTAX_ERROR("Invalid gid or unknown group\n");
+ return -1;
+ }
+ }
+
+ return gid;
+}
+
+
+static int parse_uid_args(struct action_entry *action, int args, char **argv,
+ void **data)
+{
+ long long uid;
+ struct uid_info *uid_info;
+
+ uid = parse_uid(argv[0]);
+ if (uid == -1)
+ return 0;
+
+ uid_info = malloc(sizeof(struct uid_info));
+ if (uid_info == NULL)
+ MEM_ERROR();
+
+ uid_info->uid = uid;
+ *data = uid_info;
+
+ return 1;
+}
+
+
+static int parse_gid_args(struct action_entry *action, int args, char **argv,
+ void **data)
+{
+ long long gid;
+ struct gid_info *gid_info;
+
+ gid = parse_gid(argv[0]);
+ if (gid == -1)
+ return 0;
+
+ gid_info = malloc(sizeof(struct gid_info));
+ if (gid_info == NULL)
+ MEM_ERROR();
+
+ gid_info->gid = gid;
+ *data = gid_info;
+
+ return 1;
+}
+
+
+static int parse_guid_args(struct action_entry *action, int args, char **argv,
+ void **data)
+{
+ long long uid, gid;
+ struct guid_info *guid_info;
+
+ uid = parse_uid(argv[0]);
+ if (uid == -1)
+ return 0;
+
+ gid = parse_gid(argv[1]);
+ if (gid == -1)
+ return 0;
+
+ guid_info = malloc(sizeof(struct guid_info));
+ if (guid_info == NULL)
+ MEM_ERROR();
+
+ guid_info->uid = uid;
+ guid_info->gid = gid;
+ *data = guid_info;
+
+ return 1;
+}
+
+
+static void uid_action(struct action *action, struct dir_ent *dir_ent)
+{
+ struct inode_info *inode = dir_ent->inode;
+ struct uid_info *uid_info = action->data;
+
+ inode->buf.st_uid = uid_info->uid;
+}
+
+static void gid_action(struct action *action, struct dir_ent *dir_ent)
+{
+ struct inode_info *inode = dir_ent->inode;
+ struct gid_info *gid_info = action->data;
+
+ inode->buf.st_gid = gid_info->gid;
+}
+
+static void guid_action(struct action *action, struct dir_ent *dir_ent)
+{
+ struct inode_info *inode = dir_ent->inode;
+ struct guid_info *guid_info = action->data;
+
+ inode->buf.st_uid = guid_info->uid;
+ inode->buf.st_gid = guid_info->gid;
+
+}
+
+
+/*
+ * Mode specific action code
+ */
+static int parse_octal_mode_args(int args, char **argv,
+ void **data)
+{
+ int n, bytes;
+ unsigned int mode;
+ struct mode_data *mode_data;
+
+ /* octal mode number? */
+ n = sscanf(argv[0], "%o%n", &mode, &bytes);
+ if (n == 0)
+ return -1; /* not an octal number arg */
+
+
+ /* check there's no trailing junk */
+ if (argv[0][bytes] != '\0') {
+ SYNTAX_ERROR("Unexpected trailing bytes after octal "
+ "mode number\n");
+ return 0; /* bad octal number arg */
+ }
+
+ /* check there's only one argument */
+ if (args > 1) {
+ SYNTAX_ERROR("Octal mode number is first argument, "
+ "expected one argument, got %d\n", args);
+ return 0; /* bad octal number arg */
+ }
+
+ /* check mode is within range */
+ if (mode > 07777) {
+ SYNTAX_ERROR("Octal mode %o is out of range\n", mode);
+ return 0; /* bad octal number arg */
+ }
+
+ mode_data = malloc(sizeof(struct mode_data));
+ if (mode_data == NULL)
+ MEM_ERROR();
+
+ mode_data->operation = ACTION_MODE_OCT;
+ mode_data->mode = mode;
+ mode_data->next = NULL;
+ *data = mode_data;
+
+ return 1;
+}
+
+
+/*
+ * Parse symbolic mode of format [ugoa]*[[+-=]PERMS]+
+ * PERMS = [rwxXst]+ or [ugo]
+ */
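+
+/*
+ * Illustrative examples of symbolic modes accepted by the parser below
+ * (assumptions based on the format above, not an exhaustive list):
+ *	"a+x"	add execute permission for everyone
+ *	"u=rwX"	set user permissions to rw, adding x when the file is a
+ *		directory or already has an execute bit set
+ *	"go-w"	remove write permission for group and other
+ */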
+static int parse_sym_mode_arg(char *arg, struct mode_data **head,
+ struct mode_data **cur)
+{
+ struct mode_data *mode_data;
+ int mode;
+ int mask = 0;
+ int op;
+ char X;
+
+ if (arg[0] != 'u' && arg[0] != 'g' && arg[0] != 'o' && arg[0] != 'a') {
+ /* no ownership specifiers, default to a */
+ mask = 0777;
+ goto parse_operation;
+ }
+
+ /* parse ownership specifiers */
+ while(1) {
+ switch(*arg) {
+ case 'u':
+ mask |= 04700;
+ break;
+ case 'g':
+ mask |= 02070;
+ break;
+ case 'o':
+ mask |= 01007;
+ break;
+ case 'a':
+ mask = 07777;
+ break;
+ default:
+ goto parse_operation;
+ }
+ arg ++;
+ }
+
+parse_operation:
+ /* trap a symbolic mode with just an ownership specification */
+ if(*arg == '\0') {
+ SYNTAX_ERROR("Expected one of '+', '-' or '=', got EOF\n");
+ goto failed;
+ }
+
+ while(*arg != '\0') {
+ mode = 0;
+ X = 0;
+
+ switch(*arg) {
+ case '+':
+ op = ACTION_MODE_ADD;
+ break;
+ case '-':
+ op = ACTION_MODE_REM;
+ break;
+ case '=':
+ op = ACTION_MODE_SET;
+ break;
+ default:
+ SYNTAX_ERROR("Expected one of '+', '-' or '=', got "
+ "'%c'\n", *arg);
+ goto failed;
+ }
+
+ arg ++;
+
+ /* Parse PERMS */
+ if (*arg == 'u' || *arg == 'g' || *arg == 'o') {
+ /* PERMS = [ugo] */
+ mode = - *arg;
+ arg ++;
+ } else {
+ /* PERMS = [rwxXst]* */
+ while(1) {
+ switch(*arg) {
+ case 'r':
+ mode |= 0444;
+ break;
+ case 'w':
+ mode |= 0222;
+ break;
+ case 'x':
+ mode |= 0111;
+ break;
+ case 's':
+ mode |= 06000;
+ break;
+ case 't':
+ mode |= 01000;
+ break;
+ case 'X':
+ X = 1;
+ break;
+ case '+':
+ case '-':
+ case '=':
+ case '\0':
+ mode &= mask;
+ goto perms_parsed;
+ default:
+ SYNTAX_ERROR("Unrecognised permission "
+ "'%c'\n", *arg);
+ goto failed;
+ }
+
+ arg ++;
+ }
+ }
+
+perms_parsed:
+ mode_data = malloc(sizeof(*mode_data));
+ if (mode_data == NULL)
+ MEM_ERROR();
+
+ mode_data->operation = op;
+ mode_data->mode = mode;
+ mode_data->mask = mask;
+ mode_data->X = X;
+ mode_data->next = NULL;
+
+ if (*cur) {
+ (*cur)->next = mode_data;
+ *cur = mode_data;
+ } else
+ *head = *cur = mode_data;
+ }
+
+ return 1;
+
+failed:
+ return 0;
+}
+
+
+static int parse_sym_mode_args(struct action_entry *action, int args,
+ char **argv, void **data)
+{
+ int i, res = 1;
+ struct mode_data *head = NULL, *cur = NULL;
+
+ for (i = 0; i < args && res; i++)
+ res = parse_sym_mode_arg(argv[i], &head, &cur);
+
+ *data = head;
+
+ return res;
+}
+
+
+static int parse_mode_args(struct action_entry *action, int args,
+ char **argv, void **data)
+{
+ int res;
+
+ if (args == 0) {
+ SYNTAX_ERROR("Mode action expects one or more arguments\n");
+ return 0;
+ }
+
+ res = parse_octal_mode_args(args, argv, data);
+ if(res >= 0)
+ /* Got an octal mode argument */
+ return res;
+ else /* not an octal mode argument */
+ return parse_sym_mode_args(action, args, argv, data);
+}
+
+
+static int mode_execute(struct mode_data *mode_data, int st_mode)
+{
+ int mode = 0;
+
+ for (;mode_data; mode_data = mode_data->next) {
+ if (mode_data->mode < 0) {
+ /* 'u', 'g' or 'o' */
+ switch(-mode_data->mode) {
+ case 'u':
+ mode = (st_mode >> 6) & 07;
+ break;
+ case 'g':
+ mode = (st_mode >> 3) & 07;
+ break;
+ case 'o':
+ mode = st_mode & 07;
+ break;
+ }
+ mode = ((mode << 6) | (mode << 3) | mode) &
+ mode_data->mask;
+ } else if (mode_data->X &&
+ ((st_mode & S_IFMT) == S_IFDIR ||
+ (st_mode & 0111)))
+ /* X permission, only takes effect if inode is a
+ * directory or x is set for some owner */
+ mode = mode_data->mode | (0111 & mode_data->mask);
+ else
+ mode = mode_data->mode;
+
+ switch(mode_data->operation) {
+ case ACTION_MODE_OCT:
+ st_mode = (st_mode & S_IFMT) | mode;
+ break;
+ case ACTION_MODE_SET:
+ st_mode = (st_mode & ~mode_data->mask) | mode;
+ break;
+ case ACTION_MODE_ADD:
+ st_mode |= mode;
+ break;
+ case ACTION_MODE_REM:
+ st_mode &= ~mode;
+ }
+ }
+
+ return st_mode;
+}
+
+
+static void mode_action(struct action *action, struct dir_ent *dir_ent)
+{
+ dir_ent->inode->buf.st_mode = mode_execute(action->data,
+ dir_ent->inode->buf.st_mode);
+}
+
+
+/*
+ * Empty specific action code
+ */
+int empty_actions()
+{
+ return empty_count;
+}
+
+
+static int parse_empty_args(struct action_entry *action, int args,
+ char **argv, void **data)
+{
+ struct empty_data *empty_data;
+ int val;
+
+ if (args >= 2) {
+ SYNTAX_ERROR("Empty action expects zero or one argument\n");
+ return 0;
+ }
+
+ if (args == 0 || strcmp(argv[0], "all") == 0)
+ val = EMPTY_ALL;
+ else if (strcmp(argv[0], "source") == 0)
+ val = EMPTY_SOURCE;
+ else if (strcmp(argv[0], "excluded") == 0)
+ val = EMPTY_EXCLUDED;
+ else {
+		SYNTAX_ERROR("Empty action expects zero arguments, or one "
+ "argument containing \"all\", \"source\", or \"excluded\""
+ "\n");
+ return 0;
+ }
+
+ empty_data = malloc(sizeof(*empty_data));
+ if (empty_data == NULL)
+ MEM_ERROR();
+
+ empty_data->val = val;
+ *data = empty_data;
+
+ return 1;
+}
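+
+/*
+ * Illustrative examples (the "empty" action name is assumed from the
+ * EMPTY_ACTION handling in this file; the expressions after "@" are
+ * placeholders):
+ *	empty(source) @ <expr>		only originally empty directories
+ *	empty(excluded) @ <expr>	only directories emptied by excluding
+ *	empty @ <expr>			any empty directory (same as empty(all))
+ */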
+
+
+int eval_empty_actions(struct dir_info *root, struct dir_ent *dir_ent)
+{
+ int i, match = 0;
+ struct action_data action_data;
+ struct empty_data *data;
+ struct dir_info *dir = dir_ent->dir;
+
+ /*
+ * Empty action only works on empty directories
+ */
+ if (dir->count != 0)
+ return 0;
+
+ action_data.name = dir_ent->name;
+ action_data.pathname = strdup(pathname(dir_ent));
+ action_data.subpath = strdup(subpathname(dir_ent));
+ action_data.buf = &dir_ent->inode->buf;
+ action_data.depth = dir_ent->our_dir->depth;
+ action_data.dir_ent = dir_ent;
+ action_data.root = root;
+
+ for (i = 0; i < empty_count && !match; i++) {
+ data = empty_spec[i].data;
+
+ /*
+ * determine the cause of the empty directory and evaluate
+ * the empty action specified. Three empty actions:
+ * - EMPTY_SOURCE: empty action triggers only if the directory
+		 *   was originally empty, i.e. directories that are empty
+ * only due to excluding are ignored.
+ * - EMPTY_EXCLUDED: empty action triggers only if the directory
+ * is empty because of excluding, i.e. directories that
+ * were originally empty are ignored.
+ * - EMPTY_ALL (the default): empty action triggers if the
+ * directory is empty, irrespective of the reason, i.e.
+ * the directory could have been originally empty or could
+ * be empty due to excluding.
+ */
+ if ((data->val == EMPTY_EXCLUDED && !dir->excluded) ||
+ (data->val == EMPTY_SOURCE && dir->excluded))
+ continue;
+
+ match = eval_expr_top(&empty_spec[i], &action_data);
+ }
+
+ free(action_data.pathname);
+ free(action_data.subpath);
+
+ return match;
+}
+
+
+/*
+ * Move specific action code
+ */
+static struct move_ent *move_list = NULL;
+
+
+int move_actions()
+{
+ return move_count;
+}
+
+
+static char *move_pathname(struct move_ent *move)
+{
+ struct dir_info *dest;
+ char *name, *pathname;
+ int res;
+
+ dest = (move->ops & ACTION_MOVE_MOVE) ?
+ move->dest : move->dir_ent->our_dir;
+ name = (move->ops & ACTION_MOVE_RENAME) ?
+ move->name : move->dir_ent->name;
+
+ if(dest->subpath[0] != '\0')
+ res = asprintf(&pathname, "%s/%s", dest->subpath, name);
+ else
+ res = asprintf(&pathname, "/%s", name);
+
+ if(res == -1)
+ BAD_ERROR("asprintf failed in move_pathname\n");
+
+ return pathname;
+}
+
+
+static char *get_comp(char **pathname)
+{
+ char *path = *pathname, *start;
+
+ while(*path == '/')
+ path ++;
+
+ if(*path == '\0')
+ return NULL;
+
+ start = path;
+ while(*path != '/' && *path != '\0')
+ path ++;
+
+ *pathname = path;
+ return strndup(start, path - start);
+}
+
+
+static struct dir_ent *lookup_comp(char *comp, struct dir_info *dest)
+{
+ struct dir_ent *dir_ent;
+
+ for(dir_ent = dest->list; dir_ent; dir_ent = dir_ent->next)
+ if(strcmp(comp, dir_ent->name) == 0)
+ break;
+
+ return dir_ent;
+}
+
+
+void eval_move(struct action_data *action_data, struct move_ent *move,
+ struct dir_info *root, struct dir_ent *dir_ent, char *pathname)
+{
+ struct dir_info *dest, *source = dir_ent->our_dir;
+ struct dir_ent *comp_ent;
+ char *comp, *path = pathname;
+
+ /*
+ * Walk pathname to get the destination directory
+ *
+ * Like the mv command, if the last component exists and it
+ * is a directory, then move the file into that directory,
+ * otherwise, move the file into parent directory of the last
+ * component and rename to the last component.
+ */
+ if (pathname[0] == '/')
+ /* absolute pathname, walk from root directory */
+ dest = root;
+ else
+ /* relative pathname, walk from current directory */
+ dest = source;
+
+ for(comp = get_comp(&pathname); comp; free(comp),
+ comp = get_comp(&pathname)) {
+
+ if (strcmp(comp, ".") == 0)
+ continue;
+
+ if (strcmp(comp, "..") == 0) {
+ /* if we're in the root directory then ignore */
+ if(dest->depth > 1)
+ dest = dest->dir_ent->our_dir;
+ continue;
+ }
+
+ /*
+ * Look up comp in current directory, if it exists and it is a
+ * directory continue walking the pathname, otherwise exit,
+ * we've walked as far as we can go, normally this is because
+ * we've arrived at the leaf component which we are going to
+ * rename source to
+ */
+ comp_ent = lookup_comp(comp, dest);
+ if (comp_ent == NULL || (comp_ent->inode->buf.st_mode & S_IFMT)
+ != S_IFDIR)
+ break;
+
+ dest = comp_ent->dir;
+ }
+
+ if(comp) {
+ /* Leaf component? If so we're renaming to this */
+ char *remainder = get_comp(&pathname);
+ free(remainder);
+
+ if(remainder) {
+ /*
+ * trying to move source to a subdirectory of
+ * comp, but comp either doesn't exist, or it isn't
+ * a directory, which is impossible
+ */
+ if (comp_ent == NULL)
+ ERROR("Move action: cannot move %s to %s, no "
+ "such directory %s\n",
+ action_data->subpath, path, comp);
+ else
+ ERROR("Move action: cannot move %s to %s, %s "
+ "is not a directory\n",
+ action_data->subpath, path, comp);
+ free(comp);
+ return;
+ }
+
+ /*
+ * Multiple move actions triggering on one file can be merged
+ * if one is a RENAME and the other is a MOVE. Multiple RENAMEs
+ * can only merge if they're doing the same thing
+ */
+ if(move->ops & ACTION_MOVE_RENAME) {
+ if(strcmp(comp, move->name) != 0) {
+ char *conf_path = move_pathname(move);
+ ERROR("Move action: Cannot move %s to %s, "
+ "conflicting move, already moving "
+ "to %s via another move action!\n",
+ action_data->subpath, path, conf_path);
+ free(conf_path);
+ free(comp);
+ return;
+ }
+ free(comp);
+ } else {
+ move->name = comp;
+ move->ops |= ACTION_MOVE_RENAME;
+ }
+ }
+
+ if(dest != source) {
+ /*
+ * Multiple move actions triggering on one file can be merged
+ * if one is a RENAME and the other is a MOVE. Multiple MOVEs
+ * can only merge if they're doing the same thing
+ */
+ if(move->ops & ACTION_MOVE_MOVE) {
+ if(dest != move->dest) {
+ char *conf_path = move_pathname(move);
+ ERROR("Move action: Cannot move %s to %s, "
+ "conflicting move, already moving "
+ "to %s via another move action!\n",
+ action_data->subpath, path, conf_path);
+ free(conf_path);
+ return;
+ }
+ } else {
+ move->dest = dest;
+ move->ops |= ACTION_MOVE_MOVE;
+ }
+ }
+}
+
+
+static int subdirectory(struct dir_info *source, struct dir_info *dest)
+{
+ if(source == NULL)
+ return 0;
+
+ return strlen(source->subpath) <= strlen(dest->subpath) &&
+ (dest->subpath[strlen(source->subpath)] == '/' ||
+ dest->subpath[strlen(source->subpath)] == '\0') &&
+ strncmp(source->subpath, dest->subpath,
+ strlen(source->subpath)) == 0;
+}
+
+
+void eval_move_actions(struct dir_info *root, struct dir_ent *dir_ent)
+{
+ int i;
+ struct action_data action_data;
+ struct move_ent *move = NULL;
+
+ action_data.name = dir_ent->name;
+ action_data.pathname = strdup(pathname(dir_ent));
+ action_data.subpath = strdup(subpathname(dir_ent));
+ action_data.buf = &dir_ent->inode->buf;
+ action_data.depth = dir_ent->our_dir->depth;
+ action_data.dir_ent = dir_ent;
+ action_data.root = root;
+
+ /*
+	 * Evaluate each move action against the current file. For any
+	 * move actions that match, don't actually perform the move now, but
+	 * store it, and execute all the stored move actions together once the
+ * directory scan is complete. This is done to ensure each separate
+ * move action does not nondeterministically interfere with other move
+ * actions. Each move action is considered to act independently, and
+ * each move action sees the directory tree in the same state.
+ */
+ for (i = 0; i < move_count; i++) {
+ struct action *action = &move_spec[i];
+ int match = eval_expr_top(action, &action_data);
+
+ if(match) {
+ if(move == NULL) {
+ move = malloc(sizeof(*move));
+ if(move == NULL)
+ MEM_ERROR();
+
+ move->ops = 0;
+ move->dir_ent = dir_ent;
+ }
+ eval_move(&action_data, move, root, dir_ent,
+ action->argv[0]);
+ }
+ }
+
+ if(move) {
+ struct dir_ent *comp_ent;
+ struct dir_info *dest;
+ char *name;
+
+ /*
+ * Move contains the result of all triggered move actions.
+ * Check the destination doesn't already exist
+ */
+ if(move->ops == 0) {
+ free(move);
+ goto finish;
+ }
+
+ dest = (move->ops & ACTION_MOVE_MOVE) ?
+ move->dest : dir_ent->our_dir;
+ name = (move->ops & ACTION_MOVE_RENAME) ?
+ move->name : dir_ent->name;
+ comp_ent = lookup_comp(name, dest);
+ if(comp_ent) {
+ char *conf_path = move_pathname(move);
+ ERROR("Move action: Cannot move %s to %s, "
+ "destination already exists\n",
+ action_data.subpath, conf_path);
+ free(conf_path);
+ free(move);
+ goto finish;
+ }
+
+ /*
+ * If we're moving a directory, check we're not moving it to a
+ * subdirectory of itself
+ */
+ if(subdirectory(dir_ent->dir, dest)) {
+ char *conf_path = move_pathname(move);
+ ERROR("Move action: Cannot move %s to %s, this is a "
+ "subdirectory of itself\n",
+ action_data.subpath, conf_path);
+ free(conf_path);
+ free(move);
+ goto finish;
+ }
+ move->next = move_list;
+ move_list = move;
+ }
+
+finish:
+ free(action_data.pathname);
+ free(action_data.subpath);
+}
+
+
+static void move_dir(struct dir_ent *dir_ent)
+{
+ struct dir_info *dir = dir_ent->dir;
+ struct dir_ent *comp_ent;
+
+ /* update our directory's subpath name */
+ free(dir->subpath);
+ dir->subpath = strdup(subpathname(dir_ent));
+
+ /* recursively update the subpaths of any sub-directories */
+ for(comp_ent = dir->list; comp_ent; comp_ent = comp_ent->next)
+ if(comp_ent->dir)
+ move_dir(comp_ent);
+}
+
+
+static void move_file(struct move_ent *move_ent)
+{
+ struct dir_ent *dir_ent = move_ent->dir_ent;
+
+ if(move_ent->ops & ACTION_MOVE_MOVE) {
+ struct dir_ent *comp_ent, *prev = NULL;
+ struct dir_info *source = dir_ent->our_dir,
+ *dest = move_ent->dest;
+ char *filename = pathname(dir_ent);
+
+ /*
+ * If we're moving a directory, check we're not moving it to a
+ * subdirectory of itself
+ */
+ if(subdirectory(dir_ent->dir, dest)) {
+ char *conf_path = move_pathname(move_ent);
+ ERROR("Move action: Cannot move %s to %s, this is a "
+ "subdirectory of itself\n",
+ subpathname(dir_ent), conf_path);
+ free(conf_path);
+ return;
+ }
+
+ /* Remove the file from source directory */
+ for(comp_ent = source->list; comp_ent != dir_ent;
+ prev = comp_ent, comp_ent = comp_ent->next);
+
+ if(prev)
+ prev->next = comp_ent->next;
+ else
+ source->list = comp_ent->next;
+
+ source->count --;
+ if((comp_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR)
+ source->directory_count --;
+
+ /* Add the file to dest directory */
+ comp_ent->next = dest->list;
+ dest->list = comp_ent;
+ comp_ent->our_dir = dest;
+
+ dest->count ++;
+ if((comp_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR)
+ dest->directory_count ++;
+
+ /*
+ * We've moved the file, and so we can't now use the
+ * parent directory's pathname to calculate the pathname
+ */
+ if(dir_ent->nonstandard_pathname == NULL) {
+ dir_ent->nonstandard_pathname = strdup(filename);
+ if(dir_ent->source_name) {
+ free(dir_ent->source_name);
+ dir_ent->source_name = NULL;
+ }
+ }
+ }
+
+ if(move_ent->ops & ACTION_MOVE_RENAME) {
+ /*
+ * If we're using name in conjunction with the parent
+ * directory's pathname to calculate the pathname, we need
+ * to use source_name to override. Otherwise it's already being
+ * over-ridden
+ */
+ if(dir_ent->nonstandard_pathname == NULL &&
+ dir_ent->source_name == NULL)
+ dir_ent->source_name = dir_ent->name;
+ else
+ free(dir_ent->name);
+
+ dir_ent->name = move_ent->name;
+ }
+
+ if(dir_ent->dir)
+ /*
+ * dir_ent is a directory, and we have to recursively fix-up
+ * its subpath, and the subpaths of all of its sub-directories
+ */
+ move_dir(dir_ent);
+}
+
+
+void do_move_actions()
+{
+ while(move_list) {
+ struct move_ent *temp = move_list;
+ struct dir_info *dest = (move_list->ops & ACTION_MOVE_MOVE) ?
+ move_list->dest : move_list->dir_ent->our_dir;
+ char *name = (move_list->ops & ACTION_MOVE_RENAME) ?
+ move_list->name : move_list->dir_ent->name;
+ struct dir_ent *comp_ent = lookup_comp(name, dest);
+ if(comp_ent) {
+ char *conf_path = move_pathname(move_list);
+ ERROR("Move action: Cannot move %s to %s, "
+ "destination already exists\n",
+ subpathname(move_list->dir_ent), conf_path);
+ free(conf_path);
+ } else
+ move_file(move_list);
+
+ move_list = move_list->next;
+ free(temp);
+ }
+}
+
+
+/*
+ * Prune specific action code
+ */
+int prune_actions()
+{
+ return prune_count;
+}
+
+
+int eval_prune_actions(struct dir_info *root, struct dir_ent *dir_ent)
+{
+ int i, match = 0;
+ struct action_data action_data;
+
+ action_data.name = dir_ent->name;
+ action_data.pathname = strdup(pathname(dir_ent));
+ action_data.subpath = strdup(subpathname(dir_ent));
+ action_data.buf = &dir_ent->inode->buf;
+ action_data.depth = dir_ent->our_dir->depth;
+ action_data.dir_ent = dir_ent;
+ action_data.root = root;
+
+ for (i = 0; i < prune_count && !match; i++)
+ match = eval_expr_top(&prune_spec[i], &action_data);
+
+ free(action_data.pathname);
+ free(action_data.subpath);
+
+ return match;
+}
+
+
+/*
+ * Xattr include/exclude specific action code
+ */
+static int parse_xattr_args(struct action_entry *action, int args,
+ char **argv, void **data)
+{
+ struct xattr_data *xattr_data;
+ int error;
+
+ xattr_data = malloc(sizeof(*xattr_data));
+ if (xattr_data == NULL)
+ MEM_ERROR();
+
+ error = regcomp(&xattr_data->preg, argv[0], REG_EXTENDED|REG_NOSUB);
+ if(error) {
+ char str[1024]; /* overflow safe */
+
+ regerror(error, &xattr_data->preg, str, 1024);
+ SYNTAX_ERROR("invalid regex %s because %s\n", argv[0], str);
+ free(xattr_data);
+ return 0;
+ }
+
+ *data = xattr_data;
+
+ return 1;
+}
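+
+/*
+ * Illustrative note: the single argument is compiled as a POSIX extended
+ * regular expression and later matched against extended attribute names
+ * (see match_xattr_exc_actions() below). For example (assumed usage), the
+ * argument "^user\." would match all xattrs in the user namespace.
+ */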
+
+
+static struct xattr_data *eval_xattr_actions (struct action *spec,
+ int count, struct dir_info *root, struct dir_ent *dir_ent)
+{
+ int i;
+ struct action_data action_data;
+ struct xattr_data *head = NULL;
+
+ if(count == 0)
+ return NULL;
+
+ action_data.name = dir_ent->name;
+ action_data.pathname = strdup(pathname(dir_ent));
+ action_data.subpath = strdup(subpathname(dir_ent));
+ action_data.buf = &dir_ent->inode->buf;
+ action_data.depth = dir_ent->our_dir->depth;
+ action_data.dir_ent = dir_ent;
+ action_data.root = root;
+
+ for (i = 0; i < count; i++) {
+ struct xattr_data *data = spec[i].data;
+ int match = eval_expr_top(&spec[i], &action_data);
+
+ if(match) {
+ data->next = head;
+ head = data;
+ }
+ }
+
+ free(action_data.pathname);
+ free(action_data.subpath);
+
+ return head;
+}
+
+
+int xattr_exc_actions()
+{
+ return xattr_exc_count;
+}
+
+
+struct xattr_data *eval_xattr_exc_actions (struct dir_info *root,
+ struct dir_ent *dir_ent)
+{
+ return eval_xattr_actions(xattr_exc_spec, xattr_exc_count, root, dir_ent);
+}
+
+
+int match_xattr_exc_actions(struct xattr_data *head, char *name)
+{
+ struct xattr_data *cur;
+
+ for(cur = head; cur != NULL; cur = cur->next) {
+ int match = regexec(&cur->preg, name, (size_t) 0, NULL, 0);
+
+ if(match == 0)
+ return 1;
+ }
+
+ return 0;
+}
+
+
+int xattr_inc_actions()
+{
+ return xattr_inc_count;
+}
+
+
+struct xattr_data *eval_xattr_inc_actions (struct dir_info *root,
+ struct dir_ent *dir_ent)
+{
+ return eval_xattr_actions(xattr_inc_spec, xattr_inc_count, root, dir_ent);
+}
+
+
+int match_xattr_inc_actions(struct xattr_data *head, char *name)
+{
+ if(head == NULL)
+ return 0;
+ else
+ return !match_xattr_exc_actions(head, name);
+}
+
+
+/*
+ * Xattr add specific action code
+ */
+static int parse_xattr_add_args(struct action_entry *action, int args,
+ char **argv, void **data)
+{
+ struct xattr_add *xattr = xattr_parse(argv[0], "", "action xattr add");
+
+ if(xattr == NULL)
+ return 0;
+
+ *data = xattr;
+
+ return 1;
+}
+
+
+struct xattr_add *eval_xattr_add_actions(struct dir_info *root,
+ struct dir_ent *dir_ent, int *items)
+{
+ int i, count = 0;
+ struct action_data action_data;
+ struct xattr_add *head = NULL;
+
+ if(xattr_add_count == 0) {
+ *items = 0;
+ return NULL;
+ }
+
+ action_data.name = dir_ent->name;
+ action_data.pathname = strdup(pathname(dir_ent));
+ action_data.subpath = strdup(subpathname(dir_ent));
+ action_data.buf = &dir_ent->inode->buf;
+ action_data.depth = dir_ent->our_dir->depth;
+ action_data.dir_ent = dir_ent;
+ action_data.root = root;
+
+ for (i = 0; i < xattr_add_count; i++) {
+ struct xattr_add *data = xattr_add_spec[i].data;
+ int match = eval_expr_top(&xattr_add_spec[i], &action_data);
+
+ if(match) {
+ data->next = head;
+ head = data;
+ count ++;
+ }
+ }
+
+ free(action_data.pathname);
+ free(action_data.subpath);
+
+ *items = count;
+ return head;
+}
+
+
+int xattr_add_actions()
+{
+ return xattr_add_count;
+}
+
+
+/*
+ * Noop specific action code
+ */
+static void noop_action(struct action *action, struct dir_ent *dir_ent)
+{
+}
+
+
+/*
+ * General test evaluation code
+ */
+
+/*
+ * A number can be of the form [range]number[size]
+ * [range] is either:
+ * '<' or '-', match on less than number
+ * '>' or '+', match on greater than number
+ * '' (nothing), match on exactly number
+ * [size] is either:
+ * '' (nothing), number
+ * 'k' or 'K', number * 2^10
+ * 'm' or 'M', number * 2^20
+ * 'g' or 'G', number * 2^30
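+ *
+ * e.g. "+8K" matches on greater than 8192, "-1M" matches on less than
+ * 1048576, and "100" matches on exactly 100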
+ */
+static int parse_number(char *start, long long *size, int *range, char **error)
+{
+ char *end;
+ long long number;
+
+ if (*start == '>' || *start == '+') {
+ *range = NUM_GREATER;
+ start ++;
+ } else if (*start == '<' || *start == '-') {
+ *range = NUM_LESS;
+ start ++;
+ } else
+ *range = NUM_EQ;
+
+ errno = 0; /* To enable failure after call to be determined */
+ number = strtoll(start, &end, 10);
+
+ if((errno == ERANGE && (number == LLONG_MAX || number == LLONG_MIN))
+ || (errno != 0 && number == 0)) {
+ /* long long underflow or overflow in conversion, or other
+ * conversion error.
+ * Note: we don't check for LLONG_MIN and LLONG_MAX only
+ * because strtoll can validly return that if the
+ * user used these values
+ */
+ *error = "Long long underflow, overflow or other conversion "
+ "error";
+ return 0;
+ }
+
+ if (end == start) {
+ /* Couldn't read any number */
+ *error = "Number expected";
+ return 0;
+ }
+
+ switch (end[0]) {
+ case 'g':
+ case 'G':
+ number *= 1024;
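+		/* fall through, G cascades through M and K */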
+ case 'm':
+ case 'M':
+ number *= 1024;
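+		/* fall through, M cascades through K */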
+ case 'k':
+ case 'K':
+ number *= 1024;
+
+ if (end[1] != '\0') {
+ *error = "Trailing junk after size specifier";
+ return 0;
+ }
+
+ break;
+ case '\0':
+ break;
+ default:
+ *error = "Trailing junk after number";
+ return 0;
+ }
+
+ *size = number;
+
+ return 1;
+}
+
+
+static int parse_number_arg(struct test_entry *test, struct atom *atom)
+{
+ struct test_number_arg *number;
+ long long size;
+ int range;
+ char *error;
+ int res = parse_number(atom->argv[0], &size, &range, &error);
+
+ if (res == 0) {
+ TEST_SYNTAX_ERROR(test, 0, "%s\n", error);
+ return 0;
+ }
+
+ number = malloc(sizeof(*number));
+ if (number == NULL)
+ MEM_ERROR();
+
+ number->range = range;
+ number->size = size;
+
+ atom->data = number;
+
+ return 1;
+}
+
+
+static int parse_range_args(struct test_entry *test, struct atom *atom)
+{
+ struct test_range_args *range;
+ long long start, end;
+ int type;
+ int res;
+ char *error;
+
+ res = parse_number(atom->argv[0], &start, &type, &error);
+ if (res == 0) {
+ TEST_SYNTAX_ERROR(test, 0, "%s\n", error);
+ return 0;
+ }
+
+ if (type != NUM_EQ) {
+ TEST_SYNTAX_ERROR(test, 0, "Range specifier (<, >, -, +) not "
+ "expected\n");
+ return 0;
+ }
+
+ res = parse_number(atom->argv[1], &end, &type, &error);
+ if (res == 0) {
+ TEST_SYNTAX_ERROR(test, 1, "%s\n", error);
+ return 0;
+ }
+
+ if (type != NUM_EQ) {
+ TEST_SYNTAX_ERROR(test, 1, "Range specifier (<, >, -, +) not "
+ "expected\n");
+ return 0;
+ }
+
+ range = malloc(sizeof(*range));
+ if (range == NULL)
+ MEM_ERROR();
+
+ range->start = start;
+ range->end = end;
+
+ atom->data = range;
+
+ return 1;
+}
+
+
+/*
+ * Generic test code macro
+ */
+#define TEST_FN(NAME, MATCH, CODE) \
+static int NAME##_fn(struct atom *atom, struct action_data *action_data) \
+{ \
+ /* test operates on MATCH file types only */ \
+ if (!file_type_match(action_data->buf->st_mode, MATCH)) \
+ return 0; \
+ \
+ CODE \
+}
+
+/*
+ * Generic test code macro testing VAR for size (eq, less than, greater than)
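+ *
+ * e.g. TEST_VAR_FN(filesize, ACTION_REG, action_data->buf->st_size)
+ * defines filesize_fn(), which compares the st_size of regular files
+ * against the number parsed by parse_number_arg()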
+ */
+#define TEST_VAR_FN(NAME, MATCH, VAR) TEST_FN(NAME, MATCH, \
+ { \
+ int match = 0; \
+ struct test_number_arg *number = atom->data; \
+ \
+ switch (number->range) { \
+ case NUM_EQ: \
+ match = VAR == number->size; \
+ break; \
+ case NUM_LESS: \
+ match = VAR < number->size; \
+ break; \
+ case NUM_GREATER: \
+ match = VAR > number->size; \
+ break; \
+ } \
+ \
+ return match; \
+ })
+
+
+/*
+ * Generic test code macro testing VAR for range [x, y] (value between x and y
+ * inclusive).
+ */
+#define TEST_VAR_RANGE_FN(NAME, MATCH, VAR) TEST_FN(NAME##_range, MATCH, \
+ { \
+ struct test_range_args *range = atom->data; \
+ \
+ return range->start <= VAR && VAR <= range->end; \
+ })
+
+
+/*
+ * Name, Pathname and Subpathname test specific code
+ */
+
+/*
+ * Add a leading "/" to the pathname and subpathname test arguments if
+ * they lack one
+ */
+static int check_pathname(struct test_entry *test, struct atom *atom)
+{
+ int res;
+ char *name;
+
+ if(atom->argv[0][0] != '/') {
+ res = asprintf(&name, "/%s", atom->argv[0]);
+ if(res == -1)
+ BAD_ERROR("asprintf failed in check_pathname\n");
+
+ free(atom->argv[0]);
+ atom->argv[0] = name;
+ }
+
+ return 1;
+}
+
+
+TEST_FN(name, ACTION_ALL_LNK, \
+ return fnmatch(atom->argv[0], action_data->name,
+ FNM_PATHNAME|FNM_EXTMATCH) == 0;)
+
+TEST_FN(pathname, ACTION_ALL_LNK, \
+ return fnmatch(atom->argv[0], action_data->subpath,
+ FNM_PATHNAME|FNM_EXTMATCH) == 0;)
+
+
+static int count_components(char *path)
+{
+ int count;
+
+ for (count = 0; *path != '\0'; count ++) {
+ while (*path == '/')
+ path ++;
+
+ while (*path != '\0' && *path != '/')
+ path ++;
+ }
+
+ return count;
+}
+
+
+static char *get_start(char *s, int n)
+{
+ int count;
+ char *path = s;
+
+ for (count = 0; *path != '\0' && count < n; count ++) {
+ while (*path == '/')
+ path ++;
+
+ while (*path != '\0' && *path != '/')
+ path ++;
+ }
+
+ if (count == n)
+ *path = '\0';
+
+ return s;
+}
+
+
+static int subpathname_fn(struct atom *atom, struct action_data *data)
+{
+ char *s = get_start(strdup(data->subpath), count_components(atom->argv[0]));
+ int res = fnmatch(atom->argv[0], s, FNM_PATHNAME|FNM_EXTMATCH);
+
+ free(s);
+
+ return res == 0;
+}
+
+/*
+ * Inode attribute test operations using generic
+ * TEST_VAR_FN(test name, file scope, attribute name) macro.
+ * This is for tests that do not need to be specially handled in any way.
+ * They just take a variable and compare it against a number.
+ */
+TEST_VAR_FN(filesize, ACTION_REG, action_data->buf->st_size)
+
+TEST_VAR_FN(dirsize, ACTION_DIR, action_data->buf->st_size)
+
+TEST_VAR_FN(size, ACTION_ALL_LNK, action_data->buf->st_size)
+
+TEST_VAR_FN(inode, ACTION_ALL_LNK, action_data->buf->st_ino)
+
+TEST_VAR_FN(nlink, ACTION_ALL_LNK, action_data->buf->st_nlink)
+
+TEST_VAR_FN(fileblocks, ACTION_REG, action_data->buf->st_blocks)
+
+TEST_VAR_FN(dirblocks, ACTION_DIR, action_data->buf->st_blocks)
+
+TEST_VAR_FN(blocks, ACTION_ALL_LNK, action_data->buf->st_blocks)
+
+TEST_VAR_FN(dircount, ACTION_DIR, action_data->dir_ent->dir->count)
+
+TEST_VAR_FN(depth, ACTION_ALL_LNK, action_data->depth)
+
+TEST_VAR_RANGE_FN(filesize, ACTION_REG, action_data->buf->st_size)
+
+TEST_VAR_RANGE_FN(dirsize, ACTION_DIR, action_data->buf->st_size)
+
+TEST_VAR_RANGE_FN(size, ACTION_ALL_LNK, action_data->buf->st_size)
+
+TEST_VAR_RANGE_FN(inode, ACTION_ALL_LNK, action_data->buf->st_ino)
+
+TEST_VAR_RANGE_FN(nlink, ACTION_ALL_LNK, action_data->buf->st_nlink)
+
+TEST_VAR_RANGE_FN(fileblocks, ACTION_REG, action_data->buf->st_blocks)
+
+TEST_VAR_RANGE_FN(dirblocks, ACTION_DIR, action_data->buf->st_blocks)
+
+TEST_VAR_RANGE_FN(blocks, ACTION_ALL_LNK, action_data->buf->st_blocks)
+
+TEST_VAR_RANGE_FN(gid, ACTION_ALL_LNK, action_data->buf->st_gid)
+
+TEST_VAR_RANGE_FN(uid, ACTION_ALL_LNK, action_data->buf->st_uid)
+
+TEST_VAR_RANGE_FN(depth, ACTION_ALL_LNK, action_data->depth)
+
+TEST_VAR_RANGE_FN(dircount, ACTION_DIR, action_data->dir_ent->dir->count)
+
+TEST_VAR_FN(uid, ACTION_ALL_LNK, action_data->buf->st_uid)
+
+TEST_VAR_FN(gid, ACTION_ALL_LNK, action_data->buf->st_gid)
+
+/*
+ * user specific test code
+ */
+TEST_VAR_FN(user, ACTION_ALL_LNK, action_data->buf->st_uid)
+
+static int parse_user_arg(struct test_entry *test, struct atom *atom)
+{
+ struct test_number_arg *number;
+ long long size;
+ struct passwd *uid = getpwnam(atom->argv[0]);
+
+ if(uid)
+ size = uid->pw_uid;
+ else {
+		TEST_SYNTAX_ERROR(test, 0, "Unknown user\n");
+ return 0;
+ }
+
+ number = malloc(sizeof(*number));
+ if(number == NULL)
+ MEM_ERROR();
+
+ number->range = NUM_EQ;
+ number->size = size;
+
+ atom->data = number;
+
+ return 1;
+}
+
+
+/*
+ * group specific test code
+ */
+TEST_VAR_FN(group, ACTION_ALL_LNK, action_data->buf->st_gid)
+
+static int parse_group_arg(struct test_entry *test, struct atom *atom)
+{
+ struct test_number_arg *number;
+ long long size;
+ struct group *gid = getgrnam(atom->argv[0]);
+
+ if(gid)
+ size = gid->gr_gid;
+ else {
+		TEST_SYNTAX_ERROR(test, 0, "Unknown group\n");
+ return 0;
+ }
+
+ number = malloc(sizeof(*number));
+ if(number == NULL)
+ MEM_ERROR();
+
+ number->range = NUM_EQ;
+	number->size = size;
+
+ atom->data = number;
+
+ return 1;
+}
+
+
+/*
+ * Type test specific code
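+ *
+ * e.g. type(f) matches regular files and type(l) matches symbolic links,
+ * using the single character type codes in type_table below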
+ */
+static struct type_entry type_table[] = {
+ { S_IFSOCK, 's' },
+ { S_IFLNK, 'l' },
+ { S_IFREG, 'f' },
+ { S_IFBLK, 'b' },
+ { S_IFDIR, 'd' },
+ { S_IFCHR, 'c' },
+ { S_IFIFO, 'p' },
+ { 0, 0 },
+};
+
+
+static int parse_type_arg(struct test_entry *test, struct atom *atom)
+{
+ int i;
+
+ if (strlen(atom->argv[0]) != 1)
+ goto failed;
+
+ for(i = 0; type_table[i].type != 0; i++)
+ if (type_table[i].type == atom->argv[0][0])
+ break;
+
+ atom->data = &type_table[i];
+
+ if(type_table[i].type != 0)
+ return 1;
+
+failed:
+ TEST_SYNTAX_ERROR(test, 0, "Unexpected file type, expected 'f', 'd', "
+ "'c', 'b', 'l', 's' or 'p'\n");
+ return 0;
+}
+
+
+static int type_fn(struct atom *atom, struct action_data *action_data)
+{
+ struct type_entry *type = atom->data;
+
+ return (action_data->buf->st_mode & S_IFMT) == type->value;
+}
+
+
+/*
+ * True test specific code
+ */
+static int true_fn(struct atom *atom, struct action_data *action_data)
+{
+ return 1;
+}
+
+
+/*
+ * False test specific code
+ */
+static int false_fn(struct atom *atom, struct action_data *action_data)
+{
+ return 0;
+}
+
+
+/*
+ * File test specific code
+ */
+static int parse_file_arg(struct test_entry *test, struct atom *atom)
+{
+ int res;
+ regex_t *preg = malloc(sizeof(regex_t));
+
+ if (preg == NULL)
+ MEM_ERROR();
+
+ res = regcomp(preg, atom->argv[0], REG_EXTENDED);
+ if (res) {
+ char str[1024]; /* overflow safe */
+
+ regerror(res, preg, str, 1024);
+ free(preg);
+ TEST_SYNTAX_ERROR(test, 0, "invalid regex \"%s\" because "
+ "\"%s\"\n", atom->argv[0], str);
+ return 0;
+ }
+
+ atom->data = preg;
+
+ return 1;
+}
+
+
+static int file_fn(struct atom *atom, struct action_data *action_data)
+{
+ int child, res, size = 0, status;
+ int pipefd[2];
+ char *buffer = NULL;
+ regex_t *preg = atom->data;
+
+ res = pipe(pipefd);
+ if (res == -1)
+ BAD_ERROR("file_fn pipe failed\n");
+
+ child = fork();
+ if (child == -1)
+		BAD_ERROR("file_fn fork failed\n");
+
+ if (child == 0) {
+ /*
+ * Child process
+ * Connect stdout to pipefd[1] and execute file command
+ */
+ close(STDOUT_FILENO);
+ res = dup(pipefd[1]);
+ if (res == -1)
+ exit(EXIT_FAILURE);
+
+ execlp("file", "file", "-b", action_data->pathname,
+ (char *) NULL);
+ exit(EXIT_FAILURE);
+ }
+
+ /*
+ * Parent process. Read stdout from file command
+ */
+ close(pipefd[1]);
+
+ do {
+ buffer = realloc(buffer, size + 512);
+ if (buffer == NULL)
+ MEM_ERROR();
+
+ res = read_bytes(pipefd[0], buffer + size, 512);
+
+ if (res == -1)
+ BAD_ERROR("file_fn pipe read error\n");
+
+ size += 512;
+
+ } while (res == 512);
+
+ size = size + res - 512;
+
+ buffer[size] = '\0';
+
+ res = waitpid(child, &status, 0);
+
+ if (res == -1)
+ BAD_ERROR("file_fn waitpid failed\n");
+
+ if (!WIFEXITED(status) || WEXITSTATUS(status) != 0)
+ BAD_ERROR("file_fn file returned error\n");
+
+ close(pipefd[0]);
+
+ res = regexec(preg, buffer, (size_t) 0, NULL, 0);
+
+ free(buffer);
+
+ return res == 0;
+}
+
+
+/*
+ * Exec test specific code
+ */
+static int exec_fn(struct atom *atom, struct action_data *action_data)
+{
+ int child, i, res, status;
+
+ child = fork();
+ if (child == -1)
+		BAD_ERROR("exec_fn fork failed\n");
+
+ if (child == 0) {
+ /*
+ * Child process
+ * redirect stdin, stdout & stderr to /dev/null and
+ * execute atom->argv[0]
+ */
+ int fd = open("/dev/null", O_RDWR);
+ if(fd == -1)
+ exit(EXIT_FAILURE);
+
+ close(STDIN_FILENO);
+ close(STDOUT_FILENO);
+ close(STDERR_FILENO);
+ for(i = 0; i < 3; i++) {
+ res = dup(fd);
+ if (res == -1)
+ exit(EXIT_FAILURE);
+ }
+ close(fd);
+
+ /*
+ * Create environment variables
+ * NAME: name of file
+ * PATHNAME: pathname of file relative to squashfs root
+ * SOURCE_PATHNAME: the pathname of the file in the source
+ * directory
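+		 *
+		 * e.g. a command such as "test -s $SOURCE_PATHNAME" will
+		 * match files which are non-empty in the source directory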
+ */
+ res = setenv("NAME", action_data->name, 1);
+ if(res == -1)
+ exit(EXIT_FAILURE);
+
+ res = setenv("PATHNAME", action_data->subpath, 1);
+ if(res == -1)
+ exit(EXIT_FAILURE);
+
+ res = setenv("SOURCE_PATHNAME", action_data->pathname, 1);
+ if(res == -1)
+ exit(EXIT_FAILURE);
+
+ execl("/bin/sh", "sh", "-c", atom->argv[0], (char *) NULL);
+ exit(EXIT_FAILURE);
+ }
+
+ /*
+ * Parent process.
+ */
+
+ res = waitpid(child, &status, 0);
+
+ if (res == -1)
+ BAD_ERROR("exec_fn waitpid failed\n");
+
+ return WIFEXITED(status) ? WEXITSTATUS(status) == 0 : 0;
+}
+
+
+/*
+ * Symbolic link specific test code
+ */
+
+/*
+ * Walk the supplied pathname and return the directory entry corresponding
+ * to the pathname. If any symlinks are encountered whilst walking the
+ * pathname, then recursively walk these, to obtain the fully
+ * dereferenced canonicalised directory entry.
+ *
+ * If follow_path fails to walk a pathname either because a component
+ * doesn't exist, it is a non-directory component when a directory
+ * component is expected, a symlink with an absolute path is encountered,
+ * or a symlink is encountered which cannot be recursively walked due to
+ * the above failures, then return NULL.
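+ *
+ * e.g. walking "../lib/libfoo" from a subdirectory moves to the parent
+ * directory, then descends into "lib" to look up "libfoo", recursively
+ * dereferencing any symlinks encountered on the way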
+ */
+static struct dir_ent *follow_path(struct dir_info *dir, char *pathname)
+{
+ char *comp, *path = pathname;
+ struct dir_ent *dir_ent = NULL;
+
+ /* We cannot follow absolute paths */
+ if(pathname[0] == '/')
+ return NULL;
+
+ for(comp = get_comp(&path); comp; free(comp), comp = get_comp(&path)) {
+ if(strcmp(comp, ".") == 0)
+ continue;
+
+ if(strcmp(comp, "..") == 0) {
+ /* Move to parent if we're not in the root directory */
+ if(dir->depth > 1) {
+ dir = dir->dir_ent->our_dir;
+ dir_ent = NULL; /* lazily eval at loop exit */
+ continue;
+ } else
+ /* Failed to walk pathname */
+ return NULL;
+ }
+
+ /* Lookup comp in current directory */
+ dir_ent = lookup_comp(comp, dir);
+ if(dir_ent == NULL)
+ /* Doesn't exist, failed to walk pathname */
+ return NULL;
+
+ if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFLNK) {
+ /* Symbolic link, try to walk it */
+ dir_ent = follow_path(dir, dir_ent->inode->symlink);
+ if(dir_ent == NULL)
+ /* Failed to follow symlink */
+ return NULL;
+ }
+
+ if((dir_ent->inode->buf.st_mode & S_IFMT) != S_IFDIR)
+ /* Cannot walk further */
+ break;
+
+ dir = dir_ent->dir;
+ }
+
+ /* We will have exited the loop either because we've processed
+ * all the components, which means we've successfully walked the
+ * pathname, or because we've hit a non-directory, in which case
+ * it's success if this is the leaf component */
+ if(comp) {
+ free(comp);
+		comp = get_comp(&path);
+		if(comp != NULL) {
+			/* Not a leaf component */
+			free(comp);
+			return NULL;
+		}
+ } else {
+ /* Fully walked pathname, dir_ent contains correct value unless
+ * we've walked to the parent ("..") in which case we need
+ * to resolve it here */
+ if(!dir_ent)
+ dir_ent = dir->dir_ent;
+ }
+
+ return dir_ent;
+}
+
+
+static int exists_fn(struct atom *atom, struct action_data *action_data)
+{
+ /*
+ * Test if a symlink exists within the output filesystem, that is,
+ * the symlink has a relative path, and the relative path refers
+ * to an entry within the output filesystem.
+ *
+ * This test function evaluates the path for symlinks - that is it
+ * follows any symlinks in the path (and any symlinks that it contains
+ * etc.), to discover the fully dereferenced canonicalised relative
+ * path.
+ *
+ * If any symlinks within the path do not exist or are absolute
+ * then the symlink is considered to not exist, as it cannot be
+ * fully dereferenced.
+ *
+ * exists operates on symlinks only, other files by definition
+ * exist
+ */
+ if (!file_type_match(action_data->buf->st_mode, ACTION_LNK))
+ return 1;
+
+ /* dereference the symlink, and return TRUE if it exists */
+ return follow_path(action_data->dir_ent->our_dir,
+ action_data->dir_ent->inode->symlink) ? 1 : 0;
+}
+
+
+static int absolute_fn(struct atom *atom, struct action_data *action_data)
+{
+ /*
+ * Test if a symlink has an absolute path, which by definition
+ * means the symbolic link may be broken (even if the absolute path
+ * does point into the filesystem being squashed, because the resultant
+ * filesystem can be mounted/unsquashed anywhere, it is unlikely the
+ * absolute path will still point to the right place). If you know that
+ * an absolute symlink will point to the right place then you don't need
+ * to use this function, and/or these symlinks can be excluded by
+ * use of other test operators.
+ *
+ * absolute operates on symlinks only, other files by definition
+ * don't have problems
+ */
+ if (!file_type_match(action_data->buf->st_mode, ACTION_LNK))
+ return 0;
+
+ return action_data->dir_ent->inode->symlink[0] == '/';
+}
+
+
+static int parse_expr_argX(struct test_entry *test, struct atom *atom,
+ int argno)
+{
+ /* Call parse_expr to parse argument, which should be an expression */
+
+ /* save the current parser state */
+ char *save_cur_ptr = cur_ptr;
+ char *save_source = source;
+
+ cur_ptr = source = atom->argv[argno];
+ atom->data = parse_expr(0);
+
+ cur_ptr = save_cur_ptr;
+ source = save_source;
+
+ if(atom->data == NULL) {
+ /* parse_expr(0) will have reported the exact syntax error,
+ * but, because we recursively evaluated the expression, it
+ * will have been reported without the context of the stat
+ * test(). So here additionally report our failure to parse
+ * the expression in the stat() test to give context */
+ TEST_SYNTAX_ERROR(test, 0, "Failed to parse expression\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+
+static int parse_expr_arg0(struct test_entry *test, struct atom *atom)
+{
+ return parse_expr_argX(test, atom, 0);
+}
+
+
+static int parse_expr_arg1(struct test_entry *test, struct atom *atom)
+{
+ return parse_expr_argX(test, atom, 1);
+}
+
+
+static int stat_fn(struct atom *atom, struct action_data *action_data)
+{
+ struct stat buf;
+ struct action_data eval_action;
+ int match, res;
+
+ /* evaluate the expression using the context of the inode
+ * pointed to by the symlink. This allows the inode attributes
+ * of the file pointed to by the symlink to be evaluated, rather
+ * than the symlink itself.
+ *
+ * Note, stat() deliberately does not evaluate the pathname, name or
+ * depth of the symlink, these are left with the symlink values.
+ * This allows stat() to be used on any symlink, rather than
+ * just symlinks which are contained (if the symlink is *not*
+ * contained then pathname, name and depth are meaningless as they
+ * are relative to the filesystem being squashed). */
+
+ /* if this isn't a symlink then stat will just return the current
+ * information, i.e. stat(expr) == expr. This is harmless and
+ * is better than returning TRUE or FALSE in a non symlink case */
+ res = stat(action_data->pathname, &buf);
+ if(res == -1) {
+ if(expr_log_cmnd(LOG_ENABLED)) {
+ expr_log(atom->test->name);
+ expr_log("(");
+ expr_log_match(0);
+ expr_log(")");
+ }
+ return 0;
+ }
+
+ /* fill in the inode values of the file pointed to by the
+ * symlink, but, leave everything else the same */
+ memcpy(&eval_action, action_data, sizeof(struct action_data));
+ eval_action.buf = &buf;
+
+ if(expr_log_cmnd(LOG_ENABLED)) {
+ expr_log(atom->test->name);
+ expr_log("(");
+ match = eval_expr_log(atom->data, &eval_action);
+ expr_log(")");
+ } else
+ match = eval_expr(atom->data, &eval_action);
+
+ return match;
+}
+
+
+static int readlink_fn(struct atom *atom, struct action_data *action_data)
+{
+ int match = 0;
+ struct dir_ent *dir_ent;
+ struct action_data eval_action;
+
+ /* Dereference the symlink and evaluate the expression in the
+ * context of the file pointed to by the symlink.
+ * All attributes are updated to refer to the file that is pointed to.
+ * Thus the inode attributes, pathname, name and depth all refer to
+ * the dereferenced file, and not the symlink.
+ *
+ * If the symlink cannot be dereferenced because it doesn't exist in
+ * the output filesystem, or due to some other failure to
+ * walk the pathname (see follow_path above), then FALSE is returned.
+ *
+ * If you wish to evaluate the inode attributes of symlinks which
+	 * exist in the source filesystem (but not in the output filesystem)
+	 * then use stat instead (see above).
+ *
+ * readlink operates on symlinks only */
+ if (!file_type_match(action_data->buf->st_mode, ACTION_LNK))
+ goto finish;
+
+ /* dereference the symlink, and get the directory entry it points to */
+ dir_ent = follow_path(action_data->dir_ent->our_dir,
+ action_data->dir_ent->inode->symlink);
+ if(dir_ent == NULL)
+ goto finish;
+
+ eval_action.name = dir_ent->name;
+ eval_action.pathname = strdup(pathname(dir_ent));
+ eval_action.subpath = strdup(subpathname(dir_ent));
+ eval_action.buf = &dir_ent->inode->buf;
+ eval_action.depth = dir_ent->our_dir->depth;
+ eval_action.dir_ent = dir_ent;
+ eval_action.root = action_data->root;
+
+ if(expr_log_cmnd(LOG_ENABLED)) {
+ expr_log(atom->test->name);
+ expr_log("(");
+ match = eval_expr_log(atom->data, &eval_action);
+ expr_log(")");
+ } else
+ match = eval_expr(atom->data, &eval_action);
+
+ free(eval_action.pathname);
+ free(eval_action.subpath);
+
+ return match;
+
+finish:
+ if(expr_log_cmnd(LOG_ENABLED)) {
+ expr_log(atom->test->name);
+ expr_log("(");
+ expr_log_match(0);
+ expr_log(")");
+ }
+
+ return 0;
+}
+
+
+static int eval_fn(struct atom *atom, struct action_data *action_data)
+{
+ int match;
+ char *path = atom->argv[0];
+ struct dir_ent *dir_ent = action_data->dir_ent;
+ struct stat *buf = action_data->buf;
+ struct action_data eval_action;
+
+ /* Follow path (arg1) and evaluate the expression (arg2)
+ * in the context of the file discovered. All attributes are updated
+ * to refer to the file that is pointed to.
+ *
+ * This test operation allows you to add additional context to the
+ * evaluation of the file being scanned, such as "if current file is
+	 * XXX and the parent is YYY, then ...". Often you need or
+	 * want to test a combination of file statuses.
+ *
+ * If the file referenced by the path does not exist in
+ * the output filesystem, or some other failure is experienced in
+ * walking the path (see follow_path above), then FALSE is returned.
+ *
+ * If you wish to evaluate the inode attributes of files which
+	 * exist in the source filesystem (but not in the output filesystem)
+	 * then use stat instead (see above). */
+
+ /* try to follow path, and get the directory entry it points to */
+ if(path[0] == '/') {
+ /* absolute, walk from root - first skip the leading / */
+ while(path[0] == '/')
+ path ++;
+ if(path[0] == '\0')
+ dir_ent = action_data->root->dir_ent;
+ else
+ dir_ent = follow_path(action_data->root, path);
+ } else {
+ /* relative, if first component is ".." walk from parent,
+ * otherwise walk from dir_ent.
+ * Note: this has to be handled here because follow_path
+ * will quite correctly refuse to execute ".." on anything
+ * which isn't a directory */
+ if(strncmp(path, "..", 2) == 0 && (path[2] == '\0' ||
+ path[2] == '/')) {
+ /* walk from parent */
+ path += 2;
+ while(path[0] == '/')
+ path ++;
+ if(path[0] == '\0')
+ dir_ent = dir_ent->our_dir->dir_ent;
+ else
+ dir_ent = follow_path(dir_ent->our_dir, path);
+ } else if(!file_type_match(buf->st_mode, ACTION_DIR))
+ dir_ent = NULL;
+ else
+ dir_ent = follow_path(dir_ent->dir, path);
+ }
+
+ if(dir_ent == NULL) {
+ if(expr_log_cmnd(LOG_ENABLED)) {
+ expr_log(atom->test->name);
+ expr_log("(");
+ expr_log(atom->argv[0]);
+ expr_log(",");
+ expr_log_match(0);
+ expr_log(")");
+ }
+
+ return 0;
+ }
+
+ eval_action.name = dir_ent->name;
+ eval_action.pathname = strdup(pathname(dir_ent));
+ eval_action.subpath = strdup(subpathname(dir_ent));
+ eval_action.buf = &dir_ent->inode->buf;
+ eval_action.depth = dir_ent->our_dir->depth;
+ eval_action.dir_ent = dir_ent;
+ eval_action.root = action_data->root;
+
+ if(expr_log_cmnd(LOG_ENABLED)) {
+ expr_log(atom->test->name);
+ expr_log("(");
+ expr_log(eval_action.subpath);
+ expr_log(",");
+ match = eval_expr_log(atom->data, &eval_action);
+ expr_log(")");
+ } else
+ match = eval_expr(atom->data, &eval_action);
+
+ free(eval_action.pathname);
+ free(eval_action.subpath);
+
+ return match;
+}
+
+
+/*
+ * Perm specific test code
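+ *
+ * A leading '-' on the mode argument matches if all of the specified
+ * permission bits are set, a leading '/' matches if any of the bits are
+ * set, and no prefix matches the permissions exactly (the same
+ * conventions as find's -perm test)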
+ */
+static int parse_perm_args(struct test_entry *test, struct atom *atom)
+{
+ int res = 1, mode, op, i;
+ char *arg;
+ struct mode_data *head = NULL, *cur = NULL;
+ struct perm_data *perm_data;
+
+ if(atom->args == 0) {
+ TEST_SYNTAX_ERROR(test, 0, "One or more arguments expected\n");
+ return 0;
+ }
+
+ switch(atom->argv[0][0]) {
+ case '-':
+ op = PERM_ALL;
+ arg = atom->argv[0] + 1;
+ break;
+ case '/':
+ op = PERM_ANY;
+ arg = atom->argv[0] + 1;
+ break;
+ default:
+ op = PERM_EXACT;
+ arg = atom->argv[0];
+ break;
+ }
+
+ /* try to parse as an octal number */
+ res = parse_octal_mode_args(atom->args, atom->argv, (void **) &head);
+ if(res == -1) {
+ /* parse as sym mode argument */
+ for(i = 0; i < atom->args && res; i++, arg = atom->argv[i])
+ res = parse_sym_mode_arg(arg, &head, &cur);
+ }
+
+ if (res == 0)
+ goto finish;
+
+ /*
+ * Evaluate the symbolic mode against a permission of 0000 octal
+ */
+ mode = mode_execute(head, 0);
+
+ perm_data = malloc(sizeof(struct perm_data));
+ if (perm_data == NULL)
+ MEM_ERROR();
+
+ perm_data->op = op;
+ perm_data->mode = mode;
+
+ atom->data = perm_data;
+
+finish:
+ while(head) {
+ struct mode_data *tmp = head;
+ head = head->next;
+ free(tmp);
+ }
+
+ return res;
+}
+
+
+static int perm_fn(struct atom *atom, struct action_data *action_data)
+{
+ struct perm_data *perm_data = atom->data;
+ struct stat *buf = action_data->buf;
+
+ switch(perm_data->op) {
+ case PERM_EXACT:
+ return (buf->st_mode & ~S_IFMT) == perm_data->mode;
+ case PERM_ALL:
+ return (buf->st_mode & perm_data->mode) == perm_data->mode;
+ case PERM_ANY:
+ default:
+ /*
+ * if no permission bits are set in perm_data->mode match
+ * on any file, this is to be consistent with find, which
+ * does this to be consistent with the behaviour of
+ * -perm -000
+ */
+ return perm_data->mode == 0 || (buf->st_mode & perm_data->mode);
+ }
+}
+
+
+#ifdef SQUASHFS_TRACE
+static void dump_parse_tree(struct expr *expr)
+{
+ int i;
+
+ if(expr->type == ATOM_TYPE) {
+ printf("%s", expr->atom.test->name);
+ if(expr->atom.args) {
+ printf("(");
+ for(i = 0; i < expr->atom.args; i++) {
+ printf("%s", expr->atom.argv[i]);
+ if (i + 1 < expr->atom.args)
+ printf(",");
+ }
+ printf(")");
+ }
+ } else if (expr->type == UNARY_TYPE) {
+ printf("%s", token_table[expr->unary_op.op].string);
+ dump_parse_tree(expr->unary_op.expr);
+ } else {
+ printf("(");
+ dump_parse_tree(expr->expr_op.lhs);
+ printf("%s", token_table[expr->expr_op.op].string);
+ dump_parse_tree(expr->expr_op.rhs);
+ printf(")");
+ }
+}
+
+
+void dump_action_list(struct action *spec_list, int spec_count)
+{
+ int i;
+
+ for (i = 0; i < spec_count; i++) {
+ printf("%s", spec_list[i].action->name);
+ if (spec_list[i].args) {
+ int n;
+
+ printf("(");
+ for (n = 0; n < spec_list[i].args; n++) {
+ printf("%s", spec_list[i].argv[n]);
+ if (n + 1 < spec_list[i].args)
+ printf(",");
+ }
+ printf(")");
+ }
+ printf("=");
+ dump_parse_tree(spec_list[i].expr);
+ printf("\n");
+ }
+}
+
+
+void dump_actions()
+{
+ dump_action_list(exclude_spec, exclude_count);
+ dump_action_list(fragment_spec, fragment_count);
+ dump_action_list(other_spec, other_count);
+ dump_action_list(move_spec, move_count);
+ dump_action_list(empty_spec, empty_count);
+}
+#else
+void dump_actions()
+{
+}
+#endif
+
+
+static struct test_entry test_table[] = {
+	{ "name", 1, name_fn, NULL, 1, 0},
+ { "pathname", 1, pathname_fn, check_pathname, 1, 0},
+ { "subpathname", 1, subpathname_fn, check_pathname, 1, 0},
+ { "filesize", 1, filesize_fn, parse_number_arg, 1, 0},
+ { "dirsize", 1, dirsize_fn, parse_number_arg, 1, 0},
+ { "size", 1, size_fn, parse_number_arg, 1, 0},
+ { "inode", 1, inode_fn, parse_number_arg, 1, 0},
+ { "nlink", 1, nlink_fn, parse_number_arg, 1, 0},
+ { "fileblocks", 1, fileblocks_fn, parse_number_arg, 1, 0},
+ { "dirblocks", 1, dirblocks_fn, parse_number_arg, 1, 0},
+ { "blocks", 1, blocks_fn, parse_number_arg, 1, 0},
+ { "gid", 1, gid_fn, parse_number_arg, 1, 0},
+ { "group", 1, group_fn, parse_group_arg, 1, 0},
+ { "uid", 1, uid_fn, parse_number_arg, 1, 0},
+ { "user", 1, user_fn, parse_user_arg, 1, 0},
+ { "depth", 1, depth_fn, parse_number_arg, 1, 0},
+ { "dircount", 1, dircount_fn, parse_number_arg, 0, 0},
+ { "filesize_range", 2, filesize_range_fn, parse_range_args, 1, 0},
+ { "dirsize_range", 2, dirsize_range_fn, parse_range_args, 1, 0},
+ { "size_range", 2, size_range_fn, parse_range_args, 1, 0},
+ { "inode_range", 2, inode_range_fn, parse_range_args, 1, 0},
+ { "nlink_range", 2, nlink_range_fn, parse_range_args, 1, 0},
+ { "fileblocks_range", 2, fileblocks_range_fn, parse_range_args, 1, 0},
+ { "dirblocks_range", 2, dirblocks_range_fn, parse_range_args, 1, 0},
+ { "blocks_range", 2, blocks_range_fn, parse_range_args, 1, 0},
+ { "gid_range", 2, gid_range_fn, parse_range_args, 1, 0},
+ { "uid_range", 2, uid_range_fn, parse_range_args, 1, 0},
+ { "depth_range", 2, depth_range_fn, parse_range_args, 1, 0},
+ { "dircount_range", 2, dircount_range_fn, parse_range_args, 0, 0},
+ { "type", 1, type_fn, parse_type_arg, 1, 0},
+ { "true", 0, true_fn, NULL, 1, 0},
+ { "false", 0, false_fn, NULL, 1, 0},
+ { "file", 1, file_fn, parse_file_arg, 1, 0},
+ { "exec", 1, exec_fn, NULL, 1, 0},
+ { "exists", 0, exists_fn, NULL, 0, 0},
+ { "absolute", 0, absolute_fn, NULL, 0, 0},
+ { "stat", 1, stat_fn, parse_expr_arg0, 1, 1},
+ { "readlink", 1, readlink_fn, parse_expr_arg0, 0, 1},
+ { "eval", 2, eval_fn, parse_expr_arg1, 0, 1},
+ { "perm", -2, perm_fn, parse_perm_args, 1, 0},
+ { "", -1 }
+};
+
+
+static struct action_entry action_table[] = {
+ { "fragment", FRAGMENT_ACTION, 1, ACTION_REG, NULL, NULL},
+ { "exclude", EXCLUDE_ACTION, 0, ACTION_ALL_LNK, NULL, NULL},
+ { "fragments", FRAGMENTS_ACTION, 0, ACTION_REG, NULL, frag_action},
+ { "no-fragments", NO_FRAGMENTS_ACTION, 0, ACTION_REG, NULL, no_frag_action},
+ { "always-use-fragments", ALWAYS_FRAGS_ACTION, 0, ACTION_REG, NULL, always_frag_action},
+ { "dont-always-use-fragments", NO_ALWAYS_FRAGS_ACTION, 0, ACTION_REG, NULL, no_always_frag_action},
+ { "tailend", ALWAYS_FRAGS_ACTION, 0, ACTION_REG, NULL, always_frag_action},
+ { "no-tailend", NO_ALWAYS_FRAGS_ACTION, 0, ACTION_REG, NULL, no_always_frag_action},
+ { "compressed", COMPRESSED_ACTION, 0, ACTION_REG, NULL, comp_action},
+ { "uncompressed", UNCOMPRESSED_ACTION, 0, ACTION_REG, NULL, uncomp_action},
+ { "uid", UID_ACTION, 1, ACTION_ALL_LNK, parse_uid_args, uid_action},
+ { "gid", GID_ACTION, 1, ACTION_ALL_LNK, parse_gid_args, gid_action},
+ { "guid", GUID_ACTION, 2, ACTION_ALL_LNK, parse_guid_args, guid_action},
+ { "mode", MODE_ACTION, -2, ACTION_ALL, parse_mode_args, mode_action },
+ { "empty", EMPTY_ACTION, -2, ACTION_DIR, parse_empty_args, NULL},
+ { "move", MOVE_ACTION, 1, ACTION_ALL_LNK, NULL, NULL},
+ { "prune", PRUNE_ACTION, 0, ACTION_ALL_LNK, NULL, NULL},
+ { "chmod", MODE_ACTION, -2, ACTION_ALL, parse_mode_args, mode_action },
+ { "xattrs-exclude", XATTR_EXC_ACTION, 1, ACTION_ALL, parse_xattr_args, NULL},
+ { "xattrs-include", XATTR_INC_ACTION, 1, ACTION_ALL, parse_xattr_args, NULL},
+ { "xattrs-add", XATTR_ADD_ACTION, 1, ACTION_ALL, parse_xattr_add_args, NULL},
+ { "noop", NOOP_ACTION, 0, ACTION_ALL, NULL, noop_action },
+ { "", 0, -1, 0, NULL, NULL}
+};
diff --git a/squashfs-tools/action.h b/squashfs-tools/action.h
new file mode 100644
index 0000000..183daaa
--- /dev/null
+++ b/squashfs-tools/action.h
@@ -0,0 +1,351 @@
+#ifndef ACTION_H
+#define ACTION_H
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2011, 2012, 2013, 2014, 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * action.h
+ */
+
+/*
+ * Lexical analyser definitions
+ */
+#define TOK_OPEN_BRACKET 0
+#define TOK_CLOSE_BRACKET 1
+#define TOK_AND 2
+#define TOK_OR 3
+#define TOK_NOT 4
+#define TOK_COMMA 5
+#define TOK_AT 6
+#define TOK_WHITE_SPACE 7
+#define TOK_STRING 8
+#define TOK_EOF 9
+
+#define TOK_TO_STR(OP, S) ({ \
+ char *s; \
+ switch(OP) { \
+ case TOK_EOF: \
+ s = "EOF"; \
+ break; \
+ case TOK_STRING: \
+ s = S; \
+ break; \
+ default: \
+ s = token_table[OP].string; \
+ break; \
+ } \
+ s; \
+})
+
+
+struct token_entry {
+ char *string;
+ int token;
+ int size;
+};
+
+/*
+ * Expression parser definitions
+ */
+#define OP_TYPE 0
+#define ATOM_TYPE 1
+#define UNARY_TYPE 2
+
+#define SYNTAX_ERROR(S, ARGS...) { \
+ char *src = strdup(source); \
+ src[cur_ptr - source] = '\0'; \
+ fprintf(stderr, "Failed to parse action \"%s\"\n", source); \
+ fprintf(stderr, "Syntax error: "S, ##ARGS); \
+ fprintf(stderr, "Got here \"%s\"\n", src); \
+ free(src); \
+}
+
+#define TEST_SYNTAX_ERROR(TEST, ARG, S, ARGS...) { \
+ char *src = strdup(source); \
+ src[cur_ptr - source] = '\0'; \
+ fprintf(stderr, "Failed to parse action \"%s\"\n", source); \
+ fprintf(stderr, "Syntax error in \"%s()\", arg %d: "S, TEST->name, \
+ ARG, ##ARGS); \
+ fprintf(stderr, "Got here \"%s\"\n", src); \
+ free(src); \
+}
+
+struct expr;
+
+struct expr_op {
+ struct expr *lhs;
+ struct expr *rhs;
+ int op;
+};
+
+
+struct atom {
+ struct test_entry *test;
+ int args;
+ char **argv;
+ void *data;
+};
+
+
+struct unary_op {
+ struct expr *expr;
+ int op;
+};
+
+
+struct expr {
+ int type;
+ union {
+ struct atom atom;
+ struct expr_op expr_op;
+ struct unary_op unary_op;
+ };
+};
+
+/*
+ * Test operation definitions
+ */
+#define NUM_EQ 1
+#define NUM_LESS 2
+#define NUM_GREATER 3
+
+struct test_number_arg {
+ long long size;
+ int range;
+};
+
+struct test_range_args {
+ long long start;
+ long long end;
+};
+
+struct action;
+struct action_data;
+
+struct test_entry {
+ char *name;
+ int args;
+ int (*fn)(struct atom *, struct action_data *);
+ int (*parse_args)(struct test_entry *, struct atom *);
+ int exclude_ok;
+ int handle_logging;
+};
+
+
+/*
+ * Type test specific definitions
+ */
+struct type_entry {
+ int value;
+ char type;
+};
+
+
+/*
+ * Action definitions
+ */
+#define FRAGMENT_ACTION 0
+#define EXCLUDE_ACTION 1
+#define FRAGMENTS_ACTION 2
+#define NO_FRAGMENTS_ACTION 3
+#define ALWAYS_FRAGS_ACTION 4
+#define NO_ALWAYS_FRAGS_ACTION 5
+#define COMPRESSED_ACTION 6
+#define UNCOMPRESSED_ACTION 7
+#define UID_ACTION 8
+#define GID_ACTION 9
+#define GUID_ACTION 10
+#define MODE_ACTION 11
+#define EMPTY_ACTION 12
+#define MOVE_ACTION 13
+#define PRUNE_ACTION 14
+#define NOOP_ACTION 15
+#define XATTR_EXC_ACTION 16
+#define XATTR_INC_ACTION 17
+#define XATTR_ADD_ACTION 18
+
+/*
+ * Define what file types each action operates over
+ */
+#define ACTION_DIR 1
+#define ACTION_REG 2
+#define ACTION_ALL_LNK 3
+#define ACTION_ALL 4
+#define ACTION_LNK 5
+
+
+/*
+ * Action logging requested, specified by the various
+ * -action, -true-action, -false-action and -verbose-action
+ * options
+ */
+#define ACTION_LOG_NONE 0
+#define ACTION_LOG_TRUE 1
+#define ACTION_LOG_FALSE 2
+#define ACTION_LOG_VERBOSE	(ACTION_LOG_TRUE | ACTION_LOG_FALSE)
+
+struct action_entry {
+ char *name;
+ int type;
+ int args;
+ int file_types;
+ int (*parse_args)(struct action_entry *, int, char **, void **);
+ void (*run_action)(struct action *, struct dir_ent *);
+};
+
+
+struct action_data {
+ unsigned int depth;
+ char *name;
+ char *pathname;
+ char *subpath;
+ struct stat *buf;
+ struct dir_ent *dir_ent;
+ struct dir_info *root;
+};
+
+
+struct action {
+ int type;
+ struct action_entry *action;
+ int args;
+ char **argv;
+ struct expr *expr;
+ void *data;
+ int verbose;
+};
+
+
+/*
+ * Uid/gid action specific definitions
+ */
+struct uid_info {
+ uid_t uid;
+};
+
+struct gid_info {
+ gid_t gid;
+};
+
+struct guid_info {
+ uid_t uid;
+ gid_t gid;
+};
+
+
+/*
+ * Mode action specific definitions
+ */
+#define ACTION_MODE_SET 0
+#define ACTION_MODE_ADD 1
+#define ACTION_MODE_REM 2
+#define ACTION_MODE_OCT 3
+
+struct mode_data {
+ struct mode_data *next;
+ int operation;
+ int mode;
+ unsigned int mask;
+ char X;
+};
+
+
+/*
+ * Empty action specific definitions
+ */
+#define EMPTY_ALL 0
+#define EMPTY_SOURCE 1
+#define EMPTY_EXCLUDED 2
+
+struct empty_data {
+ int val;
+};
+
+
+/*
+ * Move action specific definitions
+ */
+#define ACTION_MOVE_RENAME 1
+#define ACTION_MOVE_MOVE 2
+
+struct move_ent {
+ int ops;
+ struct dir_ent *dir_ent;
+ char *name;
+ struct dir_info *dest;
+ struct move_ent *next;
+};
+
+
+/*
+ * Xattr action specific definitions
+ */
+struct xattr_data {
+ regex_t preg;
+ struct xattr_data *next;
+};
+
+
+/*
+ * Perm test function specific definitions
+ */
+#define PERM_ALL 1
+#define PERM_ANY 2
+#define PERM_EXACT 3
+
+struct perm_data {
+ int op;
+ int mode;
+};
+
+
+/*
+ * External function definitions
+ */
+extern int parse_action(char *, int verbose);
+extern void dump_actions();
+extern void *eval_frag_actions(struct dir_info *, struct dir_ent *, int);
+extern void *get_frag_action(void *);
+extern int eval_exclude_actions(char *, char *, char *, struct stat *,
+ unsigned int, struct dir_ent *);
+extern void eval_actions(struct dir_info *, struct dir_ent *);
+extern int eval_empty_actions(struct dir_info *, struct dir_ent *dir_ent);
+extern void eval_move_actions(struct dir_info *, struct dir_ent *);
+extern int eval_prune_actions(struct dir_info *, struct dir_ent *);
+extern struct xattr_data *eval_xattr_exc_actions(struct dir_info *,
+ struct dir_ent *);
+extern int match_xattr_exc_actions(struct xattr_data *, char *);
+extern struct xattr_data *eval_xattr_inc_actions(struct dir_info *,
+ struct dir_ent *);
+extern int match_xattr_inc_actions(struct xattr_data *, char *);
+extern struct xattr_add *eval_xattr_add_actions(struct dir_info *root,
+ struct dir_ent *dir_ent, int *items);
+extern void do_move_actions();
+extern long long read_bytes(int, void *, long long);
+extern int any_actions();
+extern int actions();
+extern int move_actions();
+extern int empty_actions();
+extern int read_action_file(char *, int);
+extern int exclude_actions();
+extern int prune_actions();
+extern int xattr_exc_actions();
+extern int xattr_add_actions();
+#endif
diff --git a/squashfs-tools/caches-queues-lists.c b/squashfs-tools/caches-queues-lists.c
new file mode 100644
index 0000000..f6bcba4
--- /dev/null
+++ b/squashfs-tools/caches-queues-lists.c
@@ -0,0 +1,647 @@
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2013, 2014, 2019, 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * caches-queues-lists.c
+ */
+
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "mksquashfs_error.h"
+#include "caches-queues-lists.h"
+
+extern int add_overflow(int, int);
+extern int multiply_overflow(int, int);
+
+#define TRUE 1
+#define FALSE 0
+
+struct queue *queue_init(int size)
+{
+ struct queue *queue = malloc(sizeof(struct queue));
+
+ if(queue == NULL)
+ MEM_ERROR();
+
+ if(add_overflow(size, 1) ||
+ multiply_overflow(size + 1, sizeof(void *)))
+ BAD_ERROR("Size too large in queue_init\n");
+
+ queue->data = malloc(sizeof(void *) * (size + 1));
+ if(queue->data == NULL)
+ MEM_ERROR();
+
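+	/*
+	 * One extra slot is allocated so that a full queue
+	 * ((writep + 1) % size == readp) can be distinguished from an
+	 * empty queue (readp == writep) without a separate count
+	 */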
+ queue->size = size + 1;
+ queue->readp = queue->writep = 0;
+ pthread_mutex_init(&queue->mutex, NULL);
+ pthread_cond_init(&queue->empty, NULL);
+ pthread_cond_init(&queue->full, NULL);
+
+ return queue;
+}
+
+
+void queue_put(struct queue *queue, void *data)
+{
+ int nextp;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
+ pthread_mutex_lock(&queue->mutex);
+
+ while((nextp = (queue->writep + 1) % queue->size) == queue->readp)
+ pthread_cond_wait(&queue->full, &queue->mutex);
+
+ queue->data[queue->writep] = data;
+ queue->writep = nextp;
+ pthread_cond_signal(&queue->empty);
+ pthread_cleanup_pop(1);
+}
+
+
+void *queue_get(struct queue *queue)
+{
+ void *data;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
+ pthread_mutex_lock(&queue->mutex);
+
+ while(queue->readp == queue->writep)
+ pthread_cond_wait(&queue->empty, &queue->mutex);
+
+ data = queue->data[queue->readp];
+ queue->readp = (queue->readp + 1) % queue->size;
+ pthread_cond_signal(&queue->full);
+ pthread_cleanup_pop(1);
+
+ return data;
+}
+
+
+int queue_empty(struct queue *queue)
+{
+ int empty;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
+ pthread_mutex_lock(&queue->mutex);
+
+ empty = queue->readp == queue->writep;
+
+ pthread_cleanup_pop(1);
+
+ return empty;
+}
+
+
+void queue_flush(struct queue *queue)
+{
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
+ pthread_mutex_lock(&queue->mutex);
+
+ queue->readp = queue->writep;
+
+ pthread_cleanup_pop(1);
+}
+
+
+void dump_queue(struct queue *queue)
+{
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
+ pthread_mutex_lock(&queue->mutex);
+
+ printf("\tMax size %d, size %d%s\n", queue->size - 1,
+ queue->readp <= queue->writep ? queue->writep - queue->readp :
+ queue->size - queue->readp + queue->writep,
+ queue->readp == queue->writep ? " (EMPTY)" :
+ ((queue->writep + 1) % queue->size) == queue->readp ?
+ " (FULL)" : "");
+
+ pthread_cleanup_pop(1);
+}
+
+
+/* define seq queue hash tables */
+#define CALCULATE_SEQ_HASH(N) CALCULATE_HASH(N)
+
+/* Called with the seq queue mutex held */
+INSERT_HASH_TABLE(seq, struct seq_queue, CALCULATE_SEQ_HASH, sequence, seq)
+
+/* Called with the seq queue mutex held */
+REMOVE_HASH_TABLE(seq, struct seq_queue, CALCULATE_SEQ_HASH, sequence, seq);
+
+
+struct seq_queue *seq_queue_init()
+{
+ struct seq_queue *queue = malloc(sizeof(struct seq_queue));
+ if(queue == NULL)
+ MEM_ERROR();
+
+ memset(queue, 0, sizeof(struct seq_queue));
+
+ pthread_mutex_init(&queue->mutex, NULL);
+ pthread_cond_init(&queue->wait, NULL);
+
+ return queue;
+}
+
+
+void seq_queue_put(struct seq_queue *queue, struct file_buffer *entry)
+{
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
+ pthread_mutex_lock(&queue->mutex);
+
+ insert_seq_hash_table(queue, entry);
+
+ if(entry->fragment)
+ queue->fragment_count ++;
+ else
+ queue->block_count ++;
+
+ if(entry->sequence == queue->sequence)
+ pthread_cond_signal(&queue->wait);
+
+ pthread_cleanup_pop(1);
+}
+
+
+struct file_buffer *seq_queue_get(struct seq_queue *queue)
+{
+ /*
+ * Return next buffer from queue in sequence order (queue->sequence). If
+ * found return it, otherwise wait for it to arrive.
+ */
+ int hash = CALCULATE_SEQ_HASH(queue->sequence);
+ struct file_buffer *entry;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
+ pthread_mutex_lock(&queue->mutex);
+
+ while(1) {
+ for(entry = queue->hash_table[hash]; entry;
+ entry = entry->seq_next)
+ if(entry->sequence == queue->sequence)
+ break;
+
+ if(entry) {
+ /*
+ * found the buffer in the queue, decrement the
+ * appropriate count, and remove from hash list
+ */
+ if(entry->fragment)
+ queue->fragment_count --;
+ else
+ queue->block_count --;
+
+ remove_seq_hash_table(queue, entry);
+
+ queue->sequence ++;
+
+ break;
+ }
+
+ /* entry not found, wait for it to arrive */
+ pthread_cond_wait(&queue->wait, &queue->mutex);
+ }
+
+ pthread_cleanup_pop(1);
+
+ return entry;
+}
+
+
+void seq_queue_flush(struct seq_queue *queue)
+{
+ int i;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
+ pthread_mutex_lock(&queue->mutex);
+
+ for(i = 0; i < HASH_SIZE; i++)
+ queue->hash_table[i] = NULL;
+
+ queue->fragment_count = queue->block_count = 0;
+
+ pthread_cleanup_pop(1);
+}
+
+
+void dump_seq_queue(struct seq_queue *queue, int fragment_queue)
+{
+ int size;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
+ pthread_mutex_lock(&queue->mutex);
+
+ size = fragment_queue ? queue->fragment_count : queue->block_count;
+
+ printf("\tMax size unlimited, size %d%s\n", size,
+ size == 0 ? " (EMPTY)" : "");
+
+ pthread_cleanup_pop(1);
+}
+
+
+/* define cache hash tables */
+#define CALCULATE_CACHE_HASH(N) CALCULATE_HASH(llabs(N))
+
+/* Called with the cache mutex held */
+INSERT_HASH_TABLE(cache, struct cache, CALCULATE_CACHE_HASH, index, hash)
+
+/* Called with the cache mutex held */
+REMOVE_HASH_TABLE(cache, struct cache, CALCULATE_CACHE_HASH, index, hash);
+
+/* define cache free list */
+
+/* Called with the cache mutex held */
+INSERT_LIST(free, struct file_buffer)
+
+/* Called with the cache mutex held */
+REMOVE_LIST(free, struct file_buffer)
+
+
+struct cache *cache_init(int buffer_size, int max_buffers, int noshrink_lookup,
+ int first_freelist)
+{
+ struct cache *cache = malloc(sizeof(struct cache));
+
+ if(cache == NULL)
+ MEM_ERROR();
+
+ cache->max_buffers = max_buffers;
+ cache->buffer_size = buffer_size;
+ cache->count = 0;
+ cache->used = 0;
+ cache->free_list = NULL;
+
+ /*
+ * The cache will grow up to max_buffers in size in response to
+	 * an increase in readahead/number of buffers in flight. But
+ * once the outstanding buffers gets returned, we can either elect
+ * to shrink the cache, or to put the freed blocks onto a free list.
+ *
+ * For the caches where we want to do lookup (fragment/writer),
+	 * a don't shrink policy is best; for the reader cache it
+ * makes no sense to keep buffers around longer than necessary as
+ * we don't do any lookup on those blocks.
+ */
+ cache->noshrink_lookup = noshrink_lookup;
+
+ /*
+ * The default use freelist before growing cache policy behaves
+ * poorly with appending - with many duplicates the caches
+ * do not grow due to the fact that large queues of outstanding
+ * fragments/writer blocks do not occur, leading to small caches
+	 * and unnecessary performance loss due to frequent cache
+ * replacement in the small caches. Therefore with appending
+ * change the policy to grow the caches before reusing blocks
+ * from the freelist
+ */
+ cache->first_freelist = first_freelist;
+
+ memset(cache->hash_table, 0, sizeof(struct file_buffer *) * 65536);
+ pthread_mutex_init(&cache->mutex, NULL);
+ pthread_cond_init(&cache->wait_for_free, NULL);
+ pthread_cond_init(&cache->wait_for_unlock, NULL);
+
+ return cache;
+}
+
+
+struct file_buffer *cache_lookup(struct cache *cache, long long index)
+{
+ /* Lookup block in the cache, if found return with usage count
+ * incremented, if not found return NULL */
+ int hash = CALCULATE_CACHE_HASH(index);
+ struct file_buffer *entry;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
+ pthread_mutex_lock(&cache->mutex);
+
+ for(entry = cache->hash_table[hash]; entry; entry = entry->hash_next)
+ if(entry->index == index)
+ break;
+
+ if(entry) {
+ /* found the block in the cache, increment used count and
+ * if necessary remove from free list so it won't disappear
+ */
+ if(entry->used == 0) {
+ remove_free_list(&cache->free_list, entry);
+ cache->used ++;
+ }
+ entry->used ++;
+ }
+
+ pthread_cleanup_pop(1);
+
+ return entry;
+}
+
+
+static struct file_buffer *cache_freelist(struct cache *cache)
+{
+ struct file_buffer *entry = cache->free_list;
+
+ remove_free_list(&cache->free_list, entry);
+
+ /* a block on the free_list is hashed */
+ remove_cache_hash_table(cache, entry);
+
+ cache->used ++;
+ return entry;
+}
+
+
+static struct file_buffer *cache_alloc(struct cache *cache)
+{
+ struct file_buffer *entry = malloc(sizeof(struct file_buffer) +
+ cache->buffer_size);
+ if(entry == NULL)
+ MEM_ERROR();
+
+ entry->cache = cache;
+ entry->free_prev = entry->free_next = NULL;
+ cache->count ++;
+ return entry;
+}
+
+
+static struct file_buffer *_cache_get(struct cache *cache, long long index,
+ int hash)
+{
+ /* Get a free block out of the cache indexed on index. */
+ struct file_buffer *entry = NULL;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
+ pthread_mutex_lock(&cache->mutex);
+
+ while(1) {
+ if(cache->noshrink_lookup) {
+ /* first try to get a block from the free list */
+ if(cache->first_freelist && cache->free_list)
+ entry = cache_freelist(cache);
+ else if(cache->count < cache->max_buffers) {
+ entry = cache_alloc(cache);
+ cache->used ++;
+ } else if(!cache->first_freelist && cache->free_list)
+ entry = cache_freelist(cache);
+ } else { /* shrinking non-lookup cache */
+ if(cache->count < cache->max_buffers) {
+ entry = cache_alloc(cache);
+ if(cache->count > cache->max_count)
+ cache->max_count = cache->count;
+ }
+ }
+
+ if(entry)
+ break;
+
+ /* wait for a block */
+ pthread_cond_wait(&cache->wait_for_free, &cache->mutex);
+ }
+
+ /* initialise block and if hash is set insert into the hash table */
+ entry->used = 1;
+ entry->locked = FALSE;
+ entry->wait_on_unlock = FALSE;
+ entry->error = FALSE;
+ if(hash) {
+ entry->index = index;
+ insert_cache_hash_table(cache, entry);
+ }
+
+ pthread_cleanup_pop(1);
+
+ return entry;
+}
+
+
+struct file_buffer *cache_get(struct cache *cache, long long index)
+{
+ return _cache_get(cache, index, 1);
+}
+
+
+struct file_buffer *cache_get_nohash(struct cache *cache)
+{
+ return _cache_get(cache, 0, 0);
+}
+
+
+void cache_hash(struct file_buffer *entry, long long index)
+{
+ struct cache *cache = entry->cache;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
+ pthread_mutex_lock(&cache->mutex);
+
+ entry->index = index;
+ insert_cache_hash_table(cache, entry);
+
+ pthread_cleanup_pop(1);
+}
+
+
+void cache_block_put(struct file_buffer *entry)
+{
+ struct cache *cache;
+
+ /*
+ * Finished with this cache entry, once the usage count reaches zero it
+ * can be reused.
+ *
+ * If noshrink_lookup is set, put the block onto the free list.
+ * As blocks remain accessible via the hash table they can be found
+ * getting a new lease of life before they are reused.
+ *
+ * if noshrink_lookup is not set then shrink the cache.
+ */
+
+ if(entry == NULL)
+ return;
+
+ if(entry->cache == NULL) {
+ free(entry);
+ return;
+ }
+
+ cache = entry->cache;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
+ pthread_mutex_lock(&cache->mutex);
+
+ entry->used --;
+ if(entry->used == 0) {
+ if(cache->noshrink_lookup) {
+ insert_free_list(&cache->free_list, entry);
+ cache->used --;
+ } else {
+ free(entry);
+ cache->count --;
+ }
+
+ /* One or more threads may be waiting on this block */
+ pthread_cond_signal(&cache->wait_for_free);
+ }
+
+ pthread_cleanup_pop(1);
+}
+
+
+void dump_cache(struct cache *cache)
+{
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
+ pthread_mutex_lock(&cache->mutex);
+
+ if(cache->noshrink_lookup)
+ printf("\tMax buffers %d, Current size %d, Used %d, %s\n",
+ cache->max_buffers, cache->count, cache->used,
+ cache->free_list ? "Free buffers" : "No free buffers");
+ else
+ printf("\tMax buffers %d, Current size %d, Maximum historical "
+ "size %d\n", cache->max_buffers, cache->count,
+ cache->max_count);
+
+ pthread_cleanup_pop(1);
+}
+
+
+struct file_buffer *cache_get_nowait(struct cache *cache, long long index)
+{
+ struct file_buffer *entry = NULL;
+ /*
+ * block doesn't exist, create it, but return it with the
+ * locked flag set, so nothing tries to use it while it doesn't
+ * contain data.
+ *
+ * If there's no space in the cache then return NULL.
+ */
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
+ pthread_mutex_lock(&cache->mutex);
+
+ /* first try to get a block from the free list */
+ if(cache->first_freelist && cache->free_list)
+ entry = cache_freelist(cache);
+ else if(cache->count < cache->max_buffers) {
+ entry = cache_alloc(cache);
+ cache->used ++;
+ } else if(!cache->first_freelist && cache->free_list)
+ entry = cache_freelist(cache);
+
+ if(entry) {
+ /* initialise block and insert into the hash table */
+ entry->used = 1;
+ entry->locked = TRUE;
+ entry->wait_on_unlock = FALSE;
+ entry->error = FALSE;
+ entry->index = index;
+ insert_cache_hash_table(cache, entry);
+ }
+
+ pthread_cleanup_pop(1);
+
+ return entry;
+}
+
+
+struct file_buffer *cache_lookup_nowait(struct cache *cache, long long index,
+ char *locked)
+{
+ /*
+ * Lookup block in the cache, if found return it with the locked flag
+ * indicating whether it is currently locked. In both cases increment
+ * the used count.
+ *
+ * If it doesn't exist in the cache return NULL;
+ */
+ int hash = CALCULATE_CACHE_HASH(index);
+ struct file_buffer *entry;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
+ pthread_mutex_lock(&cache->mutex);
+
+ /* first check if the entry already exists */
+ for(entry = cache->hash_table[hash]; entry; entry = entry->hash_next)
+ if(entry->index == index)
+ break;
+
+ if(entry) {
+ if(entry->used == 0) {
+ remove_free_list(&cache->free_list, entry);
+ cache->used ++;
+ }
+ entry->used ++;
+ *locked = entry->locked;
+ }
+
+ pthread_cleanup_pop(1);
+
+ return entry;
+}
+
+
+void cache_wait_unlock(struct file_buffer *buffer)
+{
+ struct cache *cache = buffer->cache;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
+ pthread_mutex_lock(&cache->mutex);
+
+ while(buffer->locked) {
+ /*
+		 * Another thread is filling this in, wait until it
+		 * becomes unlocked.  The used count has been incremented
+		 * to ensure it doesn't get reused.  By definition a block
+		 * can't be locked and unused, so we don't need to worry
+		 * about it being on the freelist now, but it may
+		 * become unused when unlocked unless used is
+		 * incremented.
+ */
+ buffer->wait_on_unlock = TRUE;
+ pthread_cond_wait(&cache->wait_for_unlock, &cache->mutex);
+ }
+
+ pthread_cleanup_pop(1);
+}
+
+
+void cache_unlock(struct file_buffer *entry)
+{
+ struct cache *cache = entry->cache;
+
+ /*
+ * Unlock this locked cache entry. If anything is waiting for this
+ * to become unlocked, wake it up.
+ */
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
+ pthread_mutex_lock(&cache->mutex);
+
+ entry->locked = FALSE;
+
+ if(entry->wait_on_unlock) {
+ entry->wait_on_unlock = FALSE;
+ pthread_cond_broadcast(&cache->wait_for_unlock);
+ }
+
+ pthread_cleanup_pop(1);
+}
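
Taken together, cache_get_nowait(), cache_lookup_nowait(), cache_wait_unlock() and cache_unlock() implement a fill-while-locked protocol between threads. The sketch below is editorial illustration only, not part of the upstream patch; the fill_data() helper is hypothetical.

/* Illustrative sketch, not part of the upstream sources */
#include "caches-queues-lists.h"

static void fill_data(struct file_buffer *buf)
{
	/* hypothetical helper: read data into buf->data and set buf->size */
}

/* producer: allocate a block for index and fill it while it is locked */
static void producer(struct cache *cache, long long index)
{
	struct file_buffer *buf = cache_get_nowait(cache, index);

	if(buf == NULL)
		return;			/* no space in the cache */

	fill_data(buf);
	cache_unlock(buf);		/* wakes threads in cache_wait_unlock() */
	cache_block_put(buf);
}

/* consumer: look the block up without blocking, then wait for the data */
static void consumer(struct cache *cache, long long index)
{
	char locked;
	struct file_buffer *buf = cache_lookup_nowait(cache, index, &locked);

	if(buf == NULL)
		return;			/* not in the cache */

	if(locked)
		cache_wait_unlock(buf);	/* sleeps until the producer unlocks */

	/* ... use buf->data ... */
	cache_block_put(buf);
}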
diff --git a/squashfs-tools/caches-queues-lists.h b/squashfs-tools/caches-queues-lists.h
new file mode 100644
index 0000000..353946b
--- /dev/null
+++ b/squashfs-tools/caches-queues-lists.h
@@ -0,0 +1,200 @@
+#ifndef CACHES_QUEUES_LISTS_H
+#define CACHES_QUEUES_LISTS_H
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2013, 2014, 2019, 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * caches-queues-lists.h
+ */
+
+#define INSERT_LIST(NAME, TYPE) \
+void insert_##NAME##_list(TYPE **list, TYPE *entry) { \
+ if(*list) { \
+ entry->NAME##_next = *list; \
+ entry->NAME##_prev = (*list)->NAME##_prev; \
+ (*list)->NAME##_prev->NAME##_next = entry; \
+ (*list)->NAME##_prev = entry; \
+ } else { \
+ *list = entry; \
+ entry->NAME##_prev = entry->NAME##_next = entry; \
+ } \
+}
+
+
+#define REMOVE_LIST(NAME, TYPE) \
+void remove_##NAME##_list(TYPE **list, TYPE *entry) { \
+ if(entry->NAME##_prev == entry && entry->NAME##_next == entry) { \
+ /* only this entry in the list */ \
+ *list = NULL; \
+ } else if(entry->NAME##_prev != NULL && entry->NAME##_next != NULL) { \
+ /* more than one entry in the list */ \
+ entry->NAME##_next->NAME##_prev = entry->NAME##_prev; \
+ entry->NAME##_prev->NAME##_next = entry->NAME##_next; \
+ if(*list == entry) \
+ *list = entry->NAME##_next; \
+ } \
+ entry->NAME##_prev = entry->NAME##_next = NULL; \
+}
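
INSERT_LIST() and REMOVE_LIST() generate insert/remove functions for a circular doubly-linked list threaded through the NAME##_prev/NAME##_next fields. As an editorial illustration (the (free, struct file_buffer) instantiation is assumed from the insert_free_list()/remove_free_list() calls in caches-queues-lists.c), INSERT_LIST(free, struct file_buffer) expands to roughly:

/* Assumed expansion, shown for illustration only */
void insert_free_list(struct file_buffer **list, struct file_buffer *entry)
{
	if(*list) {
		entry->free_next = *list;
		entry->free_prev = (*list)->free_prev;
		(*list)->free_prev->free_next = entry;
		(*list)->free_prev = entry;
	} else {
		*list = entry;
		entry->free_prev = entry->free_next = entry;
	}
}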
+
+
+#define INSERT_HASH_TABLE(NAME, TYPE, HASH_FUNCTION, FIELD, LINK) \
+void insert_##NAME##_hash_table(TYPE *container, struct file_buffer *entry) \
+{ \
+ int hash = HASH_FUNCTION(entry->FIELD); \
+\
+ entry->LINK##_next = container->hash_table[hash]; \
+ container->hash_table[hash] = entry; \
+ entry->LINK##_prev = NULL; \
+ if(entry->LINK##_next) \
+ entry->LINK##_next->LINK##_prev = entry; \
+}
+
+
+#define REMOVE_HASH_TABLE(NAME, TYPE, HASH_FUNCTION, FIELD, LINK) \
+void remove_##NAME##_hash_table(TYPE *container, struct file_buffer *entry) \
+{ \
+ if(entry->LINK##_prev) \
+ entry->LINK##_prev->LINK##_next = entry->LINK##_next; \
+ else \
+ container->hash_table[HASH_FUNCTION(entry->FIELD)] = \
+ entry->LINK##_next; \
+ if(entry->LINK##_next) \
+ entry->LINK##_next->LINK##_prev = entry->LINK##_prev; \
+\
+ entry->LINK##_prev = entry->LINK##_next = NULL; \
+}
+
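Similarly, INSERT_HASH_TABLE()/REMOVE_HASH_TABLE() generate chained hash table insert/remove functions. The instantiation below is an assumption, inferred from the insert_cache_hash_table() and CALCULATE_CACHE_HASH() uses in caches-queues-lists.c; the real instantiation parameters live in the .c file, not this header.

/* Assumed instantiation, shown for illustration only */
INSERT_HASH_TABLE(cache, struct cache, CALCULATE_CACHE_HASH, index, hash)
REMOVE_HASH_TABLE(cache, struct cache, CALCULATE_CACHE_HASH, index, hash)

/* which generates functions with these signatures */
void insert_cache_hash_table(struct cache *container, struct file_buffer *entry);
void remove_cache_hash_table(struct cache *container, struct file_buffer *entry);
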
+#define HASH_SIZE 65536
+#define CALCULATE_HASH(n) ((n) & 0xffff)
+
+
+/* struct describing a cache entry passed between threads */
+struct file_buffer {
+ long long index;
+ long long sequence;
+ long long file_size;
+ union {
+ long long block;
+ unsigned short checksum;
+ };
+ struct cache *cache;
+ union {
+ struct file_info *dupl_start;
+ struct file_buffer *hash_next;
+ };
+ union {
+ struct tar_file *tar_file;
+ struct file_buffer *hash_prev;
+ };
+ union {
+ struct {
+ struct file_buffer *free_next;
+ struct file_buffer *free_prev;
+ };
+ struct {
+ struct file_buffer *seq_next;
+ struct file_buffer *seq_prev;
+ };
+ };
+ int size;
+ int c_byte;
+ char used;
+ char fragment;
+ char error;
+ char locked;
+ char wait_on_unlock;
+ char noD;
+ char duplicate;
+ char data[0] __attribute__((aligned));
+};
+
+
+/* struct describing queues used to pass data between threads */
+struct queue {
+ int size;
+ int readp;
+ int writep;
+ pthread_mutex_t mutex;
+ pthread_cond_t empty;
+ pthread_cond_t full;
+ void **data;
+};
+
+
+/*
+ * struct describing seq_queues used to pass data between the read
+ * thread and the deflate and main threads
+ */
+struct seq_queue {
+ int fragment_count;
+ int block_count;
+ long long sequence;
+ struct file_buffer *hash_table[HASH_SIZE];
+ pthread_mutex_t mutex;
+ pthread_cond_t wait;
+};
+
+
+/* Cache status struct. Caches are used to keep
+ track of memory buffers passed between different threads */
+struct cache {
+ int max_buffers;
+ int count;
+ int buffer_size;
+ int noshrink_lookup;
+ int first_freelist;
+ union {
+ int used;
+ int max_count;
+ };
+ pthread_mutex_t mutex;
+ pthread_cond_t wait_for_free;
+ pthread_cond_t wait_for_unlock;
+ struct file_buffer *free_list;
+ struct file_buffer *hash_table[HASH_SIZE];
+};
+
+
+extern struct queue *queue_init(int);
+extern void queue_put(struct queue *, void *);
+extern void *queue_get(struct queue *);
+extern int queue_empty(struct queue *);
+extern void queue_flush(struct queue *);
+extern void dump_queue(struct queue *);
+extern struct seq_queue *seq_queue_init();
+extern void seq_queue_put(struct seq_queue *, struct file_buffer *);
+extern void dump_seq_queue(struct seq_queue *, int);
+extern struct file_buffer *seq_queue_get(struct seq_queue *);
+extern void seq_queue_flush(struct seq_queue *);
+extern struct cache *cache_init(int, int, int, int);
+extern struct file_buffer *cache_lookup(struct cache *, long long);
+extern struct file_buffer *cache_get(struct cache *, long long);
+extern struct file_buffer *cache_get_nohash(struct cache *);
+extern void cache_hash(struct file_buffer *, long long);
+extern void cache_block_put(struct file_buffer *);
+extern void dump_cache(struct cache *);
+extern struct file_buffer *cache_get_nowait(struct cache *, long long);
+extern struct file_buffer *cache_lookup_nowait(struct cache *, long long,
+ char *);
+extern void cache_wait_unlock(struct file_buffer *);
+extern void cache_unlock(struct file_buffer *);
+
+extern int first_freelist;
+#endif
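
The extern declarations above form the cache API used by the rest of mksquashfs. A minimal usage sketch follows; it is editorial illustration only, and the cache_init() argument order (buffer size, max buffers, noshrink_lookup, first_freelist) and the parameter values are assumptions.

/* Illustrative sketch, not part of the upstream sources */
#include "caches-queues-lists.h"

static void cache_example(void)
{
	/* argument order assumed: buffer size, max buffers, noshrink_lookup,
	   first_freelist */
	struct cache *cache = cache_init(128 * 1024, 256, 1, 0);

	/* get a buffer whose block index isn't known yet ... */
	struct file_buffer *buf = cache_get_nohash(cache);

	/* ... fill buf->data, then make it findable under block index 42 */
	cache_hash(buf, 42);

	/* drop our reference; while noshrink_lookup is set the buffer stays
	   findable via the hash table until it is reused off the free list */
	cache_block_put(buf);
}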
diff --git a/squashfs-tools/compressor.c b/squashfs-tools/compressor.c
new file mode 100644
index 0000000..32300a9
--- /dev/null
+++ b/squashfs-tools/compressor.c
@@ -0,0 +1,145 @@
+/*
+ *
+ * Copyright (c) 2009, 2010, 2011, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * compressor.c
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include "compressor.h"
+#include "squashfs_fs.h"
+
+#ifndef GZIP_SUPPORT
+static struct compressor gzip_comp_ops = {
+ ZLIB_COMPRESSION, "gzip"
+};
+#else
+extern struct compressor gzip_comp_ops;
+#endif
+
+#ifndef LZMA_SUPPORT
+static struct compressor lzma_comp_ops = {
+ LZMA_COMPRESSION, "lzma"
+};
+#else
+extern struct compressor lzma_comp_ops;
+#endif
+
+#ifndef LZO_SUPPORT
+static struct compressor lzo_comp_ops = {
+ LZO_COMPRESSION, "lzo"
+};
+#else
+extern struct compressor lzo_comp_ops;
+#endif
+
+#ifndef LZ4_SUPPORT
+static struct compressor lz4_comp_ops = {
+ LZ4_COMPRESSION, "lz4"
+};
+#else
+extern struct compressor lz4_comp_ops;
+#endif
+
+#ifndef XZ_SUPPORT
+static struct compressor xz_comp_ops = {
+ XZ_COMPRESSION, "xz"
+};
+#else
+extern struct compressor xz_comp_ops;
+#endif
+
+#ifndef ZSTD_SUPPORT
+static struct compressor zstd_comp_ops = {
+ ZSTD_COMPRESSION, "zstd"
+};
+#else
+extern struct compressor zstd_comp_ops;
+#endif
+
+static struct compressor unknown_comp_ops = {
+ 0, "unknown"
+};
+
+
+struct compressor *compressor[] = {
+ &gzip_comp_ops,
+ &lzo_comp_ops,
+ &lz4_comp_ops,
+ &xz_comp_ops,
+ &zstd_comp_ops,
+ &lzma_comp_ops,
+ &unknown_comp_ops
+};
+
+
+struct compressor *lookup_compressor(char *name)
+{
+ int i;
+
+ for(i = 0; compressor[i]->id; i++)
+ if(strcmp(compressor[i]->name, name) == 0)
+ break;
+
+ return compressor[i];
+}
+
+
+struct compressor *lookup_compressor_id(int id)
+{
+ int i;
+
+ for(i = 0; compressor[i]->id; i++)
+ if(id == compressor[i]->id)
+ break;
+
+ return compressor[i];
+}
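
Both lookup functions rely on the compressor[] table ending with unknown_comp_ops, whose id is 0: each loop stops either at a match or at that sentinel, so an unrecognised name or id returns the "unknown" entry rather than NULL. A short usage sketch (editorial, not part of the upstream sources):

/* Illustrative sketch only */
struct compressor *comp = lookup_compressor("gzip");

if(!comp->supported)
	fprintf(stderr, "%s compression not supported in this build\n",
		comp->name);

struct compressor *bad = lookup_compressor("nosuch");
/* never NULL: bad->id == 0 and bad->name is "unknown" */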
+
+
+void display_compressors(FILE *stream, char *indent, char *def_comp)
+{
+ int i;
+
+ for(i = 0; compressor[i]->id; i++)
+ if(compressor[i]->supported)
+ fprintf(stream, "%s\t%s%s\n", indent,
+ compressor[i]->name,
+ strcmp(compressor[i]->name, def_comp) == 0 ?
+ " (default)" : "");
+}
+
+
+void display_compressor_usage(FILE *stream, char *def_comp)
+{
+ int i;
+
+ for(i = 0; compressor[i]->id; i++)
+ if(compressor[i]->supported) {
+ char *str = strcmp(compressor[i]->name, def_comp) == 0 ?
+ " (default)" : "";
+ if(compressor[i]->usage) {
+ fprintf(stream, "\t%s%s\n",
+ compressor[i]->name, str);
+ compressor[i]->usage(stream);
+ } else
+ fprintf(stream, "\t%s (no options)%s\n",
+ compressor[i]->name, str);
+ }
+}
diff --git a/squashfs-tools/compressor.h b/squashfs-tools/compressor.h
new file mode 100644
index 0000000..ba0de25
--- /dev/null
+++ b/squashfs-tools/compressor.h
@@ -0,0 +1,132 @@
+#ifndef COMPRESSOR_H
+#define COMPRESSOR_H
+/*
+ *
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * compressor.h
+ */
+
+struct compressor {
+ int id;
+ char *name;
+ int supported;
+ int (*init)(void **, int, int);
+ int (*compress)(void *, void *, void *, int, int, int *);
+ int (*uncompress)(void *, void *, int, int, int *);
+ int (*options)(char **, int);
+ int (*options_post)(int);
+ void *(*dump_options)(int, int *);
+ int (*extract_options)(int, void *, int);
+ int (*check_options)(int, void *, int);
+ void (*display_options)(void *, int);
+ void (*usage)(FILE *);
+ int (*option_args)(char *);
+};
+
+extern struct compressor *lookup_compressor(char *);
+extern struct compressor *lookup_compressor_id(int);
+extern void display_compressors(FILE *stream, char *, char *);
+extern void display_compressor_usage(FILE *stream, char *);
+
+static inline int compressor_init(struct compressor *comp, void **stream,
+ int block_size, int datablock)
+{
+ if(comp->init == NULL)
+ return 0;
+ return comp->init(stream, block_size, datablock);
+}
+
+
+static inline int compressor_compress(struct compressor *comp, void *strm,
+ void *dest, void *src, int size, int block_size, int *error)
+{
+ return comp->compress(strm, dest, src, size, block_size, error);
+}
+
+
+static inline int compressor_uncompress(struct compressor *comp, void *dest,
+ void *src, int size, int block_size, int *error)
+{
+ return comp->uncompress(dest, src, size, block_size, error);
+}
+
+
+/*
+ * For the following functions please see the lzo, lz4 or xz
+ * compressors for commented examples of how they are used.
+ */
+static inline int compressor_options(struct compressor *comp, char *argv[],
+ int argc)
+{
+ if(comp->options == NULL)
+ return -1;
+
+ return comp->options(argv, argc);
+}
+
+
+static inline int compressor_options_post(struct compressor *comp, int block_size)
+{
+ if(comp->options_post == NULL)
+ return 0;
+ return comp->options_post(block_size);
+}
+
+
+static inline void *compressor_dump_options(struct compressor *comp,
+ int block_size, int *size)
+{
+ if(comp->dump_options == NULL)
+ return NULL;
+ return comp->dump_options(block_size, size);
+}
+
+
+static inline int compressor_extract_options(struct compressor *comp,
+ int block_size, void *buffer, int size)
+{
+ if(comp->extract_options == NULL)
+ return size ? -1 : 0;
+ return comp->extract_options(block_size, buffer, size);
+}
+
+
+static inline int compressor_check_options(struct compressor *comp,
+ int block_size, void *buffer, int size)
+{
+ if(comp->check_options == NULL)
+ return 0;
+ return comp->check_options(block_size, buffer, size);
+}
+
+
+static inline void compressor_display_options(struct compressor *comp,
+ void *buffer, int size)
+{
+ if(comp->display_options != NULL)
+ comp->display_options(buffer, size);
+}
+
+static inline int compressor_option_args(struct compressor *comp, char *option)
+{
+ if(comp == NULL || comp->option_args == NULL)
+ return 0;
+ return comp->option_args(option);
+}
+#endif
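
The inline wrappers above save callers from NULL-checking every optional method; only compress and uncompress are called unconditionally. A hedged sketch of how calling code might drive them (the function below is illustrative, not taken from mksquashfs.c):

/* Illustrative sketch, not part of the upstream sources */
#include <stdio.h>
#include "compressor.h"

static int compress_block(struct compressor *comp, void *dest, void *src,
	int size, int block_size)
{
	void *strm = NULL;
	int error, res;

	/* returns 0 immediately for compressors with no init method */
	if(compressor_init(comp, &strm, block_size, 1) == -1)
		return -1;

	res = compressor_compress(comp, strm, dest, src, size, block_size,
		&error);
	if(res == -1)
		fprintf(stderr, "compressor error %d\n", error);
	else if(res == 0)
		fprintf(stderr, "output did not fit in %d bytes\n", block_size);

	return res;
}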
diff --git a/squashfs-tools/date.c b/squashfs-tools/date.c
new file mode 100644
index 0000000..74320ef
--- /dev/null
+++ b/squashfs-tools/date.c
@@ -0,0 +1,129 @@
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2023
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * date.c
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/wait.h>
+#include <errno.h>
+#include <limits.h>
+
+#include "date.h"
+#include "error.h"
+
+int exec_date(char *string, unsigned int *mtime)
+{
+ int res, pipefd[2], child, status;
+ int bytes = 0;
+ long long time;
+ char buffer[11];
+
+ res = pipe(pipefd);
+ if(res == -1) {
+ ERROR("Error executing date, pipe failed\n");
+ return FALSE;
+ }
+
+ child = fork();
+ if(child == -1) {
+ ERROR("Error executing date, fork failed\n");
+ goto failed;
+ }
+
+ if(child == 0) {
+ close(pipefd[0]);
+ close(STDOUT_FILENO);
+ res = dup(pipefd[1]);
+ if(res == -1)
+ exit(EXIT_FAILURE);
+
+ execl("/usr/bin/date", "date", "-d", string, "+%s", (char *) NULL);
+ exit(EXIT_FAILURE);
+ }
+
+ close(pipefd[1]);
+
+ while(1) {
+ res = read_bytes(pipefd[0], buffer, 11);
+ if(res == -1) {
+ ERROR("Error executing date\n");
+ goto failed2;
+ } else if(res == 0)
+ break;
+
+ bytes += res;
+ }
+
+ while(1) {
+ res = waitpid(child, &status, 0);
+ if(res != -1)
+ break;
+ else if(errno != EINTR) {
+			ERROR("Error executing date, waitpid failed\n");
+ goto failed2;
+ }
+ }
+
+ close(pipefd[0]);
+
+ if(!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
+ ERROR("Error executing date, failed to parse date string\n");
+ return FALSE;
+ }
+
+ if(bytes == 0 || bytes > 11) {
+ ERROR("Error executing date, unexpected result\n");
+ return FALSE;
+ }
+
+ /* replace trailing newline with string terminator */
+ buffer[bytes - 1] = '\0';
+
+ res = sscanf(buffer, "%lld", &time);
+
+ if(res < 1) {
+ ERROR("Error, unexpected result from date\n");
+ return FALSE;
+ }
+
+ if(time < 0) {
+ ERROR("Error, negative number returned from date, dates should be on or after the epoch of 1970-01-01 00:00 UTC\n");
+ return FALSE;
+ }
+
+ if(time > UINT_MAX) {
+ ERROR("Error, number returned from date >= 2^32, dates should be before 2106-02-07 06:28:16 UTC\n");
+ return FALSE;
+ }
+
+ *mtime = (unsigned int) time;
+
+ return TRUE;
+
+failed:
+ close(pipefd[1]);
+failed2:
+ close(pipefd[0]);
+ return FALSE;
+}
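
exec_date() shells out to /usr/bin/date to convert a free-form date string into a 32-bit epoch value. A minimal usage sketch (editorial only):

/* Illustrative sketch, not part of the upstream sources */
unsigned int mtime;

if(exec_date("2023-01-01 12:00 UTC", &mtime))
	printf("timestamp %u\n", mtime);
else
	fprintf(stderr, "failed to parse date string\n");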
diff --git a/squashfs-tools/date.h b/squashfs-tools/date.h
new file mode 100644
index 0000000..55cc462
--- /dev/null
+++ b/squashfs-tools/date.h
@@ -0,0 +1,28 @@
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2023
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * date.h
+ */
+
+extern long long read_bytes(int, void *, long long);
+
+#define TRUE 1
+#define FALSE 0
diff --git a/squashfs-tools/endian_compat.h b/squashfs-tools/endian_compat.h
new file mode 100644
index 0000000..c416f7f
--- /dev/null
+++ b/squashfs-tools/endian_compat.h
@@ -0,0 +1,34 @@
+#ifndef ENDIAN_COMPAT_H
+#define ENDIAN_COMPAT_H
+
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * endian_compat.h
+ */
+#ifndef linux
+#define __BYTE_ORDER BYTE_ORDER
+#define __BIG_ENDIAN BIG_ENDIAN
+#define __LITTLE_ENDIAN LITTLE_ENDIAN
+#else
+#include <endian.h>
+#endif
+
+#endif
diff --git a/squashfs-tools/error.h b/squashfs-tools/error.h
new file mode 100644
index 0000000..78e39c0
--- /dev/null
+++ b/squashfs-tools/error.h
@@ -0,0 +1,43 @@
+#ifndef ERROR_H
+#define ERROR_H
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2012, 2013, 2014, 2019, 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * error.h
+ */
+
+extern void progressbar_error(char *fmt, ...);
+extern void progressbar_info(char *fmt, ...);
+
+#ifdef SQUASHFS_TRACE
+#define TRACE(s, args...) \
+ do { \
+ progressbar_info("squashfs: "s, ## args);\
+ } while(0)
+#else
+#define TRACE(s, args...)
+#endif
+
+#define ERROR(s, args...) \
+ do {\
+ progressbar_error(s, ## args); \
+ } while(0)
+#endif
diff --git a/squashfs-tools/fnmatch_compat.h b/squashfs-tools/fnmatch_compat.h
new file mode 100644
index 0000000..7b4afd8
--- /dev/null
+++ b/squashfs-tools/fnmatch_compat.h
@@ -0,0 +1,32 @@
+#ifndef FNMATCH_COMPAT
+#define FNMATCH_COMPAT
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2015
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * fnmatch_compat.h
+ */
+
+#include <fnmatch.h>
+
+#ifndef FNM_EXTMATCH
+#define FNM_EXTMATCH 0
+#endif
+
+#endif
diff --git a/squashfs-tools/gzip_wrapper.c b/squashfs-tools/gzip_wrapper.c
new file mode 100644
index 0000000..3cb1cc0
--- /dev/null
+++ b/squashfs-tools/gzip_wrapper.c
@@ -0,0 +1,513 @@
+/*
+ * Copyright (c) 2009, 2010, 2013, 2014, 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * gzip_wrapper.c
+ *
+ * Support for ZLIB compression http://www.zlib.net
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <zlib.h>
+
+#include "squashfs_fs.h"
+#include "gzip_wrapper.h"
+#include "compressor.h"
+
+static struct strategy strategy[] = {
+ { "default", Z_DEFAULT_STRATEGY, 0 },
+ { "filtered", Z_FILTERED, 0 },
+ { "huffman_only", Z_HUFFMAN_ONLY, 0 },
+ { "run_length_encoded", Z_RLE, 0 },
+ { "fixed", Z_FIXED, 0 },
+ { NULL, 0, 0 }
+};
+
+static int strategy_count = 0;
+
+/* default compression level */
+static int compression_level = GZIP_DEFAULT_COMPRESSION_LEVEL;
+
+/* default window size */
+static int window_size = GZIP_DEFAULT_WINDOW_SIZE;
+
+/*
+ * This function is called by the options parsing code in mksquashfs.c
+ * to parse any -X compressor option.
+ *
+ * This function returns:
+ * >=0 (number of additional args parsed) on success
+ * -1 if the option was unrecognised, or
+ * -2 if the option was recognised, but otherwise bad in
+ * some way (e.g. invalid parameter)
+ *
+ * Note: this function sets internal compressor state, but does not
+ * pass back the results of the parsing other than success/failure.
+ * The gzip_dump_options() function is called later to get the options in
+ * a format suitable for writing to the filesystem.
+ */
+static int gzip_options(char *argv[], int argc)
+{
+ if(strcmp(argv[0], "-Xcompression-level") == 0) {
+ if(argc < 2) {
+ fprintf(stderr, "gzip: -Xcompression-level missing "
+ "compression level\n");
+			fprintf(stderr, "gzip: -Xcompression-level "
+				"should be 1 <= n <= 9\n");
+ goto failed;
+ }
+
+ compression_level = atoi(argv[1]);
+ if(compression_level < 1 || compression_level > 9) {
+			fprintf(stderr, "gzip: -Xcompression-level invalid, it "
+				"should be 1 <= n <= 9\n");
+ goto failed;
+ }
+
+ return 1;
+ } else if(strcmp(argv[0], "-Xwindow-size") == 0) {
+ if(argc < 2) {
+			fprintf(stderr, "gzip: -Xwindow-size missing window "
+				"size\n");
+ fprintf(stderr, "gzip: -Xwindow-size <window-size>\n");
+ goto failed;
+ }
+
+ window_size = atoi(argv[1]);
+ if(window_size < 8 || window_size > 15) {
+			fprintf(stderr, "gzip: -Xwindow-size invalid, it "
+				"should be 8 <= n <= 15\n");
+ goto failed;
+ }
+
+ return 1;
+ } else if(strcmp(argv[0], "-Xstrategy") == 0) {
+ char *name;
+ int i;
+
+ if(argc < 2) {
+ fprintf(stderr, "gzip: -Xstrategy missing "
+ "strategies\n");
+ goto failed;
+ }
+
+ name = argv[1];
+ while(name[0] != '\0') {
+ for(i = 0; strategy[i].name; i++) {
+ int n = strlen(strategy[i].name);
+ if((strncmp(name, strategy[i].name, n) == 0) &&
+ (name[n] == '\0' ||
+ name[n] == ',')) {
+ if(strategy[i].selected == 0) {
+ strategy[i].selected = 1;
+ strategy_count++;
+ }
+ name += name[n] == ',' ? n + 1 : n;
+ break;
+ }
+ }
+ if(strategy[i].name == NULL) {
+ fprintf(stderr, "gzip: -Xstrategy unrecognised "
+ "strategy\n");
+ goto failed;
+ }
+ }
+
+ return 1;
+ }
+
+ return -1;
+
+failed:
+ return -2;
+}
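
On success the return value is the number of extra arguments consumed after argv[0] (here always 1, since each recognised gzip -X option takes one value), which lets the main option parser advance past them. A hedged sketch of such a caller, driving the compressor through the compressor_options() wrapper rather than calling gzip_options() directly (the loop is illustrative, not the real mksquashfs.c parser):

/* Illustrative sketch, not part of the upstream sources */
struct compressor *comp = lookup_compressor("gzip");
char *xargs[] = { "-Xcompression-level", "6", "-Xstrategy", "filtered" };
int xargc = 4, i = 0, args;

while(i < xargc) {
	args = compressor_options(comp, &xargs[i], xargc - i);
	if(args < 0)
		break;		/* -1 unrecognised, -2 recognised but invalid */
	i += 1 + args;		/* the option itself plus its extra arguments */
}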
+
+
+/*
+ * This function is called after all options have been parsed.
+ * It is used to do post-processing on the compressor options using
+ * values that were not expected to be known at option parse time.
+ *
+ * This function returns 0 on successful post processing, or
+ * -1 on error
+ */
+static int gzip_options_post(int block_size)
+{
+ if(strategy_count == 1 && strategy[0].selected) {
+ strategy_count = 0;
+ strategy[0].selected = 0;
+ }
+
+ return 0;
+}
+
+
+/*
+ * This function is called by mksquashfs to dump the parsed
+ * compressor options in a format suitable for writing to the
+ * compressor options field in the filesystem (stored immediately
+ * after the superblock).
+ *
+ * This function returns a pointer to the compression options structure
+ * to be stored (and the size), or NULL if there are no compression
+ * options
+ *
+ */
+static void *gzip_dump_options(int block_size, int *size)
+{
+ static struct gzip_comp_opts comp_opts;
+ int i, strategies = 0;
+
+ /*
+ * If default compression options of:
+	 * compression-level: 9 and
+ * window-size: 15 and
+ * strategy_count == 0 then
+ * don't store a compression options structure (this is compatible
+ * with the legacy implementation of GZIP for Squashfs)
+ */
+ if(compression_level == GZIP_DEFAULT_COMPRESSION_LEVEL &&
+ window_size == GZIP_DEFAULT_WINDOW_SIZE &&
+ strategy_count == 0)
+ return NULL;
+
+ for(i = 0; strategy[i].name; i++)
+ strategies |= strategy[i].selected << i;
+
+ comp_opts.compression_level = compression_level;
+ comp_opts.window_size = window_size;
+ comp_opts.strategy = strategies;
+
+ SQUASHFS_INSWAP_COMP_OPTS(&comp_opts);
+
+ *size = sizeof(comp_opts);
+ return &comp_opts;
+}
+
+
+/*
+ * This function is a helper specifically for the append mode of
+ * mksquashfs. Its purpose is to set the internal compressor state
+ * to the stored compressor options in the passed compressor options
+ * structure.
+ *
+ * In effect this function sets up the compressor options
+ * to the same state they were when the filesystem was originally
+ * generated, this is to ensure on appending, the compressor uses
+ * the same compression options that were used to generate the
+ * original filesystem.
+ *
+ * Note, even if there are no compressor options, this function is still
+ * called with an empty compressor structure (size == 0), to explicitly
+ * set the default options; this is to ensure any user supplied
+ * -X options on the appending mksquashfs command line are overridden.
+ *
+ * This function returns 0 on successful extraction of options, and
+ * -1 on error
+ */
+static int gzip_extract_options(int block_size, void *buffer, int size)
+{
+ struct gzip_comp_opts *comp_opts = buffer;
+ int i;
+
+ if(size == 0) {
+ /* Set default values */
+ compression_level = GZIP_DEFAULT_COMPRESSION_LEVEL;
+ window_size = GZIP_DEFAULT_WINDOW_SIZE;
+ strategy_count = 0;
+ return 0;
+ }
+
+ /* we expect a comp_opts structure of sufficient size to be present */
+ if(size < sizeof(*comp_opts))
+ goto failed;
+
+ SQUASHFS_INSWAP_COMP_OPTS(comp_opts);
+
+ /* Check comp_opts structure for correctness */
+ if(comp_opts->compression_level < 1 ||
+ comp_opts->compression_level > 9) {
+ fprintf(stderr, "gzip: bad compression level in "
+ "compression options structure\n");
+ goto failed;
+ }
+ compression_level = comp_opts->compression_level;
+
+ if(comp_opts->window_size < 8 ||
+ comp_opts->window_size > 15) {
+ fprintf(stderr, "gzip: bad window size in "
+ "compression options structure\n");
+ goto failed;
+ }
+ window_size = comp_opts->window_size;
+
+ strategy_count = 0;
+ for(i = 0; strategy[i].name; i++) {
+ if((comp_opts->strategy >> i) & 1) {
+ strategy[i].selected = 1;
+ strategy_count ++;
+ } else
+ strategy[i].selected = 0;
+ }
+
+ return 0;
+
+failed:
+ fprintf(stderr, "gzip: error reading stored compressor options from "
+ "filesystem!\n");
+
+ return -1;
+}
+
+
+static void gzip_display_options(void *buffer, int size)
+{
+ struct gzip_comp_opts *comp_opts = buffer;
+ int i, printed;
+
+ /* we expect a comp_opts structure of sufficient size to be present */
+ if(size < sizeof(*comp_opts))
+ goto failed;
+
+ SQUASHFS_INSWAP_COMP_OPTS(comp_opts);
+
+ /* Check comp_opts structure for correctness */
+ if(comp_opts->compression_level < 1 ||
+ comp_opts->compression_level > 9) {
+ fprintf(stderr, "gzip: bad compression level in "
+ "compression options structure\n");
+ goto failed;
+ }
+ printf("\tcompression-level %d\n", comp_opts->compression_level);
+
+ if(comp_opts->window_size < 8 ||
+ comp_opts->window_size > 15) {
+ fprintf(stderr, "gzip: bad window size in "
+ "compression options structure\n");
+ goto failed;
+ }
+ printf("\twindow-size %d\n", comp_opts->window_size);
+
+ for(i = 0, printed = 0; strategy[i].name; i++) {
+ if((comp_opts->strategy >> i) & 1) {
+ if(printed)
+ printf(", ");
+ else
+ printf("\tStrategies selected: ");
+ printf("%s", strategy[i].name);
+ printed = 1;
+ }
+ }
+
+ if(!printed)
+ printf("\tStrategies selected: default\n");
+ else
+ printf("\n");
+
+ return;
+
+failed:
+ fprintf(stderr, "gzip: error reading stored compressor options from "
+ "filesystem!\n");
+}
+
+
+/*
+ * This function is called by mksquashfs to initialise the
+ * compressor, before compress() is called.
+ *
+ * This function returns 0 on success, and
+ * -1 on error
+ */
+static int gzip_init(void **strm, int block_size, int datablock)
+{
+ int i, j, res;
+ struct gzip_stream *stream;
+
+ if(!datablock || !strategy_count) {
+ stream = malloc(sizeof(*stream) + sizeof(struct gzip_strategy));
+ if(stream == NULL)
+ goto failed;
+
+ stream->strategies = 1;
+ stream->strategy[0].strategy = Z_DEFAULT_STRATEGY;
+ } else {
+ stream = malloc(sizeof(*stream) +
+ sizeof(struct gzip_strategy) * strategy_count);
+ if(stream == NULL)
+ goto failed;
+
+ memset(stream->strategy, 0, sizeof(struct gzip_strategy) *
+ strategy_count);
+
+ stream->strategies = strategy_count;
+
+ for(i = 0, j = 0; strategy[i].name; i++) {
+ if(!strategy[i].selected)
+ continue;
+
+ stream->strategy[j].strategy = strategy[i].strategy;
+ if(j) {
+ stream->strategy[j].buffer = malloc(block_size);
+ if(stream->strategy[j].buffer == NULL)
+ goto failed2;
+ }
+ j++;
+ }
+ }
+
+ stream->stream.zalloc = Z_NULL;
+ stream->stream.zfree = Z_NULL;
+ stream->stream.opaque = 0;
+
+ res = deflateInit2(&stream->stream, compression_level, Z_DEFLATED,
+ window_size, 8, stream->strategy[0].strategy);
+ if(res != Z_OK)
+ goto failed2;
+
+ *strm = stream;
+ return 0;
+
+failed2:
+ for(i = 1; i < stream->strategies; i++)
+ free(stream->strategy[i].buffer);
+ free(stream);
+failed:
+ return -1;
+}
+
+
+static int gzip_compress(void *strm, void *d, void *s, int size, int block_size,
+ int *error)
+{
+ int i, res;
+ struct gzip_stream *stream = strm;
+ struct gzip_strategy *selected = NULL;
+
+ stream->strategy[0].buffer = d;
+
+ for(i = 0; i < stream->strategies; i++) {
+ struct gzip_strategy *strategy = &stream->strategy[i];
+
+ res = deflateReset(&stream->stream);
+ if(res != Z_OK)
+ goto failed;
+
+ stream->stream.next_in = s;
+ stream->stream.avail_in = size;
+ stream->stream.next_out = strategy->buffer;
+ stream->stream.avail_out = block_size;
+
+ if(stream->strategies > 1) {
+ res = deflateParams(&stream->stream,
+ compression_level, strategy->strategy);
+ if(res != Z_OK)
+ goto failed;
+ }
+
+ stream->stream.total_out = 0;
+ res = deflate(&stream->stream, Z_FINISH);
+ strategy->length = stream->stream.total_out;
+ if(res == Z_STREAM_END) {
+ if(!selected || selected->length > strategy->length)
+ selected = strategy;
+ } else if(res != Z_OK)
+ goto failed;
+ }
+
+ if(!selected)
+ /*
+ * Output buffer overflow. Return out of buffer space
+ */
+ return 0;
+
+ if(selected->buffer != d)
+ memcpy(d, selected->buffer, selected->length);
+
+ return (int) selected->length;
+
+failed:
+ /*
+ * All other errors return failure, with the compressor
+ * specific error code in *error
+ */
+ *error = res;
+ return -1;
+}
+
+
+static int gzip_uncompress(void *d, void *s, int size, int outsize, int *error)
+{
+ int res;
+ unsigned long bytes = outsize;
+
+ res = uncompress(d, &bytes, s, size);
+
+ if(res == Z_OK)
+ return (int) bytes;
+ else {
+ *error = res;
+ return -1;
+ }
+}
+
+
+static void gzip_usage(FILE *stream)
+{
+ fprintf(stream, "\t -Xcompression-level <compression-level>\n");
+ fprintf(stream, "\t\t<compression-level> should be 1 .. 9 (default "
+ "%d)\n", GZIP_DEFAULT_COMPRESSION_LEVEL);
+ fprintf(stream, "\t -Xwindow-size <window-size>\n");
+ fprintf(stream, "\t\t<window-size> should be 8 .. 15 (default "
+ "%d)\n", GZIP_DEFAULT_WINDOW_SIZE);
+ fprintf(stream, "\t -Xstrategy strategy1,strategy2,...,strategyN\n");
+ fprintf(stream, "\t\tCompress using strategy1,strategy2,...,strategyN"
+ " in turn\n");
+ fprintf(stream, "\t\tand choose the best compression.\n");
+ fprintf(stream, "\t\tAvailable strategies: default, filtered, "
+ "huffman_only,\n\t\trun_length_encoded and fixed\n");
+}
+
+
+static int option_args(char *option)
+{
+ if(strcmp(option, "-Xcompression-level") == 0 ||
+ strcmp(option, "-Xwindow-size") == 0 ||
+ strcmp(option, "-Xstrategy") == 0)
+ return 1;
+
+ return 0;
+}
+
+
+struct compressor gzip_comp_ops = {
+ .init = gzip_init,
+ .compress = gzip_compress,
+ .uncompress = gzip_uncompress,
+ .options = gzip_options,
+ .options_post = gzip_options_post,
+ .dump_options = gzip_dump_options,
+ .extract_options = gzip_extract_options,
+ .display_options = gzip_display_options,
+ .usage = gzip_usage,
+ .option_args = option_args,
+ .id = ZLIB_COMPRESSION,
+ .name = "gzip",
+ .supported = 1
+};
diff --git a/squashfs-tools/gzip_wrapper.h b/squashfs-tools/gzip_wrapper.h
new file mode 100644
index 0000000..5f87373
--- /dev/null
+++ b/squashfs-tools/gzip_wrapper.h
@@ -0,0 +1,69 @@
+#ifndef GZIP_WRAPPER_H
+#define GZIP_WRAPPER_H
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2014
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * gzip_wrapper.h
+ *
+ */
+
+#include "endian_compat.h"
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+extern unsigned int inswap_le16(unsigned short);
+extern unsigned int inswap_le32(unsigned int);
+
+#define SQUASHFS_INSWAP_COMP_OPTS(s) { \
+ (s)->compression_level = inswap_le32((s)->compression_level); \
+ (s)->window_size = inswap_le16((s)->window_size); \
+ (s)->strategy = inswap_le16((s)->strategy); \
+}
+#else
+#define SQUASHFS_INSWAP_COMP_OPTS(s)
+#endif
+
+/* Default compression */
+#define GZIP_DEFAULT_COMPRESSION_LEVEL 9
+#define GZIP_DEFAULT_WINDOW_SIZE 15
+
+struct gzip_comp_opts {
+ int compression_level;
+ short window_size;
+ short strategy;
+};
+
+struct strategy {
+ char *name;
+ int strategy;
+ int selected;
+};
+
+struct gzip_strategy {
+ int strategy;
+ int length;
+ void *buffer;
+};
+
+struct gzip_stream {
+ z_stream stream;
+ int strategies;
+ struct gzip_strategy strategy[0];
+};
+#endif
diff --git a/squashfs-tools/info.c b/squashfs-tools/info.c
new file mode 100644
index 0000000..49f0c72
--- /dev/null
+++ b/squashfs-tools/info.c
@@ -0,0 +1,174 @@
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2013, 2014, 2019, 2021, 2022, 2023
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * info.c
+ */
+
+#include <pthread.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <stdio.h>
+#include <math.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <dirent.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <string.h>
+
+#include "squashfs_fs.h"
+#include "mksquashfs.h"
+#include "mksquashfs_error.h"
+#include "progressbar.h"
+#include "caches-queues-lists.h"
+#include "signals.h"
+
+static int silent = 0;
+static struct dir_ent *ent = NULL;
+
+pthread_t info_thread;
+
+
+void disable_info()
+{
+ ent = NULL;
+}
+
+
+void update_info(struct dir_ent *dir_ent)
+{
+ ent = dir_ent;
+}
+
+
+static void print_filename()
+{
+ struct dir_ent *dir_ent = ent;
+
+ if(dir_ent == NULL)
+ return;
+
+ if(dir_ent->our_dir->subpath[0] != '\0')
+ INFO("%s/%s\n", dir_ent->our_dir->subpath, dir_ent->name);
+ else
+ INFO("/%s\n", dir_ent->name);
+}
+
+
+static void dump_state()
+{
+ disable_progress_bar();
+
+ printf("Queue and Cache status dump\n");
+ printf("===========================\n");
+
+ printf("file buffer queue (reader thread -> deflate thread(s))\n");
+ dump_queue(to_deflate);
+
+ printf("uncompressed fragment queue (reader thread -> fragment"
+ " thread(s))\n");
+ dump_queue(to_process_frag);
+
+ printf("processed fragment queue (fragment thread(s) -> main"
+ " thread)\n");
+ dump_seq_queue(to_main, 1);
+
+ printf("compressed block queue (deflate thread(s) -> main thread)\n");
+ dump_seq_queue(to_main, 0);
+
+ printf("uncompressed packed fragment queue (main thread -> fragment"
+ " deflate thread(s))\n");
+ dump_queue(to_frag);
+
+ if(!reproducible) {
+ printf("locked frag queue (compressed frags waiting while multi-block"
+ " file is written)\n");
+ dump_queue(locked_fragment);
+
+		printf("compressed block queue (main & fragment deflate thread(s) ->"
+ " writer thread)\n");
+ dump_queue(to_writer);
+ } else {
+		printf("compressed fragment queue (fragment deflate thread(s) ->"
+			" fragment order thread)\n");
+
+ dump_seq_queue(to_order, 0);
+
+ printf("compressed block queue (main & fragment order threads ->"
+ " writer thread)\n");
+ dump_queue(to_writer);
+ }
+
+ printf("read cache (uncompressed blocks read by reader thread)\n");
+ dump_cache(reader_buffer);
+
+ printf("block write cache (compressed blocks waiting for the writer"
+ " thread)\n");
+ dump_cache(bwriter_buffer);
+ printf("fragment write cache (compressed fragments waiting for the"
+ " writer thread)\n");
+ dump_cache(fwriter_buffer);
+
+ printf("fragment cache (frags waiting to be compressed by fragment"
+ " deflate thread(s))\n");
+ dump_cache(fragment_buffer);
+
+ printf("fragment reserve cache (avoids pipeline stall if frag cache"
+ " full in dup check)\n");
+ dump_cache(reserve_cache);
+
+ enable_progress_bar();
+}
+
+
+static void *info_thrd(void *arg)
+{
+ sigset_t sigmask;
+ int sig, waiting = 0;
+
+ sigemptyset(&sigmask);
+ sigaddset(&sigmask, SIGQUIT);
+ sigaddset(&sigmask, SIGHUP);
+
+ while(1) {
+ sig = wait_for_signal(&sigmask, &waiting);
+
+ if(sig == SIGQUIT && !waiting) {
+ print_filename();
+
+			/* set a one second period; if ^\ is received again
+			   within that time, dump queue and cache status */
+ waiting = 1;
+ } else
+ dump_state();
+ }
+
+ return NULL;
+}
+
+
+void init_info()
+{
+ pthread_create(&info_thread, NULL, info_thrd, NULL);
+}
diff --git a/squashfs-tools/info.h b/squashfs-tools/info.h
new file mode 100644
index 0000000..bcf03a2
--- /dev/null
+++ b/squashfs-tools/info.h
@@ -0,0 +1,30 @@
+#ifndef INFO_H
+#define INFO_H
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2013, 2014
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * info.h
+ */
+
+extern void disable_info();
+extern void update_info(struct dir_ent *);
+extern void init_info();
+#endif
diff --git a/squashfs-tools/lz4_wrapper.c b/squashfs-tools/lz4_wrapper.c
new file mode 100644
index 0000000..44cd35e
--- /dev/null
+++ b/squashfs-tools/lz4_wrapper.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2013, 2019, 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * lz4_wrapper.c
+ *
+ * Support for LZ4 compression http://fastcompression.blogspot.com/p/lz4.html
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <lz4.h>
+#include <lz4hc.h>
+
+#include "squashfs_fs.h"
+#include "lz4_wrapper.h"
+#include "compressor.h"
+
+/* LZ4 1.7.0 introduced new functions, and since r131,
+ * the older functions produce deprecated warnings.
+ *
+ * There are still too many distros using older versions
+ * to switch to the newer functions, but the deprecated
+ * functions may completely disappear. This is a mess.
+ *
+ * Support both by checking the library version and
+ * using shadow definitions
+ */
+
+/* Earlier (but > 1.7.0) versions don't define this */
+#ifndef LZ4HC_CLEVEL_MAX
+#define LZ4HC_CLEVEL_MAX 12
+#endif
+
+#if LZ4_VERSION_NUMBER >= 10700
+#define COMPRESS(src, dest, size, max) LZ4_compress_default(src, dest, size, max)
+#define COMPRESS_HC(src, dest, size, max) LZ4_compress_HC(src, dest, size, max, LZ4HC_CLEVEL_MAX)
+#else
+#define COMPRESS(src, dest, size, max) LZ4_compress_limitedOutput(src, dest, size, max)
+#define COMPRESS_HC(src, dest, size, max) LZ4_compressHC_limitedOutput(src, dest, size, max)
+#endif
+
+static int hc = 0;
+
+/*
+ * This function is called by the options parsing code in mksquashfs.c
+ * to parse any -X compressor option.
+ *
+ * This function returns:
+ * >=0 (number of additional args parsed) on success
+ * -1 if the option was unrecognised, or
+ * -2 if the option was recognised, but otherwise bad in
+ * some way (e.g. invalid parameter)
+ *
+ * Note: this function sets internal compressor state, but does not
+ * pass back the results of the parsing other than success/failure.
+ * The lz4_dump_options() function is called later to get the options in
+ * a format suitable for writing to the filesystem.
+ */
+static int lz4_options(char *argv[], int argc)
+{
+ if(strcmp(argv[0], "-Xhc") == 0) {
+ hc = 1;
+ return 0;
+ }
+
+ return -1;
+}
+
+
+/*
+ * This function is called by mksquashfs to dump the parsed
+ * compressor options in a format suitable for writing to the
+ * compressor options field in the filesystem (stored immediately
+ * after the superblock).
+ *
+ * This function returns a pointer to the compression options structure
+ * to be stored (and the size), or NULL if there are no compression
+ * options
+ *
+ * Currently LZ4 always returns a comp_opts structure, with
+ * the version indicating LZ4_LEGACY stream fomat. This is to
+ * easily accomodate changes in the kernel code to different
+ * stream formats
+ */
+static void *lz4_dump_options(int block_size, int *size)
+{
+ static struct lz4_comp_opts comp_opts;
+
+ comp_opts.version = LZ4_LEGACY;
+ comp_opts.flags = hc ? LZ4_HC : 0;
+ SQUASHFS_INSWAP_COMP_OPTS(&comp_opts);
+
+ *size = sizeof(comp_opts);
+ return &comp_opts;
+}
+
+
+/*
+ * This function is a helper specifically for the append mode of
+ * mksquashfs. Its purpose is to set the internal compressor state
+ * to the stored compressor options in the passed compressor options
+ * structure.
+ *
+ * In effect this function sets up the compressor options
+ * to the same state they were when the filesystem was originally
+ * generated, this is to ensure on appending, the compressor uses
+ * the same compression options that were used to generate the
+ * original filesystem.
+ *
+ * Note, even if there are no compressor options, this function is still
+ * called with an empty compressor structure (size == 0), to explicitly
+ * set the default options; this is to ensure any user supplied
+ * -X options on the appending mksquashfs command line are overridden.
+ *
+ * This function returns 0 on successful extraction of options, and
+ * -1 on error
+ */
+static int lz4_extract_options(int block_size, void *buffer, int size)
+{
+ struct lz4_comp_opts *comp_opts = buffer;
+
+ /* we expect a comp_opts structure to be present */
+ if(size < sizeof(*comp_opts))
+ goto failed;
+
+ SQUASHFS_INSWAP_COMP_OPTS(comp_opts);
+
+ /* we expect the stream format to be LZ4_LEGACY */
+ if(comp_opts->version != LZ4_LEGACY) {
+ fprintf(stderr, "lz4: unknown LZ4 version\n");
+ goto failed;
+ }
+
+ /*
+ * Check compression flags, currently only LZ4_HC ("high compression")
+ * can be set.
+ */
+ if(comp_opts->flags == LZ4_HC)
+ hc = 1;
+ else if(comp_opts->flags != 0) {
+ fprintf(stderr, "lz4: unknown LZ4 flags\n");
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ fprintf(stderr, "lz4: error reading stored compressor options from "
+ "filesystem!\n");
+
+ return -1;
+}
+
+
+/*
+ * This function is a helper specifically for unsquashfs.
+ * Its purpose is to check that the compression options are
+ * understood by this version of LZ4.
+ *
+ * This is important for LZ4 because the format understood by the
+ * Linux kernel may change from the already obsolete legacy format
+ * currently supported.
+ *
+ * If this does happen, then this version of LZ4 will not be able to decode
+ * the newer format. So we need to check for this.
+ *
+ * This function returns 0 on successful checking of options, and
+ * -1 on error
+ */
+static int lz4_check_options(int block_size, void *buffer, int size)
+{
+ struct lz4_comp_opts *comp_opts = buffer;
+
+ /* we expect a comp_opts structure to be present */
+ if(size < sizeof(*comp_opts))
+ goto failed;
+
+ SQUASHFS_INSWAP_COMP_OPTS(comp_opts);
+
+ /* we expect the stream format to be LZ4_LEGACY */
+ if(comp_opts->version != LZ4_LEGACY) {
+ fprintf(stderr, "lz4: unknown LZ4 version\n");
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ fprintf(stderr, "lz4: error reading stored compressor options from "
+ "filesystem!\n");
+ return -1;
+}
+
+
+static void lz4_display_options(void *buffer, int size)
+{
+ struct lz4_comp_opts *comp_opts = buffer;
+
+ /* check passed comp opts struct is of the correct length */
+ if(size < sizeof(*comp_opts))
+ goto failed;
+
+ SQUASHFS_INSWAP_COMP_OPTS(comp_opts);
+
+ /* we expect the stream format to be LZ4_LEGACY */
+ if(comp_opts->version != LZ4_LEGACY) {
+ fprintf(stderr, "lz4: unknown LZ4 version\n");
+ goto failed;
+ }
+
+ /*
+ * Check compression flags, currently only LZ4_HC ("high compression")
+ * can be set.
+ */
+ if(comp_opts->flags & ~LZ4_FLAGS_MASK) {
+ fprintf(stderr, "lz4: unknown LZ4 flags\n");
+ goto failed;
+ }
+
+ if(comp_opts->flags & LZ4_HC)
+ printf("\tHigh Compression option specified (-Xhc)\n");
+
+ return;
+
+failed:
+ fprintf(stderr, "lz4: error reading stored compressor options from "
+ "filesystem!\n");
+}
+
+
+static int lz4_compress(void *strm, void *dest, void *src, int size,
+ int block_size, int *error)
+{
+ int res;
+
+ if(hc)
+ res = COMPRESS_HC(src, dest, size, block_size);
+ else
+ res = COMPRESS(src, dest, size, block_size);
+
+ if(res == 0) {
+ /*
+ * Output buffer overflow. Return out of buffer space
+ */
+ return 0;
+ } else if(res < 0) {
+ /*
+ * All other errors return failure, with the compressor
+ * specific error code in *error
+ */
+ *error = res;
+ return -1;
+ }
+
+ return res;
+}
+
+
+static int lz4_uncompress(void *dest, void *src, int size, int outsize,
+ int *error)
+{
+ int res = LZ4_decompress_safe(src, dest, size, outsize);
+ if(res < 0) {
+ *error = res;
+ return -1;
+ }
+
+ return res;
+}
+
+
+static void lz4_usage(FILE *stream)
+{
+ fprintf(stream, "\t -Xhc\n");
+ fprintf(stream, "\t\tCompress using LZ4 High Compression\n");
+}
+
+
+struct compressor lz4_comp_ops = {
+ .compress = lz4_compress,
+ .uncompress = lz4_uncompress,
+ .options = lz4_options,
+ .dump_options = lz4_dump_options,
+ .extract_options = lz4_extract_options,
+ .check_options = lz4_check_options,
+ .display_options = lz4_display_options,
+ .usage = lz4_usage,
+ .id = LZ4_COMPRESSION,
+ .name = "lz4",
+ .supported = 1
+};
diff --git a/squashfs-tools/lz4_wrapper.h b/squashfs-tools/lz4_wrapper.h
new file mode 100644
index 0000000..5eef1af
--- /dev/null
+++ b/squashfs-tools/lz4_wrapper.h
@@ -0,0 +1,55 @@
+#ifndef LZ4_WRAPPER_H
+#define LZ4_WRAPPER_H
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * lz4_wrapper.h
+ *
+ */
+
+#include "endian_compat.h"
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+extern unsigned int inswap_le32(unsigned int);
+
+#define SQUASHFS_INSWAP_COMP_OPTS(s) { \
+ (s)->version = inswap_le32((s)->version); \
+ (s)->flags = inswap_le32((s)->flags); \
+}
+#else
+#define SQUASHFS_INSWAP_COMP_OPTS(s)
+#endif
+
+/*
+ * Define the various stream formats recognised.
+ * Currently only the legacy stream format is supported by the
+ * kernel
+ */
+#define LZ4_LEGACY 1
+#define LZ4_FLAGS_MASK 1
+
+/* Define the compression flags recognised. */
+#define LZ4_HC 1
+
+struct lz4_comp_opts {
+ int version;
+ int flags;
+};
+#endif
diff --git a/squashfs-tools/lzma_wrapper.c b/squashfs-tools/lzma_wrapper.c
new file mode 100644
index 0000000..d06cacc
--- /dev/null
+++ b/squashfs-tools/lzma_wrapper.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2009, 2010, 2013, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * lzma_wrapper.c
+ *
+ * Support for LZMA1 compression using LZMA SDK (4.65 used in
+ * development, other versions may work) http://www.7-zip.org/sdk.html
+ */
+
+#include <LzmaLib.h>
+
+#include "squashfs_fs.h"
+#include "compressor.h"
+
+#define LZMA_HEADER_SIZE (LZMA_PROPS_SIZE + 8)
+
+static int lzma_compress(void *strm, void *dest, void *src, int size, int block_size,
+ int *error)
+{
+ unsigned char *d = dest;
+ size_t props_size = LZMA_PROPS_SIZE,
+ outlen = block_size - LZMA_HEADER_SIZE;
+ int res;
+
+ res = LzmaCompress(dest + LZMA_HEADER_SIZE, &outlen, src, size, dest,
+ &props_size, 5, block_size, 3, 0, 2, 32, 1);
+
+ if(res == SZ_ERROR_OUTPUT_EOF) {
+ /*
+ * Output buffer overflow. Return out of buffer space error
+ */
+ return 0;
+ }
+
+ if(res != SZ_OK) {
+ /*
+ * All other errors return failure, with the compressor
+ * specific error code in *error
+ */
+ *error = res;
+ return -1;
+ }
+
+ /*
+ * Fill in the 8 byte little endian uncompressed size field in the
+ * LZMA header. 8 bytes is excessively large for squashfs but
+	 * this is the standard LZMA header which is expected by the kernel
+ * code
+ */
+ d[LZMA_PROPS_SIZE] = size & 255;
+ d[LZMA_PROPS_SIZE + 1] = (size >> 8) & 255;
+ d[LZMA_PROPS_SIZE + 2] = (size >> 16) & 255;
+ d[LZMA_PROPS_SIZE + 3] = (size >> 24) & 255;
+ d[LZMA_PROPS_SIZE + 4] = 0;
+ d[LZMA_PROPS_SIZE + 5] = 0;
+ d[LZMA_PROPS_SIZE + 6] = 0;
+ d[LZMA_PROPS_SIZE + 7] = 0;
+
+ /*
+ * Success, return the compressed size. Outlen returned by the LZMA
+ * compressor does not include the LZMA header space
+ */
+ return outlen + LZMA_HEADER_SIZE;
+}
+
+
+static int lzma_uncompress(void *dest, void *src, int size, int outsize,
+ int *error)
+{
+ unsigned char *s = src;
+ size_t outlen, inlen = size - LZMA_HEADER_SIZE;
+ int res;
+
+ outlen = s[LZMA_PROPS_SIZE] |
+ (s[LZMA_PROPS_SIZE + 1] << 8) |
+ (s[LZMA_PROPS_SIZE + 2] << 16) |
+ (s[LZMA_PROPS_SIZE + 3] << 24);
+
+ if(outlen > outsize) {
+ *error = 0;
+ return -1;
+ }
+
+ res = LzmaUncompress(dest, &outlen, src + LZMA_HEADER_SIZE, &inlen, src,
+ LZMA_PROPS_SIZE);
+
+ if(res == SZ_OK)
+ return outlen;
+ else {
+ *error = res;
+ return -1;
+ }
+}
+
+
+static void lzma_usage(FILE *stream)
+{
+ fprintf(stream, "\t (no options) (deprecated - no kernel support)\n");
+}
+
+
+struct compressor lzma_comp_ops = {
+ .init = NULL,
+ .compress = lzma_compress,
+ .uncompress = lzma_uncompress,
+ .options = NULL,
+ .usage = lzma_usage,
+ .id = LZMA_COMPRESSION,
+ .name = "lzma",
+ .supported = 1
+};
+
diff --git a/squashfs-tools/lzma_xz_wrapper.c b/squashfs-tools/lzma_xz_wrapper.c
new file mode 100644
index 0000000..ba90f60
--- /dev/null
+++ b/squashfs-tools/lzma_xz_wrapper.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2010, 2013, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * lzma_xz_wrapper.c
+ *
+ * Support for LZMA1 compression using XZ Utils liblzma http://tukaani.org/xz/
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <lzma.h>
+
+#include "squashfs_fs.h"
+#include "compressor.h"
+
+#define LZMA_PROPS_SIZE 5
+#define LZMA_UNCOMP_SIZE 8
+#define LZMA_HEADER_SIZE (LZMA_PROPS_SIZE + LZMA_UNCOMP_SIZE)
+
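+/*
+ * LZMA_OPTIONS is the preset level passed to lzma_lzma_preset() when
+ * compressing, and MEMLIMIT is the memory limit (32 Mbytes) passed to
+ * lzma_alone_decoder() when uncompressing.
+ */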
+#define LZMA_OPTIONS 5
+#define MEMLIMIT (32 * 1024 * 1024)
+
+static int lzma_compress(void *dummy, void *dest, void *src, int size,
+ int block_size, int *error)
+{
+ unsigned char *d = (unsigned char *) dest;
+ lzma_options_lzma opt;
+ lzma_stream strm = LZMA_STREAM_INIT;
+ int res;
+
+ lzma_lzma_preset(&opt, LZMA_OPTIONS);
+ opt.dict_size = block_size;
+
+ res = lzma_alone_encoder(&strm, &opt);
+ if(res != LZMA_OK) {
+ lzma_end(&strm);
+ goto failed;
+ }
+
+ strm.next_out = dest;
+ strm.avail_out = block_size;
+ strm.next_in = src;
+ strm.avail_in = size;
+
+ res = lzma_code(&strm, LZMA_FINISH);
+ lzma_end(&strm);
+
+ if(res == LZMA_STREAM_END) {
+ /*
+ * Fill in the 8 byte little endian uncompressed size field in
+ * the LZMA header. 8 bytes is excessively large for squashfs
+		 * but this is the standard LZMA header which is expected by
+ * the kernel code
+ */
+
+ d[LZMA_PROPS_SIZE] = size & 255;
+ d[LZMA_PROPS_SIZE + 1] = (size >> 8) & 255;
+ d[LZMA_PROPS_SIZE + 2] = (size >> 16) & 255;
+ d[LZMA_PROPS_SIZE + 3] = (size >> 24) & 255;
+ d[LZMA_PROPS_SIZE + 4] = 0;
+ d[LZMA_PROPS_SIZE + 5] = 0;
+ d[LZMA_PROPS_SIZE + 6] = 0;
+ d[LZMA_PROPS_SIZE + 7] = 0;
+
+ return (int) strm.total_out;
+ }
+
+ if(res == LZMA_OK)
+ /*
+ * Output buffer overflow. Return out of buffer space
+ */
+ return 0;
+
+failed:
+ /*
+ * All other errors return failure, with the compressor
+ * specific error code in *error
+ */
+ *error = res;
+ return -1;
+}
+
+
+static int lzma_uncompress(void *dest, void *src, int size, int outsize,
+ int *error)
+{
+ lzma_stream strm = LZMA_STREAM_INIT;
+ int uncompressed_size = 0, res;
+ unsigned char lzma_header[LZMA_HEADER_SIZE];
+
+ res = lzma_alone_decoder(&strm, MEMLIMIT);
+ if(res != LZMA_OK) {
+ lzma_end(&strm);
+ goto failed;
+ }
+
+ memcpy(lzma_header, src, LZMA_HEADER_SIZE);
+ uncompressed_size = lzma_header[LZMA_PROPS_SIZE] |
+ (lzma_header[LZMA_PROPS_SIZE + 1] << 8) |
+ (lzma_header[LZMA_PROPS_SIZE + 2] << 16) |
+ (lzma_header[LZMA_PROPS_SIZE + 3] << 24);
+
+ if(uncompressed_size > outsize) {
+ res = 0;
+ goto failed;
+ }
+
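+	/*
+	 * Feed the decoder a copy of the header with the uncompressed size
+	 * field set to all 0xFF bytes (treated by the lzma_alone format as
+	 * "size unknown"), and then feed the compressed data itself.
+	 */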
+ memset(lzma_header + LZMA_PROPS_SIZE, 255, LZMA_UNCOMP_SIZE);
+
+ strm.next_out = dest;
+ strm.avail_out = outsize;
+ strm.next_in = lzma_header;
+ strm.avail_in = LZMA_HEADER_SIZE;
+
+ res = lzma_code(&strm, LZMA_RUN);
+
+ if(res != LZMA_OK || strm.avail_in != 0) {
+ lzma_end(&strm);
+ goto failed;
+ }
+
+ strm.next_in = src + LZMA_HEADER_SIZE;
+ strm.avail_in = size - LZMA_HEADER_SIZE;
+
+ res = lzma_code(&strm, LZMA_FINISH);
+ lzma_end(&strm);
+
+ if(res == LZMA_STREAM_END || (res == LZMA_OK &&
+ strm.total_out >= uncompressed_size && strm.avail_in == 0))
+ return uncompressed_size;
+
+failed:
+ *error = res;
+ return -1;
+}
+
+
+static void lzma_usage(FILE *stream)
+{
+ fprintf(stream, "\t (no options) (deprecated - no kernel support)\n");
+}
+
+
+struct compressor lzma_comp_ops = {
+ .init = NULL,
+ .compress = lzma_compress,
+ .uncompress = lzma_uncompress,
+ .options = NULL,
+ .usage = lzma_usage,
+ .id = LZMA_COMPRESSION,
+ .name = "lzma",
+ .supported = 1
+};
+
diff --git a/squashfs-tools/lzo_wrapper.c b/squashfs-tools/lzo_wrapper.c
new file mode 100644
index 0000000..147c8ef
--- /dev/null
+++ b/squashfs-tools/lzo_wrapper.c
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) 2013, 2014, 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * lzo_wrapper.c
+ *
+ * Support for LZO compression http://www.oberhumer.com/opensource/lzo
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <lzo/lzoconf.h>
+#include <lzo/lzo1x.h>
+
+#include "squashfs_fs.h"
+#include "lzo_wrapper.h"
+#include "compressor.h"
+
+static struct lzo_algorithm lzo[] = {
+ { "lzo1x_1", LZO1X_1_MEM_COMPRESS, lzo1x_1_compress },
+ { "lzo1x_1_11", LZO1X_1_11_MEM_COMPRESS, lzo1x_1_11_compress },
+ { "lzo1x_1_12", LZO1X_1_12_MEM_COMPRESS, lzo1x_1_12_compress },
+ { "lzo1x_1_15", LZO1X_1_15_MEM_COMPRESS, lzo1x_1_15_compress },
+ { "lzo1x_999", LZO1X_999_MEM_COMPRESS, lzo1x_999_wrapper },
+ { NULL, 0, NULL }
+};
+
+/* default LZO compression algorithm and compression level */
+static int algorithm = SQUASHFS_LZO1X_999;
+static int compression_level = SQUASHFS_LZO1X_999_COMP_DEFAULT;
+
+/* user specified compression level */
+static int user_comp_level = -1;
+
+
+/*
+ * This function is called by the options parsing code in mksquashfs.c
+ * to parse any -X compressor option.
+ *
+ * This function returns:
+ * >=0 (number of additional args parsed) on success
+ * -1 if the option was unrecognised, or
+ * -2 if the option was recognised, but otherwise bad in
+ * some way (e.g. invalid parameter)
+ *
+ * Note: this function sets internal compressor state, but does not
+ * pass back the results of the parsing other than success/failure.
+ * The lzo_dump_options() function is called later to get the options in
+ * a format suitable for writing to the filesystem.
+ */
+static int lzo_options(char *argv[], int argc)
+{
+ int i;
+
+ if(strcmp(argv[0], "-Xalgorithm") == 0) {
+ if(argc < 2) {
+ fprintf(stderr, "lzo: -Xalgorithm missing algorithm\n");
+ fprintf(stderr, "lzo: -Xalgorithm <algorithm>\n");
+ goto failed2;
+ }
+
+ for(i = 0; lzo[i].name; i++) {
+ if(strcmp(argv[1], lzo[i].name) == 0) {
+ algorithm = i;
+ return 1;
+ }
+ }
+
+ fprintf(stderr, "lzo: -Xalgorithm unrecognised algorithm\n");
+ goto failed2;
+ } else if(strcmp(argv[0], "-Xcompression-level") == 0) {
+ if(argc < 2) {
+ fprintf(stderr, "lzo: -Xcompression-level missing "
+ "compression level\n");
+			fprintf(stderr, "lzo: -Xcompression-level "
+				"should be 1 <= n <= 9\n");
+ goto failed;
+ }
+
+ user_comp_level = atoi(argv[1]);
+ if(user_comp_level < 1 || user_comp_level > 9) {
+			fprintf(stderr, "lzo: -Xcompression-level invalid, it "
+				"should be 1 <= n <= 9\n");
+ goto failed;
+ }
+
+ return 1;
+ }
+
+ return -1;
+
+failed:
+ return -2;
+
+failed2:
+ fprintf(stderr, "lzo: compression algorithm should be one of:\n");
+ for(i = 0; lzo[i].name; i++)
+ fprintf(stderr, "\t%s\n", lzo[i].name);
+ return -2;
+}
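+
+/*
+ * Example (hypothetical invocation, for illustration only): given the
+ * command line option "-Xalgorithm lzo1x_999", the options parsing code
+ * would pass
+ *
+ *	char *args[] = { "-Xalgorithm", "lzo1x_999" };
+ *	int used = lzo_options(args, 2);
+ *
+ * which returns 1 (one additional argument consumed) and records the
+ * selected algorithm in the static "algorithm" variable above.
+ */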
+
+
+/*
+ * This function is called after all options have been parsed.
+ * It is used to do post-processing on the compressor options using
+ * values that were not expected to be known at option parse time.
+ *
+ * In this case the LZO algorithm may not be known until after the
+ * compression level has been set (-Xalgorithm used after -Xcompression-level)
+ *
+ * This function returns 0 on successful post processing, or
+ * -1 on error
+ */
+static int lzo_options_post(int block_size)
+{
+ /*
+ * Use of compression level only makes sense for
+ * LZO1X_999 algorithm
+ */
+ if(user_comp_level != -1) {
+ if(algorithm != SQUASHFS_LZO1X_999) {
+ fprintf(stderr, "lzo: -Xcompression-level not "
+ "supported by selected %s algorithm\n",
+ lzo[algorithm].name);
+ fprintf(stderr, "lzo: -Xcompression-level is only "
+ "applicable for the lzo1x_999 algorithm\n");
+ goto failed;
+ }
+ compression_level = user_comp_level;
+ }
+
+ return 0;
+
+failed:
+ return -1;
+}
+
+
+/*
+ * This function is called by mksquashfs to dump the parsed
+ * compressor options in a format suitable for writing to the
+ * compressor options field in the filesystem (stored immediately
+ * after the superblock).
+ *
+ * This function returns a pointer to the compression options structure
+ * to be stored (and the size), or NULL if there are no compression
+ * options
+ *
+ */
+static void *lzo_dump_options(int block_size, int *size)
+{
+ static struct lzo_comp_opts comp_opts;
+
+ /*
+	 * If the default algorithm SQUASHFS_LZO1X_999 and the default
+	 * compression level SQUASHFS_LZO1X_999_COMP_DEFAULT are used, then
+ * don't store a compression options structure (this is compatible
+ * with the legacy implementation of LZO for Squashfs)
+ */
+ if(algorithm == SQUASHFS_LZO1X_999 &&
+ compression_level == SQUASHFS_LZO1X_999_COMP_DEFAULT)
+ return NULL;
+
+ comp_opts.algorithm = algorithm;
+ comp_opts.compression_level = algorithm == SQUASHFS_LZO1X_999 ?
+ compression_level : 0;
+
+ SQUASHFS_INSWAP_COMP_OPTS(&comp_opts);
+
+ *size = sizeof(comp_opts);
+ return &comp_opts;
+}
+
+
+/*
+ * This function is a helper specifically for the append mode of
+ * mksquashfs. Its purpose is to set the internal compressor state
+ * to the stored compressor options in the passed compressor options
+ * structure.
+ *
+ * In effect this function sets up the compressor options
+ * to the same state they were in when the filesystem was originally
+ * generated.  This ensures that on appending, the compressor uses
+ * the same compression options that were used to generate the
+ * original filesystem.
+ *
+ * Note, even if there are no compressor options, this function is still
+ * called with an empty compressor structure (size == 0), to explicitly
+ * set the default options.  This ensures that any user supplied
+ * -X options on the appending mksquashfs command line are overridden
+ *
+ * This function returns 0 on successful extraction of options, and
+ * -1 on error
+ */
+static int lzo_extract_options(int block_size, void *buffer, int size)
+{
+ struct lzo_comp_opts *comp_opts = buffer;
+
+ if(size == 0) {
+ /* Set default values */
+ algorithm = SQUASHFS_LZO1X_999;
+ compression_level = SQUASHFS_LZO1X_999_COMP_DEFAULT;
+ return 0;
+ }
+
+ /* we expect a comp_opts structure of sufficient size to be present */
+ if(size < sizeof(*comp_opts))
+ goto failed;
+
+ SQUASHFS_INSWAP_COMP_OPTS(comp_opts);
+
+ /* Check comp_opts structure for correctness */
+ switch(comp_opts->algorithm) {
+ case SQUASHFS_LZO1X_1:
+ case SQUASHFS_LZO1X_1_11:
+ case SQUASHFS_LZO1X_1_12:
+ case SQUASHFS_LZO1X_1_15:
+ if(comp_opts->compression_level != 0) {
+ fprintf(stderr, "lzo: bad compression level in "
+ "compression options structure\n");
+ goto failed;
+ }
+ break;
+ case SQUASHFS_LZO1X_999:
+ if(comp_opts->compression_level < 1 ||
+ comp_opts->compression_level > 9) {
+ fprintf(stderr, "lzo: bad compression level in "
+ "compression options structure\n");
+ goto failed;
+ }
+ compression_level = comp_opts->compression_level;
+ break;
+ default:
+ fprintf(stderr, "lzo: bad algorithm in compression options "
+ "structure\n");
+ goto failed;
+ }
+
+ algorithm = comp_opts->algorithm;
+
+ return 0;
+
+failed:
+ fprintf(stderr, "lzo: error reading stored compressor options from "
+ "filesystem!\n");
+
+ return -1;
+}
+
+
+static void lzo_display_options(void *buffer, int size)
+{
+ struct lzo_comp_opts *comp_opts = buffer;
+
+ /* we expect a comp_opts structure of sufficient size to be present */
+ if(size < sizeof(*comp_opts))
+ goto failed;
+
+ SQUASHFS_INSWAP_COMP_OPTS(comp_opts);
+
+ /* Check comp_opts structure for correctness */
+ switch(comp_opts->algorithm) {
+ case SQUASHFS_LZO1X_1:
+ case SQUASHFS_LZO1X_1_11:
+ case SQUASHFS_LZO1X_1_12:
+ case SQUASHFS_LZO1X_1_15:
+ printf("\talgorithm %s\n", lzo[comp_opts->algorithm].name);
+ break;
+ case SQUASHFS_LZO1X_999:
+ if(comp_opts->compression_level < 1 ||
+ comp_opts->compression_level > 9) {
+ fprintf(stderr, "lzo: bad compression level in "
+ "compression options structure\n");
+ goto failed;
+ }
+ printf("\talgorithm %s\n", lzo[comp_opts->algorithm].name);
+ printf("\tcompression level %d\n",
+ comp_opts->compression_level);
+ break;
+ default:
+ fprintf(stderr, "lzo: bad algorithm in compression options "
+ "structure\n");
+ goto failed;
+ }
+
+ return;
+
+failed:
+ fprintf(stderr, "lzo: error reading stored compressor options from "
+ "filesystem!\n");
+}
+
+
+/*
+ * This function is called by mksquashfs to initialise the
+ * compressor, before compress() is called.
+ *
+ * This function returns 0 on success, and
+ * -1 on error
+ */
+static int squashfs_lzo_init(void **strm, int block_size, int datablock)
+{
+ struct lzo_stream *stream;
+
+ stream = *strm = malloc(sizeof(struct lzo_stream));
+ if(stream == NULL)
+ goto failed;
+
+ stream->workspace = malloc(lzo[algorithm].size);
+ if(stream->workspace == NULL)
+ goto failed2;
+
+ stream->buffer = malloc(LZO_MAX_EXPANSION(block_size));
+ if(stream->buffer != NULL)
+ return 0;
+
+ free(stream->workspace);
+failed2:
+ free(stream);
+failed:
+ return -1;
+}
+
+
+static int lzo_compress(void *strm, void *dest, void *src, int size,
+ int block_size, int *error)
+{
+ int res;
+ lzo_uint compsize, orig_size = size;
+ struct lzo_stream *stream = strm;
+
+ res = lzo[algorithm].compress(src, size, stream->buffer, &compsize,
+ stream->workspace);
+ if(res != LZO_E_OK)
+ goto failed;
+
+	/* Compression was successful.  However, we need to check that
+	 * the compressed size is not larger than the available
+	 * buffer space.  Other compressor APIs normally take a
+	 * destination buffer size, and overflows return an error.
+	 * LZO lacks a destination size, and so we must output
+	 * to a temporary buffer large enough to accommodate any
+	 * result, and explicitly check here for overflow
+	 */
+ if(compsize > block_size)
+ return 0;
+
+ res = lzo1x_optimize(stream->buffer, compsize, src, &orig_size, NULL);
+
+ if (res != LZO_E_OK || orig_size != size)
+ goto failed;
+
+ memcpy(dest, stream->buffer, compsize);
+ return compsize;
+
+failed:
+ /* fail, compressor specific error code returned in error */
+ *error = res;
+ return -1;
+}
+
+
+static int lzo_uncompress(void *dest, void *src, int size, int outsize,
+ int *error)
+{
+ int res;
+ lzo_uint outlen = outsize;
+
+ res = lzo1x_decompress_safe(src, size, dest, &outlen, NULL);
+ if(res != LZO_E_OK) {
+ *error = res;
+ return -1;
+ }
+
+ return outlen;
+}
+
+
+static void lzo_usage(FILE *stream)
+{
+ int i;
+
+ fprintf(stream, "\t -Xalgorithm <algorithm>\n");
+ fprintf(stream, "\t\tWhere <algorithm> is one of:\n");
+
+ for(i = 0; lzo[i].name; i++)
+ fprintf(stream, "\t\t\t%s%s\n", lzo[i].name,
+ i == SQUASHFS_LZO1X_999 ? " (default)" : "");
+
+ fprintf(stream, "\t -Xcompression-level <compression-level>\n");
+ fprintf(stream, "\t\t<compression-level> should be 1 .. 9 (default "
+ "%d)\n", SQUASHFS_LZO1X_999_COMP_DEFAULT);
+ fprintf(stream, "\t\tOnly applies to lzo1x_999 algorithm\n");
+}
+
+
+/*
+ * Helper function for lzo1x_999 compression algorithm.
+ * All other lzo1x_xxx compressors do not take a compression level,
+ * so we need to wrap lzo1x_999 to pass the compression level which
+ * is applicable to it
+ */
+int lzo1x_999_wrapper(const lzo_bytep src, lzo_uint src_len, lzo_bytep dst,
+ lzo_uintp compsize, lzo_voidp workspace)
+{
+ return lzo1x_999_compress_level(src, src_len, dst, compsize,
+ workspace, NULL, 0, 0, compression_level);
+}
+
+
+static int option_args(char *option)
+{
+ if(strcmp(option, "-Xalgorithm") == 0 ||
+ strcmp(option, "-Xcompression-level") == 0)
+ return 1;
+
+ return 0;
+}
+
+
+struct compressor lzo_comp_ops = {
+ .init = squashfs_lzo_init,
+ .compress = lzo_compress,
+ .uncompress = lzo_uncompress,
+ .options = lzo_options,
+ .options_post = lzo_options_post,
+ .dump_options = lzo_dump_options,
+ .extract_options = lzo_extract_options,
+ .display_options = lzo_display_options,
+ .usage = lzo_usage,
+ .option_args = option_args,
+ .id = LZO_COMPRESSION,
+ .name = "lzo",
+ .supported = 1
+};
diff --git a/squashfs-tools/lzo_wrapper.h b/squashfs-tools/lzo_wrapper.h
new file mode 100644
index 0000000..f6223e1
--- /dev/null
+++ b/squashfs-tools/lzo_wrapper.h
@@ -0,0 +1,72 @@
+#ifndef LZO_WRAPPER_H
+#define LZO_WRAPPER_H
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * lzo_wrapper.h
+ *
+ */
+
+#include "endian_compat.h"
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+extern unsigned int inswap_le32(unsigned int);
+
+#define SQUASHFS_INSWAP_COMP_OPTS(s) { \
+ (s)->algorithm = inswap_le32((s)->algorithm); \
+ (s)->compression_level = inswap_le32((s)->compression_level); \
+}
+#else
+#define SQUASHFS_INSWAP_COMP_OPTS(s)
+#endif
+
+/* Define the compression algorithms recognised. */
+#define SQUASHFS_LZO1X_1 0
+#define SQUASHFS_LZO1X_1_11 1
+#define SQUASHFS_LZO1X_1_12 2
+#define SQUASHFS_LZO1X_1_15 3
+#define SQUASHFS_LZO1X_999 4
+
+/* Default compression level used by SQUASHFS_LZO1X_999 */
+#define SQUASHFS_LZO1X_999_COMP_DEFAULT 8
+
+struct lzo_comp_opts {
+ int algorithm;
+ int compression_level;
+};
+
+struct lzo_algorithm {
+ char *name;
+ int size;
+ int (*compress) (const lzo_bytep, lzo_uint, lzo_bytep, lzo_uintp,
+ lzo_voidp);
+};
+
+struct lzo_stream {
+ void *workspace;
+ void *buffer;
+};
+
+#define LZO_MAX_EXPANSION(size) (size + (size / 16) + 64 + 3)
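+
+/*
+ * Worked example (for illustration): with the default 128 Kbyte block
+ * size, LZO_MAX_EXPANSION(131072) is 131072 + 8192 + 64 + 3 = 139331
+ * bytes, the worst-case size of the temporary buffer allocated in
+ * squashfs_lzo_init().
+ */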
+
+int lzo1x_999_wrapper(const lzo_bytep, lzo_uint, lzo_bytep, lzo_uintp,
+ lzo_voidp);
+
+#endif
diff --git a/squashfs-tools/merge_sort.h b/squashfs-tools/merge_sort.h
new file mode 100644
index 0000000..345942c
--- /dev/null
+++ b/squashfs-tools/merge_sort.h
@@ -0,0 +1,116 @@
+#ifndef MERGE_SORT_H
+#define MERGE_SORT_H
+
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * merge_sort.h
+ */
+
+/*
+ * Bottom up linked list merge sort.
+ *
+ * Qsort and other O(n log n) algorithms work well with arrays but not
+ * linked lists. Merge sort another O(n log n) sort algorithm on the other hand
+ * is not ideal for arrays (as it needs an additonal n storage locations
+ * as sorting is not done in place), but it is ideal for linked lists because
+ * it doesn't require any extra storage,
+ */
+
+#define SORT(FUNCTION_NAME, LIST_TYPE, NAME, NEXT) \
+void FUNCTION_NAME(struct LIST_TYPE **head, int count) \
+{ \
+ struct LIST_TYPE *cur, *l1, *l2, *next; \
+ int len1, len2, stride = 1; \
+ \
+ if(*head == NULL || count < 2) \
+ return; \
+ \
+ /* \
+ * We can consider our linked-list to be made up of stride length \
+	 * sublists. Each iteration around this loop merges adjacent \
+ * stride length sublists into larger 2*stride sublists. We stop \
+ * when stride becomes equal to the entire list. \
+ * \
+ * Initially stride = 1 (by definition a sublist of 1 is sorted), and \
+ * these 1 element sublists are merged into 2 element sublists, which \
+ * are then merged into 4 element sublists and so on. \
+ */ \
+ do { \
+ l2 = *head; /* head of current linked list */ \
+ cur = NULL; /* empty output list */ \
+ \
+ /* \
+ * Iterate through the linked list, merging adjacent sublists. \
+		 * On each iteration l2 points to the next sublist pair to be \
+ * merged (if there's only one sublist left this is simply added \
+ * to the output list) \
+ */ \
+ while(l2) { \
+ l1 = l2; \
+ for(len1 = 0; l2 && len1 < stride; len1 ++, l2 = l2->NEXT); \
+ len2 = stride; \
+ \
+ /* \
+ * l1 points to first sublist. \
+ * l2 points to second sublist. \
+ * Merge them onto the output list \
+ */ \
+ while(len1 && l2 && len2) { \
+ if(strcmp(l1->NAME, l2->NAME) <= 0) { \
+ next = l1; \
+ l1 = l1->NEXT; \
+ len1 --; \
+ } else { \
+ next = l2; \
+ l2 = l2->NEXT; \
+ len2 --; \
+ } \
+ \
+ if(cur) { \
+ cur->NEXT = next; \
+ cur = next; \
+ } else \
+ *head = cur = next; \
+ } \
+ /* \
+ * One sublist is now empty, copy the other one onto the \
+ * output list \
+ */ \
+ for(; len1; len1 --, l1 = l1->NEXT) { \
+ if(cur) { \
+ cur->NEXT = l1; \
+ cur = l1; \
+ } else \
+ *head = cur = l1; \
+ } \
+ for(; l2 && len2; len2 --, l2 = l2->NEXT) { \
+ if(cur) { \
+ cur->NEXT = l2; \
+ cur = l2; \
+ } else \
+ *head = cur = l2; \
+ } \
+ } \
+ cur->NEXT = NULL; \
+ stride = stride << 1; \
+ } while(stride < count); \
+}
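+
+/*
+ * Usage sketch (hypothetical list type, for illustration only): given
+ *
+ *	struct entry {
+ *		char *name;
+ *		struct entry *next;
+ *	};
+ *
+ * the macro can be instantiated as
+ *
+ *	SORT(sort_entries, entry, name, next)
+ *
+ * which defines void sort_entries(struct entry **head, int count),
+ * sorting the list into ascending strcmp() order of the name field.
+ */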
+#endif
diff --git a/squashfs-tools/mksquashfs.c b/squashfs-tools/mksquashfs.c
new file mode 100644
index 0000000..ba28d65
--- /dev/null
+++ b/squashfs-tools/mksquashfs.c
@@ -0,0 +1,8902 @@
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
+ * 2012, 2013, 2014, 2017, 2019, 2021, 2022, 2023
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * mksquashfs.c
+ */
+
+#define FALSE 0
+#define TRUE 1
+#define MAX_LINE 16384
+
+#include <pwd.h>
+#include <grp.h>
+#include <time.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <dirent.h>
+#include <string.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <regex.h>
+#include <sys/wait.h>
+#include <limits.h>
+#include <ctype.h>
+
+#ifdef __linux__
+#include <sys/sysinfo.h>
+#include <sys/sysmacros.h>
+#include <sched.h>
+#else
+#include <sys/sysctl.h>
+#endif
+
+#include "squashfs_fs.h"
+#include "squashfs_swap.h"
+#include "mksquashfs.h"
+#include "sort.h"
+#include "pseudo.h"
+#include "compressor.h"
+#include "xattr.h"
+#include "action.h"
+#include "mksquashfs_error.h"
+#include "progressbar.h"
+#include "info.h"
+#include "caches-queues-lists.h"
+#include "read_fs.h"
+#include "restore.h"
+#include "process_fragments.h"
+#include "fnmatch_compat.h"
+#include "tar.h"
+#include "merge_sort.h"
+
+/* Compression options */
+int noF = FALSE;
+int noI = FALSE;
+int noId = FALSE;
+int noD = FALSE;
+int noX = FALSE;
+
+/* block size used to build filesystem */
+int block_size = SQUASHFS_FILE_SIZE;
+int block_log;
+
+/* Fragment options: are fragments in the filesystem, and are they used for tail ends? */
+int no_fragments = FALSE;
+int always_use_fragments = FALSE;
+
+/* Are duplicates detected in the filesystem? */
+int duplicate_checking = TRUE;
+
+/* Are filesystems exportable via NFS? */
+int exportable = TRUE;
+
+/* Are sparse files detected and stored? */
+int sparse_files = TRUE;
+
+/* Options which override root inode settings */
+int root_mode_opt = FALSE;
+mode_t root_mode;
+int root_uid_opt = FALSE;
+unsigned int root_uid;
+int root_gid_opt = FALSE;
+unsigned int root_gid;
+unsigned int root_time;
+int root_time_opt = FALSE;
+
+/* Values that override uids and gids for all files and directories */
+int global_uid_opt = FALSE;
+unsigned int global_uid;
+int global_gid_opt = FALSE;
+unsigned int global_gid;
+
+/* Do pseudo uids and guids override -all-root, -force-uid and -force-gid? */
+int pseudo_override = FALSE;
+
+/* Time value override options */
+unsigned int mkfs_time;
+int mkfs_time_opt = FALSE;
+unsigned int all_time;
+int all_time_opt = FALSE;
+int clamping = TRUE;
+
+/* Is max depth option in effect, and max depth to descend into directories */
+int max_depth_opt = FALSE;
+unsigned int max_depth;
+
+/* how should Mksquashfs treat the source files? */
+int tarstyle = FALSE;
+int keep_as_directory = FALSE;
+
+/* should Mksquashfs read files from stdin, like cpio? */
+int cpiostyle = FALSE;
+char filename_terminator = '\n';
+
+/* Should Mksquashfs detect hardlinked files? */
+int no_hardlinks = FALSE;
+
+/* Should Mksquashfs cross filesystem boundaries? */
+int one_file_system = FALSE;
+int one_file_system_x = FALSE;
+dev_t *source_dev;
+dev_t cur_dev;
+
+/* Is Mksquashfs processing a tarfile? */
+int tarfile = FALSE;
+
+/* Is Mksquashfs reading a pseudo file from stdin? */
+int pseudo_stdin = FALSE;
+
+/* Is Mksquashfs storing Xattrs, or excluding/including xattrs using regexs? */
+int no_xattrs = XATTR_DEF;
+unsigned int xattr_bytes = 0, total_xattr_bytes = 0;
+regex_t *xattr_exclude_preg = NULL;
+regex_t *xattr_include_preg = NULL;
+
+/* Does Mksquashfs print a summary and other information when running? */
+int quiet = FALSE;
+
+/* Does Mksquashfs display filenames as they are archived? */
+int silent = TRUE;
+
+/* Is Mksquashfs using the older non-wildcard exclude code? */
+int old_exclude = TRUE;
+
+/* Is Mksquashfs using regexs in exclude file matching (default wildcards)? */
+int use_regex = FALSE;
+
+/* Will Mksquashfs pad the filesystem to a multiple of 4 Kbytes? */
+int nopad = FALSE;
+
+/* Should Mksquashfs treat normally ignored errors as fatal? */
+int exit_on_error = FALSE;
+
+/* Is filesystem stored at an offset from the start of the block device/file? */
+long long start_offset = 0;
+
+/* File count statistics used to print summary and fill in superblock */
+unsigned int file_count = 0, sym_count = 0, dev_count = 0, dir_count = 0,
+fifo_count = 0, sock_count = 0, id_count = 0;
+long long hardlnk_count = 0;
+
+/* superblock attributes */
+struct squashfs_super_block sBlk;
+
+/* write position within data section */
+long long bytes = 0, total_bytes = 0;
+
+/* in memory directory table - possibly compressed */
+char *directory_table = NULL;
+long long directory_bytes = 0, directory_size = 0, total_directory_bytes = 0;
+
+/* cached directory table */
+char *directory_data_cache = NULL;
+unsigned int directory_cache_bytes = 0, directory_cache_size = 0;
+
+/* in memory inode table - possibly compressed */
+char *inode_table = NULL;
+long long inode_bytes = 0, inode_size = 0, total_inode_bytes = 0;
+
+/* cached inode table */
+char *data_cache = NULL;
+unsigned int cache_bytes = 0, cache_size = 0, inode_count = 0;
+
+/* inode lookup table */
+squashfs_inode *inode_lookup_table = NULL;
+struct inode_info *inode_info[INODE_HASH_SIZE];
+
+/* hash tables used to do fast duplicate searches in duplicate check */
+struct file_info **dupl_frag;
+struct file_info **dupl_block;
+unsigned int dup_files = 0;
+
+int exclude = 0;
+struct exclude_info *exclude_paths = NULL;
+
+struct path_entry {
+ char *name;
+ regex_t *preg;
+ struct pathname *paths;
+};
+
+struct pathnames *paths = NULL;
+struct pathname *path = NULL;
+struct pathname *stickypath = NULL;
+
+unsigned int fragments = 0;
+
+struct squashfs_fragment_entry *fragment_table = NULL;
+int fragments_outstanding = 0;
+
+int fragments_locked = FALSE;
+
+/* current inode number for directories and non directories */
+unsigned int inode_no = 1;
+unsigned int root_inode_number = 0;
+
+/* list of source dirs/files */
+int source = 0;
+char **source_path;
+int option_offset;
+
+/* flag whether destination file is a block device */
+int block_device = FALSE;
+
+/* flag indicating whether files are sorted using sort list(s) */
+int sorted = FALSE;
+
+/* save destination file name for deleting on error */
+char *destination_file = NULL;
+
+struct id *id_hash_table[ID_ENTRIES];
+struct id *id_table[SQUASHFS_IDS], *sid_table[SQUASHFS_IDS];
+unsigned int uid_count = 0, guid_count = 0;
+unsigned int sid_count = 0, suid_count = 0, sguid_count = 0;
+
+/* caches used to store buffers being worked on, and queues
+ * used to send buffers between threads */
+struct cache *reader_buffer, *fragment_buffer, *reserve_cache;
+struct cache *bwriter_buffer, *fwriter_buffer;
+struct queue *to_reader, *to_deflate, *to_writer, *from_writer,
+ *to_frag, *locked_fragment, *to_process_frag;
+struct seq_queue *to_main;
+
+/* pthread threads and mutexes */
+pthread_t reader_thread, writer_thread, main_thread;
+pthread_t *deflator_thread, *frag_deflator_thread, *frag_thread;
+pthread_t *restore_thread = NULL;
+pthread_mutex_t fragment_mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_mutex_t pos_mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_mutex_t dup_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/* reproducible image queues and threads */
+struct seq_queue *to_order;
+pthread_t order_thread;
+pthread_cond_t fragment_waiting = PTHREAD_COND_INITIALIZER;
+int sequence_count = 0;
+int reproducible = REP_DEF;
+
+/* user options that control parallelisation */
+int processors = -1;
+int bwriter_size;
+
+/* Compressor options (-X) and initialised compressor (-comp XXX) */
+int comp_opts = FALSE;
+int X_opt_parsed = FALSE;
+struct compressor *comp = NULL;
+int compressor_opt_parsed = FALSE;
+void *stream = NULL;
+
+/* root of the in-core directory structure */
+struct dir_info *root_dir;
+
+/* log file */
+FILE *log_fd;
+int logging=FALSE;
+
+/* file descriptor of the output filesystem */
+int fd;
+
+/* Variables used for appending */
+int appending = TRUE;
+
+/* restore original filesystem state if appending to existing filesystem is
+ * cancelled */
+char *sdata_cache, *sdirectory_data_cache, *sdirectory_compressed;
+long long sbytes, stotal_bytes;
+long long sinode_bytes, stotal_inode_bytes;
+long long sdirectory_bytes, stotal_directory_bytes;
+unsigned int scache_bytes, sdirectory_cache_bytes,
+ sdirectory_compressed_bytes, sinode_count = 0,
+ sfile_count, ssym_count, sdev_count, sdir_count,
+ sfifo_count, ssock_count, sdup_files;
+unsigned int sfragments;
+
+/* list of root directory entries read from original filesystem */
+int old_root_entries = 0;
+struct old_root_entry_info *old_root_entry;
+
+/* fragment to file mapping used when appending */
+struct append_file **file_mapping;
+
+/* recovery file for abnormal exit on appending */
+char *recovery_file = NULL;
+char *recovery_pathname = NULL;
+int recover = TRUE;
+
+/* list of options that have an argument */
+char *option_table[] = { "comp", "b", "mkfs-time", "fstime", "all-time",
+ "root-mode", "force-uid", "force-gid", "action", "log-action",
+ "true-action", "false-action", "action-file", "log-action-file",
+ "true-action-file", "false-action-file", "p", "pf", "sort",
+ "root-becomes", "recover", "recovery-path", "throttle", "limit",
+ "processors", "mem", "offset", "o", "log", "a", "va", "ta", "fa", "af",
+ "vaf", "taf", "faf", "read-queue", "write-queue", "fragment-queue",
+ "root-time", "root-uid", "root-gid", "xattrs-exclude", "xattrs-include",
+ "xattrs-add", "default-mode", "default-uid", "default-gid",
+ "mem-percent", NULL
+};
+
+char *sqfstar_option_table[] = { "comp", "b", "mkfs-time", "fstime", "all-time",
+ "root-mode", "force-uid", "force-gid", "throttle", "limit",
+ "processors", "mem", "offset", "o", "root-time", "root-uid",
+ "root-gid", "xattrs-exclude", "xattrs-include", "xattrs-add", "p", "pf",
+ "default-mode", "default-uid", "default-gid", "mem-percent", NULL
+};
+
+static char *read_from_disk(long long start, unsigned int avail_bytes);
+static void add_old_root_entry(char *name, squashfs_inode inode,
+ unsigned int inode_number, int type);
+static struct file_info *duplicate(int *dup, int *block_dup,
+ long long file_size, long long bytes, unsigned int *block_list,
+ long long start, struct dir_ent *dir_ent,
+ struct file_buffer *file_buffer, int blocks, long long sparse,
+ int bl_hash);
+static struct dir_info *dir_scan1(char *, char *, struct pathnames *,
+ struct dir_ent *(_readdir)(struct dir_info *), unsigned int);
+static void dir_scan2(struct dir_info *dir, struct pseudo *pseudo);
+static void dir_scan3(struct dir_info *dir);
+static void dir_scan4(struct dir_info *dir, int symlink);
+static void dir_scan5(struct dir_info *dir);
+static void dir_scan6(struct dir_info *dir);
+static void dir_scan7(squashfs_inode *inode, struct dir_info *dir_info);
+static struct dir_ent *scan1_readdir(struct dir_info *dir);
+static struct dir_ent *scan1_single_readdir(struct dir_info *dir);
+static struct dir_ent *scan1_encomp_readdir(struct dir_info *dir);
+static struct file_info *add_non_dup(long long file_size, long long bytes,
+ unsigned int blocks, long long sparse, unsigned int *block_list,
+ long long start, struct fragment *fragment, unsigned short checksum,
+ unsigned short fragment_checksum, int checksum_flag,
+ int checksum_frag_flag, int blocks_dup, int frag_dup, int bl_hash);
+long long generic_write_table(long long, void *, int, void *, int);
+void restorefs();
+struct dir_info *scan1_opendir(char *pathname, char *subpath,
+ unsigned int depth);
+static void write_filesystem_tables(struct squashfs_super_block *sBlk);
+unsigned short get_checksum_mem(char *buff, int bytes);
+static void check_usable_phys_mem(int total_mem);
+static void print_summary();
+void write_destination(int fd, long long byte, long long bytes, void *buff);
+static int old_excluded(char *filename, struct stat *buf);
+
+
+void prep_exit()
+{
+ if(restore_thread) {
+ if(pthread_self() == *restore_thread) {
+ /*
+ * Recursive failure when trying to restore filesystem!
+ * Nothing to do except to exit, otherwise we'll just
+ * appear to hang. The user should be able to restore
+ * from the recovery file (which is why it was added, in
+ * case of catastrophic failure in Mksquashfs)
+ */
+ exit(1);
+ } else {
+ /* signal the restore thread to restore */
+ pthread_kill(*restore_thread, SIGUSR1);
+ pthread_exit(NULL);
+ }
+ } else if(!appending) {
+ if(destination_file && !block_device)
+ unlink(destination_file);
+ } else if(recovery_file)
+ unlink(recovery_file);
+}
+
+
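+/*
+ * Overflow-check helpers: each returns TRUE if the operation (a + b,
+ * a << shift or a * multiplier) would exceed INT_MAX (LLONG_MAX for
+ * multiply_overflowll), computed by checking the remaining headroom
+ * rather than performing the operation.  For example,
+ * add_overflow(INT_MAX, 1) returns TRUE.
+ */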
+int add_overflow(int a, int b)
+{
+ return (INT_MAX - a) < b;
+}
+
+
+int shift_overflow(int a, int shift)
+{
+ return (INT_MAX >> shift) < a;
+}
+
+
+int multiply_overflow(int a, int multiplier)
+{
+ return (INT_MAX / multiplier) < a;
+}
+
+
+int multiply_overflowll(long long a, int multiplier)
+{
+ return (LLONG_MAX / multiplier) < a;
+}
+
+
+#define MKINODE(A) ((squashfs_inode)(((squashfs_inode) inode_bytes << 16) \
+ + (((char *)A) - data_cache)))
+
+
+void restorefs()
+{
+ int i, res;
+
+ ERROR("Exiting - restoring original filesystem!\n\n");
+
+ bytes = sbytes;
+ memcpy(data_cache, sdata_cache, cache_bytes = scache_bytes);
+ memcpy(directory_data_cache, sdirectory_data_cache,
+ sdirectory_cache_bytes);
+ directory_cache_bytes = sdirectory_cache_bytes;
+ inode_bytes = sinode_bytes;
+ directory_bytes = sdirectory_bytes;
+ memcpy(directory_table + directory_bytes, sdirectory_compressed,
+ sdirectory_compressed_bytes);
+ directory_bytes += sdirectory_compressed_bytes;
+ total_bytes = stotal_bytes;
+ total_inode_bytes = stotal_inode_bytes;
+ total_directory_bytes = stotal_directory_bytes;
+ inode_count = sinode_count;
+ file_count = sfile_count;
+ sym_count = ssym_count;
+ dev_count = sdev_count;
+ dir_count = sdir_count;
+ fifo_count = sfifo_count;
+ sock_count = ssock_count;
+ dup_files = sdup_files;
+ fragments = sfragments;
+ id_count = sid_count;
+ restore_xattrs();
+ write_filesystem_tables(&sBlk);
+
+ if(!block_device) {
+ int res = ftruncate(fd, bytes);
+ if(res != 0)
+ BAD_ERROR("Failed to truncate dest file because %s\n",
+ strerror(errno));
+ }
+
+ if(!nopad && (i = bytes & (4096 - 1))) {
+ char temp[4096] = {0};
+ write_destination(fd, bytes, 4096 - i, temp);
+ }
+
+ res = close(fd);
+
+ if(res == -1)
+ BAD_ERROR("Failed to close output filesystem, close returned %s\n",
+ strerror(errno));
+
+ if(recovery_file)
+ unlink(recovery_file);
+
+ if(!quiet)
+ print_summary();
+
+ exit(1);
+}
+
+
+void sighandler(int arg)
+{
+ EXIT_MKSQUASHFS();
+}
+
+
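+/*
+ * Compress size bytes from s into d using the configured compressor.
+ * If compression is disabled (uncompressed set), or the data would not
+ * compress to a smaller size, the data is instead copied verbatim and
+ * the returned length is flagged with SQUASHFS_COMPRESSED_BIT_BLOCK
+ * (data blocks) or SQUASHFS_COMPRESSED_BIT (metadata) to mark it as
+ * stored uncompressed.
+ */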
+static int mangle2(void *strm, char *d, char *s, int size,
+ int block_size, int uncompressed, int data_block)
+{
+ int error, c_byte = 0;
+
+ if(!uncompressed) {
+ c_byte = compressor_compress(comp, strm, d, s, size, block_size,
+ &error);
+ if(c_byte == -1)
+ BAD_ERROR("mangle2:: %s compress failed with error "
+ "code %d\n", comp->name, error);
+ }
+
+ if(c_byte == 0 || c_byte >= size) {
+ memcpy(d, s, size);
+ return size | (data_block ? SQUASHFS_COMPRESSED_BIT_BLOCK :
+ SQUASHFS_COMPRESSED_BIT);
+ }
+
+ return c_byte;
+}
+
+
+int mangle(char *d, char *s, int size, int block_size,
+ int uncompressed, int data_block)
+{
+ return mangle2(stream, d, s, size, block_size, uncompressed,
+ data_block);
+}
+
+
+static void *get_inode(int req_size)
+{
+ int data_space;
+ unsigned short c_byte;
+
+ while(cache_bytes >= SQUASHFS_METADATA_SIZE) {
+ if((inode_size - inode_bytes) <
+ ((SQUASHFS_METADATA_SIZE << 1)) + 2) {
+ void *it = realloc(inode_table, inode_size +
+ (SQUASHFS_METADATA_SIZE << 1) + 2);
+ if(it == NULL)
+ MEM_ERROR();
+ inode_table = it;
+ inode_size += (SQUASHFS_METADATA_SIZE << 1) + 2;
+ }
+
+ c_byte = mangle(inode_table + inode_bytes + BLOCK_OFFSET,
+ data_cache, SQUASHFS_METADATA_SIZE,
+ SQUASHFS_METADATA_SIZE, noI, 0);
+ TRACE("Inode block @ 0x%x, size %d\n", inode_bytes, c_byte);
+ SQUASHFS_SWAP_SHORTS(&c_byte, inode_table + inode_bytes, 1);
+ inode_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) + BLOCK_OFFSET;
+ total_inode_bytes += SQUASHFS_METADATA_SIZE + BLOCK_OFFSET;
+ memmove(data_cache, data_cache + SQUASHFS_METADATA_SIZE,
+ cache_bytes - SQUASHFS_METADATA_SIZE);
+ cache_bytes -= SQUASHFS_METADATA_SIZE;
+ }
+
+ data_space = (cache_size - cache_bytes);
+ if(data_space < req_size) {
+ int realloc_size = cache_size == 0 ?
+ ((req_size + SQUASHFS_METADATA_SIZE) &
+ ~(SQUASHFS_METADATA_SIZE - 1)) : req_size -
+ data_space;
+
+ void *dc = realloc(data_cache, cache_size +
+ realloc_size);
+ if(dc == NULL)
+ MEM_ERROR();
+ cache_size += realloc_size;
+ data_cache = dc;
+ }
+
+ cache_bytes += req_size;
+
+ return data_cache + cache_bytes - req_size;
+}
+
+
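+/*
+ * Read bytes from fd into buff, in chunks of at most MAXIMUM_READ_SIZE,
+ * retrying on EINTR.  Returns the number of bytes read (less than
+ * requested if end of file is reached), or -1 on read error.
+ */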
+long long read_bytes(int fd, void *buff, long long bytes)
+{
+ long long res, count;
+
+ for(count = 0; count < bytes; count += res) {
+ int len = (bytes - count) > MAXIMUM_READ_SIZE ?
+ MAXIMUM_READ_SIZE : bytes - count;
+
+ res = read(fd, buff + count, len);
+ if(res < 1) {
+ if(res == 0)
+ goto bytes_read;
+ else if(errno != EINTR) {
+ ERROR("Read failed because %s\n",
+ strerror(errno));
+ return -1;
+ } else
+ res = 0;
+ }
+ }
+
+bytes_read:
+ return count;
+}
+
+
+int read_fs_bytes(int fd, long long byte, long long bytes, void *buff)
+{
+ off_t off = byte;
+ int res = 1;
+
+ TRACE("read_fs_bytes: reading from position 0x%llx, bytes %lld\n",
+ byte, bytes);
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &pos_mutex);
+ pthread_mutex_lock(&pos_mutex);
+ if(lseek(fd, start_offset + off, SEEK_SET) == -1) {
+ ERROR("read_fs_bytes: Lseek on destination failed because %s, "
+ "offset=0x%llx\n", strerror(errno), start_offset + off);
+ res = 0;
+ } else if(read_bytes(fd, buff, bytes) < bytes) {
+ ERROR("Read on destination failed\n");
+ res = 0;
+ }
+
+ pthread_cleanup_pop(1);
+ return res;
+}
+
+
+int write_bytes(int fd, void *buff, long long bytes)
+{
+ long long res, count;
+
+ for(count = 0; count < bytes; count += res) {
+ int len = (bytes - count) > MAXIMUM_READ_SIZE ?
+ MAXIMUM_READ_SIZE : bytes - count;
+
+ res = write(fd, buff + count, len);
+ if(res == -1) {
+ if(errno != EINTR) {
+ ERROR("Write failed because %s\n",
+ strerror(errno));
+ return -1;
+ }
+ res = 0;
+ }
+ }
+
+ return 0;
+}
+
+
+void write_destination(int fd, long long byte, long long bytes, void *buff)
+{
+ off_t off = byte;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &pos_mutex);
+ pthread_mutex_lock(&pos_mutex);
+
+ if(lseek(fd, start_offset + off, SEEK_SET) == -1) {
+ ERROR("write_destination: Lseek on destination "
+ "failed because %s, offset=0x%llx\n", strerror(errno),
+ start_offset + off);
+ BAD_ERROR("Probably out of space on output %s\n",
+ block_device ? "block device" : "filesystem");
+ }
+
+ if(write_bytes(fd, buff, bytes) == -1)
+ BAD_ERROR("Failed to write to output %s\n",
+ block_device ? "block device" : "filesystem");
+
+ pthread_cleanup_pop(1);
+}
+
+
+static long long write_inodes()
+{
+ unsigned short c_byte;
+ int avail_bytes;
+ char *datap = data_cache;
+ long long start_bytes = bytes;
+
+ while(cache_bytes) {
+ if(inode_size - inode_bytes <
+ ((SQUASHFS_METADATA_SIZE << 1) + 2)) {
+ void *it = realloc(inode_table, inode_size +
+ ((SQUASHFS_METADATA_SIZE << 1) + 2));
+ if(it == NULL)
+ MEM_ERROR();
+ inode_size += (SQUASHFS_METADATA_SIZE << 1) + 2;
+ inode_table = it;
+ }
+ avail_bytes = cache_bytes > SQUASHFS_METADATA_SIZE ?
+ SQUASHFS_METADATA_SIZE : cache_bytes;
+ c_byte = mangle(inode_table + inode_bytes + BLOCK_OFFSET, datap,
+ avail_bytes, SQUASHFS_METADATA_SIZE, noI, 0);
+ TRACE("Inode block @ 0x%x, size %d\n", inode_bytes, c_byte);
+ SQUASHFS_SWAP_SHORTS(&c_byte, inode_table + inode_bytes, 1);
+ inode_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) + BLOCK_OFFSET;
+ total_inode_bytes += avail_bytes + BLOCK_OFFSET;
+ datap += avail_bytes;
+ cache_bytes -= avail_bytes;
+ }
+
+ write_destination(fd, bytes, inode_bytes, inode_table);
+ bytes += inode_bytes;
+
+ return start_bytes;
+}
+
+
+static long long write_directories()
+{
+ unsigned short c_byte;
+ int avail_bytes;
+ char *directoryp = directory_data_cache;
+ long long start_bytes = bytes;
+
+ while(directory_cache_bytes) {
+ if(directory_size - directory_bytes <
+ ((SQUASHFS_METADATA_SIZE << 1) + 2)) {
+ void *dt = realloc(directory_table,
+ directory_size + ((SQUASHFS_METADATA_SIZE << 1)
+ + 2));
+ if(dt == NULL)
+ MEM_ERROR();
+ directory_size += (SQUASHFS_METADATA_SIZE << 1) + 2;
+ directory_table = dt;
+ }
+ avail_bytes = directory_cache_bytes > SQUASHFS_METADATA_SIZE ?
+ SQUASHFS_METADATA_SIZE : directory_cache_bytes;
+ c_byte = mangle(directory_table + directory_bytes +
+ BLOCK_OFFSET, directoryp, avail_bytes,
+ SQUASHFS_METADATA_SIZE, noI, 0);
+ TRACE("Directory block @ 0x%x, size %d\n", directory_bytes,
+ c_byte);
+ SQUASHFS_SWAP_SHORTS(&c_byte,
+ directory_table + directory_bytes, 1);
+ directory_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) +
+ BLOCK_OFFSET;
+ total_directory_bytes += avail_bytes + BLOCK_OFFSET;
+ directoryp += avail_bytes;
+ directory_cache_bytes -= avail_bytes;
+ }
+ write_destination(fd, bytes, directory_bytes, directory_table);
+ bytes += directory_bytes;
+
+ return start_bytes;
+}
+
+
+static long long write_id_table()
+{
+ unsigned int id_bytes = SQUASHFS_ID_BYTES(id_count);
+ unsigned int p[id_count];
+ int i;
+
+ TRACE("write_id_table: ids %d, id_bytes %d\n", id_count, id_bytes);
+ for(i = 0; i < id_count; i++) {
+ TRACE("write_id_table: id index %d, id %d", i, id_table[i]->id);
+ SQUASHFS_SWAP_INTS(&id_table[i]->id, p + i, 1);
+ }
+
+ return generic_write_table(id_bytes, p, 0, NULL, noI || noId);
+}
+
+
+static struct id *get_id(unsigned int id)
+{
+ int hash = ID_HASH(id);
+ struct id *entry = id_hash_table[hash];
+
+ for(; entry; entry = entry->next)
+ if(entry->id == id)
+ break;
+
+ return entry;
+}
+
+
+struct id *create_id(unsigned int id)
+{
+ int hash = ID_HASH(id);
+ struct id *entry = malloc(sizeof(struct id));
+ if(entry == NULL)
+ MEM_ERROR();
+ entry->id = id;
+ entry->index = id_count ++;
+ entry->flags = 0;
+ entry->next = id_hash_table[hash];
+ id_hash_table[hash] = entry;
+ id_table[entry->index] = entry;
+ return entry;
+}
+
+
+unsigned int get_uid(unsigned int uid)
+{
+ struct id *entry = get_id(uid);
+
+ if(entry == NULL) {
+ if(id_count == SQUASHFS_IDS)
+ BAD_ERROR("Out of uids!\n");
+ entry = create_id(uid);
+ }
+
+ if((entry->flags & ISA_UID) == 0) {
+ entry->flags |= ISA_UID;
+ uid_count ++;
+ }
+
+ return entry->index;
+}
+
+
+unsigned int get_guid(unsigned int guid)
+{
+ struct id *entry = get_id(guid);
+
+ if(entry == NULL) {
+ if(id_count == SQUASHFS_IDS)
+ BAD_ERROR("Out of gids!\n");
+ entry = create_id(guid);
+ }
+
+ if((entry->flags & ISA_GID) == 0) {
+ entry->flags |= ISA_GID;
+ guid_count ++;
+ }
+
+ return entry->index;
+}
+
+
+char *pathname(struct dir_ent *dir_ent)
+{
+ static char *pathname = NULL;
+ static int size = ALLOC_SIZE;
+
+ if (dir_ent->nonstandard_pathname)
+ return dir_ent->nonstandard_pathname;
+
+ if(pathname == NULL) {
+ pathname = malloc(ALLOC_SIZE);
+ if(pathname == NULL)
+ MEM_ERROR();
+ }
+
+ for(;;) {
+ int res = snprintf(pathname, size, "%s/%s",
+ dir_ent->our_dir->pathname,
+ dir_ent->source_name ? : dir_ent->name);
+
+ if(res < 0)
+ BAD_ERROR("snprintf failed in pathname\n");
+ else if(res >= size) {
+ /*
+ * pathname is too small to contain the result, so
+ * increase it and try again
+ */
+ size = (res + ALLOC_SIZE) & ~(ALLOC_SIZE - 1);
+ pathname = realloc(pathname, size);
+ if(pathname == NULL)
+ MEM_ERROR();
+ } else
+ break;
+ }
+
+ return pathname;
+}
+
+
+
+char *subpathname(struct dir_ent *dir_ent)
+{
+ static char *subpath = NULL;
+ static int size = ALLOC_SIZE;
+ int res;
+
+ if(subpath == NULL) {
+ subpath = malloc(ALLOC_SIZE);
+ if(subpath == NULL)
+ MEM_ERROR();
+ }
+
+ for(;;) {
+ if(dir_ent->our_dir->subpath[0] != '\0')
+ res = snprintf(subpath, size, "%s/%s",
+ dir_ent->our_dir->subpath, dir_ent->name);
+ else
+ res = snprintf(subpath, size, "/%s", dir_ent->name);
+
+ if(res < 0)
+ BAD_ERROR("snprintf failed in subpathname\n");
+ else if(res >= size) {
+ /*
+ * subpath is too small to contain the result, so
+ * increase it and try again
+ */
+ size = (res + ALLOC_SIZE) & ~(ALLOC_SIZE - 1);
+ subpath = realloc(subpath, size);
+ if(subpath == NULL)
+ MEM_ERROR();
+ } else
+ break;
+ }
+
+ return subpath;
+}
+
+
+static inline unsigned int get_inode_no(struct inode_info *inode)
+{
+ return inode->inode_number;
+}
+
+
+static inline unsigned int get_parent_no(struct dir_info *dir)
+{
+ return dir->depth ? get_inode_no(dir->dir_ent->inode) : inode_no;
+}
+
+
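+/*
+ * Return the timestamp to be stored for a file.  If an all-time value
+ * has been given, timestamps later than it are clamped down to it
+ * (the default), or every timestamp is forced to it if clamping is
+ * disabled.
+ */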
+static inline time_t get_time(time_t time)
+{
+ if(all_time_opt) {
+ if(clamping)
+ return time > all_time ? all_time : time;
+ else
+ return all_time;
+ }
+
+ return time;
+}
+
+
+squashfs_inode create_inode(struct dir_info *dir_info,
+ struct dir_ent *dir_ent, int type, long long byte_size,
+ long long start_block, unsigned int offset, unsigned int *block_list,
+ struct fragment *fragment, struct directory *dir_in, long long sparse)
+{
+ struct stat *buf = &dir_ent->inode->buf;
+ union squashfs_inode_header inode_header;
+ struct squashfs_base_inode_header *base = &inode_header.base;
+ void *inode;
+ char *filename = pathname(dir_ent);
+ int nlink = dir_ent->inode->nlink;
+ int xattr = read_xattrs(dir_ent, type);
+ unsigned int uid, gid;
+
+ switch(type) {
+ case SQUASHFS_FILE_TYPE:
+ if(dir_ent->inode->nlink > 1 ||
+ byte_size >= (1LL << 32) ||
+ start_block >= (1LL << 32) ||
+ sparse || IS_XATTR(xattr))
+ type = SQUASHFS_LREG_TYPE;
+ break;
+ case SQUASHFS_DIR_TYPE:
+ if(dir_info->dir_is_ldir || IS_XATTR(xattr))
+ type = SQUASHFS_LDIR_TYPE;
+ break;
+ case SQUASHFS_SYMLINK_TYPE:
+ if(IS_XATTR(xattr))
+ type = SQUASHFS_LSYMLINK_TYPE;
+ break;
+ case SQUASHFS_BLKDEV_TYPE:
+ if(IS_XATTR(xattr))
+ type = SQUASHFS_LBLKDEV_TYPE;
+ break;
+ case SQUASHFS_CHRDEV_TYPE:
+ if(IS_XATTR(xattr))
+ type = SQUASHFS_LCHRDEV_TYPE;
+ break;
+ case SQUASHFS_FIFO_TYPE:
+ if(IS_XATTR(xattr))
+ type = SQUASHFS_LFIFO_TYPE;
+ break;
+ case SQUASHFS_SOCKET_TYPE:
+ if(IS_XATTR(xattr))
+ type = SQUASHFS_LSOCKET_TYPE;
+ break;
+ }
+
+ if(!pseudo_override && global_uid_opt)
+ uid = global_uid;
+ else
+ uid = buf->st_uid;
+
+ if(!pseudo_override && global_gid_opt)
+ gid = global_gid;
+ else
+ gid = buf->st_gid;
+
+ base->mode = SQUASHFS_MODE(buf->st_mode);
+ base->inode_type = type;
+ base->uid = get_uid(uid);
+ base->guid = get_guid(gid);
+ base->mtime = get_time(buf->st_mtime);
+ base->inode_number = get_inode_no(dir_ent->inode);
+
+ if(type == SQUASHFS_FILE_TYPE) {
+ int i;
+ struct squashfs_reg_inode_header *reg = &inode_header.reg;
+ size_t off = offsetof(struct squashfs_reg_inode_header, block_list);
+
+ inode = get_inode(sizeof(*reg) + offset * sizeof(unsigned int));
+ reg->file_size = byte_size;
+ reg->start_block = start_block;
+ reg->fragment = fragment->index;
+ reg->offset = fragment->offset;
+ SQUASHFS_SWAP_REG_INODE_HEADER(reg, inode);
+ SQUASHFS_SWAP_INTS(block_list, inode + off, offset);
+ TRACE("File inode, file_size %lld, start_block 0x%llx, blocks "
+ "%d, fragment %d, offset %d, size %d\n", byte_size,
+ start_block, offset, fragment->index, fragment->offset,
+ fragment->size);
+ for(i = 0; i < offset; i++)
+ TRACE("Block %d, size %d\n", i, block_list[i]);
+ }
+ else if(type == SQUASHFS_LREG_TYPE) {
+ int i;
+ struct squashfs_lreg_inode_header *reg = &inode_header.lreg;
+ size_t off = offsetof(struct squashfs_lreg_inode_header, block_list);
+
+ inode = get_inode(sizeof(*reg) + offset * sizeof(unsigned int));
+ reg->nlink = nlink;
+ reg->file_size = byte_size;
+ reg->start_block = start_block;
+ reg->fragment = fragment->index;
+ reg->offset = fragment->offset;
+ if(sparse && sparse >= byte_size)
+ sparse = byte_size - 1;
+ reg->sparse = sparse;
+ reg->xattr = xattr;
+ SQUASHFS_SWAP_LREG_INODE_HEADER(reg, inode);
+ SQUASHFS_SWAP_INTS(block_list, inode + off, offset);
+ TRACE("Long file inode, file_size %lld, start_block 0x%llx, "
+ "blocks %d, fragment %d, offset %d, size %d, nlink %d"
+ "\n", byte_size, start_block, offset, fragment->index,
+ fragment->offset, fragment->size, nlink);
+ for(i = 0; i < offset; i++)
+ TRACE("Block %d, size %d\n", i, block_list[i]);
+ }
+ else if(type == SQUASHFS_LDIR_TYPE) {
+ int i;
+ unsigned char *p;
+ struct squashfs_ldir_inode_header *dir = &inode_header.ldir;
+ struct cached_dir_index *index = dir_in->index;
+ unsigned int i_count = dir_in->i_count;
+ unsigned int i_size = dir_in->i_size;
+
+ if(byte_size >= 1LL << 32)
+ BAD_ERROR("directory greater than 2^32-1 bytes!\n");
+
+ inode = get_inode(sizeof(*dir) + i_size);
+ dir->inode_type = SQUASHFS_LDIR_TYPE;
+ dir->nlink = dir_ent->dir->directory_count + 2;
+ dir->file_size = byte_size;
+ dir->offset = offset;
+ dir->start_block = start_block;
+ dir->i_count = i_count;
+ dir->parent_inode = get_parent_no(dir_ent->our_dir);
+ dir->xattr = xattr;
+
+ SQUASHFS_SWAP_LDIR_INODE_HEADER(dir, inode);
+ p = inode + offsetof(struct squashfs_ldir_inode_header, index);
+ for(i = 0; i < i_count; i++) {
+ SQUASHFS_SWAP_DIR_INDEX(&index[i].index, p);
+ p += offsetof(struct squashfs_dir_index, name);
+ memcpy(p, index[i].name, index[i].index.size + 1);
+ p += index[i].index.size + 1;
+ }
+ TRACE("Long directory inode, file_size %lld, start_block "
+ "0x%llx, offset 0x%x, nlink %d\n", byte_size,
+ start_block, offset, dir_ent->dir->directory_count + 2);
+ }
+ else if(type == SQUASHFS_DIR_TYPE) {
+ struct squashfs_dir_inode_header *dir = &inode_header.dir;
+
+ inode = get_inode(sizeof(*dir));
+ dir->nlink = dir_ent->dir->directory_count + 2;
+ dir->file_size = byte_size;
+ dir->offset = offset;
+ dir->start_block = start_block;
+ dir->parent_inode = get_parent_no(dir_ent->our_dir);
+ SQUASHFS_SWAP_DIR_INODE_HEADER(dir, inode);
+ TRACE("Directory inode, file_size %lld, start_block 0x%llx, "
+ "offset 0x%x, nlink %d\n", byte_size, start_block,
+ offset, dir_ent->dir->directory_count + 2);
+ }
+ else if(type == SQUASHFS_CHRDEV_TYPE || type == SQUASHFS_BLKDEV_TYPE) {
+ struct squashfs_dev_inode_header *dev = &inode_header.dev;
+ unsigned int major = major(buf->st_rdev);
+ unsigned int minor = minor(buf->st_rdev);
+
+ if(major > 0xfff) {
+ ERROR("Major %d out of range in device node %s, "
+ "truncating to %d\n", major, filename,
+ major & 0xfff);
+ major &= 0xfff;
+ }
+ if(minor > 0xfffff) {
+ ERROR("Minor %d out of range in device node %s, "
+ "truncating to %d\n", minor, filename,
+ minor & 0xfffff);
+ minor &= 0xfffff;
+ }
+ inode = get_inode(sizeof(*dev));
+ dev->nlink = nlink;
+ dev->rdev = (major << 8) | (minor & 0xff) |
+ ((minor & ~0xff) << 12);
+ SQUASHFS_SWAP_DEV_INODE_HEADER(dev, inode);
+ TRACE("Device inode, rdev 0x%x, nlink %d\n", dev->rdev, nlink);
+ }
+ else if(type == SQUASHFS_LCHRDEV_TYPE || type == SQUASHFS_LBLKDEV_TYPE) {
+ struct squashfs_ldev_inode_header *dev = &inode_header.ldev;
+ unsigned int major = major(buf->st_rdev);
+ unsigned int minor = minor(buf->st_rdev);
+
+ if(major > 0xfff) {
+ ERROR("Major %d out of range in device node %s, "
+ "truncating to %d\n", major, filename,
+ major & 0xfff);
+ major &= 0xfff;
+ }
+ if(minor > 0xfffff) {
+ ERROR("Minor %d out of range in device node %s, "
+ "truncating to %d\n", minor, filename,
+ minor & 0xfffff);
+ minor &= 0xfffff;
+ }
+ inode = get_inode(sizeof(*dev));
+ dev->nlink = nlink;
+ dev->rdev = (major << 8) | (minor & 0xff) |
+ ((minor & ~0xff) << 12);
+ dev->xattr = xattr;
+ SQUASHFS_SWAP_LDEV_INODE_HEADER(dev, inode);
+ TRACE("Device inode, rdev 0x%x, nlink %d\n", dev->rdev, nlink);
+ }
+ else if(type == SQUASHFS_SYMLINK_TYPE) {
+ struct squashfs_symlink_inode_header *symlink = &inode_header.symlink;
+ int byte = strlen(dir_ent->inode->symlink);
+ size_t off = offsetof(struct squashfs_symlink_inode_header, symlink);
+
+ inode = get_inode(sizeof(*symlink) + byte);
+ symlink->nlink = nlink;
+ symlink->symlink_size = byte;
+ SQUASHFS_SWAP_SYMLINK_INODE_HEADER(symlink, inode);
+ strncpy(inode + off, dir_ent->inode->symlink, byte);
+ TRACE("Symbolic link inode, symlink_size %d, nlink %d\n", byte,
+ nlink);
+ }
+ else if(type == SQUASHFS_LSYMLINK_TYPE) {
+ struct squashfs_symlink_inode_header *symlink = &inode_header.symlink;
+ int byte = strlen(dir_ent->inode->symlink);
+ size_t off = offsetof(struct squashfs_symlink_inode_header, symlink);
+
+ inode = get_inode(sizeof(*symlink) + byte +
+ sizeof(unsigned int));
+ symlink->nlink = nlink;
+ symlink->symlink_size = byte;
+ SQUASHFS_SWAP_SYMLINK_INODE_HEADER(symlink, inode);
+ strncpy(inode + off, dir_ent->inode->symlink, byte);
+ SQUASHFS_SWAP_INTS(&xattr, inode + off + byte, 1);
+ TRACE("Symbolic link inode, symlink_size %d, nlink %d\n", byte,
+ nlink);
+ }
+ else if(type == SQUASHFS_FIFO_TYPE || type == SQUASHFS_SOCKET_TYPE) {
+ struct squashfs_ipc_inode_header *ipc = &inode_header.ipc;
+
+ inode = get_inode(sizeof(*ipc));
+ ipc->nlink = nlink;
+ SQUASHFS_SWAP_IPC_INODE_HEADER(ipc, inode);
+ TRACE("ipc inode, type %s, nlink %d\n", type ==
+ SQUASHFS_FIFO_TYPE ? "fifo" : "socket", nlink);
+ }
+ else if(type == SQUASHFS_LFIFO_TYPE || type == SQUASHFS_LSOCKET_TYPE) {
+ struct squashfs_lipc_inode_header *ipc = &inode_header.lipc;
+
+ inode = get_inode(sizeof(*ipc));
+ ipc->nlink = nlink;
+ ipc->xattr = xattr;
+ SQUASHFS_SWAP_LIPC_INODE_HEADER(ipc, inode);
+		TRACE("ipc inode, type %s, nlink %d\n", type ==
+			SQUASHFS_LFIFO_TYPE ? "fifo" : "socket", nlink);
+ } else
+ BAD_ERROR("Unrecognised inode %d in create_inode\n", type);
+
+ inode_count ++;
+
+ TRACE("Created inode 0x%llx, type %d, uid %d, guid %d\n",
+ MKINODE(inode), type, base->uid, base->guid);
+
+ return MKINODE(inode);
+}
+
+
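+/*
+ * add_dir() adds an entry for an already-created inode to its parent
+ * directory's in-memory buffer.  The squashfs_inode value passed in is an
+ * inode reference which, as the unpacking below shows, holds the inode's
+ * metadata block position in the upper bits and the byte offset within the
+ * uncompressed block in the low 16 bits, i.e. roughly:
+ *
+ *	ref = ((squashfs_inode) start_block << 16) | offset;
+ *	start_block = ref >> 16;  offset = ref & 0xffff;
+ *
+ * A new directory header is emitted whenever 256 entries have been added,
+ * the metadata block changes, or the inode number delta no longer fits in
+ * a signed 16-bit value.
+ */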
+static void add_dir(squashfs_inode inode, unsigned int inode_number, char *name,
+ int type, struct directory *dir)
+{
+ unsigned char *buff;
+ struct squashfs_dir_entry idir;
+ unsigned int start_block = inode >> 16;
+ unsigned int offset = inode & 0xffff;
+ unsigned int size = strlen(name);
+ size_t name_off = offsetof(struct squashfs_dir_entry, name);
+
+ if(size > SQUASHFS_NAME_LEN) {
+ size = SQUASHFS_NAME_LEN;
+ ERROR("Filename is greater than %d characters, truncating! ..."
+ "\n", SQUASHFS_NAME_LEN);
+ }
+
+ if(dir->p + sizeof(struct squashfs_dir_entry) + size +
+ sizeof(struct squashfs_dir_header)
+ >= dir->buff + dir->size) {
+ buff = realloc(dir->buff, dir->size += SQUASHFS_METADATA_SIZE);
+ if(buff == NULL)
+ MEM_ERROR();
+
+ dir->p = (dir->p - dir->buff) + buff;
+ if(dir->entry_count_p)
+ dir->entry_count_p = (dir->entry_count_p - dir->buff +
+ buff);
+ dir->index_count_p = dir->index_count_p - dir->buff + buff;
+ dir->buff = buff;
+ }
+
+ if(dir->entry_count == 256 || start_block != dir->start_block ||
+ ((dir->entry_count_p != NULL) &&
+ ((dir->p + sizeof(struct squashfs_dir_entry) + size -
+ dir->index_count_p) > SQUASHFS_METADATA_SIZE)) ||
+ ((long long) inode_number - dir->inode_number) > 32767
+ || ((long long) inode_number - dir->inode_number)
+ < -32768) {
+ if(dir->entry_count_p) {
+ struct squashfs_dir_header dir_header;
+
+ if((dir->p + sizeof(struct squashfs_dir_entry) + size -
+ dir->index_count_p) >
+ SQUASHFS_METADATA_SIZE) {
+ if(dir->i_count % I_COUNT_SIZE == 0) {
+ dir->index = realloc(dir->index,
+ (dir->i_count + I_COUNT_SIZE) *
+ sizeof(struct cached_dir_index));
+ if(dir->index == NULL)
+ MEM_ERROR();
+ }
+ dir->index[dir->i_count].index.index =
+ dir->p - dir->buff;
+ dir->index[dir->i_count].index.size = size - 1;
+ dir->index[dir->i_count++].name = name;
+ dir->i_size += sizeof(struct squashfs_dir_index)
+ + size;
+ dir->index_count_p = dir->p;
+ }
+
+ dir_header.count = dir->entry_count - 1;
+ dir_header.start_block = dir->start_block;
+ dir_header.inode_number = dir->inode_number;
+ SQUASHFS_SWAP_DIR_HEADER(&dir_header,
+ dir->entry_count_p);
+
+ }
+
+
+ dir->entry_count_p = dir->p;
+ dir->start_block = start_block;
+ dir->entry_count = 0;
+ dir->inode_number = inode_number;
+ dir->p += sizeof(struct squashfs_dir_header);
+ }
+
+ idir.offset = offset;
+ idir.type = type;
+ idir.size = size - 1;
+ idir.inode_number = ((long long) inode_number - dir->inode_number);
+ SQUASHFS_SWAP_DIR_ENTRY(&idir, dir->p);
+ strncpy((char *) dir->p + name_off, name, size);
+ dir->p += sizeof(struct squashfs_dir_entry) + size;
+ dir->entry_count ++;
+}
+
+
+static squashfs_inode write_dir(struct dir_info *dir_info,
+ struct directory *dir)
+{
+ long long dir_size = dir->p - dir->buff;
+ int data_space = directory_cache_size - directory_cache_bytes;
+ unsigned int directory_block, directory_offset, i_count, index;
+ unsigned short c_byte;
+
+ if(data_space < dir_size) {
+ int realloc_size = directory_cache_size == 0 ?
+ ((dir_size + SQUASHFS_METADATA_SIZE) &
+ ~(SQUASHFS_METADATA_SIZE - 1)) : dir_size - data_space;
+
+ void *dc = realloc(directory_data_cache,
+ directory_cache_size + realloc_size);
+ if(dc == NULL)
+ MEM_ERROR();
+ directory_cache_size += realloc_size;
+ directory_data_cache = dc;
+ }
+
+ if(dir_size) {
+ struct squashfs_dir_header dir_header;
+
+ dir_header.count = dir->entry_count - 1;
+ dir_header.start_block = dir->start_block;
+ dir_header.inode_number = dir->inode_number;
+ SQUASHFS_SWAP_DIR_HEADER(&dir_header, dir->entry_count_p);
+ memcpy(directory_data_cache + directory_cache_bytes, dir->buff,
+ dir_size);
+ }
+ directory_offset = directory_cache_bytes;
+ directory_block = directory_bytes;
+ directory_cache_bytes += dir_size;
+ i_count = 0;
+ index = SQUASHFS_METADATA_SIZE - directory_offset;
+
+ while(1) {
+ while(i_count < dir->i_count &&
+ dir->index[i_count].index.index < index)
+ dir->index[i_count++].index.start_block =
+ directory_bytes;
+ index += SQUASHFS_METADATA_SIZE;
+
+ if(directory_cache_bytes < SQUASHFS_METADATA_SIZE)
+ break;
+
+ if((directory_size - directory_bytes) <
+ ((SQUASHFS_METADATA_SIZE << 1) + 2)) {
+ void *dt = realloc(directory_table,
+ directory_size + (SQUASHFS_METADATA_SIZE << 1)
+ + 2);
+ if(dt == NULL)
+ MEM_ERROR();
+ directory_size += SQUASHFS_METADATA_SIZE << 1;
+ directory_table = dt;
+ }
+
+ c_byte = mangle(directory_table + directory_bytes +
+ BLOCK_OFFSET, directory_data_cache,
+ SQUASHFS_METADATA_SIZE, SQUASHFS_METADATA_SIZE,
+ noI, 0);
+ TRACE("Directory block @ 0x%x, size %d\n", directory_bytes,
+ c_byte);
+ SQUASHFS_SWAP_SHORTS(&c_byte,
+ directory_table + directory_bytes, 1);
+ directory_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) +
+ BLOCK_OFFSET;
+ total_directory_bytes += SQUASHFS_METADATA_SIZE + BLOCK_OFFSET;
+ memmove(directory_data_cache, directory_data_cache +
+ SQUASHFS_METADATA_SIZE, directory_cache_bytes -
+ SQUASHFS_METADATA_SIZE);
+ directory_cache_bytes -= SQUASHFS_METADATA_SIZE;
+ }
+
+ dir_count ++;
+
+#ifndef SQUASHFS_TRACE
+ return create_inode(dir_info, dir_info->dir_ent, SQUASHFS_DIR_TYPE,
+ dir_size + 3, directory_block, directory_offset, NULL, NULL,
+ dir, 0);
+#else
+ {
+ unsigned char *dirp;
+ int count;
+ squashfs_inode inode;
+
+ inode = create_inode(dir_info, dir_info->dir_ent, SQUASHFS_DIR_TYPE,
+ dir_size + 3, directory_block, directory_offset, NULL, NULL,
+ dir, 0);
+
+ TRACE("Directory contents of inode 0x%llx\n", inode);
+ dirp = dir->buff;
+ while(dirp < dir->p) {
+ char buffer[SQUASHFS_NAME_LEN + 1];
+ struct squashfs_dir_entry idir, *idirp;
+ struct squashfs_dir_header dirh;
+ SQUASHFS_SWAP_DIR_HEADER((struct squashfs_dir_header *) dirp,
+ &dirh);
+ count = dirh.count + 1;
+ dirp += sizeof(struct squashfs_dir_header);
+
+ TRACE("\tStart block 0x%x, count %d\n",
+ dirh.start_block, count);
+
+ while(count--) {
+ idirp = (struct squashfs_dir_entry *) dirp;
+ SQUASHFS_SWAP_DIR_ENTRY(idirp, &idir);
+ strncpy(buffer, idirp->name, idir.size + 1);
+ buffer[idir.size + 1] = '\0';
+ TRACE("\t\tname %s, inode offset 0x%x, type "
+ "%d\n", buffer, idir.offset, idir.type);
+ dirp += sizeof(struct squashfs_dir_entry) + idir.size +
+ 1;
+ }
+ }
+
+ return inode;
+ }
+#endif
+}
+
+
+static struct file_buffer *get_fragment(struct fragment *fragment)
+{
+ struct squashfs_fragment_entry *disk_fragment;
+ struct file_buffer *buffer, *compressed_buffer;
+ long long start_block;
+ int res, size, index = fragment->index, compressed;
+ char locked;
+
+ /*
+ * Lookup fragment block in cache.
+ * If the fragment block doesn't exist, then get the compressed version
+ * from the writer cache or off disk, and decompress it.
+ *
+ * This routine has two things which complicate the code:
+ *
+ * 1. Multiple threads can simultaneously lookup/create the
+ * same buffer. This means a buffer needs to be "locked"
+ * when it is being filled in, to prevent other threads from
+ * using it when it is not ready. This is because we now do
+ * fragment duplicate checking in parallel.
+ * 2. We have two caches which need to be checked for the
+ * presence of fragment blocks: the normal fragment cache
+ * and a "reserve" cache. The reserve cache is used to
+ * prevent an unnecessary pipeline stall when the fragment cache
+ * is full of fragments waiting to be compressed.
+ */
+
+ if(fragment->index == SQUASHFS_INVALID_FRAG)
+ return NULL;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex);
+ pthread_mutex_lock(&dup_mutex);
+
+again:
+ buffer = cache_lookup_nowait(fragment_buffer, index, &locked);
+ if(buffer) {
+ pthread_mutex_unlock(&dup_mutex);
+ if(locked)
+ /* got a buffer being filled in. Wait for it */
+ cache_wait_unlock(buffer);
+ goto finished;
+ }
+
+ /* not in fragment cache, is it in the reserve cache? */
+ buffer = cache_lookup_nowait(reserve_cache, index, &locked);
+ if(buffer) {
+ pthread_mutex_unlock(&dup_mutex);
+ if(locked)
+ /* got a buffer being filled in. Wait for it */
+ cache_wait_unlock(buffer);
+ goto finished;
+ }
+
+ /* in neither cache, try to get it from the fragment cache */
+ buffer = cache_get_nowait(fragment_buffer, index);
+ if(!buffer) {
+ /*
+ * no room, get it from the reserve cache, this is
+ * dimensioned so it will always have space (no more than
+ * processors + 1 can have an outstanding reserve buffer)
+ */
+ buffer = cache_get_nowait(reserve_cache, index);
+ if(!buffer) {
+ /* failsafe */
+ ERROR("no space in reserve cache\n");
+ goto again;
+ }
+ }
+
+ pthread_mutex_unlock(&dup_mutex);
+
+ compressed_buffer = cache_lookup(fwriter_buffer, index);
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
+ pthread_mutex_lock(&fragment_mutex);
+ disk_fragment = &fragment_table[index];
+ size = SQUASHFS_COMPRESSED_SIZE_BLOCK(disk_fragment->size);
+ compressed = SQUASHFS_COMPRESSED_BLOCK(disk_fragment->size);
+ start_block = disk_fragment->start_block;
+ pthread_cleanup_pop(1);
+
+ if(compressed) {
+ int error;
+ char *data;
+
+ if(compressed_buffer)
+ data = compressed_buffer->data;
+ else {
+ data = read_from_disk(start_block, size);
+ if(data == NULL) {
+ ERROR("Failed to read fragment from output"
+ " filesystem\n");
+ BAD_ERROR("Output filesystem corrupted?\n");
+ }
+ }
+
+ res = compressor_uncompress(comp, buffer->data, data, size,
+ block_size, &error);
+ if(res == -1)
+ BAD_ERROR("%s uncompress failed with error code %d\n",
+ comp->name, error);
+ } else if(compressed_buffer)
+ memcpy(buffer->data, compressed_buffer->data, size);
+ else {
+ res = read_fs_bytes(fd, start_block, size, buffer->data);
+ if(res == 0) {
+ ERROR("Failed to read fragment from output "
+ "filesystem\n");
+ BAD_ERROR("Output filesystem corrupted?\n");
+ }
+ }
+
+ cache_unlock(buffer);
+ cache_block_put(compressed_buffer);
+
+finished:
+ pthread_cleanup_pop(0);
+
+ return buffer;
+}
+
+
+static unsigned short get_fragment_checksum(struct file_info *file)
+{
+ struct file_buffer *frag_buffer;
+ struct append_file *append;
+ int res, index = file->fragment->index;
+ unsigned short checksum;
+
+ if(index == SQUASHFS_INVALID_FRAG)
+ return 0;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex);
+ pthread_mutex_lock(&dup_mutex);
+ res = file->have_frag_checksum;
+ checksum = file->fragment_checksum;
+ pthread_cleanup_pop(1);
+
+ if(res)
+ return checksum;
+
+ frag_buffer = get_fragment(file->fragment);
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex);
+
+ for(append = file_mapping[index]; append; append = append->next) {
+ int offset = append->file->fragment->offset;
+ int size = append->file->fragment->size;
+ unsigned short cksum =
+ get_checksum_mem(frag_buffer->data + offset, size);
+
+ if(file == append->file)
+ checksum = cksum;
+
+ pthread_mutex_lock(&dup_mutex);
+ append->file->fragment_checksum = cksum;
+ append->file->have_frag_checksum = TRUE;
+ pthread_mutex_unlock(&dup_mutex);
+ }
+
+ cache_block_put(frag_buffer);
+ pthread_cleanup_pop(0);
+
+ return checksum;
+}
+
+
+static void ensure_fragments_flushed()
+{
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
+ pthread_mutex_lock(&fragment_mutex);
+
+ while(fragments_outstanding)
+ pthread_cond_wait(&fragment_waiting, &fragment_mutex);
+
+ pthread_cleanup_pop(1);
+}
+
+
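+/*
+ * lock_fragments()/unlock_fragments() bracket the writing of a file's data
+ * blocks when not in reproducible mode.  While fragments_locked is set,
+ * frag_deflator() queues compressed fragments on locked_fragment rather
+ * than giving them a disk position, and unlock_fragments() later allocates
+ * their positions and passes them to the writer.  This stops fragment
+ * blocks being interleaved with the data blocks of the file currently
+ * being written.
+ */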
+static void lock_fragments()
+{
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
+ pthread_mutex_lock(&fragment_mutex);
+ fragments_locked = TRUE;
+ pthread_cleanup_pop(1);
+}
+
+
+static void log_fragment(unsigned int fragment, long long start)
+{
+ if(logging)
+ fprintf(log_fd, "Fragment %u, %lld\n", fragment, start);
+}
+
+
+static void unlock_fragments()
+{
+ int frg, size;
+ struct file_buffer *write_buffer;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
+ pthread_mutex_lock(&fragment_mutex);
+
+ /*
+ * Note queue_empty() is inherently racy with respect to concurrent
+ * queue get and pushes. We avoid this because we're holding the
+ * fragment_mutex which ensures no other threads can be using the
+ * queue at this time.
+ */
+ while(!queue_empty(locked_fragment)) {
+ write_buffer = queue_get(locked_fragment);
+ frg = write_buffer->block;
+ size = SQUASHFS_COMPRESSED_SIZE_BLOCK(fragment_table[frg].size);
+ fragment_table[frg].start_block = bytes;
+ write_buffer->block = bytes;
+ bytes += size;
+ fragments_outstanding --;
+ queue_put(to_writer, write_buffer);
+ log_fragment(frg, fragment_table[frg].start_block);
+ TRACE("fragment_locked writing fragment %d, compressed size %d"
+ "\n", frg, size);
+ }
+ fragments_locked = FALSE;
+ pthread_cleanup_pop(1);
+}
+
+/* Called with the fragment_mutex locked */
+static void add_pending_fragment(struct file_buffer *write_buffer, int c_byte,
+ int fragment)
+{
+ fragment_table[fragment].size = c_byte;
+ write_buffer->block = fragment;
+
+ queue_put(locked_fragment, write_buffer);
+}
+
+
+static void write_fragment(struct file_buffer *fragment)
+{
+ static long long sequence = 0;
+
+ if(fragment == NULL)
+ return;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
+ pthread_mutex_lock(&fragment_mutex);
+ fragment_table[fragment->block].unused = 0;
+ fragment->sequence = sequence ++;
+ fragments_outstanding ++;
+ queue_put(to_frag, fragment);
+ pthread_cleanup_pop(1);
+}
+
+
+static struct file_buffer *allocate_fragment()
+{
+ struct file_buffer *fragment = cache_get(fragment_buffer, fragments);
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
+ pthread_mutex_lock(&fragment_mutex);
+
+ if(fragments % FRAG_SIZE == 0) {
+ void *ft = realloc(fragment_table, (fragments +
+ FRAG_SIZE) * sizeof(struct squashfs_fragment_entry));
+ if(ft == NULL)
+ MEM_ERROR();
+ fragment_table = ft;
+ }
+
+ fragment->size = 0;
+ fragment->block = fragments ++;
+
+ pthread_cleanup_pop(1);
+
+ return fragment;
+}
+
+
+static struct fragment empty_fragment = {SQUASHFS_INVALID_FRAG, 0, 0};
+
+
+void free_fragment(struct fragment *fragment)
+{
+ if(fragment != &empty_fragment)
+ free(fragment);
+}
+
+
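+/*
+ * Append the tail-end data in file_buffer to the fragment block currently
+ * being filled for this file (selected by the fragment actions).  If the
+ * data won't fit, the current fragment block is sent for compression and a
+ * new one is allocated.  The returned struct fragment records the fragment
+ * index, and the offset and size of this file's data within it.
+ */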
+static struct fragment *get_and_fill_fragment(struct file_buffer *file_buffer,
+ struct dir_ent *dir_ent, int tail)
+{
+ struct fragment *ffrg;
+ struct file_buffer **fragment;
+
+ if(file_buffer == NULL || file_buffer->size == 0)
+ return &empty_fragment;
+
+ fragment = eval_frag_actions(root_dir, dir_ent, tail);
+
+ if((*fragment) && (*fragment)->size + file_buffer->size > block_size) {
+ write_fragment(*fragment);
+ *fragment = NULL;
+ }
+
+ ffrg = malloc(sizeof(struct fragment));
+ if(ffrg == NULL)
+ MEM_ERROR();
+
+ if(*fragment == NULL)
+ *fragment = allocate_fragment();
+
+ ffrg->index = (*fragment)->block;
+ ffrg->offset = (*fragment)->size;
+ ffrg->size = file_buffer->size;
+ memcpy((*fragment)->data + (*fragment)->size, file_buffer->data,
+ file_buffer->size);
+ (*fragment)->size += file_buffer->size;
+
+ return ffrg;
+}
+
+
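+/*
+ * Write a metadata table (for example the fragment table) to the output.
+ * The table is split into SQUASHFS_METADATA_SIZE chunks, each compressed
+ * (unless "uncompressed" is set) and written in turn, followed by the
+ * optional buffer2 written as-is, and finally an index giving the position
+ * of each compressed chunk.  The value returned is the offset immediately
+ * after the compressed chunks, which callers such as write_fragment_table()
+ * record as the start of the table.
+ */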
+long long generic_write_table(long long length, void *buffer, int length2,
+ void *buffer2, int uncompressed)
+{
+ int meta_blocks = (length + SQUASHFS_METADATA_SIZE - 1) /
+ SQUASHFS_METADATA_SIZE;
+ long long *list, start_bytes;
+ int compressed_size, i, list_size = meta_blocks * sizeof(long long);
+ unsigned short c_byte;
+ char cbuffer[(SQUASHFS_METADATA_SIZE << 2) + 2];
+
+#ifdef SQUASHFS_TRACE
+ long long obytes = bytes;
+ long long olength = length;
+#endif
+
+ list = malloc(list_size);
+ if(list == NULL)
+ MEM_ERROR();
+
+ for(i = 0; i < meta_blocks; i++) {
+ int avail_bytes = length > SQUASHFS_METADATA_SIZE ?
+ SQUASHFS_METADATA_SIZE : length;
+ c_byte = mangle(cbuffer + BLOCK_OFFSET, buffer + i *
+			SQUASHFS_METADATA_SIZE, avail_bytes,
+ SQUASHFS_METADATA_SIZE, uncompressed, 0);
+ SQUASHFS_SWAP_SHORTS(&c_byte, cbuffer, 1);
+ list[i] = bytes;
+ compressed_size = SQUASHFS_COMPRESSED_SIZE(c_byte) +
+ BLOCK_OFFSET;
+ TRACE("block %d @ 0x%llx, compressed size %d\n", i, bytes,
+ compressed_size);
+ write_destination(fd, bytes, compressed_size, cbuffer);
+ bytes += compressed_size;
+ total_bytes += avail_bytes;
+ length -= avail_bytes;
+ }
+
+ start_bytes = bytes;
+ if(length2) {
+ write_destination(fd, bytes, length2, buffer2);
+ bytes += length2;
+ total_bytes += length2;
+ }
+
+ SQUASHFS_INSWAP_LONG_LONGS(list, meta_blocks);
+ write_destination(fd, bytes, list_size, list);
+ bytes += list_size;
+ total_bytes += list_size;
+
+ TRACE("generic_write_table: total uncompressed %lld compressed %lld\n",
+ olength, bytes - obytes);
+
+ free(list);
+
+ return start_bytes;
+}
+
+
+static long long write_fragment_table()
+{
+ long long frag_bytes = SQUASHFS_FRAGMENT_BYTES(fragments);
+ unsigned int i;
+
+	TRACE("write_fragment_table: fragments %u, frag_bytes %lld\n", fragments,
+ frag_bytes);
+ for(i = 0; i < fragments; i++) {
+ TRACE("write_fragment_table: fragment %u, start_block 0x%llx, "
+ "size %d\n", i, fragment_table[i].start_block,
+ fragment_table[i].size);
+ SQUASHFS_INSWAP_FRAGMENT_ENTRY(&fragment_table[i]);
+ }
+
+ return generic_write_table(frag_bytes, fragment_table, 0, NULL, noF);
+}
+
+
+char read_from_file_buffer[SQUASHFS_FILE_MAX_SIZE];
+static char *read_from_disk(long long start, unsigned int avail_bytes)
+{
+ int res;
+
+ res = read_fs_bytes(fd, start, avail_bytes, read_from_file_buffer);
+ if(res == 0)
+ return NULL;
+
+ return read_from_file_buffer;
+}
+
+
+char read_from_file_buffer2[SQUASHFS_FILE_MAX_SIZE];
+static char *read_from_disk2(long long start, unsigned int avail_bytes)
+{
+ int res;
+
+ res = read_fs_bytes(fd, start, avail_bytes, read_from_file_buffer2);
+ if(res == 0)
+ return NULL;
+
+ return read_from_file_buffer2;
+}
+
+
+/*
+ * Compute 16 bit BSD checksum over the data
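+ *
+ * This is the rotate-right-and-add algorithm used by BSD sum(1): for each
+ * byte the 16-bit checksum is rotated right one bit and the byte is added.
+ * For example, feeding the bytes 0x01 and 0x02 into an initial checksum of
+ * 0 gives 0x0001 and then 0x8002.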
+ */
+unsigned short get_checksum(char *buff, int bytes, unsigned short chksum)
+{
+ unsigned char *b = (unsigned char *) buff;
+
+ while(bytes --) {
+ chksum = (chksum & 1) ? (chksum >> 1) | 0x8000 : chksum >> 1;
+ chksum += *b++;
+ }
+
+ return chksum;
+}
+
+
+static unsigned short get_checksum_disk(long long start, long long l,
+ unsigned int *blocks)
+{
+ unsigned short chksum = 0;
+ unsigned int bytes;
+ struct file_buffer *write_buffer;
+ int i;
+
+ for(i = 0; l; i++) {
+ bytes = SQUASHFS_COMPRESSED_SIZE_BLOCK(blocks[i]);
+ if(bytes == 0) /* sparse block */
+ continue;
+ write_buffer = cache_lookup(bwriter_buffer, start);
+ if(write_buffer) {
+ chksum = get_checksum(write_buffer->data, bytes,
+ chksum);
+ cache_block_put(write_buffer);
+ } else {
+ void *data = read_from_disk(start, bytes);
+ if(data == NULL) {
+ ERROR("Failed to checksum data from output"
+ " filesystem\n");
+ BAD_ERROR("Output filesystem corrupted?\n");
+ }
+
+ chksum = get_checksum(data, bytes, chksum);
+ }
+
+ l -= bytes;
+ start += bytes;
+ }
+
+ return chksum;
+}
+
+
+unsigned short get_checksum_mem(char *buff, int bytes)
+{
+ return get_checksum(buff, bytes, 0);
+}
+
+
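+/*
+ * Hash used to index the dupl_block[] duplicate lists.  It packs the low
+ * ten bits of the first block's compressed size (shifted into bits 10-19)
+ * with the low ten bits of the file's block count, so only files agreeing
+ * on both values share a list.  For example, a 5 block file whose first
+ * block compressed to 1000 bytes hashes to
+ * ((1000 << 10) & 0xffc00) | (5 & 0x3ff) = 0xfa005.
+ */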
+static int block_hash(int size, int blocks)
+{
+ return ((size << 10) & 0xffc00) | (blocks & 0x3ff);
+}
+
+
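+/*
+ * Record a file which already exists in the output filesystem (typically
+ * when appending to an existing filesystem) in the duplicate-checking
+ * structures, so new files can be matched against it.  Blocks are indexed
+ * by block_hash() in dupl_block[], tail-end fragments by size in
+ * dupl_frag[], and file_mapping[] remembers which files share each
+ * fragment block, which is used when checksumming fragments.
+ */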
+void add_file(long long start, long long file_size, long long file_bytes,
+ unsigned int *block_listp, int blocks, unsigned int fragment,
+ int offset, int bytes)
+{
+ struct fragment *frg;
+ unsigned int *block_list = block_listp;
+ struct file_info *dupl_ptr;
+ struct append_file *append_file;
+ struct file_info *file;
+ int blocks_dup = FALSE, frag_dup = FALSE;
+ int bl_hash = 0;
+
+ if(!duplicate_checking || file_size == 0)
+ return;
+
+ if(blocks) {
+ bl_hash = block_hash(block_list[0], blocks);
+ dupl_ptr = dupl_block[bl_hash];
+
+ for(; dupl_ptr; dupl_ptr = dupl_ptr->block_next) {
+ if(start == dupl_ptr->start)
+ break;
+ }
+
+ if(dupl_ptr) {
+ /*
+ * Our blocks have already been added. If we don't
+ * have a fragment, then we've finished checking
+ */
+ if(fragment == SQUASHFS_INVALID_FRAG)
+ return;
+
+ /*
+ * This entry probably created both the blocks and
+ * the tail-end fragment, and so check for that
+ */
+ if((fragment == dupl_ptr->fragment->index) &&
+ (offset == dupl_ptr->fragment->offset)
+ && (bytes == dupl_ptr->fragment->size))
+ return;
+
+ /*
+ * Remember our blocks are duplicate, and continue
+ * looking for the tail-end fragment
+ */
+ blocks_dup = TRUE;
+ }
+ }
+
+ if(fragment != SQUASHFS_INVALID_FRAG) {
+ dupl_ptr = dupl_frag[bytes];
+
+ for(; dupl_ptr; dupl_ptr = dupl_ptr->frag_next)
+ if((fragment == dupl_ptr->fragment->index) &&
+ (offset == dupl_ptr->fragment->offset)
+ && (bytes == dupl_ptr->fragment->size))
+ break;
+
+ if(dupl_ptr) {
+ /*
+ * Our tail-end fragment entry has already been added.
+			 * If there are no blocks, or they are duplicates,
+			 * then we're done here
+ */
+ if(blocks == 0 || blocks_dup)
+ return;
+
+ /* Remember our tail-end fragment entry is duplicate */
+ frag_dup = TRUE;
+ }
+ }
+
+ frg = malloc(sizeof(struct fragment));
+ if(frg == NULL)
+ MEM_ERROR();
+
+ frg->index = fragment;
+ frg->offset = offset;
+ frg->size = bytes;
+
+ file = add_non_dup(file_size, file_bytes, blocks, 0, block_list, start,
+ frg, 0, 0, FALSE, FALSE, blocks_dup, frag_dup, bl_hash);
+
+ if(fragment == SQUASHFS_INVALID_FRAG)
+ return;
+
+ append_file = malloc(sizeof(struct append_file));
+ if(append_file == NULL)
+ MEM_ERROR();
+
+ append_file->file = file;
+ append_file->next = file_mapping[fragment];
+ file_mapping[fragment] = append_file;
+}
+
+
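+/*
+ * Cheap first-pass check made before any data is written: returns TRUE if
+ * the dupl_block[]/dupl_frag[] lists contain a file which could possibly
+ * be a duplicate of this one (same block count and first-block compressed
+ * size, or same tail-end fragment size).  Only if this returns TRUE is the
+ * more expensive write_file_blocks_dup()/duplicate() path taken.
+ */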
+static int pre_duplicate(long long file_size, struct inode_info *inode,
+ struct file_buffer *buffer, int *bl_hash)
+{
+ struct file_info *dupl_ptr;
+ long long fragment_size;
+ int blocks;
+
+ if(inode->no_fragments || (!inode->always_use_fragments && file_size >=
+ block_size)) {
+ blocks = (file_size + block_size - 1) >> block_log;
+ fragment_size = 0;
+ } else {
+ blocks = file_size >> block_log;
+ fragment_size = file_size & (block_size - 1);
+ }
+
+ /* Look for a possible duplicate set of blocks */
+ if(blocks) {
+ *bl_hash = block_hash(buffer->size, blocks);
+		for(dupl_ptr = dupl_block[*bl_hash]; dupl_ptr; dupl_ptr = dupl_ptr->block_next)
+ if(dupl_ptr->blocks == blocks && dupl_ptr->block_list[0] == buffer->c_byte)
+ return TRUE;
+ }
+
+ /* Look for a possible duplicate fragment */
+ if(fragment_size) {
+ for(dupl_ptr = dupl_frag[fragment_size]; dupl_ptr; dupl_ptr = dupl_ptr->frag_next)
+ if(dupl_ptr->fragment->size == fragment_size)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+
+static struct file_info *create_non_dup(long long file_size, long long bytes,
+ unsigned int blocks, long long sparse, unsigned int *block_list,
+ long long start,struct fragment *fragment,unsigned short checksum,
+ unsigned short fragment_checksum, int checksum_flag,
+ int checksum_frag_flag)
+{
+ struct file_info *dupl_ptr = malloc(sizeof(struct file_info));
+
+ if(dupl_ptr == NULL)
+ MEM_ERROR();
+
+ dupl_ptr->file_size = file_size;
+ dupl_ptr->bytes = bytes;
+ dupl_ptr->blocks = blocks;
+ dupl_ptr->sparse = sparse;
+ dupl_ptr->block_list = block_list;
+ dupl_ptr->start = start;
+ dupl_ptr->fragment = fragment;
+ dupl_ptr->checksum = checksum;
+ dupl_ptr->fragment_checksum = fragment_checksum;
+ dupl_ptr->have_frag_checksum = checksum_frag_flag;
+ dupl_ptr->have_checksum = checksum_flag;
+ dupl_ptr->block_next = NULL;
+ dupl_ptr->frag_next = NULL;
+ dupl_ptr->dup = NULL;
+
+ return dupl_ptr;
+}
+
+
+static struct file_info *add_non_dup(long long file_size, long long bytes,
+ unsigned int blocks, long long sparse, unsigned int *block_list,
+ long long start,struct fragment *fragment,unsigned short checksum,
+ unsigned short fragment_checksum, int checksum_flag,
+ int checksum_frag_flag, int blocks_dup, int frag_dup, int bl_hash)
+{
+ struct file_info *dupl_ptr = malloc(sizeof(struct file_info));
+ int fragment_size = fragment->size;
+
+ if(dupl_ptr == NULL)
+ MEM_ERROR();
+
+ dupl_ptr->file_size = file_size;
+ dupl_ptr->bytes = bytes;
+ dupl_ptr->blocks = blocks;
+ dupl_ptr->sparse = sparse;
+ dupl_ptr->block_list = block_list;
+ dupl_ptr->start = start;
+ dupl_ptr->fragment = fragment;
+ dupl_ptr->checksum = checksum;
+ dupl_ptr->fragment_checksum = fragment_checksum;
+ dupl_ptr->have_frag_checksum = checksum_frag_flag;
+ dupl_ptr->have_checksum = checksum_flag;
+ dupl_ptr->block_next = NULL;
+ dupl_ptr->frag_next = NULL;
+ dupl_ptr->dup = NULL;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex);
+ pthread_mutex_lock(&dup_mutex);
+
+ if(blocks && !blocks_dup) {
+ dupl_ptr->block_next = dupl_block[bl_hash];
+ dupl_block[bl_hash] = dupl_ptr;
+ }
+
+ if(fragment_size && !frag_dup) {
+ dupl_ptr->frag_next = dupl_frag[fragment_size];
+ dupl_frag[fragment_size] = dupl_ptr;
+ }
+
+ dup_files ++;
+
+ pthread_cleanup_pop(1);
+
+ return dupl_ptr;
+}
+
+
+static struct file_info *frag_duplicate(struct file_buffer *file_buffer, int *duplicate)
+{
+ struct file_info *dupl_ptr;
+ struct file_buffer *buffer;
+ struct file_info *dupl_start = file_buffer->dupl_start;
+ long long file_size = file_buffer->file_size;
+ unsigned short checksum = file_buffer->checksum;
+ int res;
+
+ if(file_buffer->duplicate)
+ dupl_ptr = dupl_start;
+ else {
+ for(dupl_ptr = dupl_frag[file_size];
+ dupl_ptr && dupl_ptr != dupl_start;
+ dupl_ptr = dupl_ptr->frag_next) {
+ if(file_size == dupl_ptr->fragment->size) {
+ if(get_fragment_checksum(dupl_ptr) == checksum) {
+ buffer = get_fragment(dupl_ptr->fragment);
+ res = memcmp(file_buffer->data,
+ buffer->data +
+ dupl_ptr->fragment->offset,
+ file_size);
+ cache_block_put(buffer);
+ if(res == 0)
+ break;
+ }
+ }
+ }
+
+ if(!dupl_ptr || dupl_ptr == dupl_start) {
+ *duplicate = FALSE;
+ return NULL;
+ }
+ }
+
+ if(dupl_ptr->file_size == file_size) {
+ /* File only has a fragment, and so this is an exact match */
+		TRACE("Found duplicate file, fragment %u, size %lld, offset %d, "
+ "checksum 0x%x\n", dupl_ptr->fragment->index, file_size,
+ dupl_ptr->fragment->offset, checksum);
+ *duplicate = TRUE;
+ return dupl_ptr;
+ } else {
+ struct dup_info *dup;
+
+ /*
+ * File also has a block list. Create a new file without
+ * a block_list, and link it to this file. First check whether
+ * it is already there.
+ */
+ if(dupl_ptr->dup) {
+ *duplicate = TRUE;
+ return dupl_ptr->dup->file;
+ }
+
+ dup = malloc(sizeof(struct dup_info));
+ if(dup == NULL)
+ MEM_ERROR();
+
+ dup->file = create_non_dup(file_size, 0, 0, 0, NULL, 0,
+ dupl_ptr->fragment, 0, checksum, TRUE, TRUE);
+ dup->next = NULL;
+ dupl_ptr->dup = dup;
+ *duplicate = FALSE;
+ return dup->file;
+ }
+}
+
+
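+/*
+ * Full duplicate check for a file with data blocks.  An existing file_info
+ * is returned if the file is a complete duplicate (block list and any
+ * tail-end fragment match byte for byte); otherwise a new entry is created,
+ * reusing a matching block list and/or fragment from partially matching
+ * files where possible.  *dupf reports whether the file as a whole is a
+ * duplicate, and *block_dup whether its blocks are, so the caller knows
+ * not to write them out again.
+ */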
+static struct file_info *duplicate(int *dupf, int *block_dup,
+ long long file_size, long long bytes, unsigned int *block_list,
+ long long start, struct dir_ent *dir_ent,
+ struct file_buffer *file_buffer, int blocks, long long sparse,
+ int bl_hash)
+{
+ struct file_info *dupl_ptr, *file;
+ struct file_info *block_dupl = NULL, *frag_dupl = NULL;
+ struct dup_info *dup;
+ int frag_bytes = file_buffer ? file_buffer->size : 0;
+ unsigned short fragment_checksum = file_buffer ?
+ file_buffer->checksum : 0;
+ unsigned short checksum = 0;
+ char checksum_flag = FALSE;
+ struct fragment *fragment;
+
+ /* Look for a possible duplicate set of blocks */
+ for(dupl_ptr = dupl_block[bl_hash]; dupl_ptr;
+ dupl_ptr = dupl_ptr->block_next) {
+ if(bytes == dupl_ptr->bytes && blocks == dupl_ptr->blocks) {
+ long long target_start, dup_start = dupl_ptr->start;
+ int block;
+
+ /*
+ * Block list has same uncompressed size and same
+ * compressed size. Now check if each block compressed
+ * to the same size
+ */
+ if(memcmp(block_list, dupl_ptr->block_list, blocks *
+ sizeof(unsigned int)) != 0)
+ continue;
+
+ /* Now get the checksums and compare */
+ if(checksum_flag == FALSE) {
+ checksum = get_checksum_disk(start, bytes, block_list);
+ checksum_flag = TRUE;
+ }
+
+ if(!dupl_ptr->have_checksum) {
+ dupl_ptr->checksum =
+ get_checksum_disk(dupl_ptr->start,
+ dupl_ptr->bytes, dupl_ptr->block_list);
+ dupl_ptr->have_checksum = TRUE;
+ }
+
+ if(checksum != dupl_ptr->checksum)
+ continue;
+
+ /*
+ * Checksums match, so now we need to do a byte by byte
+ * comparison
+ */
+ target_start = start;
+ for(block = 0; block < blocks; block ++) {
+ int size = SQUASHFS_COMPRESSED_SIZE_BLOCK(block_list[block]);
+ struct file_buffer *target_buffer = NULL;
+ struct file_buffer *dup_buffer = NULL;
+ char *target_data, *dup_data;
+ int res;
+
+ /* Sparse blocks obviously match */
+ if(size == 0)
+ continue;
+
+ /*
+ * Get the block for our file. This will be in
+ * the cache unless the cache wasn't large
+ * enough to hold the entire file, in which case
+ * the block will have been written to disk.
+ */
+ target_buffer = cache_lookup(bwriter_buffer,
+ target_start);
+ if(target_buffer)
+ target_data = target_buffer->data;
+ else {
+ target_data = read_from_disk(target_start, size);
+ if(target_data == NULL) {
+ ERROR("Failed to read data from"
+ " output filesystem\n");
+ BAD_ERROR("Output filesystem"
+ " corrupted?\n");
+ }
+ }
+
+ /*
+ * Get the block for the other file. This may
+ * still be in the cache (if it was written
+ * recently), otherwise it will have to be read
+ * back from disk
+ */
+ dup_buffer = cache_lookup(bwriter_buffer, dup_start);
+ if(dup_buffer)
+ dup_data = dup_buffer->data;
+ else {
+ dup_data = read_from_disk2(dup_start, size);
+ if(dup_data == NULL) {
+ ERROR("Failed to read data from"
+ " output filesystem\n");
+ BAD_ERROR("Output filesystem"
+ " corrupted?\n");
+ }
+ }
+
+ res = memcmp(target_data, dup_data, size);
+ cache_block_put(target_buffer);
+ cache_block_put(dup_buffer);
+ if(res != 0)
+ break;
+ target_start += size;
+ dup_start += size;
+ }
+
+ if(block != blocks)
+ continue;
+
+ /*
+ * Yes, the block list matches. We can use this, rather
+ * than writing an identical block list.
+			 * If neither it nor we have a tail-end fragment
+			 * then we're finished. Return the duplicate.
+			 *
+			 * We have to deal with the special case where the
+			 * last block is a sparse block. This means the
+			 * file will have matched, but it may be a different
+			 * file length (because a tail-end sparse block may be
+			 * anything from 1 byte to block_size - 1 in size, but
+			 * stored as zero). We can still use the block list in
+			 * this case, but we must return a new entry with the
+			 * correct file size.
+ */
+ if(!frag_bytes && !dupl_ptr->fragment->size) {
+ *dupf = *block_dup = TRUE;
+ if(file_size == dupl_ptr->file_size)
+ return dupl_ptr;
+ else
+ return create_non_dup(file_size, bytes,
+ blocks, sparse,
+ dupl_ptr->block_list,
+ dupl_ptr->start,
+ dupl_ptr->fragment, checksum, 0,
+ checksum_flag, FALSE);
+ }
+
+ /*
+ * We've got a tail-end fragment, and this file most
+ * likely has a matching tail-end fragment (i.e. it is
+ * a completely duplicate file). So save time and have
+ * a look now.
+ */
+ if(frag_bytes == dupl_ptr->fragment->size &&
+ fragment_checksum ==
+ get_fragment_checksum(dupl_ptr)) {
+ /*
+ * Checksums match, so now we need to do a byte
+ * by byte comparison
+				 */
+ struct file_buffer *frag_buffer = get_fragment(dupl_ptr->fragment);
+ int res = memcmp(file_buffer->data,
+ frag_buffer->data +
+ dupl_ptr->fragment->offset, frag_bytes);
+
+ cache_block_put(frag_buffer);
+
+ if(res == 0) {
+ /*
+ * Yes, the fragment matches. We're now
+ * finished. Return the duplicate
+ */
+ *dupf = *block_dup = TRUE;
+ return dupl_ptr;
+ }
+ }
+
+ /*
+ * No, the fragment didn't match. Remember the file
+ * with the matching blocks, and look for a matching
+ * fragment in the fragment list
+ */
+ block_dupl = dupl_ptr;
+ break;
+ }
+ }
+
+ /* Look for a possible duplicate fragment */
+ if(frag_bytes) {
+ for(dupl_ptr = dupl_frag[frag_bytes]; dupl_ptr;
+ dupl_ptr = dupl_ptr->frag_next) {
+ if(frag_bytes == dupl_ptr->fragment->size &&
+ fragment_checksum ==
+ get_fragment_checksum(dupl_ptr)) {
+ /*
+ * Checksums match, so now we need to do a byte
+ * by byte comparison
+ */
+ struct file_buffer *frag_buffer = get_fragment(dupl_ptr->fragment);
+ int res = memcmp(file_buffer->data,
+ frag_buffer->data +
+ dupl_ptr->fragment->offset, frag_bytes);
+
+ cache_block_put(frag_buffer);
+
+ if(res == 0) {
+ /*
+ * Yes, the fragment matches. This file
+ * may have a matching block list and
+ * fragment, in which case we're
+ * finished.
+ */
+ if(block_dupl && block_dupl->start == dupl_ptr->start) {
+ *dupf = *block_dup = TRUE;
+ return dupl_ptr;
+ }
+
+ /*
+ * Block list doesn't match. We can
+ * construct a hybrid from these two
+ * partially matching files
+ */
+ frag_dupl = dupl_ptr;
+ break;
+ }
+ }
+ }
+ }
+
+ /*
+ * If we've got here, then we've either matched on nothing, or got a
+ * partial match. Matched on nothing is straightforward
+ */
+ if(!block_dupl && !frag_dupl) {
+ *dupf = *block_dup = FALSE;
+ fragment = get_and_fill_fragment(file_buffer, dir_ent, TRUE);
+
+ return add_non_dup(file_size, bytes, blocks, sparse, block_list,
+ start, fragment, checksum, fragment_checksum,
+ checksum_flag, file_buffer != NULL, FALSE,
+ FALSE, bl_hash);
+ }
+
+ /*
+ * At this point, we may have
+ * 1. A partially matching single file. For example the file may
+ * contain the block list we want, but, it has the wrong tail-end,
+ * or vice-versa,
+ * 2. A partially matching single file for another reason. For example
+ * it has the block list we want, and a tail-end, whereas we don't
+ * have a tail-end. Note the vice-versa situation doesn't appear
+ * here (it is handled in frag_duplicate).
+ * 3. We have two partially matching files. One has the block list we
+ * want, and the other has the tail-end we want.
+ *
+ * Strictly speaking, a file which is constructed from one or two
+ * partial matches isn't a duplicate (of any single file), and it will
+ * be confusing to list it as such (using the -info option). But a
+	 * second or subsequent appearance of this combination *is* a
+	 * duplicate of another file. Some of these subsequent appearances
+	 * are already handled above.
+ */
+
+ if(block_dupl && (!frag_bytes || frag_dupl)) {
+ /*
+ * This file won't be added to any hash list, because it is a
+ * complete duplicate, and it doesn't need extra data to be
+ * stored, e.g. part 2 & 3 above. So keep track of it by adding
+ * it to a linked list. Obviously check if it's already there
+ * first.
+ */
+ for(dup = block_dupl->dup; dup; dup = dup->next)
+ if((!frag_bytes && dup->frag == NULL) ||
+ (frag_bytes && dup->frag == frag_dupl))
+ break;
+
+ if(dup) {
+ /* Found a matching file. Return the duplicate */
+ *dupf = *block_dup = TRUE;
+ return dup->file;
+ }
+ }
+
+ if(frag_dupl)
+ fragment = frag_dupl->fragment;
+ else
+ fragment = get_and_fill_fragment(file_buffer, dir_ent, TRUE);
+
+ if(block_dupl) {
+ start = block_dupl->start;
+ block_list = block_dupl->block_list;
+ }
+
+ *dupf = FALSE;
+ *block_dup = block_dupl != NULL;
+
+ file = create_non_dup(file_size, bytes, blocks, sparse, block_list,
+ start, fragment, checksum, fragment_checksum, checksum_flag,
+ file_buffer != NULL);
+
+ if(!block_dupl || (frag_bytes && !frag_dupl)) {
+ /*
+ * Partial duplicate, had to store some extra data for this
+ * file, either a block list, or a fragment
+ */
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex);
+ pthread_mutex_lock(&dup_mutex);
+
+ if(!block_dupl) {
+ file->block_next = dupl_block[bl_hash];
+ dupl_block[bl_hash] = file;
+ }
+
+ if(frag_bytes && !frag_dupl) {
+ file->frag_next = dupl_frag[frag_bytes];
+ dupl_frag[frag_bytes] = file;
+ }
+
+ dup_files ++;
+
+ pthread_cleanup_pop(1);
+ } else {
+ dup = malloc(sizeof(struct dup_info));
+ if(dup == NULL)
+ MEM_ERROR();
+
+ dup->frag = frag_dupl;
+ dup->file = file;
+ dup->next = block_dupl->dup;
+ block_dupl->dup = dup;
+ }
+
+ return file;
+}
+
+
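+/*
+ * Writer thread.  Each buffer queued on to_writer is written to the
+ * destination at the offset held in buffer->block.  A NULL buffer acts as
+ * a handshake: the thread replies on from_writer, which callers use to
+ * make sure all outstanding writes have completed (for example before
+ * truncating the destination after a read error).
+ */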
+static void *writer(void *arg)
+{
+ while(1) {
+ struct file_buffer *file_buffer = queue_get(to_writer);
+ off_t off;
+
+ if(file_buffer == NULL) {
+ queue_put(from_writer, NULL);
+ continue;
+ }
+
+ off = file_buffer->block;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &pos_mutex);
+ pthread_mutex_lock(&pos_mutex);
+
+ if(lseek(fd, start_offset + off, SEEK_SET) == -1) {
+ ERROR("writer: Lseek on destination failed because "
+ "%s, offset=0x%llx\n", strerror(errno), start_offset + off);
+ BAD_ERROR("Probably out of space on output "
+ "%s\n", block_device ? "block device" :
+ "filesystem");
+ }
+
+ if(write_bytes(fd, file_buffer->data,
+ file_buffer->size) == -1)
+ BAD_ERROR("Failed to write to output %s\n",
+ block_device ? "block device" : "filesystem");
+
+ pthread_cleanup_pop(1);
+
+ cache_block_put(file_buffer);
+ }
+}
+
+
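+/*
+ * Return true if the buffer contains only zero bytes.  The deflator thread
+ * uses this to detect blocks of sparse files, which are recorded with a
+ * zero c_byte rather than being compressed and written to disk.  The scan
+ * is done a long at a time, with any trailing bytes checked individually.
+ */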
+static int all_zero(struct file_buffer *file_buffer)
+{
+ int i;
+ long entries = file_buffer->size / sizeof(long);
+ long *p = (long *) file_buffer->data;
+
+ for(i = 0; i < entries && p[i] == 0; i++);
+
+ if(i == entries) {
+ for(i = file_buffer->size & ~(sizeof(long) - 1);
+ i < file_buffer->size && file_buffer->data[i] == 0;
+ i++);
+
+ return i == file_buffer->size;
+ }
+
+ return 0;
+}
+
+
+static void *deflator(void *arg)
+{
+ struct file_buffer *write_buffer = cache_get_nohash(bwriter_buffer);
+ void *stream = NULL;
+ int res;
+
+ res = compressor_init(comp, &stream, block_size, 1);
+ if(res)
+ BAD_ERROR("deflator:: compressor_init failed\n");
+
+ while(1) {
+ struct file_buffer *file_buffer = queue_get(to_deflate);
+
+ if(sparse_files && all_zero(file_buffer)) {
+ file_buffer->c_byte = 0;
+ seq_queue_put(to_main, file_buffer);
+ } else {
+ write_buffer->c_byte = mangle2(stream,
+ write_buffer->data, file_buffer->data,
+ file_buffer->size, block_size,
+ file_buffer->noD, 1);
+ write_buffer->sequence = file_buffer->sequence;
+ write_buffer->file_size = file_buffer->file_size;
+ write_buffer->block = file_buffer->block;
+ write_buffer->size = SQUASHFS_COMPRESSED_SIZE_BLOCK
+ (write_buffer->c_byte);
+ write_buffer->fragment = FALSE;
+ write_buffer->error = FALSE;
+ cache_block_put(file_buffer);
+ seq_queue_put(to_main, write_buffer);
+ write_buffer = cache_get_nohash(bwriter_buffer);
+ }
+ }
+}
+
+
+static void *frag_deflator(void *arg)
+{
+ void *stream = NULL;
+ int res;
+
+ res = compressor_init(comp, &stream, block_size, 1);
+ if(res)
+ BAD_ERROR("frag_deflator:: compressor_init failed\n");
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
+
+ while(1) {
+ int c_byte, compressed_size;
+ struct file_buffer *file_buffer = queue_get(to_frag);
+ struct file_buffer *write_buffer =
+ cache_get(fwriter_buffer, file_buffer->block);
+
+ c_byte = mangle2(stream, write_buffer->data, file_buffer->data,
+ file_buffer->size, block_size, noF, 1);
+ compressed_size = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte);
+ write_buffer->size = compressed_size;
+ pthread_mutex_lock(&fragment_mutex);
+ if(fragments_locked == FALSE) {
+ fragment_table[file_buffer->block].size = c_byte;
+ fragment_table[file_buffer->block].start_block = bytes;
+ write_buffer->block = bytes;
+ bytes += compressed_size;
+ fragments_outstanding --;
+ queue_put(to_writer, write_buffer);
+ log_fragment(file_buffer->block, fragment_table[file_buffer->block].start_block);
+ pthread_mutex_unlock(&fragment_mutex);
+ TRACE("Writing fragment %lld, uncompressed size %d, "
+ "compressed size %d\n", file_buffer->block,
+ file_buffer->size, compressed_size);
+ } else {
+ add_pending_fragment(write_buffer, c_byte,
+ file_buffer->block);
+ pthread_mutex_unlock(&fragment_mutex);
+ }
+ cache_block_put(file_buffer);
+ }
+
+ pthread_cleanup_pop(0);
+ return NULL;
+
+}
+
+
+static void *frag_order_deflator(void *arg)
+{
+ void *stream = NULL;
+ int res;
+
+ res = compressor_init(comp, &stream, block_size, 1);
+ if(res)
+		BAD_ERROR("frag_order_deflator:: compressor_init failed\n");
+
+ while(1) {
+ int c_byte;
+ struct file_buffer *file_buffer = queue_get(to_frag);
+ struct file_buffer *write_buffer =
+ cache_get(fwriter_buffer, file_buffer->block);
+
+ c_byte = mangle2(stream, write_buffer->data, file_buffer->data,
+ file_buffer->size, block_size, noF, 1);
+ write_buffer->block = file_buffer->block;
+ write_buffer->sequence = file_buffer->sequence;
+ write_buffer->size = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte);
+ write_buffer->fragment = FALSE;
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
+ pthread_mutex_lock(&fragment_mutex);
+ fragment_table[file_buffer->block].size = c_byte;
+ pthread_cleanup_pop(1);
+ seq_queue_put(to_order, write_buffer);
+ TRACE("Writing fragment %lld, uncompressed size %d, "
+ "compressed size %d\n", file_buffer->block,
+ file_buffer->size, SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte));
+ cache_block_put(file_buffer);
+ }
+}
+
+
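+/*
+ * In reproducible mode fragments are compressed in parallel by
+ * frag_order_deflator(), but their disk positions are assigned here, in
+ * fragment order, using the to_order sequence queue.  This makes fragment
+ * block placement independent of thread scheduling, which is what keeps
+ * the output reproducible.  fragment_waiting is signalled so that
+ * ensure_fragments_flushed() can tell when no fragments are outstanding.
+ */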
+static void *frag_orderer(void *arg)
+{
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
+
+ while(1) {
+ struct file_buffer *write_buffer = seq_queue_get(to_order);
+ int block = write_buffer->block;
+
+ pthread_mutex_lock(&fragment_mutex);
+ fragment_table[block].start_block = bytes;
+ write_buffer->block = bytes;
+ bytes += SQUASHFS_COMPRESSED_SIZE_BLOCK(write_buffer->size);
+ fragments_outstanding --;
+ log_fragment(block, write_buffer->block);
+ queue_put(to_writer, write_buffer);
+ pthread_cond_signal(&fragment_waiting);
+ pthread_mutex_unlock(&fragment_mutex);
+ }
+
+ pthread_cleanup_pop(0);
+ return NULL;
+}
+
+
+static struct file_buffer *get_file_buffer()
+{
+ struct file_buffer *file_buffer = seq_queue_get(to_main);
+
+ return file_buffer;
+}
+
+
+static struct file_info *write_file_empty(struct dir_ent *dir_ent,
+ struct file_buffer *file_buffer, int *duplicate_file)
+{
+ file_count ++;
+ *duplicate_file = FALSE;
+ cache_block_put(file_buffer);
+ return create_non_dup(0, 0, 0, 0, NULL, 0, &empty_fragment, 0, 0,
+ FALSE, FALSE);
+}
+
+
+static struct file_info *write_file_frag(struct dir_ent *dir_ent,
+ struct file_buffer *file_buffer, int *duplicate_file)
+{
+ int size = file_buffer->file_size;
+ struct fragment *fragment;
+ unsigned short checksum = file_buffer->checksum;
+ struct file_info *file;
+
+ file = frag_duplicate(file_buffer, duplicate_file);
+ if(!file) {
+ fragment = get_and_fill_fragment(file_buffer, dir_ent, FALSE);
+
+ if(duplicate_checking)
+ file = add_non_dup(size, 0, 0, 0, NULL, 0, fragment, 0,
+ checksum, TRUE, TRUE, FALSE, FALSE, 0);
+ else
+ file = create_non_dup(size, 0, 0, 0, NULL, 0, fragment,
+ 0, checksum, TRUE, TRUE);
+ }
+
+ cache_block_put(file_buffer);
+
+ total_bytes += size;
+ file_count ++;
+
+ inc_progress_bar();
+
+ return file;
+}
+
+
+static void log_file(struct dir_ent *dir_ent, long long start)
+{
+ if(logging && start)
+ fprintf(log_fd, "%s, %lld\n", pathname(dir_ent), start);
+}
+
+
+static struct file_info *write_file_process(int *status, struct dir_ent *dir_ent,
+ struct file_buffer *read_buffer, int *duplicate_file)
+{
+ long long read_size, file_bytes, start;
+ struct fragment *fragment;
+ unsigned int *block_list = NULL;
+ int block = 0;
+ long long sparse = 0;
+ struct file_buffer *fragment_buffer = NULL;
+ struct file_info *file;
+
+ *duplicate_file = FALSE;
+
+ if(reproducible)
+ ensure_fragments_flushed();
+ else
+ lock_fragments();
+
+ file_bytes = 0;
+ start = bytes;
+ while (1) {
+ read_size = read_buffer->file_size;
+ if(read_buffer->fragment) {
+ fragment_buffer = read_buffer;
+ if(block == 0)
+				start = 0;
+ } else {
+ block_list = realloc(block_list, (block + 1) *
+ sizeof(unsigned int));
+ if(block_list == NULL)
+ MEM_ERROR();
+ block_list[block ++] = read_buffer->c_byte;
+ if(read_buffer->c_byte) {
+ read_buffer->block = bytes;
+ bytes += read_buffer->size;
+ cache_hash(read_buffer, read_buffer->block);
+ file_bytes += read_buffer->size;
+ queue_put(to_writer, read_buffer);
+ } else {
+ sparse += read_buffer->size;
+ cache_block_put(read_buffer);
+ }
+ }
+ inc_progress_bar();
+
+ if(read_size != -1)
+ break;
+
+ read_buffer = get_file_buffer();
+ if(read_buffer->error)
+ goto read_err;
+ }
+
+ if(!reproducible)
+ unlock_fragments();
+
+ fragment = get_and_fill_fragment(fragment_buffer, dir_ent, block != 0);
+
+ if(duplicate_checking) {
+ int bl_hash = block ? block_hash(block_list[0], block) : 0;
+
+ file = add_non_dup(read_size, file_bytes, block, sparse,
+ block_list, start, fragment, 0, fragment_buffer ?
+ fragment_buffer->checksum : 0, FALSE, TRUE, FALSE,
+ FALSE, bl_hash);
+ } else
+ file = create_non_dup(read_size, file_bytes, block, sparse,
+ block_list, start, fragment, 0, fragment_buffer ?
+ fragment_buffer->checksum : 0, FALSE, TRUE);
+
+ cache_block_put(fragment_buffer);
+ file_count ++;
+ total_bytes += read_size;
+
+ log_file(dir_ent, start);
+
+ *status = 0;
+ return file;
+
+read_err:
+ dec_progress_bar(block);
+ *status = read_buffer->error;
+ bytes = start;
+ if(!block_device) {
+ int res;
+
+ queue_put(to_writer, NULL);
+ if(queue_get(from_writer) != 0)
+ EXIT_MKSQUASHFS();
+ res = ftruncate(fd, bytes);
+ if(res != 0)
+ BAD_ERROR("Failed to truncate dest file because %s\n",
+ strerror(errno));
+ }
+ if(!reproducible)
+ unlock_fragments();
+ free(block_list);
+ cache_block_put(read_buffer);
+ return NULL;
+}
+
+
+static struct file_info *write_file_blocks_dup(int *status, struct dir_ent *dir_ent,
+ struct file_buffer *read_buffer, int *duplicate_file, int bl_hash)
+{
+ int block, thresh;
+ long long read_size = read_buffer->file_size;
+ long long file_bytes, start;
+ int blocks = (read_size + block_size - 1) >> block_log;
+ unsigned int *block_list;
+ struct file_buffer **buffer_list;
+ long long sparse = 0;
+ struct file_buffer *fragment_buffer = NULL;
+ struct file_info *file;
+ int block_dup;
+
+ block_list = malloc(blocks * sizeof(unsigned int));
+ if(block_list == NULL)
+ MEM_ERROR();
+
+ buffer_list = malloc(blocks * sizeof(struct file_buffer *));
+ if(buffer_list == NULL)
+ MEM_ERROR();
+
+ if(reproducible)
+ ensure_fragments_flushed();
+ else
+ lock_fragments();
+
+ file_bytes = 0;
+ start = bytes;
+ thresh = blocks > bwriter_size ? blocks - bwriter_size : 0;
+
+ for(block = 0; block < blocks;) {
+ if(read_buffer->fragment) {
+ block_list[block] = 0;
+ buffer_list[block] = NULL;
+ fragment_buffer = read_buffer;
+ blocks = read_size >> block_log;
+ } else {
+ block_list[block] = read_buffer->c_byte;
+
+ if(read_buffer->c_byte) {
+ read_buffer->block = bytes;
+ bytes += read_buffer->size;
+ file_bytes += read_buffer->size;
+ cache_hash(read_buffer, read_buffer->block);
+ if(block < thresh) {
+ buffer_list[block] = NULL;
+ queue_put(to_writer, read_buffer);
+ } else
+ buffer_list[block] = read_buffer;
+ } else {
+ buffer_list[block] = NULL;
+ sparse += read_buffer->size;
+ cache_block_put(read_buffer);
+ }
+ }
+ inc_progress_bar();
+
+ if(++block < blocks) {
+ read_buffer = get_file_buffer();
+ if(read_buffer->error)
+ goto read_err;
+ }
+ }
+
+ /*
+	 * sparse count is needed to ensure squashfs correctly reports a
+	 * smaller block count on stat calls to sparse files. This is
+ * to ensure intelligent applications like cp correctly handle the
+ * file as a sparse file. If the file in the original filesystem isn't
+ * stored as a sparse file then still store it sparsely in squashfs, but
+ * report it as non-sparse on stat calls to preserve semantics
+ */
+ if(sparse && (dir_ent->inode->buf.st_blocks << 9) >= read_size)
+ sparse = 0;
+
+ file = duplicate(duplicate_file, &block_dup, read_size, file_bytes, block_list,
+ start, dir_ent, fragment_buffer, blocks, sparse, bl_hash);
+
+ if(block_dup == FALSE) {
+ for(block = thresh; block < blocks; block ++)
+ if(buffer_list[block])
+ queue_put(to_writer, buffer_list[block]);
+ } else {
+ for(block = thresh; block < blocks; block ++)
+ cache_block_put(buffer_list[block]);
+ bytes = start;
+ if(thresh && !block_device) {
+ int res;
+
+ queue_put(to_writer, NULL);
+ if(queue_get(from_writer) != 0)
+ EXIT_MKSQUASHFS();
+ res = ftruncate(fd, bytes);
+ if(res != 0)
+ BAD_ERROR("Failed to truncate dest file because"
+ " %s\n", strerror(errno));
+ }
+ }
+
+ if(!reproducible)
+ unlock_fragments();
+ cache_block_put(fragment_buffer);
+ free(buffer_list);
+ file_count ++;
+ total_bytes += read_size;
+
+ if(block_dup == TRUE)
+ free(block_list);
+ else
+ log_file(dir_ent, file->start);
+
+ *status = 0;
+ return file;
+
+read_err:
+ dec_progress_bar(block);
+ *status = read_buffer->error;
+ bytes = start;
+ if(thresh && !block_device) {
+ int res;
+
+ queue_put(to_writer, NULL);
+ if(queue_get(from_writer) != 0)
+ EXIT_MKSQUASHFS();
+ res = ftruncate(fd, bytes);
+ if(res != 0)
+ BAD_ERROR("Failed to truncate dest file because %s\n",
+ strerror(errno));
+ }
+ if(!reproducible)
+ unlock_fragments();
+ for(blocks = thresh; blocks < block; blocks ++)
+ cache_block_put(buffer_list[blocks]);
+ free(buffer_list);
+ free(block_list);
+ cache_block_put(read_buffer);
+ return NULL;
+}
+
+
+static struct file_info *write_file_blocks(int *status, struct dir_ent *dir_ent,
+ struct file_buffer *read_buffer, int *dup)
+{
+ long long read_size = read_buffer->file_size;
+ long long file_bytes, start;
+ struct fragment *fragment;
+ unsigned int *block_list;
+ int block;
+ int blocks = (read_size + block_size - 1) >> block_log;
+ long long sparse = 0;
+ struct file_buffer *fragment_buffer = NULL;
+ struct file_info *file;
+ int bl_hash = 0;
+
+ if(pre_duplicate(read_size, dir_ent->inode, read_buffer, &bl_hash))
+ return write_file_blocks_dup(status, dir_ent, read_buffer, dup, bl_hash);
+
+ *dup = FALSE;
+
+ block_list = malloc(blocks * sizeof(unsigned int));
+ if(block_list == NULL)
+ MEM_ERROR();
+
+ if(reproducible)
+ ensure_fragments_flushed();
+ else
+ lock_fragments();
+
+ file_bytes = 0;
+ start = bytes;
+ for(block = 0; block < blocks;) {
+ if(read_buffer->fragment) {
+ block_list[block] = 0;
+ fragment_buffer = read_buffer;
+ blocks = read_size >> block_log;
+ } else {
+ block_list[block] = read_buffer->c_byte;
+ if(read_buffer->c_byte) {
+ read_buffer->block = bytes;
+ bytes += read_buffer->size;
+ cache_hash(read_buffer, read_buffer->block);
+ file_bytes += read_buffer->size;
+ queue_put(to_writer, read_buffer);
+ } else {
+ sparse += read_buffer->size;
+ cache_block_put(read_buffer);
+ }
+ }
+ inc_progress_bar();
+
+ if(++block < blocks) {
+ read_buffer = get_file_buffer();
+ if(read_buffer->error)
+ goto read_err;
+ }
+ }
+
+ /*
+	 * sparse count is needed to ensure squashfs correctly reports a
+	 * smaller block count on stat calls to sparse files. This is
+ * to ensure intelligent applications like cp correctly handle the
+ * file as a sparse file. If the file in the original filesystem isn't
+ * stored as a sparse file then still store it sparsely in squashfs, but
+ * report it as non-sparse on stat calls to preserve semantics
+ */
+ if(sparse && (dir_ent->inode->buf.st_blocks << 9) >= read_size)
+ sparse = 0;
+
+ if(!reproducible)
+ unlock_fragments();
+
+ fragment = get_and_fill_fragment(fragment_buffer, dir_ent, TRUE);
+
+ if(duplicate_checking)
+ file = add_non_dup(read_size, file_bytes, blocks, sparse,
+ block_list, start, fragment, 0, fragment_buffer ?
+ fragment_buffer->checksum : 0, FALSE, TRUE, FALSE,
+ FALSE, bl_hash);
+ else
+ file = create_non_dup(read_size, file_bytes, blocks, sparse,
+ block_list, start, fragment, 0, fragment_buffer ?
+ fragment_buffer->checksum : 0, FALSE, TRUE);
+
+ cache_block_put(fragment_buffer);
+ file_count ++;
+ total_bytes += read_size;
+
+ log_file(dir_ent, start);
+
+ *status = 0;
+ return file;
+
+read_err:
+ dec_progress_bar(block);
+ *status = read_buffer->error;
+ bytes = start;
+ if(!block_device) {
+ int res;
+
+ queue_put(to_writer, NULL);
+ if(queue_get(from_writer) != 0)
+ EXIT_MKSQUASHFS();
+ res = ftruncate(fd, bytes);
+ if(res != 0)
+ BAD_ERROR("Failed to truncate dest file because %s\n",
+ strerror(errno));
+ }
+ if(!reproducible)
+ unlock_fragments();
+ free(block_list);
+ cache_block_put(read_buffer);
+ return NULL;
+}
+
+
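+/*
+ * Top-level entry point for writing a file's data.  The first buffer read
+ * selects how the file is handled: a file_size of -1 means the size is not
+ * yet known and the file is processed as it is read (write_file_process),
+ * 0 means an empty file, a first buffer already marked as a fragment means
+ * the whole file fits in a tail-end fragment (write_file_frag), and
+ * anything else is written as block data (write_file_blocks).  A status of
+ * 2 means the file changed size while being read and it is re-read; a
+ * status of 1 means the read failed and an empty file is created instead.
+ */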
+struct file_info *write_file(struct dir_ent *dir, int *dup)
+{
+ int status;
+ struct file_buffer *read_buffer;
+ struct file_info *file;
+
+again:
+ read_buffer = get_file_buffer();
+ status = read_buffer->error;
+
+ if(status)
+ cache_block_put(read_buffer);
+ else if(read_buffer->file_size == -1)
+ file = write_file_process(&status, dir, read_buffer, dup);
+ else if(read_buffer->file_size == 0)
+ file = write_file_empty(dir, read_buffer, dup);
+ else if(read_buffer->fragment && read_buffer->c_byte)
+ file = write_file_frag(dir, read_buffer, dup);
+ else
+ file = write_file_blocks(&status, dir, read_buffer, dup);
+
+ if(status == 2) {
+ ERROR("File %s changed size while reading filesystem, "
+ "attempting to re-read\n", pathname(dir));
+ goto again;
+ } else if(status == 1) {
+ ERROR_START("Failed to read file %s", pathname(dir));
+ ERROR_EXIT(", creating empty file\n");
+ file = write_file_empty(dir, NULL, dup);
+ } else if(status)
+ BAD_ERROR("Unexpected status value in write_file()");
+
+ return file;
+}
+
+
+#define BUFF_SIZE 512
+char *name;
+static char *basename_r();
+
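+/*
+ * getbase() returns the last component of pathname, lexically resolving
+ * "." and ".." components (relative paths are first prefixed with the
+ * current working directory).  NULL is returned if nothing is left, or if
+ * the result would be "..".  Illustrative examples (absolute paths, so the
+ * current directory does not matter):
+ *
+ *	getbase("/tmp/dir/./file") returns "file"
+ *	getbase("/tmp/dir/..") returns "tmp"
+ */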
+static char *getbase(char *pathname)
+{
+ static char *b_buffer = NULL;
+ static int b_size = BUFF_SIZE;
+ char *result;
+
+ if(b_buffer == NULL) {
+ b_buffer = malloc(b_size);
+ if(b_buffer == NULL)
+ MEM_ERROR();
+ }
+
+ while(1) {
+ if(*pathname != '/') {
+ result = getcwd(b_buffer, b_size);
+ if(result == NULL && errno != ERANGE)
+ BAD_ERROR("Getcwd failed in getbase\n");
+
+ /* enough room for pathname + "/" + '\0' terminator? */
+ if(result && strlen(pathname) + 2 <=
+ b_size - strlen(b_buffer)) {
+ strcat(strcat(b_buffer, "/"), pathname);
+ break;
+ }
+ } else if(strlen(pathname) < b_size) {
+ strcpy(b_buffer, pathname);
+ break;
+ }
+
+ /* Buffer not large enough, realloc and try again */
+ b_buffer = realloc(b_buffer, b_size += BUFF_SIZE);
+ if(b_buffer == NULL)
+ MEM_ERROR();
+ }
+
+ name = b_buffer;
+ if(((result = basename_r()) == NULL) || (strcmp(result, "..") == 0))
+ return NULL;
+ else
+ return result;
+}
+
+
+static char *basename_r()
+{
+ char *s;
+ char *p;
+ int n = 1;
+
+ for(;;) {
+ s = name;
+ if(*name == '\0')
+ return NULL;
+ if(*name != '/') {
+ while(*name != '\0' && *name != '/') name++;
+ n = name - s;
+ }
+ while(*name == '/') name++;
+ if(strncmp(s, ".", n) == 0)
+ continue;
+ if((*name == '\0') || (strncmp(s, "..", n) == 0) ||
+ ((p = basename_r()) == NULL)) {
+ s[n] = '\0';
+ return s;
+ }
+ if(strcmp(p, "..") == 0)
+ continue;
+ return p;
+ }
+}
+
+
+static inline void dec_nlink_inode(struct dir_ent *dir_ent)
+{
+ if(dir_ent->inode == NULL || dir_ent->inode->root_entry)
+ return;
+
+ if(dir_ent->inode->nlink == 1) {
+ /* Delete this inode, as the last or only reference
+ * to it is going away */
+ struct stat *buf = &dir_ent->inode->buf;
+ int ino_hash = INODE_HASH(buf->st_dev, buf->st_ino);
+ struct inode_info *inode = inode_info[ino_hash];
+ struct inode_info *prev = NULL;
+
+ while(inode && inode != dir_ent->inode) {
+ prev = inode;
+ inode = inode->next;
+ }
+
+ if(inode) {
+ if(prev)
+ prev->next = inode->next;
+ else
+ inode_info[ino_hash] = inode->next;
+ }
+
+ /* Decrement the progress bar */
+ if((buf->st_mode & S_IFMT) == S_IFREG)
+ progress_bar_size(-((buf->st_size + block_size - 1)
+ >> block_log));
+
+ free(dir_ent->inode);
+ dir_ent->inode = NULL;
+ } else
+ dir_ent->inode->nlink --;
+}
+
+
+static struct inode_info *lookup_inode3(struct stat *buf, struct pseudo_dev *pseudo,
+ char *symlink, int bytes)
+{
+ int ino_hash = INODE_HASH(buf->st_dev, buf->st_ino);
+ struct inode_info *inode;
+
+ /*
+ * Look up the inode in the hash table; if it already exists we have a
+ * hardlink, so increment the nlink count and return it.
+ * Don't do the look-up for directories because Unix/Linux doesn't
+ * allow hard-links to directories.
+ */
+ if ((buf->st_mode & S_IFMT) != S_IFDIR && !no_hardlinks) {
+ for(inode = inode_info[ino_hash]; inode; inode = inode->next) {
+ if(memcmp(buf, &inode->buf, sizeof(struct stat)) == 0) {
+ inode->nlink ++;
+ return inode;
+ }
+ }
+ }
+
+ if((buf->st_mode & S_IFMT) == S_IFREG)
+ progress_bar_size((buf->st_size + block_size - 1)
+ >> block_log);
+
+ inode = malloc(sizeof(struct inode_info) + bytes);
+ if(inode == NULL)
+ MEM_ERROR();
+
+ if(bytes)
+ memcpy(&inode->symlink, symlink, bytes);
+ memcpy(&inode->buf, buf, sizeof(struct stat));
+ inode->read = FALSE;
+ inode->root_entry = FALSE;
+ inode->pseudo = pseudo;
+ inode->inode = SQUASHFS_INVALID_BLK;
+ inode->nlink = 1;
+ inode->inode_number = 0;
+ inode->dummy_root_dir = FALSE;
+ inode->xattr = NULL;
+ inode->tarfile = FALSE;
+
+ /*
+ * Copy filesystem-wide defaults into the inode; these defaults
+ * may be altered on an individual inode basis by user-specified
+ * actions
+ */
+ inode->no_fragments = no_fragments;
+ inode->always_use_fragments = always_use_fragments;
+ inode->noD = noD;
+ inode->noF = noF;
+
+ inode->next = inode_info[ino_hash];
+ inode_info[ino_hash] = inode;
+
+ return inode;
+}
+
+
+static struct inode_info *lookup_inode2(struct stat *buf, struct pseudo_dev *pseudo)
+{
+ return lookup_inode3(buf, pseudo, NULL, 0);
+}
+
+
+struct inode_info *lookup_inode(struct stat *buf)
+{
+ return lookup_inode2(buf, NULL);
+}
+
+
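+/*
+ * Assign an inode number if one hasn't been allocated yet: use_this if it is
+ * non-zero, otherwise the next sequentially allocated number (the a ? : b
+ * form is the GNU extension meaning a ? a : b).
+ */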
+static inline void alloc_inode_no(struct inode_info *inode, unsigned int use_this)
+{
+ if (inode->inode_number == 0) {
+ inode->inode_number = use_this ? : inode_no ++;
+ }
+}
+
+
+struct dir_info *create_dir(char *pathname, char *subpath, unsigned int depth)
+{
+ struct dir_info *dir;
+
+ dir = malloc(sizeof(struct dir_info));
+ if(dir == NULL)
+ MEM_ERROR();
+
+ dir->pathname = strdup(pathname);
+ dir->subpath = strdup(subpath);
+ dir->count = 0;
+ dir->directory_count = 0;
+ dir->dir_is_ldir = TRUE;
+ dir->list = NULL;
+ dir->depth = depth;
+ dir->excluded = 0;
+
+ return dir;
+}
+
+
+struct dir_ent *lookup_name(struct dir_info *dir, char *name)
+{
+ struct dir_ent *dir_ent = dir->list;
+
+ for(; dir_ent && strcmp(dir_ent->name, name) != 0;
+ dir_ent = dir_ent->next);
+
+ return dir_ent;
+}
+
+
+struct dir_ent *create_dir_entry(char *name, char *source_name,
+ char *nonstandard_pathname, struct dir_info *dir)
+{
+ struct dir_ent *dir_ent = malloc(sizeof(struct dir_ent));
+ if(dir_ent == NULL)
+ MEM_ERROR();
+
+ dir_ent->name = name;
+ dir_ent->source_name = source_name;
+ dir_ent->nonstandard_pathname = nonstandard_pathname;
+ dir_ent->our_dir = dir;
+ dir_ent->inode = NULL;
+ dir_ent->next = NULL;
+
+ return dir_ent;
+}
+
+
+void add_dir_entry(struct dir_ent *dir_ent, struct dir_info *sub_dir,
+ struct inode_info *inode_info)
+{
+ struct dir_info *dir = dir_ent->our_dir;
+
+ if(sub_dir)
+ sub_dir->dir_ent = dir_ent;
+ dir_ent->inode = inode_info;
+ dir_ent->dir = sub_dir;
+
+ dir_ent->next = dir->list;
+ dir->list = dir_ent;
+ dir->count++;
+}
+
+
+static inline void add_dir_entry2(char *name, char *source_name,
+ char *nonstandard_pathname, struct dir_info *sub_dir,
+ struct inode_info *inode_info, struct dir_info *dir)
+{
+ struct dir_ent *dir_ent = create_dir_entry(name, source_name,
+ nonstandard_pathname, dir);
+
+
+ add_dir_entry(dir_ent, sub_dir, inode_info);
+}
+
+
+void free_dir_entry(struct dir_ent *dir_ent)
+{
+ if(dir_ent->name)
+ free(dir_ent->name);
+
+ if(dir_ent->source_name)
+ free(dir_ent->source_name);
+
+ if(dir_ent->nonstandard_pathname)
+ free(dir_ent->nonstandard_pathname);
+
+ /* if this entry has been associated with an inode, then we need
+ * to update the inode nlink count */
+ dec_nlink_inode(dir_ent);
+
+ free(dir_ent);
+}
+
+
+static inline void add_excluded(struct dir_info *dir)
+{
+ dir->excluded ++;
+}
+
+
+squashfs_inode do_directory_scans(struct dir_ent *dir_ent, int progress)
+{
+ squashfs_inode inode;
+ struct pseudo *pseudo = get_pseudo();
+
+ /*
+ * Process most actions and any pseudo files
+ */
+
+ /* If there's a root pseudo definition, skip it; it will have already
+ * been handled if no sources were specified on the command line.
+ * If sources have been specified, just ignore it, as sources
+ * on the command line take precedence.
+ */
+ if(pseudo != NULL && pseudo->names == 1 && strcmp(pseudo->name[0].name, "/") == 0) {
+ if(pseudo->name[0].xattr)
+ root_dir->dir_ent->inode->xattr = pseudo->name[0].xattr;
+
+ pseudo = pseudo->name[0].pseudo;
+ }
+
+ if(actions() || pseudo)
+ dir_scan2(root_dir, pseudo);
+
+ /*
+ * Process move actions
+ */
+ if(move_actions()) {
+ dir_scan3(root_dir);
+ do_move_actions();
+ }
+
+ /*
+ * Process prune actions
+ */
+ if(prune_actions()) {
+ dir_scan4(root_dir, TRUE);
+ dir_scan4(root_dir, FALSE);
+ }
+
+ /*
+ * Process empty actions
+ */
+ if(empty_actions())
+ dir_scan5(root_dir);
+
+ /*
+ * Sort directories and compute the inode numbers
+ */
+ dir_scan6(root_dir);
+
+ alloc_inode_no(dir_ent->inode, root_inode_number);
+
+ eval_actions(root_dir, dir_ent);
+
+ if(sorted)
+ generate_file_priorities(root_dir, 0,
+ &root_dir->dir_ent->inode->buf);
+
+ if(appending) {
+ sigset_t sigmask;
+
+ restore_thread = init_restore_thread();
+ sigemptyset(&sigmask);
+ sigaddset(&sigmask, SIGINT);
+ sigaddset(&sigmask, SIGTERM);
+ sigaddset(&sigmask, SIGUSR1);
+ if(pthread_sigmask(SIG_BLOCK, &sigmask, NULL) != 0)
+ BAD_ERROR("Failed to set signal mask\n");
+ write_destination(fd, SQUASHFS_START, 4, "\0\0\0\0");
+ }
+
+ queue_put(to_reader, root_dir);
+
+ if(sorted)
+ sort_files_and_write(root_dir);
+
+ dir_scan7(&inode, root_dir);
+ dir_ent->inode->inode = inode;
+ dir_ent->inode->type = SQUASHFS_DIR_TYPE;
+
+ return inode;
+}
+
+
+static squashfs_inode scan_single(char *pathname, int progress)
+{
+ struct stat buf;
+ struct dir_ent *dir_ent;
+
+ if(appending)
+ root_dir = dir_scan1(pathname, "", paths, scan1_single_readdir, 1);
+ else
+ root_dir = dir_scan1(pathname, "", paths, scan1_readdir, 1);
+
+ if(root_dir == NULL)
+ BAD_ERROR("Failed to scan source directory\n");
+
+ /* Create root directory dir_ent and associated inode, and connect
+ * it to the root directory dir_info structure */
+ dir_ent = create_dir_entry("", NULL, pathname, scan1_opendir("", "", 0));
+
+ if(lstat(pathname, &buf) == -1)
+ /* source directory has disappeared? */
+ BAD_ERROR("Cannot stat source directory %s because %s\n",
+ pathname, strerror(errno));
+ if(root_mode_opt)
+ buf.st_mode = root_mode | S_IFDIR;
+
+ if(root_uid_opt)
+ buf.st_uid = root_uid;
+
+ if(root_gid_opt)
+ buf.st_gid = root_gid;
+
+ if(root_time_opt)
+ buf.st_mtime = root_time;
+
+ if(pseudo_override && global_uid_opt)
+ buf.st_uid = global_uid;
+
+ if(pseudo_override && global_gid_opt)
+ buf.st_gid = global_gid;
+
+ dir_ent->inode = lookup_inode(&buf);
+ dir_ent->dir = root_dir;
+ root_dir->dir_ent = dir_ent;
+
+ return do_directory_scans(dir_ent, progress);
+}
+
+
+static squashfs_inode scan_encomp(int progress)
+{
+ struct stat buf;
+ struct dir_ent *dir_ent;
+
+ root_dir = dir_scan1("", "", paths, scan1_encomp_readdir, 1);
+ if(root_dir == NULL)
+ BAD_ERROR("Failed to scan source\n");
+
+ /* Create root directory dir_ent and associated inode, and connect
+ * it to the root directory dir_info structure */
+ dir_ent = create_dir_entry("", NULL, "", scan1_opendir("", "", 0));
+
+ /*
+ * dummy top level directory, multiple sources specified on
+ * command line
+ */
+ memset(&buf, 0, sizeof(buf));
+ if(root_mode_opt)
+ buf.st_mode = root_mode | S_IFDIR;
+ else
+ buf.st_mode = S_IRWXU | S_IRWXG | S_IRWXO | S_IFDIR;
+ if(root_uid_opt)
+ buf.st_uid = root_uid;
+ else
+ buf.st_uid = getuid();
+ if(root_gid_opt)
+ buf.st_gid = root_gid;
+ else
+ buf.st_gid = getgid();
+ if(root_time_opt)
+ buf.st_mtime = root_time;
+ else
+ buf.st_mtime = time(NULL);
+ if(pseudo_override && global_uid_opt)
+ buf.st_uid = global_uid;
+
+ if(pseudo_override && global_gid_opt)
+ buf.st_gid = global_gid;
+
+ buf.st_dev = 0;
+ buf.st_ino = 0;
+ dir_ent->inode = lookup_inode(&buf);
+ dir_ent->inode->dummy_root_dir = TRUE;
+ dir_ent->dir = root_dir;
+ root_dir->dir_ent = dir_ent;
+
+ return do_directory_scans(dir_ent, progress);
+}
+
+
+squashfs_inode dir_scan(int directory, int progress)
+{
+ int single = !keep_as_directory && source == 1;
+
+ if(single && directory)
+ return scan_single(source_path[0], progress);
+ else
+ return scan_encomp(progress);
+}
+
+
+/*
+ * dir_scan1 routines...
+ * These scan the source directories into memory for processing.
+ * Exclude actions are processed here (in contrast to the other actions)
+ * because they affect what is scanned.
+ */
+struct dir_info *scan1_opendir(char *pathname, char *subpath, unsigned int depth)
+{
+ struct dir_info *dir;
+
+ dir = malloc(sizeof(struct dir_info));
+ if(dir == NULL)
+ MEM_ERROR();
+
+ if(pathname[0] != '\0') {
+ dir->linuxdir = opendir(pathname);
+ if(dir->linuxdir == NULL) {
+ free(dir);
+ return NULL;
+ }
+ }
+
+ dir->pathname = strdup(pathname);
+ dir->subpath = strdup(subpath);
+ dir->count = 0;
+ dir->directory_count = 0;
+ dir->dir_is_ldir = TRUE;
+ dir->list = NULL;
+ dir->depth = depth;
+ dir->excluded = 0;
+
+ return dir;
+}
+
+
+static struct dir_ent *scan1_encomp_readdir(struct dir_info *dir)
+{
+ static int index = 0;
+
+ if(dir->count < old_root_entries) {
+ int i;
+
+ for(i = 0; i < old_root_entries; i++) {
+ if(old_root_entry[i].inode.type == SQUASHFS_DIR_TYPE)
+ dir->directory_count ++;
+ add_dir_entry2(old_root_entry[i].name, NULL, NULL, NULL,
+ &old_root_entry[i].inode, dir);
+ }
+ }
+
+ while(index < source) {
+ char *basename = NULL;
+ char *dir_name = getbase(source_path[index]);
+ int pass = 1, res;
+
+ if(dir_name == NULL) {
+ ERROR_START("Bad source directory %s",
+ source_path[index]);
+ ERROR_EXIT(" - skipping ...\n");
+ index ++;
+ continue;
+ }
+ dir_name = strdup(dir_name);
+ for(;;) {
+ struct dir_ent *dir_ent = dir->list;
+
+ for(; dir_ent && strcmp(dir_ent->name, dir_name) != 0;
+ dir_ent = dir_ent->next);
+ if(dir_ent == NULL)
+ break;
+ ERROR("Source directory entry %s already used! - trying"
+ " ", dir_name);
+ if(pass == 1)
+ basename = dir_name;
+ else
+ free(dir_name);
+ res = asprintf(&dir_name, "%s_%d", basename, pass++);
+ if(res == -1)
+ BAD_ERROR("asprintf failed in "
+ "scan1_encomp_readdir\n");
+ ERROR("%s\n", dir_name);
+ }
+
+ if(one_file_system && source > 1)
+ cur_dev = source_dev[index];
+
+ return create_dir_entry(dir_name, basename,
+ strdup(source_path[index ++]), dir);
+ }
+ return NULL;
+}
+
+
+static struct dir_ent *scan1_single_readdir(struct dir_info *dir)
+{
+ struct dirent *d_name;
+ int i;
+
+ if(dir->count < old_root_entries) {
+ for(i = 0; i < old_root_entries; i++) {
+ if(old_root_entry[i].inode.type == SQUASHFS_DIR_TYPE)
+ dir->directory_count ++;
+ add_dir_entry2(old_root_entry[i].name, NULL, NULL, NULL,
+ &old_root_entry[i].inode, dir);
+ }
+ }
+
+ if((d_name = readdir(dir->linuxdir)) != NULL) {
+ char *basename = NULL;
+ char *dir_name = strdup(d_name->d_name);
+ int pass = 1, res;
+
+ for(;;) {
+ struct dir_ent *dir_ent = dir->list;
+
+ for(; dir_ent && strcmp(dir_ent->name, dir_name) != 0;
+ dir_ent = dir_ent->next);
+ if(dir_ent == NULL)
+ break;
+ ERROR("Source directory entry %s already used! - trying"
+ " ", dir_name);
+ if (pass == 1)
+ basename = dir_name;
+ else
+ free(dir_name);
+ res = asprintf(&dir_name, "%s_%d", d_name->d_name, pass++);
+ if(res == -1)
+ BAD_ERROR("asprintf failed in "
+ "scan1_single_readdir\n");
+ ERROR("%s\n", dir_name);
+ }
+ return create_dir_entry(dir_name, basename, NULL, dir);
+ }
+
+ return NULL;
+}
+
+
+static struct dir_ent *scan1_readdir(struct dir_info *dir)
+{
+ struct dirent *d_name = readdir(dir->linuxdir);
+
+ return d_name ?
+ create_dir_entry(strdup(d_name->d_name), NULL, NULL, dir) :
+ NULL;
+}
+
+
+static void scan1_freedir(struct dir_info *dir)
+{
+ if(dir->pathname[0] != '\0')
+ closedir(dir->linuxdir);
+}
+
+
+static struct dir_info *dir_scan1(char *filename, char *subpath,
+ struct pathnames *paths,
+ struct dir_ent *(_readdir)(struct dir_info *), unsigned int depth)
+{
+ struct dir_info *dir = scan1_opendir(filename, subpath, depth);
+ struct dir_ent *dir_ent;
+
+ if(dir == NULL) {
+ ERROR_START("Could not open %s", filename);
+ ERROR_EXIT(", skipping...\n");
+ return NULL;
+ }
+
+ if(max_depth_opt && depth > max_depth) {
+ add_excluded(dir);
+ scan1_freedir(dir);
+ return dir;
+ }
+
+ while((dir_ent = _readdir(dir))) {
+ struct dir_info *sub_dir;
+ struct stat buf;
+ struct pathnames *new = NULL;
+ char *filename = pathname(dir_ent);
+ char *subpath = NULL;
+ char *dir_name = dir_ent->name;
+ int create_empty_directory = FALSE;
+
+ if(strcmp(dir_name, ".") == 0 || strcmp(dir_name, "..") == 0) {
+ free_dir_entry(dir_ent);
+ continue;
+ }
+
+ if(lstat(filename, &buf) == -1) {
+ ERROR_START("Cannot stat dir/file %s because %s",
+ filename, strerror(errno));
+ ERROR_EXIT(", ignoring\n");
+ free_dir_entry(dir_ent);
+ continue;
+ }
+
+ if(one_file_system) {
+ if(buf.st_dev != cur_dev) {
+ if(!S_ISDIR(buf.st_mode) || one_file_system_x) {
+ ERROR("%s is on a different filesystem, ignored\n", filename);
+ free_dir_entry(dir_ent);
+ continue;
+ }
+
+ create_empty_directory = TRUE;
+ }
+ }
+
+ if((buf.st_mode & S_IFMT) != S_IFREG &&
+ (buf.st_mode & S_IFMT) != S_IFDIR &&
+ (buf.st_mode & S_IFMT) != S_IFLNK &&
+ (buf.st_mode & S_IFMT) != S_IFCHR &&
+ (buf.st_mode & S_IFMT) != S_IFBLK &&
+ (buf.st_mode & S_IFMT) != S_IFIFO &&
+ (buf.st_mode & S_IFMT) != S_IFSOCK) {
+ ERROR_START("File %s has unrecognised filetype %d",
+ filename, buf.st_mode & S_IFMT);
+ ERROR_EXIT(", ignoring\n");
+ free_dir_entry(dir_ent);
+ continue;
+ }
+
+ if(old_exclude && old_excluded(filename, &buf)) {
+ add_excluded(dir);
+ free_dir_entry(dir_ent);
+ continue;
+ }
+
+ if(!old_exclude && excluded(dir_name, paths, &new)) {
+ add_excluded(dir);
+ free_dir_entry(dir_ent);
+ continue;
+ }
+
+ if(exclude_actions()) {
+ subpath = subpathname(dir_ent);
+
+ if(eval_exclude_actions(dir_name, filename, subpath,
+ &buf, depth, dir_ent)) {
+ add_excluded(dir);
+ free_dir_entry(dir_ent);
+ continue;
+ }
+ }
+
+ switch(buf.st_mode & S_IFMT) {
+ case S_IFDIR:
+ if(subpath == NULL)
+ subpath = subpathname(dir_ent);
+
+ if(create_empty_directory) {
+ ERROR("%s is on a different filesystem, creating empty directory\n", filename);
+ sub_dir = create_dir(filename, subpath, depth + 1);
+ } else
+ sub_dir = dir_scan1(filename, subpath, new,
+ scan1_readdir, depth + 1);
+ if(sub_dir) {
+ dir->directory_count ++;
+ add_dir_entry(dir_ent, sub_dir,
+ lookup_inode(&buf));
+ } else
+ free_dir_entry(dir_ent);
+ break;
+ case S_IFLNK: {
+ int byte;
+ static char buff[65536]; /* overflow safe */
+
+ byte = readlink(filename, buff, 65536);
+ if(byte == -1) {
+ ERROR_START("Failed to read symlink %s",
+ filename);
+ ERROR_EXIT(", ignoring\n");
+ } else if(byte == 65536) {
+ ERROR_START("Symlink %s is greater than 65536 "
+ "bytes!", filename);
+ ERROR_EXIT(", ignoring\n");
+ } else {
+ /* readlink doesn't 0 terminate the returned
+ * path */
+ buff[byte] = '\0';
+ add_dir_entry(dir_ent, NULL, lookup_inode3(&buf,
+ NULL, buff, byte + 1));
+ }
+ break;
+ }
+ default:
+ add_dir_entry(dir_ent, NULL, lookup_inode(&buf));
+ }
+
+ free(new);
+ }
+
+ scan1_freedir(dir);
+
+ return dir;
+}
+
+
+/*
+ * dir_scan2 routines...
+ * This processes most actions and any pseudo files
+ */
+static struct dir_ent *scan2_readdir(struct dir_info *dir, struct dir_ent *dir_ent)
+{
+ if (dir_ent == NULL)
+ dir_ent = dir->list;
+ else
+ dir_ent = dir_ent->next;
+
+ for(; dir_ent && dir_ent->inode->root_entry; dir_ent = dir_ent->next);
+
+ return dir_ent;
+}
+
+
+static void dir_scan2(struct dir_info *dir, struct pseudo *pseudo)
+{
+ struct dir_ent *dirent = NULL;
+ struct pseudo_entry *pseudo_ent;
+ struct stat buf;
+
+ while((dirent = scan2_readdir(dir, dirent)) != NULL) {
+ struct inode_info *inode_info = dirent->inode;
+ struct stat *buf = &inode_info->buf;
+ char *name = dirent->name;
+
+ eval_actions(root_dir, dirent);
+
+ if(pseudo_override && global_uid_opt)
+ buf->st_uid = global_uid;
+
+ if(pseudo_override && global_gid_opt)
+ buf->st_gid = global_gid;
+
+ if((buf->st_mode & S_IFMT) == S_IFDIR)
+ dir_scan2(dirent->dir, pseudo_subdir(name, pseudo));
+ }
+
+ /*
+ * Process pseudo modify and add (file, directory etc) definitions
+ */
+ while((pseudo_ent = pseudo_readdir(pseudo)) != NULL) {
+ struct dir_ent *dir_ent = NULL;
+
+ if(appending && dir->depth == 1) {
+ dir_ent = lookup_name(dir, pseudo_ent->name);
+
+ if(dir_ent && dir_ent->inode->root_entry) {
+ BAD_ERROR("Pseudo files: File \"%s\" "
+ "already exists in root directory of "
+ "the\nfilesystem being appended to. "
+ "Pseudo definitions can\'t modify it "
+ "or (if directory) add files to it\n",
+ pseudo_ent->name);
+ }
+ }
+
+ if(pseudo_ent->dev == NULL)
+ continue;
+
+ if(dir_ent == NULL)
+ dir_ent = lookup_name(dir, pseudo_ent->name);
+
+ if(pseudo_ent->dev->type == 'm' || pseudo_ent->dev->type == 'M') {
+ struct stat *buf;
+ if(dir_ent == NULL) {
+ ERROR_START("Pseudo modify file \"%s\" does "
+ "not exist in source filesystem.",
+ pseudo_ent->pathname);
+ ERROR_EXIT(" Ignoring.\n");
+ continue;
+ }
+ buf = &dir_ent->inode->buf;
+ buf->st_mode = (buf->st_mode & S_IFMT) |
+ pseudo_ent->dev->buf->mode;
+ buf->st_uid = pseudo_ent->dev->buf->uid;
+ buf->st_gid = pseudo_ent->dev->buf->gid;
+ if(pseudo_ent->dev->type == 'M')
+ buf->st_mtime = pseudo_ent->dev->buf->mtime;
+ continue;
+ }
+
+ if(dir_ent) {
+ ERROR_START("Pseudo file \"%s\" exists in source "
+ "filesystem \"%s\".", pseudo_ent->pathname,
+ pathname(dir_ent));
+ ERROR_EXIT("\nIgnoring, exclude it (-e/-ef) to override.\n");
+ continue;
+ }
+
+ if(pseudo_ent->dev->type != 'l') {
+ memset(&buf, 0, sizeof(buf));
+ buf.st_mode = pseudo_ent->dev->buf->mode;
+ buf.st_uid = pseudo_ent->dev->buf->uid;
+ buf.st_gid = pseudo_ent->dev->buf->gid;
+ buf.st_rdev = makedev(pseudo_ent->dev->buf->major,
+ pseudo_ent->dev->buf->minor);
+ buf.st_mtime = pseudo_ent->dev->buf->mtime;
+ buf.st_ino = pseudo_ent->dev->buf->ino;
+
+ if(pseudo_ent->dev->type == 'r') {
+ buf.st_size = pseudo_ent->dev->data->length;
+ if(pseudo_ent->dev->data->sparse == FALSE)
+ buf.st_blocks = (buf.st_size + 511) >> 9;
+ }
+ }
+
+ if(pseudo_ent->dev->type == 'd') {
+ struct dir_ent *dir_ent =
+ create_dir_entry(pseudo_ent->name, NULL,
+ pseudo_ent->pathname, dir);
+ char *subpath = subpathname(dir_ent);
+ struct dir_info *sub_dir = scan1_opendir("", subpath,
+ dir->depth + 1);
+ dir_scan2(sub_dir, pseudo_ent->pseudo);
+ dir->directory_count ++;
+ add_dir_entry(dir_ent, sub_dir,
+ lookup_inode2(&buf, pseudo_ent->dev));
+ } else if(pseudo_ent->dev->type == 's') {
+ add_dir_entry2(pseudo_ent->name, NULL,
+ pseudo_ent->pathname, NULL,
+ lookup_inode3(&buf, pseudo_ent->dev,
+ pseudo_ent->dev->symlink,
+ strlen(pseudo_ent->dev->symlink) + 1), dir);
+ } else if(pseudo_ent->dev->type == 'l') {
+ add_dir_entry2(pseudo_ent->name, NULL,
+ pseudo_ent->dev->linkname, NULL,
+ lookup_inode(pseudo_ent->dev->linkbuf), dir);
+ } else {
+ add_dir_entry2(pseudo_ent->name, NULL,
+ pseudo_ent->pathname, NULL,
+ lookup_inode2(&buf, pseudo_ent->dev), dir);
+ }
+ }
+
+ /*
+ * Process pseudo xattr definitions
+ */
+ if(pseudo)
+ pseudo->count = 0;
+
+ while((pseudo_ent = pseudo_readdir(pseudo)) != NULL) {
+ struct dir_ent *dir_ent = NULL;
+
+ if(pseudo_ent->xattr == NULL)
+ continue;
+
+ dir_ent = lookup_name(dir, pseudo_ent->name);
+ if(dir_ent == NULL) {
+ ERROR_START("Pseudo xattr file \"%s\" does not "
+ "exist in source filesystem.",
+ pseudo_ent->pathname);
+ ERROR_EXIT(" Ignoring.\n");
+ continue;
+ }
+
+ dir_ent->inode->xattr = pseudo_ent->xattr;
+ }
+}
+
+
+/*
+ * dir_scan3 routines...
+ * This processes the move action
+ */
+static void dir_scan3(struct dir_info *dir)
+{
+ struct dir_ent *dir_ent = NULL;
+
+ while((dir_ent = scan2_readdir(dir, dir_ent)) != NULL) {
+
+ eval_move_actions(root_dir, dir_ent);
+
+ if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR)
+ dir_scan3(dir_ent->dir);
+ }
+}
+
+
+/*
+ * dir_scan4 routines...
+ * This processes the prune action. This action is designed to do
+ * fine-grained tuning of the in-core directory structure after the exclude,
+ * move and pseudo actions have been performed. This allows complex
+ * tests to be performed which are impossible at exclude time (i.e.
+ * tests which rely on the in-core directory structure)
+ */
+void free_dir(struct dir_info *dir)
+{
+ struct dir_ent *dir_ent = dir->list;
+
+ while(dir_ent) {
+ struct dir_ent *tmp = dir_ent;
+
+ if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR)
+ if(dir_ent->dir)
+ free_dir(dir_ent->dir);
+
+ dir_ent = dir_ent->next;
+ free_dir_entry(tmp);
+ }
+
+ free(dir->pathname);
+ free(dir->subpath);
+ free(dir);
+}
+
+
+static void dir_scan4(struct dir_info *dir, int symlink)
+{
+ struct dir_ent *dir_ent = dir->list, *prev = NULL;
+
+ while(dir_ent) {
+ if(dir_ent->inode->root_entry) {
+ prev = dir_ent;
+ dir_ent = dir_ent->next;
+ continue;
+ }
+
+ if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR)
+ dir_scan4(dir_ent->dir, symlink);
+
+ if(symlink != ((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFLNK)) {
+ prev = dir_ent;
+ dir_ent = dir_ent->next;
+ continue;
+ }
+
+ if(eval_prune_actions(root_dir, dir_ent)) {
+ struct dir_ent *tmp = dir_ent;
+
+ if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR) {
+ free_dir(dir_ent->dir);
+ dir->directory_count --;
+ }
+
+ dir->count --;
+
+ /* remove dir_ent from list */
+ dir_ent = dir_ent->next;
+ if(prev)
+ prev->next = dir_ent;
+ else
+ dir->list = dir_ent;
+
+ /* free it */
+ free_dir_entry(tmp);
+
+ add_excluded(dir);
+ continue;
+ }
+
+ prev = dir_ent;
+ dir_ent = dir_ent->next;
+ }
+}
+
+
+/*
+ * dir_scan5 routines...
+ * This processes the empty action. This action has to be processed after
+ * all other actions because the previous exclude and move actions and the
+ * pseudo actions affect whether a directory is empty
+ */
+static void dir_scan5(struct dir_info *dir)
+{
+ struct dir_ent *dir_ent = dir->list, *prev = NULL;
+
+ while(dir_ent) {
+ if(dir_ent->inode->root_entry) {
+ prev = dir_ent;
+ dir_ent = dir_ent->next;
+ continue;
+ }
+
+ if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR) {
+ dir_scan5(dir_ent->dir);
+
+ if(eval_empty_actions(root_dir, dir_ent)) {
+ struct dir_ent *tmp = dir_ent;
+
+ /*
+ * delete sub-directory, this is by definition
+ * empty
+ */
+ free(dir_ent->dir->pathname);
+ free(dir_ent->dir->subpath);
+ free(dir_ent->dir);
+
+ /* remove dir_ent from list */
+ dir_ent = dir_ent->next;
+ if(prev)
+ prev->next = dir_ent;
+ else
+ dir->list = dir_ent;
+
+ /* free it */
+ free_dir_entry(tmp);
+
+ /* update counts */
+ dir->directory_count --;
+ dir->count --;
+ add_excluded(dir);
+ continue;
+ }
+ }
+
+ prev = dir_ent;
+ dir_ent = dir_ent->next;
+ }
+}
+
+
+/*
+ * dir_scan6 routines...
+ * This sorts every directory and computes the inode numbers
+ */
+
+/*
+ * Instantiate a bottom-up linked list merge sort.
+ *
+ * Qsort and other O(n log n) algorithms work well with arrays but not
+ * linked lists. Merge sort, another O(n log n) sort algorithm, is on the
+ * other hand not ideal for arrays (as it needs an additional n storage
+ * locations because sorting is not done in place), but it is ideal for
+ * linked lists because it doesn't require any extra storage.
+ */
+SORT(sort_directory, dir_ent, name, next);
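+/*
+ * As used in dir_scan6() below, this instantiates sort_directory(), which
+ * sorts each directory's linked list of struct dir_ent by name (the next
+ * field links the entries).
+ */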
+
+static void dir_scan6(struct dir_info *dir)
+{
+ struct dir_ent *dir_ent;
+ unsigned int byte_count = 0;
+
+ sort_directory(&(dir->list), dir->count);
+
+ for(dir_ent = dir->list; dir_ent; dir_ent = dir_ent->next) {
+ byte_count += strlen(dir_ent->name) +
+ sizeof(struct squashfs_dir_entry);
+
+ if(dir_ent->inode->root_entry)
+ continue;
+
+ alloc_inode_no(dir_ent->inode, 0);
+
+ if((dir_ent->inode->buf.st_mode & S_IFMT) == S_IFDIR)
+ dir_scan6(dir_ent->dir);
+ }
+
+ if((dir->count < 257 && byte_count < SQUASHFS_METADATA_SIZE))
+ dir->dir_is_ldir = FALSE;
+}
+
+
+/*
+ * dir_scan7 routines...
+ * This generates the filesystem metadata and writes it out to the destination
+ */
+static void scan7_init_dir(struct directory *dir)
+{
+ dir->buff = malloc(SQUASHFS_METADATA_SIZE);
+ if(dir->buff == NULL)
+ MEM_ERROR();
+
+ dir->size = SQUASHFS_METADATA_SIZE;
+ dir->p = dir->index_count_p = dir->buff;
+ dir->entry_count = 256;
+ dir->entry_count_p = NULL;
+ dir->index = NULL;
+ dir->i_count = dir->i_size = 0;
+}
+
+
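+/*
+ * Return the next dir_ent which isn't an original root entry; any root
+ * entries (from the filesystem being appended to) skipped over on the way
+ * are written straight into the on-disk directory with add_dir().
+ */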
+static struct dir_ent *scan7_readdir(struct directory *dir, struct dir_info *dir_info,
+ struct dir_ent *dir_ent)
+{
+ if (dir_ent == NULL)
+ dir_ent = dir_info->list;
+ else
+ dir_ent = dir_ent->next;
+
+ for(; dir_ent && dir_ent->inode->root_entry; dir_ent = dir_ent->next)
+ add_dir(dir_ent->inode->inode, dir_ent->inode->inode_number,
+ dir_ent->name, dir_ent->inode->type, dir);
+
+ return dir_ent;
+}
+
+
+static void scan7_freedir(struct directory *dir)
+{
+ if(dir->index)
+ free(dir->index);
+ free(dir->buff);
+}
+
+
+static void dir_scan7(squashfs_inode *inode, struct dir_info *dir_info)
+{
+ int squashfs_type;
+ int duplicate_file;
+ struct directory dir;
+ struct dir_ent *dir_ent = NULL;
+ struct file_info *file;
+
+ scan7_init_dir(&dir);
+
+ while((dir_ent = scan7_readdir(&dir, dir_info, dir_ent)) != NULL) {
+ struct stat *buf = &dir_ent->inode->buf;
+
+ update_info(dir_ent);
+
+ if(dir_ent->inode->inode == SQUASHFS_INVALID_BLK) {
+ switch(buf->st_mode & S_IFMT) {
+ case S_IFREG:
+ if(dir_ent->inode->tarfile && dir_ent->inode->tar_file->file)
+ file = dir_ent->inode->tar_file->file;
+ else {
+ file = write_file(dir_ent, &duplicate_file);
+ INFO("file %s, uncompressed size %lld "
+ "bytes %s\n",
+ subpathname(dir_ent),
+ (long long) buf->st_size,
+ duplicate_file ? "DUPLICATE" :
+ "");
+ }
+ squashfs_type = SQUASHFS_FILE_TYPE;
+ *inode = create_inode(NULL, dir_ent,
+ squashfs_type, file->file_size,
+ file->start, file->blocks,
+ file->block_list,
+ file->fragment, NULL,
+ file->sparse);
+ if((duplicate_checking == FALSE &&
+ !(tarfile && no_hardlinks)) ||
+ file->file_size == 0) {
+ free_fragment(file->fragment);
+ free(file->block_list);
+ free(file);
+ }
+ break;
+
+ case S_IFDIR:
+ squashfs_type = SQUASHFS_DIR_TYPE;
+ dir_scan7(inode, dir_ent->dir);
+ break;
+
+ case S_IFLNK:
+ squashfs_type = SQUASHFS_SYMLINK_TYPE;
+ *inode = create_inode(NULL, dir_ent,
+ squashfs_type, 0, 0, 0, NULL,
+ NULL, NULL, 0);
+ INFO("symbolic link %s inode 0x%llx\n",
+ subpathname(dir_ent), *inode);
+ sym_count ++;
+ break;
+
+ case S_IFCHR:
+ squashfs_type = SQUASHFS_CHRDEV_TYPE;
+ *inode = create_inode(NULL, dir_ent,
+ squashfs_type, 0, 0, 0, NULL,
+ NULL, NULL, 0);
+ INFO("character device %s inode 0x%llx"
+ "\n", subpathname(dir_ent),
+ *inode);
+ dev_count ++;
+ break;
+
+ case S_IFBLK:
+ squashfs_type = SQUASHFS_BLKDEV_TYPE;
+ *inode = create_inode(NULL, dir_ent,
+ squashfs_type, 0, 0, 0, NULL,
+ NULL, NULL, 0);
+ INFO("block device %s inode 0x%llx\n",
+ subpathname(dir_ent), *inode);
+ dev_count ++;
+ break;
+
+ case S_IFIFO:
+ squashfs_type = SQUASHFS_FIFO_TYPE;
+ *inode = create_inode(NULL, dir_ent,
+ squashfs_type, 0, 0, 0, NULL,
+ NULL, NULL, 0);
+ INFO("fifo %s inode 0x%llx\n",
+ subpathname(dir_ent), *inode);
+ fifo_count ++;
+ break;
+
+ case S_IFSOCK:
+ squashfs_type = SQUASHFS_SOCKET_TYPE;
+ *inode = create_inode(NULL, dir_ent,
+ squashfs_type, 0, 0, 0, NULL,
+ NULL, NULL, 0);
+ INFO("unix domain socket %s inode "
+ "0x%llx\n",
+ subpathname(dir_ent), *inode);
+ sock_count ++;
+ break;
+
+ default:
+ BAD_ERROR("%s unrecognised file type, "
+ "mode is %x\n",
+ subpathname(dir_ent),
+ buf->st_mode);
+ }
+ dir_ent->inode->inode = *inode;
+ dir_ent->inode->type = squashfs_type;
+ } else {
+ *inode = dir_ent->inode->inode;
+ squashfs_type = dir_ent->inode->type;
+ switch(squashfs_type) {
+ case SQUASHFS_FILE_TYPE:
+ if(!sorted)
+ INFO("file %s, uncompressed "
+ "size %lld bytes LINK"
+ "\n",
+ subpathname(dir_ent),
+ (long long)
+ buf->st_size);
+ break;
+ case SQUASHFS_SYMLINK_TYPE:
+ INFO("symbolic link %s inode 0x%llx "
+ "LINK\n", subpathname(dir_ent),
+ *inode);
+ break;
+ case SQUASHFS_CHRDEV_TYPE:
+ INFO("character device %s inode 0x%llx "
+ "LINK\n", subpathname(dir_ent),
+ *inode);
+ break;
+ case SQUASHFS_BLKDEV_TYPE:
+ INFO("block device %s inode 0x%llx "
+ "LINK\n", subpathname(dir_ent),
+ *inode);
+ break;
+ case SQUASHFS_FIFO_TYPE:
+ INFO("fifo %s inode 0x%llx LINK\n",
+ subpathname(dir_ent), *inode);
+ break;
+ case SQUASHFS_SOCKET_TYPE:
+ INFO("unix domain socket %s inode "
+ "0x%llx LINK\n",
+ subpathname(dir_ent), *inode);
+ break;
+ }
+ hardlnk_count ++;
+ }
+
+ add_dir(*inode, get_inode_no(dir_ent->inode), dir_ent->name,
+ squashfs_type, &dir);
+ }
+
+ *inode = write_dir(dir_info, &dir);
+ INFO("directory %s inode 0x%llx\n", subpathname(dir_info->dir_ent),
+ *inode);
+
+ scan7_freedir(&dir);
+}
+
+
+static void handle_root_entries(struct dir_info *dir)
+{
+ int i;
+
+ if(dir->count == 0) {
+ for(i = 0; i < old_root_entries; i++) {
+ if(old_root_entry[i].inode.type == SQUASHFS_DIR_TYPE)
+ dir->directory_count ++;
+ add_dir_entry2(strdup(old_root_entry[i].name), NULL,
+ NULL, NULL, &old_root_entry[i].inode, dir);
+ }
+ }
+}
+
+
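+/*
+ * Peel the next component off source: *name is set to the component, and
+ * *pathname accumulates the prefix walked so far (allocated on the first
+ * call).  The return value points past the component and any trailing '/'s.
+ */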
+static char *walk_source(char *source, char **pathname, char **name)
+{
+ char *path = source, *start;
+
+ while(*source == '/')
+ source ++;
+
+ start = source;
+ while(*source != '/' && *source != '\0')
+ source ++;
+
+ *name = strndup(start, source - start);
+
+ if(*pathname == NULL)
+ *pathname = strndup(path, source - path);
+ else {
+ char *orig = *pathname;
+ int size = strlen(orig) + (source - path) + 2;
+
+ *pathname = malloc(size);
+ strcpy(*pathname, orig);
+ strcat(*pathname, "/");
+ strncat(*pathname, path, source - path);
+ }
+
+ while(*source == '/')
+ source ++;
+
+ return source;
+}
+
+
+static struct dir_info *add_source(struct dir_info *sdir, char *source,
+ char *subpath, char *file, char **prefix,
+ struct pathnames *paths, unsigned int depth)
+{
+ struct dir_info *sub;
+ struct dir_ent *entry;
+ struct pathnames *new = NULL;
+ struct dir_info *dir = sdir;
+ struct stat buf;
+ char *name, *newsubpath = NULL;
+ int res;
+
+ if(max_depth_opt && depth > max_depth)
+ return NULL;
+
+ if(dir == NULL)
+ dir = create_dir("", subpath, depth);
+
+ if(depth == 1)
+ *prefix = source[0] == '/' ? strdup("/") : strdup(".");
+
+ if(appending && file == NULL)
+ handle_root_entries(dir);
+
+ source = walk_source(source, &file, &name);
+
+ while(depth == 1 && (name[0] == '\0' || strcmp(name, "..") == 0
+ || strcmp(name, ".") == 0)){
+ char *old = file;
+
+ if(name[0] == '\0' || source[0] == '\0') {
+ /* Ran out of pathname after skipping leading ".." and ".".
+ * If cpiostyle, just ignore it; find always produces
+ * these if run as "find ." or "find .." etc.
+ *
+ * If tarstyle, after skipping what we *must* skip
+ * in the pathname (we can't store directories named
+ * ".." or "." or simply "/"), there's nothing left after
+ * stripping (i.e. someone just typed ".." or "." on
+ * the command line). This isn't what -tarstyle is
+ * intended for, and Mksquashfs without -tarstyle
+ * can handle this scenario */
+ if(cpiostyle)
+ goto failed_early;
+ else
+ BAD_ERROR("Empty source after stripping '/', "
+ "'..' and '.'. Run Mksquashfs without "
+ "-tarstyle to handle this!\n");
+ }
+
+ source = walk_source(source, &file, &name);
+ if(name[0] == '\0' || strcmp(name, "..") == 0 || strcmp(name, ".") == 0)
+ free(old);
+ else
+ *prefix = old;
+ }
+
+ if((strcmp(name, ".") == 0) || strcmp(name, "..") == 0)
+ BAD_ERROR("Source path can't have '.' or '..' embedded in it with -tarstyle/-cpiostyle[0]\n");
+
+ res = lstat(file, &buf);
+ if (res == -1)
+ BAD_ERROR("Can't stat %s because %s\n", file, strerror(errno));
+
+ entry = lookup_name(dir, name);
+
+ if(entry) {
+ /*
+ * name already there. This must be the same file, otherwise
+ * we have a clash, as we can't have two different files with
+ * the same pathname.
+ *
+ * An original root entry from the file being appended to
+ * is never the same file.
+ */
+ if(entry->inode->root_entry)
+ BAD_ERROR("Source %s conflicts with name in filesystem "
+ "being appended to\n", name);
+
+ res = memcmp(&buf, &(entry->inode->buf), sizeof(buf));
+ if(res)
+ BAD_ERROR("Can't have two different sources with same "
+ "pathname\n");
+
+ /*
+ * Matching file.
+ *
+ * For tarstyle source handling (leaf directories are
+ * recursively descended)
+ *
+ * - If we're at the leaf of the source, then we either match
+ * or encompass this pre-existing include. So delete any
+ * sub-directories of this pre-existing include.
+ *
+ * - If we're not at the leaf of the source, but we're at
+ * the leaf of the pre-existing include, then the
+ * pre-existing include encompasses this source. So nothing
+ * more to do.
+ *
+ * - Otherwise this is not the leaf of the source, or the leaf
+ * of the pre-existing include, so recurse continuing walking
+ * the source.
+ *
+ * For cpiostyle source handling (leaf directories are not
+ * recursively descended)
+ *
+ * - If we're at the leaf of the source, then we have a
+ * pre-existing include. So nothing to do.
+ *
+ * - If we're not at the leaf of the source, but we're at
+ * the leaf of the pre-existing include, then recurse
+ * walking the source.
+ *
+ * - Otherwise this is not the leaf of the source, or the leaf
+ * of the pre-existing include, so recurse continuing walking
+ * the source.
+ */
+ if(source[0] == '\0') {
+ if(tarstyle && entry->dir) {
+ free_dir(entry->dir);
+ entry->dir = NULL;
+ }
+ } else if(S_ISDIR(buf.st_mode)) {
+ if(cpiostyle || entry->dir) {
+ excluded(entry->name, paths, &new);
+ subpath = subpathname(entry);
+ sub = add_source(entry->dir, source, subpath,
+ file, prefix, new, depth + 1);
+ if(sub == NULL)
+ goto failed_match;
+ entry->dir = sub;
+ sub->dir_ent = entry;
+ }
+ } else
+ BAD_ERROR("Source component %s is not a directory\n", name);
+
+ free(name);
+ free(file);
+ } else {
+ /*
+ * No matching name found.
+ *
+ * - If we're at the leaf of the source, then add it.
+ *
+ * - If we're not at the leaf of the source, we will add it,
+ * and recurse walking the source
+ */
+ if(old_exclude && old_excluded(file, &buf))
+ goto failed_early;
+
+ if(old_exclude == FALSE && excluded(name, paths, &new))
+ goto failed_early;
+
+ entry = create_dir_entry(name, NULL, file, dir);
+
+ if(exclude_actions()) {
+ newsubpath = subpathname(entry);
+ if(eval_exclude_actions(name, file, newsubpath, &buf,
+ depth, entry)) {
+ goto failed_entry;
+ }
+ }
+
+ if(source[0] == '\0' && S_ISLNK(buf.st_mode)) {
+ int byte;
+ static char buff[65536]; /* overflow safe */
+ struct inode_info *i;
+
+ byte = readlink(file, buff, 65536);
+ if(byte == -1)
+ BAD_ERROR("Failed to read source symlink %s", file);
+ else if(byte == 65536)
+ BAD_ERROR("Symlink %s is greater than 65536 "
+ "bytes!", file);
+
+ /* readlink doesn't 0 terminate the returned path */
+ buff[byte] = '\0';
+ i = lookup_inode3(&buf, NULL, buff, byte + 1);
+ add_dir_entry(entry, NULL, i);
+ } else if(source[0] == '\0') {
+ add_dir_entry(entry, NULL, lookup_inode(&buf));
+ if(S_ISDIR(buf.st_mode))
+ dir->directory_count ++;
+ } else if(S_ISDIR(buf.st_mode)) {
+ if(newsubpath == NULL)
+ newsubpath = subpathname(entry);
+ sub = add_source(NULL, source, newsubpath, file, prefix,
+ new, depth + 1);
+ if(sub == NULL)
+ goto failed_entry;
+ add_dir_entry(entry, sub, lookup_inode(&buf));
+ dir->directory_count ++;
+ } else
+ BAD_ERROR("Source component %s is not a directory\n", name);
+ }
+
+ free(new);
+ return dir;
+
+failed_early:
+ free(new);
+ free(name);
+ free(file);
+ if(sdir == NULL)
+ free_dir(dir);
+ return NULL;
+
+failed_entry:
+ free(new);
+ free_dir_entry(entry);
+ if(sdir == NULL)
+ free_dir(dir);
+ return NULL;
+
+failed_match:
+ free(new);
+ free(name);
+ free(file);
+ return NULL;
+}
+
+
+static struct dir_info *populate_tree(struct dir_info *dir, struct pathnames *paths)
+{
+ struct dir_ent *entry;
+ struct dir_info *new;
+
+ for(entry = dir->list; entry; entry = entry->next)
+ if(S_ISDIR(entry->inode->buf.st_mode) && !entry->inode->root_entry) {
+ struct pathnames *newp = NULL;
+
+ excluded(entry->name, paths, &newp);
+
+ if(entry->dir == NULL && cpiostyle) {
+ entry->dir = create_dir(pathname(entry),
+ subpathname(entry), dir->depth + 1);
+ entry->dir->dir_ent = entry;
+ } else if(entry->dir == NULL) {
+ cur_dev = entry->inode->buf.st_dev;
+ new = dir_scan1(pathname(entry),
+ subpathname(entry), newp, scan1_readdir,
+ dir->depth + 1);
+ if(new == NULL)
+ return NULL;
+
+ entry->dir = new;
+ new->dir_ent = entry;
+ } else {
+ new = populate_tree(entry->dir, newp);
+ if(new == NULL)
+ return NULL;
+ }
+
+ free(newp);
+ }
+
+ return dir;
+}
+
+
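+/*
+ * Read the next terminator-delimited filename from stdin (used with
+ * -cpiostyle/-cpiostyle0).  Input is buffered in 4096 byte chunks and the
+ * returned filename buffer is grown as needed, up to the system path limit.
+ */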
+static char *get_filename_from_stdin(char terminator)
+{
+ static int path_max = -1;
+ static int bytes = 0;
+ static int size = 0;
+ static char *buffer = NULL;
+ static char *filename = NULL;
+ static char *src = NULL;
+ char *dest = filename;
+ int used = 0;
+
+ /* Get the maximum pathname size supported on this system */
+ if(path_max == -1) {
+#ifdef PATH_MAX
+ path_max = PATH_MAX;
+#else
+ path_max = pathconf(".", _PC_PATH_MAX);
+ if(path_max <= 0)
+ path_max = 4096;
+#endif
+ /* limit to no more than 64K */
+ if(path_max > 65536)
+ path_max = 65536;
+ }
+
+ if(buffer == NULL) {
+ buffer = malloc(4096);
+ if(buffer == NULL)
+ MEM_ERROR();
+ }
+
+ while(1) {
+ if(bytes == 0) {
+ bytes = read_bytes(STDIN_FILENO, buffer, 4096);
+
+ if(bytes == -1)
+ BAD_ERROR("Failed to read Tar file from STDIN\n");
+
+ if(bytes == 0) {
+ if(used)
+ ERROR("Got EOF when reading filename from STDIN, ignoring\n");
+ free(filename);
+ free(buffer);
+ return NULL;
+ }
+ src = buffer;
+ }
+
+ if(size - used <= 1) {
+ int offset = dest - filename;
+ char *buff = realloc(filename, size += 100);
+ if(buff == NULL)
+ MEM_ERROR();
+ dest = buff + offset;
+ filename = buff;
+ }
+
+ if(*src == terminator) {
+ src++;
+ bytes--;
+ break;
+ }
+
+ if(used >= (path_max - 1))
+ BAD_ERROR("Cpiostyle input filename exceeds maximum "
+ "path limit of %d bytes!\n", path_max);
+
+ *dest++ = *src++;
+ bytes --;
+ used ++;
+ }
+
+ *dest = '\0';
+ return filename;
+}
+
+
+static char *get_next_filename()
+{
+ static int cur = 0;
+ char *filename;
+
+ if(cpiostyle) {
+ while(1) {
+ filename = get_filename_from_stdin(filename_terminator);
+ if(filename == NULL || strlen(filename) != 0)
+ break;
+ }
+ return filename;
+ } else if(cur < source)
+ return source_path[cur ++];
+ else
+ return NULL;
+}
+
+
+static squashfs_inode process_source(int progress)
+{
+ int i, res, first = TRUE, same = FALSE;
+ char *filename, *prefix, *pathname;
+ struct stat buf, buf2;
+ struct dir_ent *entry;
+ struct dir_info *new;
+
+ for(i = 0; (filename = get_next_filename()); i++) {
+ new = add_source(root_dir, filename, "", NULL, &prefix, paths, 1);
+
+ if(new) {
+ /* does argv[i] start from the same directory? */
+ if(first) {
+ res = lstat(prefix, &buf);
+ if (res == -1)
+ BAD_ERROR("Can't stat %s because %s\n",
+ prefix, strerror(errno));
+ first = FALSE;
+ same = TRUE;
+ pathname = strdup(prefix);
+ } else if(same) {
+ res = lstat(prefix, &buf2);
+ if (res == -1)
+ BAD_ERROR("Can't stat %s because %s\n",
+ prefix, strerror(errno));
+
+ if(buf.st_dev != buf2.st_dev ||
+ buf.st_ino != buf2.st_ino)
+ same = FALSE;
+ }
+ free(prefix);
+ root_dir = new;
+ }
+ }
+
+ if(root_dir == NULL) {
+ /* Empty directory tree after processing the sources, and
+ * so everything was excluded.
+ * We need to create an empty directory to reflect this, and
+ * if appending, fill it with the original root directory
+ * contents */
+ root_dir = scan1_opendir("", "", 0);
+
+ if(appending)
+ handle_root_entries(root_dir);
+ }
+
+ new = scan1_opendir("", "", 0);
+
+ if(!same) {
+ /*
+ * Top level directory conflict. Create dummy
+ * top level directory
+ */
+ memset(&buf, 0, sizeof(buf));
+ buf.st_mode = (root_mode_opt) ? root_mode | S_IFDIR :
+ S_IRWXU | S_IRWXG | S_IRWXO | S_IFDIR;
+ if(root_uid_opt)
+ buf.st_uid = root_uid;
+ else
+ buf.st_uid = getuid();
+ if(root_gid_opt)
+ buf.st_gid = root_gid;
+ else
+ buf.st_gid = getgid();
+ if(root_time_opt)
+ buf.st_mtime = root_time;
+ else
+ buf.st_mtime = time(NULL);
+ if(pseudo_override && global_uid_opt)
+ buf.st_uid = global_uid;
+ if(pseudo_override && global_gid_opt)
+ buf.st_gid = global_gid;
+
+ entry = create_dir_entry("", NULL, "", new);
+ entry->inode = lookup_inode(&buf);
+ entry->inode->dummy_root_dir = TRUE;
+ } else {
+ if(root_mode_opt)
+ buf.st_mode = root_mode | S_IFDIR;
+ if(root_uid_opt)
+ buf.st_uid = root_uid;
+ if(root_gid_opt)
+ buf.st_gid = root_gid;
+ if(root_time_opt)
+ buf.st_mtime = root_time;
+ if(pseudo_override && global_uid_opt)
+ buf.st_uid = global_uid;
+ if(pseudo_override && global_gid_opt)
+ buf.st_gid = global_gid;
+
+ entry = create_dir_entry("", NULL, pathname, new);
+ entry->inode = lookup_inode(&buf);
+ }
+
+
+ entry->dir = root_dir;
+ root_dir->dir_ent = entry;
+
+ root_dir = populate_tree(root_dir, paths);
+ if(root_dir == NULL)
+ BAD_ERROR("Failed to read directory hierarchy\n");
+
+ return do_directory_scans(entry, progress);
+}
+
+
+/*
+ * Source directory specified as "-", which means no source directories.
+ *
+ * Here the pseudo definitions will provide the source directory.
+ */
+static squashfs_inode no_sources(int progress)
+{
+ struct stat buf;
+ struct dir_ent *dir_ent;
+ struct pseudo_entry *pseudo_ent;
+ struct pseudo *pseudo = get_pseudo();
+
+ if(pseudo == NULL || pseudo->names != 1 || strcmp(pseudo->name[0].name, "/") != 0) {
+ ERROR_START("Source is \"-\", but no pseudo definition for \"/\"\n");
+ ERROR_EXIT("Did you forget to specify -cpiostyle or -tar?\n");
+ EXIT_MKSQUASHFS();
+ }
+
+ pseudo_ent = &pseudo->name[0];
+
+ /* create root directory */
+ root_dir = scan1_opendir("", "", 1);
+
+ if(appending)
+ handle_root_entries(root_dir);
+
+ /* Create root directory dir_ent and associated inode, and connect
+ * it to the root directory dir_info structure */
+ dir_ent = create_dir_entry("", NULL, "", scan1_opendir("", "", 0));
+
+ memset(&buf, 0, sizeof(buf));
+
+ if(root_mode_opt)
+ buf.st_mode = root_mode | S_IFDIR;
+ else
+ buf.st_mode = pseudo_ent->dev->buf->mode;
+
+ if(root_uid_opt)
+ buf.st_uid = root_uid;
+ else
+ buf.st_uid = pseudo_ent->dev->buf->uid;
+
+ if(root_gid_opt)
+ buf.st_gid = root_gid;
+ else
+ buf.st_gid = pseudo_ent->dev->buf->gid;
+
+ if(root_time_opt)
+ buf.st_mtime = root_time;
+ else
+ buf.st_mtime = pseudo_ent->dev->buf->mtime;
+
+ buf.st_ino = pseudo_ent->dev->buf->ino;
+
+ dir_ent->inode = lookup_inode2(&buf, pseudo_ent->dev);
+ dir_ent->dir = root_dir;
+ root_dir->dir_ent = dir_ent;
+
+ return do_directory_scans(dir_ent, progress);
+}
+
+
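+/*
+ * Return log2 of the block size if it is a power of two between 4K (2^12)
+ * and 1M (2^20), otherwise return 0 to indicate an invalid block size.
+ */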
+static unsigned int slog(unsigned int block)
+{
+ int i;
+
+ for(i = 12; i <= 20; i++)
+ if(block == (1 << i))
+ return i;
+ return 0;
+}
+
+
+static int old_excluded(char *filename, struct stat *buf)
+{
+ int i;
+
+ for(i = 0; i < exclude; i++)
+ if((exclude_paths[i].st_dev == buf->st_dev) &&
+ (exclude_paths[i].st_ino == buf->st_ino))
+ return TRUE;
+ return FALSE;
+}
+
+
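+/*
+ * Record the device/inode pair of an excluded dir/file, growing the
+ * exclude_paths array in EXCLUDE_SIZE sized chunks.
+ */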
+#define ADD_ENTRY(buf) \
+ if(exclude % EXCLUDE_SIZE == 0) { \
+ exclude_paths = realloc(exclude_paths, (exclude + EXCLUDE_SIZE) \
+ * sizeof(struct exclude_info)); \
+ if(exclude_paths == NULL) \
+ MEM_ERROR(); \
+ } \
+ exclude_paths[exclude].st_dev = buf.st_dev; \
+ exclude_paths[exclude++].st_ino = buf.st_ino;
+static int old_add_exclude(char *path)
+{
+ int i;
+ char *filename;
+ struct stat buf;
+
+ if(path[0] == '/' || strncmp(path, "./", 2) == 0 ||
+ strncmp(path, "../", 3) == 0) {
+ if(lstat(path, &buf) == -1) {
+ ERROR_START("Cannot stat exclude dir/file %s because "
+ "%s", path, strerror(errno));
+ ERROR_EXIT(", ignoring\n");
+ return TRUE;
+ }
+ ADD_ENTRY(buf);
+ return TRUE;
+ }
+
+ for(i = 0; i < source; i++) {
+ int res = asprintf(&filename, "%s/%s", source_path[i], path);
+ if(res == -1)
+ BAD_ERROR("asprintf failed in old_add_exclude\n");
+ if(lstat(filename, &buf) == -1) {
+ if(!(errno == ENOENT || errno == ENOTDIR)) {
+ ERROR_START("Cannot stat exclude dir/file %s "
+ "because %s", filename, strerror(errno));
+ ERROR_EXIT(", ignoring\n");
+ }
+ free(filename);
+ continue;
+ }
+ free(filename);
+ ADD_ENTRY(buf);
+ }
+ return TRUE;
+}
+
+
+static void add_old_root_entry(char *name, squashfs_inode inode,
+ unsigned int inode_number, int type)
+{
+ old_root_entry = realloc(old_root_entry,
+ sizeof(struct old_root_entry_info) * (old_root_entries + 1));
+ if(old_root_entry == NULL)
+ MEM_ERROR();
+
+ old_root_entry[old_root_entries].name = strdup(name);
+ old_root_entry[old_root_entries].inode.inode = inode;
+ old_root_entry[old_root_entries].inode.inode_number = inode_number;
+ old_root_entry[old_root_entries].inode.type = type;
+ old_root_entry[old_root_entries++].inode.root_entry = TRUE;
+}
+
+
+static void initialise_threads(int readq, int fragq, int bwriteq, int fwriteq,
+ int freelst, char *destination_file)
+{
+ int i;
+ sigset_t sigmask, old_mask;
+ int total_mem = readq;
+ int reader_size;
+ int fragment_size;
+ int fwriter_size;
+ /*
+ * bwriter_size is global because it is needed in
+ * write_file_blocks_dup()
+ */
+
+ /*
+ * Never allow the total size of the queues to be larger than
+ * physical memory
+ *
+ * When adding together the possibly user-supplied values, make
+ * sure they've not been deliberately contrived to overflow an int
+ */
+ if(add_overflow(total_mem, fragq))
+ BAD_ERROR("Queue sizes rediculously too large\n");
+ total_mem += fragq;
+ if(add_overflow(total_mem, bwriteq))
+ BAD_ERROR("Queue sizes rediculously too large\n");
+ total_mem += bwriteq;
+ if(add_overflow(total_mem, fwriteq))
+ BAD_ERROR("Queue sizes rediculously too large\n");
+ total_mem += fwriteq;
+
+ check_usable_phys_mem(total_mem);
+
+ /*
+ * convert from queue size in Mbytes to queue size in
+ * blocks.
+ *
+ * This isn't going to overflow an int unless there exist
+ * systems with more than 8 Petabytes of RAM!
+ */
+ reader_size = readq << (20 - block_log);
+ fragment_size = fragq << (20 - block_log);
+ bwriter_size = bwriteq << (20 - block_log);
+ fwriter_size = fwriteq << (20 - block_log);
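+
+ /*
+ * For example, with a 128 Kbyte block size (block_log of 17), a
+ * 64 Mbyte read queue becomes 64 << (20 - 17) = 512 blocks.
+ */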
+
+ /*
+ * Set up signal handlers for the main thread; these clean up by
+ * deleting the destination file. If appending, the
+ * handlers for SIGTERM and SIGINT will be replaced with handlers
+ * allowing the user to press ^C twice to restore the existing
+ * filesystem.
+ *
+ * SIGUSR1 is an internal signal, used by the sub-threads
+ * to tell the main thread to terminate, deleting the destination file,
+ * or if necessary restoring the filesystem when appending.
+ */
+ signal(SIGTERM, sighandler);
+ signal(SIGINT, sighandler);
+ signal(SIGUSR1, sighandler);
+
+ /* block SIGQUIT and SIGHUP, these are handled by the info thread */
+ sigemptyset(&sigmask);
+ sigaddset(&sigmask, SIGQUIT);
+ sigaddset(&sigmask, SIGHUP);
+ if(pthread_sigmask(SIG_BLOCK, &sigmask, NULL) != 0)
+ BAD_ERROR("Failed to set signal mask in intialise_threads\n");
+
+ /*
+ * temporarily block these signals, so the created sub-threads
+ * will ignore them, ensuring the main thread handles them
+ */
+ sigemptyset(&sigmask);
+ sigaddset(&sigmask, SIGINT);
+ sigaddset(&sigmask, SIGTERM);
+ sigaddset(&sigmask, SIGUSR1);
+ if(pthread_sigmask(SIG_BLOCK, &sigmask, &old_mask) != 0)
+ BAD_ERROR("Failed to set signal mask in intialise_threads\n");
+
+ if(processors == -1) {
+#ifdef __linux__
+ cpu_set_t cpu_set;
+ CPU_ZERO(&cpu_set);
+
+ if(sched_getaffinity(0, sizeof cpu_set, &cpu_set) == -1)
+ processors = sysconf(_SC_NPROCESSORS_ONLN);
+ else
+ processors = CPU_COUNT(&cpu_set);
+#else
+ int mib[2];
+ size_t len = sizeof(processors);
+
+ mib[0] = CTL_HW;
+#ifdef HW_AVAILCPU
+ mib[1] = HW_AVAILCPU;
+#else
+ mib[1] = HW_NCPU;
+#endif
+
+ if(sysctl(mib, 2, &processors, &len, NULL, 0) == -1) {
+ ERROR_START("Failed to get number of available "
+ "processors.");
+ ERROR_EXIT(" Defaulting to 1\n");
+ processors = 1;
+ }
+#endif
+ }
+
+ if(multiply_overflow(processors, 3) ||
+ multiply_overflow(processors * 3, sizeof(pthread_t)))
+ BAD_ERROR("Processors too large\n");
+
+ deflator_thread = malloc(processors * 3 * sizeof(pthread_t));
+ if(deflator_thread == NULL)
+ MEM_ERROR();
+
+ frag_deflator_thread = &deflator_thread[processors];
+ frag_thread = &frag_deflator_thread[processors];
+
+ to_reader = queue_init(1);
+ to_deflate = queue_init(reader_size);
+ to_process_frag = queue_init(reader_size);
+ to_writer = queue_init(bwriter_size + fwriter_size);
+ from_writer = queue_init(1);
+ to_frag = queue_init(fragment_size);
+ to_main = seq_queue_init();
+ if(reproducible)
+ to_order = seq_queue_init();
+ else
+ locked_fragment = queue_init(fragment_size);
+ reader_buffer = cache_init(block_size, reader_size, 0, 0);
+ bwriter_buffer = cache_init(block_size, bwriter_size, 1, freelst);
+ fwriter_buffer = cache_init(block_size, fwriter_size, 1, freelst);
+ fragment_buffer = cache_init(block_size, fragment_size, 1, 0);
+ reserve_cache = cache_init(block_size, processors + 1, 1, 0);
+ pthread_create(&reader_thread, NULL, reader, NULL);
+ pthread_create(&writer_thread, NULL, writer, NULL);
+ init_progress_bar();
+ init_info();
+
+ for(i = 0; i < processors; i++) {
+ if(pthread_create(&deflator_thread[i], NULL, deflator, NULL))
+ BAD_ERROR("Failed to create thread\n");
+ if(pthread_create(&frag_deflator_thread[i], NULL, reproducible ?
+ frag_order_deflator : frag_deflator, NULL) != 0)
+ BAD_ERROR("Failed to create thread\n");
+ if(pthread_create(&frag_thread[i], NULL, frag_thrd,
+ (void *) destination_file) != 0)
+ BAD_ERROR("Failed to create thread\n");
+ }
+
+ main_thread = pthread_self();
+
+ if(reproducible)
+ pthread_create(&order_thread, NULL, frag_orderer, NULL);
+
+ if(!quiet)
+ printf("Parallel mksquashfs: Using %d processor%s\n", processors,
+ processors == 1 ? "" : "s");
+
+ /* Restore the signal mask for the main thread */
+ if(pthread_sigmask(SIG_SETMASK, &old_mask, NULL) != 0)
+ BAD_ERROR("Failed to set signal mask in intialise_threads\n");
+}
+
+
+static long long write_inode_lookup_table()
+{
+ int i, lookup_bytes = SQUASHFS_LOOKUP_BYTES(inode_count);
+ unsigned int inode_number;
+ void *it;
+
+ if(inode_count == sinode_count)
+ goto skip_inode_hash_table;
+
+ it = realloc(inode_lookup_table, lookup_bytes);
+ if(it == NULL)
+ MEM_ERROR();
+ inode_lookup_table = it;
+
+ for(i = 0; i < INODE_HASH_SIZE; i ++) {
+ struct inode_info *inode;
+
+ for(inode = inode_info[i]; inode; inode = inode->next) {
+
+ inode_number = get_inode_no(inode);
+
+ /* The empty action will produce orphaned inode
+ * entries in the inode_info[] table. These
+ * entries, because they are orphaned, will not have been
+ * allocated an inode number in dir_scan6(), so
+ * skip any entries with the default dummy inode
+ * number of 0 */
+ if(inode_number == 0)
+ continue;
+
+ SQUASHFS_SWAP_LONG_LONGS(&inode->inode,
+ &inode_lookup_table[inode_number - 1], 1);
+
+ }
+ }
+
+skip_inode_hash_table:
+ return generic_write_table(lookup_bytes, inode_lookup_table, 0, NULL,
+ noI);
+}
+
+
+static char *get_component(char *target, char **targname)
+{
+ char *start;
+
+ while(*target == '/')
+ target ++;
+
+ start = target;
+ while(*target != '/' && *target != '\0')
+ target ++;
+
+ *targname = strndup(start, target - start);
+
+ while(*target == '/')
+ target ++;
+
+ return target;
+}
+
+
+static void free_path(struct pathname *paths)
+{
+ int i;
+
+ for(i = 0; i < paths->names; i++) {
+ if(paths->name[i].paths)
+ free_path(paths->name[i].paths);
+ free(paths->name[i].name);
+ if(paths->name[i].preg) {
+ regfree(paths->name[i].preg);
+ free(paths->name[i].preg);
+ }
+ }
+
+ free(paths);
+}
+
+
+static struct pathname *add_path(struct pathname *paths, char *target, char *alltarget)
+{
+ char *targname;
+ int i, error;
+
+ target = get_component(target, &targname);
+
+ if(paths == NULL) {
+ paths = malloc(sizeof(struct pathname));
+ if(paths == NULL)
+ MEM_ERROR();
+
+ paths->names = 0;
+ paths->name = NULL;
+ }
+
+ for(i = 0; i < paths->names; i++)
+ if(strcmp(paths->name[i].name, targname) == 0)
+ break;
+
+ if(i == paths->names) {
+ /* allocate new name entry */
+ paths->names ++;
+ paths->name = realloc(paths->name, (i + 1) *
+ sizeof(struct path_entry));
+ if(paths->name == NULL)
+ MEM_ERROR();
+ paths->name[i].name = targname;
+ paths->name[i].paths = NULL;
+ if(use_regex) {
+ paths->name[i].preg = malloc(sizeof(regex_t));
+ if(paths->name[i].preg == NULL)
+ MEM_ERROR();
+ error = regcomp(paths->name[i].preg, targname,
+ REG_EXTENDED|REG_NOSUB);
+ if(error) {
+ char str[1024]; /* overflow safe */
+
+ regerror(error, paths->name[i].preg, str, 1024);
+ BAD_ERROR("invalid regex %s in export %s, "
+ "because %s\n", targname, alltarget,
+ str);
+ }
+ } else
+ paths->name[i].preg = NULL;
+
+ if(target[0] == '\0')
+ /* at leaf pathname component */
+ paths->name[i].paths = NULL;
+ else
+ /* recurse adding child components */
+ paths->name[i].paths = add_path(NULL, target,
+ alltarget);
+ } else {
+ /* existing matching entry */
+ free(targname);
+
+ if(paths->name[i].paths == NULL) {
+ /* No sub-directory, which means this is the leaf
+ * component of a pre-existing exclude that subsumes
+ * the exclude currently being added, so stop
+ * adding components */
+ } else if(target[0] == '\0') {
+ /* at leaf pathname component and child components exist
+ * from more specific excludes, delete as they're
+ * subsumed by this exclude */
+ free_path(paths->name[i].paths);
+ paths->name[i].paths = NULL;
+ } else
+ /* recurse adding child components */
+ add_path(paths->name[i].paths, target, alltarget);
+ }
+
+ return paths;
+}
+
+
+static void add_exclude(char *target)
+{
+
+ if(target[0] == '/' || strncmp(target, "./", 2) == 0 ||
+ strncmp(target, "../", 3) == 0)
+ BAD_ERROR("/, ./ and ../ prefixed excludes not supported with "
+ "-wildcards or -regex options\n");
+ else if(strncmp(target, "... ", 4) == 0)
+ stickypath = add_path(stickypath, target + 4, target + 4);
+ else
+ path = add_path(path, target, target);
+}
+
+
+static struct pathnames *add_subdir(struct pathnames *paths, struct pathname *path)
+{
+ int count = paths == NULL ? 0 : paths->count;
+
+ if(count % PATHS_ALLOC_SIZE == 0) {
+ paths = realloc(paths, sizeof(struct pathnames) +
+ (count + PATHS_ALLOC_SIZE) * sizeof(struct pathname *));
+ if(paths == NULL)
+ MEM_ERROR();
+ }
+
+ paths->path[count] = path;
+ paths->count = count + 1;
+ return paths;
+}
+
+
+static int excluded_match(char *name, struct pathname *path, struct pathnames **new)
+{
+ int i;
+
+ for(i = 0; i < path->names; i++) {
+ int match = use_regex ?
+ regexec(path->name[i].preg, name, (size_t) 0,
+ NULL, 0) == 0 :
+ fnmatch(path->name[i].name, name,
+ FNM_PATHNAME|FNM_PERIOD|FNM_EXTMATCH) == 0;
+
+ if(match) {
+ if(path->name[i].paths == NULL) {
+ /* match on a leaf component, any subdirectories
+ * in the filesystem should be excluded */
+ free(*new);
+ *new = NULL;
+ return TRUE;
+ } else
+ /* match on a non-leaf component, add any
+ * subdirectories to the new set of
+ * subdirectories to scan for this name */
+ *new = add_subdir(*new, path->name[i].paths);
+ }
+ }
+
+ return FALSE;
+}
+
+
+int excluded(char *name, struct pathnames *paths, struct pathnames **new)
+{
+ int n;
+
+ if(stickypath && excluded_match(name, stickypath, new))
+ return TRUE;
+
+ for(n = 0; paths && n < paths->count; n++) {
+ int res = excluded_match(name, paths->path[n], new);
+ if(res)
+ return TRUE;
+ }
+
+ /*
+ * Either:
+ * - no matching names found, return empty new search set, or
+ * - one or more matches with sub-directories found (no leaf matches),
+ * in which case return new search set.
+ *
+ * In either case return FALSE as we don't want to exclude this entry
+ */
+ return FALSE;
+}
+
+
+static void process_exclude_file(char *argv)
+{
+ FILE *fd;
+ char buffer[MAX_LINE + 1]; /* overflow safe */
+ char *filename;
+
+ fd = fopen(argv, "r");
+ if(fd == NULL)
+ BAD_ERROR("Failed to open exclude file \"%s\" because %s\n",
+ argv, strerror(errno));
+
+ while(fgets(filename = buffer, MAX_LINE + 1, fd) != NULL) {
+ int len = strlen(filename);
+
+ if(len == MAX_LINE && filename[len - 1] != '\n')
+ /* line too large */
+ BAD_ERROR("Line too long when reading "
+ "exclude file \"%s\", larger than %d "
+ "bytes\n", argv, MAX_LINE);
+
+ /*
+ * Remove '\n' terminator if it exists (the last line
+ * in the file may not be '\n' terminated)
+ */
+ if(len && filename[len - 1] == '\n')
+ filename[len - 1] = '\0';
+
+ /* Skip any leading whitespace */
+ while(isspace(*filename))
+ filename ++;
+
+ /* if comment line, skip */
+ if(*filename == '#')
+ continue;
+
+ /*
+ * check for initial backslash, to accommodate
+ * filenames with leading space or leading # character
+ */
+ if(*filename == '\\')
+ filename ++;
+
+ /* if line is now empty after skipping characters, skip it */
+ if(*filename == '\0')
+ continue;
+
+ if(old_exclude)
+ old_add_exclude(filename);
+ else
+ add_exclude(filename);
+ }
+
+ if(ferror(fd))
+ BAD_ERROR("Reading exclude file \"%s\" failed because %s\n",
+ argv, strerror(errno));
+
+ fclose(fd);
+}
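+/*
+ * Sketch of an exclude file as parsed above: one exclude per line,
+ * '#' starts a comment line, and a leading backslash protects names
+ * beginning with '#' or whitespace, e.g. (illustrative names):
+ *
+ *     # exclude all object files anywhere (with -wildcards)
+ *     ... *.o
+ *     usr/share/doc
+ *     \#notes.txt
+ */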
+
+
+#define RECOVER_ID "Squashfs recovery file v1.0\n"
+#define RECOVER_ID_SIZE 28
+
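+/*
+ * Summary of the recovery file layout written/read below: the
+ * RECOVER_ID magic string (RECOVER_ID_SIZE bytes), followed by the
+ * original superblock of the filesystem being appended to, followed
+ * by its metadata copied from inode_table_start up to bytes_used.
+ */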
+static void write_recovery_data(struct squashfs_super_block *sBlk)
+{
+ int recoverfd;
+ long long res, bytes = sBlk->bytes_used - sBlk->inode_table_start;
+ pid_t pid = getpid();
+ char *metadata;
+ char header[] = RECOVER_ID;
+
+ if(recover == FALSE) {
+ if(!quiet) {
+ printf("No recovery data option specified.\n");
+ printf("Skipping saving recovery file.\n\n");
+ }
+
+ return;
+ }
+
+ if(recovery_pathname == NULL) {
+ recovery_pathname = getenv("HOME");
+ if(recovery_pathname == NULL)
+ BAD_ERROR("Could not read $HOME, use -recovery-path or -no-recovery options\n");
+ }
+
+ res = asprintf(&recovery_file, "%s/squashfs_recovery_%s_%d", recovery_pathname,
+ getbase(destination_file), pid);
+ if(res == -1)
+ MEM_ERROR();
+
+ metadata = malloc(bytes);
+ if(metadata == NULL)
+ MEM_ERROR();
+
+ res = read_fs_bytes(fd, sBlk->inode_table_start, bytes, metadata);
+ if(res == 0) {
+ ERROR("Failed to read append filesystem metadata\n");
+ BAD_ERROR("Filesystem corrupted?\n");
+ }
+
+ recoverfd = open(recovery_file, O_CREAT | O_TRUNC | O_RDWR, S_IRWXU);
+ if(recoverfd == -1)
+ BAD_ERROR("Failed to create recovery file, because %s. "
+ "Aborting\n", strerror(errno));
+
+ if(write_bytes(recoverfd, header, RECOVER_ID_SIZE) == -1)
+ BAD_ERROR("Failed to write recovery file, because %s\n",
+ strerror(errno));
+
+ if(write_bytes(recoverfd, sBlk, sizeof(struct squashfs_super_block)) == -1)
+ BAD_ERROR("Failed to write recovery file, because %s\n",
+ strerror(errno));
+
+ if(write_bytes(recoverfd, metadata, bytes) == -1)
+ BAD_ERROR("Failed to write recovery file, because %s\n",
+ strerror(errno));
+
+ res = close(recoverfd);
+
+ if(res == -1)
+ BAD_ERROR("Failed to close recovery file, close returned %s\n",
+ strerror(errno));
+
+ free(metadata);
+
+ printf("Recovery file \"%s\" written\n", recovery_file);
+ printf("If Mksquashfs aborts abnormally (i.e. power failure), run\n");
+ printf("mksquashfs - %s -recover %s\n", destination_file,
+ recovery_file);
+ printf("to restore filesystem\n\n");
+}
+
+
+static void read_recovery_data(char *recovery_file, char *destination_file)
+{
+ int fd, recoverfd;
+ struct squashfs_super_block orig_sBlk, sBlk;
+ char *metadata;
+ long long res, bytes;
+ struct stat buf;
+ char header[] = RECOVER_ID;
+ char header2[RECOVER_ID_SIZE];
+
+ recoverfd = open(recovery_file, O_RDONLY);
+ if(recoverfd == -1)
+ BAD_ERROR("Failed to open recovery file because %s\n",
+ strerror(errno));
+
+ if(stat(destination_file, &buf) == -1)
+ BAD_ERROR("Failed to stat destination file, because %s\n",
+ strerror(errno));
+
+ fd = open(destination_file, O_RDWR);
+ if(fd == -1)
+ BAD_ERROR("Failed to open destination file because %s\n",
+ strerror(errno));
+
+ res = read_bytes(recoverfd, header2, RECOVER_ID_SIZE);
+ if(res == -1)
+ BAD_ERROR("Failed to read recovery file, because %s\n",
+ strerror(errno));
+ if(res < RECOVER_ID_SIZE)
+ BAD_ERROR("Recovery file appears to be truncated\n");
+ if(strncmp(header, header2, RECOVER_ID_SIZE) != 0)
+ BAD_ERROR("Not a recovery file\n");
+
+ res = read_bytes(recoverfd, &sBlk, sizeof(struct squashfs_super_block));
+ if(res == -1)
+ BAD_ERROR("Failed to read recovery file, because %s\n",
+ strerror(errno));
+ if(res < sizeof(struct squashfs_super_block))
+ BAD_ERROR("Recovery file appears to be truncated\n");
+
+ res = read_fs_bytes(fd, 0, sizeof(struct squashfs_super_block), &orig_sBlk);
+ if(res == 0) {
+ ERROR("Failed to read superblock from output filesystem\n");
+ BAD_ERROR("Output filesystem is empty!\n");
+ }
+
+ if(memcmp(((char *) &sBlk) + 4, ((char *) &orig_sBlk) + 4,
+ sizeof(struct squashfs_super_block) - 4) != 0)
+ BAD_ERROR("Recovery file and destination file do not seem to "
+ "match\n");
+
+ bytes = sBlk.bytes_used - sBlk.inode_table_start;
+
+ metadata = malloc(bytes);
+ if(metadata == NULL)
+ MEM_ERROR();
+
+ res = read_bytes(recoverfd, metadata, bytes);
+ if(res == -1)
+ BAD_ERROR("Failed to read recovery file, because %s\n",
+ strerror(errno));
+ if(res < bytes)
+ BAD_ERROR("Recovery file appears to be truncated\n");
+
+ write_destination(fd, 0, sizeof(struct squashfs_super_block), &sBlk);
+
+ write_destination(fd, sBlk.inode_table_start, bytes, metadata);
+
+ res = close(recoverfd);
+
+ if(res == -1)
+ BAD_ERROR("Failed to close recovery file, close returned %s\n",
+ strerror(errno));
+
+ res = close(fd);
+
+ if(res == -1)
+ BAD_ERROR("Failed to close output filesystem, close returned %s\n",
+ strerror(errno));
+
+ printf("Successfully wrote recovery file \"%s\". Exiting\n",
+ recovery_file);
+
+ exit(0);
+}
+
+
+static void write_filesystem_tables(struct squashfs_super_block *sBlk)
+{
+ sBlk->fragments = fragments;
+ sBlk->no_ids = id_count;
+ sBlk->inode_table_start = write_inodes();
+ sBlk->directory_table_start = write_directories();
+ sBlk->fragment_table_start = write_fragment_table();
+ sBlk->lookup_table_start = exportable ? write_inode_lookup_table() :
+ SQUASHFS_INVALID_BLK;
+ sBlk->id_table_start = write_id_table();
+ sBlk->xattr_id_table_start = write_xattrs();
+
+ TRACE("sBlk->inode_table_start 0x%llx\n", sBlk->inode_table_start);
+ TRACE("sBlk->directory_table_start 0x%llx\n",
+ sBlk->directory_table_start);
+ TRACE("sBlk->fragment_table_start 0x%llx\n", sBlk->fragment_table_start);
+ if(exportable)
+ TRACE("sBlk->lookup_table_start 0x%llx\n",
+ sBlk->lookup_table_start);
+
+ sBlk->bytes_used = bytes;
+
+ sBlk->compression = comp->id;
+
+ SQUASHFS_INSWAP_SUPER_BLOCK(sBlk);
+ write_destination(fd, SQUASHFS_START, sizeof(*sBlk), sBlk);
+
+ total_bytes += total_inode_bytes + total_directory_bytes +
+ sizeof(struct squashfs_super_block) + total_xattr_bytes;
+}
+
+
+static int _parse_numberll(char *start, long long *res, int size, int base)
+{
+ char *end;
+ long long number;
+
+ errno = 0; /* To distinguish success/failure after call */
+
+ number = strtoll(start, &end, base);
+
+ /*
+ * check for strtoll underflow or overflow in conversion, and other
+ * errors.
+ */
+ if((errno == ERANGE && (number == LLONG_MIN || number == LLONG_MAX)) ||
+ (errno != 0 && number == 0))
+ return 0;
+
+ /* reject negative numbers as invalid */
+ if(number < 0)
+ return 0;
+
+ if(size) {
+ /*
+ * Check for multiplier and trailing junk.
+ * But first check that a number exists before the
+ * multiplier
+ */
+ if(end == start)
+ return 0;
+
+ switch(end[0]) {
+ case 'g':
+ case 'G':
+ if(multiply_overflowll(number, 1073741824))
+ return 0;
+ number *= 1073741824;
+
+ if(end[1] != '\0')
+ /* trailing junk after multiplier, but
+ * allow it to be "bytes" */
+ if(strcmp(end + 1, "bytes"))
+ return 0;
+
+ break;
+ case 'm':
+ case 'M':
+ if(multiply_overflowll(number, 1048576))
+ return 0;
+ number *= 1048576;
+
+ if(end[1] != '\0')
+ /* trailing junk after multiplier, but
+ * allow it to be "bytes" */
+ if(strcmp(end + 1, "bytes"))
+ return 0;
+
+ break;
+ case 'k':
+ case 'K':
+ if(multiply_overflowll(number, 1024))
+ return 0;
+ number *= 1024;
+
+ if(end[1] != '\0')
+ /* trailing junk after multiplier, but
+ * allow it to be "bytes" */
+ if(strcmp(end + 1, "bytes"))
+ return 0;
+
+ break;
+ case '\0':
+ break;
+ default:
+ /* trailing junk after number */
+ return 0;
+ }
+ } else if(end[0] != '\0')
+ /* trailing junk after number */
+ return 0;
+
+ *res = number;
+ return 1;
+}
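+/*
+ * Illustrative inputs for _parse_numberll() with size != 0 and base 10:
+ * "131072" and "128K" both give 131072, "2Mbytes" gives 2097152
+ * ("bytes" is the only text allowed after a multiplier), "1G" gives
+ * 1073741824, while "-1" and "10x" are rejected.  With size == 0 no
+ * multiplier suffix is accepted.
+ */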
+
+
+static int parse_numberll(char *start, long long *res, int size)
+{
+ return _parse_numberll(start, res, size, 10);
+}
+
+
+static int parse_number(char *start, int *res, int size)
+{
+ long long number;
+
+ if(!_parse_numberll(start, &number, size, 10))
+ return 0;
+
+ /* check if the long long result will overflow a signed int */
+ if(number > INT_MAX)
+ return 0;
+
+ *res = (int) number;
+ return 1;
+}
+
+
+static int parse_number_unsigned(char *start, unsigned int *res, int size)
+{
+ long long number;
+
+ if(!_parse_numberll(start, &number, size, 10))
+ return 0;
+
+ /* check if the long long result will overflow an unsigned int */
+ if(number > UINT_MAX)
+ return 0;
+
+ *res = (unsigned int) number;
+ return 1;
+}
+
+
+static int parse_num(char *arg, int *res)
+{
+ return parse_number(arg, res, 0);
+}
+
+
+static int parse_num_unsigned(char *arg, unsigned int *res)
+{
+ return parse_number_unsigned(arg, res, 0);
+}
+
+
+static int parse_mode(char *arg, mode_t *res)
+{
+ long long number;
+
+ if(!_parse_numberll(arg, &number, 0, 8))
+ return 0;
+
+ if(number > 07777)
+ return 0;
+
+ *res = (mode_t) number;
+ return 1;
+}
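+/*
+ * For example, parse_mode("0755", &m) and parse_mode("755", &m) both
+ * yield 0755 because the string is always parsed as octal (base 8);
+ * values above 07777 are rejected.
+ */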
+
+
+static int get_physical_memory()
+{
+ /*
+ * Long longs are used here because with PAE, a 32-bit
+ * machine can have more than 4GB of physical memory
+ *
+ * sysconf(_SC_PHYS_PAGES) relies on /proc being mounted.
+ * If it fails, use sysinfo(); if that also fails, return 0
+ */
+ long long num_pages = sysconf(_SC_PHYS_PAGES);
+ long long page_size = sysconf(_SC_PAGESIZE);
+ int phys_mem;
+
+#ifdef __linux__
+ if(num_pages == -1 || page_size == -1) {
+ struct sysinfo sys;
+ int res = sysinfo(&sys);
+
+ if(res == -1)
+ return 0;
+
+ num_pages = sys.totalram;
+ page_size = sys.mem_unit;
+ }
+#endif
+
+ phys_mem = num_pages * page_size >> 20;
+
+ if(phys_mem < SQUASHFS_LOWMEM)
+ BAD_ERROR("Mksquashfs requires more physical memory than is "
+ "available!\n");
+
+ return phys_mem;
+}
+
+
+static void check_usable_phys_mem(int total_mem)
+{
+ /*
+ * We want to allow users to use as much of their physical
+ * memory as they wish. However, for practical reasons there are
+ * limits which need to be imposed, to protect users from themselves
+ * and to prevent people from using Mksquashfs as a DOS attack by using
+ * all physical memory. Mksquashfs uses memory to cache data from disk
+ * to optimise performance. It is pointless to ask it to use more
+ * than 75% of physical memory, as this causes thrashing and it is thus
+ * self-defeating.
+ */
+ int mem = get_physical_memory();
+
+ mem = (mem >> 1) + (mem >> 2); /* 75% */
+
+ if(total_mem > mem && mem) {
+ ERROR("Total memory requested is more than 75%% of physical "
+ "memory.\n");
+ ERROR("Mksquashfs uses memory to cache data from disk to "
+ "optimise performance.\n");
+ ERROR("It is pointless to ask it to use more than this amount "
+ "of memory, as this\n");
+ ERROR("causes thrashing and it is thus self-defeating.\n");
+ BAD_ERROR("Requested memory size too large\n");
+ }
+
+ if(sizeof(void *) == 4 && total_mem > 2048) {
+ /*
+ * If we're running on a kernel with PAE or on a 64-bit kernel,
+ * then the 75% physical memory limit can still easily exceed
+ * the memory addressable by this process.
+ *
+ * Due to the typical kernel/user-space split (1GB/3GB, or
+ * 2GB/2GB), we have to conservatively assume the 32-bit
+ * processes can only address 2-3GB. So refuse if the user
+ * tries to allocate more than 2GB.
+ */
+ ERROR("Total memory requested may exceed maximum "
+ "addressable memory by this process\n");
+ BAD_ERROR("Requested memory size too large\n");
+ }
+}
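+/*
+ * Worked example: on a machine with 8192 Mbytes of physical memory the
+ * cap above is (8192 >> 1) + (8192 >> 2) = 4096 + 2048 = 6144 Mbytes,
+ * i.e. 75%.  On 32-bit builds a request above 2048 Mbytes is refused
+ * regardless of physical memory.
+ */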
+
+
+static int get_default_phys_mem()
+{
+ /*
+ * get_physical_memory() relies on /proc being mounted.
+ * If it fails, issue a warning, and use
+ * SQUASHFS_LOWMEM / SQUASHFS_TAKE as default,
+ * and allow a larger value to be set with -mem.
+ */
+ int mem = get_physical_memory();
+
+ if(mem == 0) {
+ mem = SQUASHFS_LOWMEM / SQUASHFS_TAKE;
+
+ ERROR("Warning: Cannot get size of physical memory, probably "
+ "because /proc is missing.\n");
+ ERROR("Warning: Defaulting to minimal use of %d Mbytes, use "
+ "-mem to set a better value,\n", mem);
+ ERROR("Warning: or fix /proc.\n");
+ } else
+ mem /= SQUASHFS_TAKE;
+
+ if(sizeof(void *) == 4 && mem > 640) {
+ /*
+ * If we're running on a kernel with PAE or on a 64-bit kernel,
+ * the default memory usage can exceed the memory addressable
+ * by this process.
+ * Due to the typical kernel/user-space split (1GB/3GB, or
+ * 2GB/2GB), we have to conservatively assume the 32-bit
+ * processes can only address 2-3GB. So limit the default
+ * usage to 640M, which gives room for other data.
+ */
+ mem = 640;
+ }
+
+ return mem;
+}
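+/*
+ * Worked example, assuming SQUASHFS_TAKE is 4 (consistent with the
+ * documented 25% default): 8192 Mbytes of physical memory gives a
+ * default of 8192 / 4 = 2048 Mbytes, which on a 32-bit build is then
+ * capped to 640 Mbytes.
+ */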
+
+
+static void calculate_queue_sizes(int mem, int *readq, int *fragq, int *bwriteq,
+ int *fwriteq)
+{
+ *readq = mem / SQUASHFS_READQ_MEM;
+ *bwriteq = mem / SQUASHFS_BWRITEQ_MEM;
+ *fwriteq = mem / SQUASHFS_FWRITEQ_MEM;
+ *fragq = mem - *readq - *bwriteq - *fwriteq;
+}
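+/*
+ * Sketch of the resulting split, assuming SQUASHFS_READQ_MEM,
+ * SQUASHFS_BWRITEQ_MEM and SQUASHFS_FWRITEQ_MEM are all 4: with
+ * mem = 2048 Mbytes, readq = bwriteq = fwriteq = 512 Mbytes and the
+ * remaining 512 Mbytes goes to the fragment queue.
+ */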
+
+
+static void open_log_file(char *filename)
+{
+ log_fd = fopen(filename, "w");
+ if(log_fd == NULL)
+ BAD_ERROR("Failed to open log file \"%s\" because %s\n", filename, strerror(errno));
+
+ logging = TRUE;
+}
+
+
+static void check_env_var()
+{
+ char *time_string = getenv("SOURCE_DATE_EPOCH");
+ unsigned int time;
+
+ if(time_string != NULL) {
+ /*
+ * We cannot have both command-line options and environment
+ * variable trying to set the timestamp(s) at the same
+ * time. Semantically both are FORCE options which cannot be
+ * overridden elsewhere (otherwise they can't be relied on).
+ *
+ * So refuse to continue if both are set.
+ */
+ if(mkfs_time_opt || all_time_opt)
+ BAD_ERROR("SOURCE_DATE_EPOCH and command line options "
+ "can't be used at the same time to set "
+ "timestamp(s)\n");
+
+ if(!parse_num_unsigned(time_string, &time)) {
+ ERROR("Env Var SOURCE_DATE_EPOCH has invalid time value\n");
+ EXIT_MKSQUASHFS();
+ }
+
+ all_time = mkfs_time = time;
+ all_time_opt = mkfs_time_opt = TRUE;
+ }
+}
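+/*
+ * Typical reproducible-build usage (illustrative pathnames):
+ *
+ *     SOURCE_DATE_EPOCH=1672531200 mksquashfs src img.sqsh
+ *
+ * which is equivalent to giving -mkfs-time and -all-time with that
+ * value; combining the environment variable with either option is
+ * rejected above.
+ */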
+
+
+static void print_options(FILE *stream, char *name, int total_mem)
+{
+ fprintf(stream, "SYNTAX:%s source1 source2 ... FILESYSTEM [OPTIONS] ", name);
+ fprintf(stream, "[-e list of\nexclude dirs/files]\n");
+ fprintf(stream, "\nFilesystem compression options:\n");
+ fprintf(stream, "-b <block_size>\t\tset data block to <block_size>. Default ");
+ fprintf(stream, "128 Kbytes.\n");
+ fprintf(stream, "\t\t\tOptionally a suffix of K or M can be given to ");
+ fprintf(stream, "specify\n\t\t\tKbytes or Mbytes respectively\n");
+ fprintf(stream, "-comp <comp>\t\tselect <comp> compression\n");
+ fprintf(stream, "\t\t\tCompressors available:\n");
+ display_compressors(stream, "\t\t\t", COMP_DEFAULT);
+ fprintf(stream, "-noI\t\t\tdo not compress inode table\n");
+ fprintf(stream, "-noId\t\t\tdo not compress the uid/gid table (implied by ");
+ fprintf(stream, "-noI)\n");
+ fprintf(stream, "-noD\t\t\tdo not compress data blocks\n");
+ fprintf(stream, "-noF\t\t\tdo not compress fragment blocks\n");
+ fprintf(stream, "-noX\t\t\tdo not compress extended attributes\n");
+ fprintf(stream, "-no-compression\t\tdo not compress any of the data ");
+ fprintf(stream, "or metadata. This is\n\t\t\tequivalent to ");
+ fprintf(stream, "specifying -noI -noD -noF and -noX\n");
+ fprintf(stream, "\nFilesystem build options:\n");
+ fprintf(stream, "-tar\t\t\tread uncompressed tar file from standard in (stdin)\n");
+ fprintf(stream, "-no-strip\t\tact like tar, and do not strip leading ");
+ fprintf(stream, "directories\n\t\t\tfrom source files\n");
+ fprintf(stream, "-tarstyle\t\talternative name for -no-strip\n");
+ fprintf(stream, "-cpiostyle\t\tact like cpio, and read file ");
+ fprintf(stream, "pathnames from standard in\n\t\t\t(stdin)\n");
+ fprintf(stream, "-cpiostyle0\t\tlike -cpiostyle, but filenames are ");
+ fprintf(stream, "null terminated. Can\n\t\t\tbe used with find ");
+ fprintf(stream, "-print0 action\n");
+ fprintf(stream, "-reproducible\t\tbuild filesystems that are reproducible");
+ fprintf(stream, REP_STR "\n");
+ fprintf(stream, "-not-reproducible\tbuild filesystems that are not reproducible");
+ fprintf(stream, NOREP_STR "\n");
+ fprintf(stream, "-mkfs-time <time>\tset filesystem creation ");
+ fprintf(stream, "timestamp to <time>. <time> can\n\t\t\tbe an ");
+ fprintf(stream, "unsigned 32-bit int indicating seconds since the\n");
+ fprintf(stream, "\t\t\tepoch (1970-01-01) or a string value which ");
+ fprintf(stream, "is passed to\n\t\t\tthe \"date\" command to ");
+ fprintf(stream, "parse. Any string value which the\n\t\t\tdate ");
+ fprintf(stream, "command recognises can be used such as \"now\",\n");
+ fprintf(stream, "\t\t\t\"last week\", or \"Wed Feb 15 21:02:39 ");
+ fprintf(stream, "GMT 2023\"\n");
+ fprintf(stream, "-all-time <time>\tset all file timestamps to ");
+ fprintf(stream, "<time>. <time> can be an\n\t\t\tunsigned 32-bit ");
+ fprintf(stream, "int indicating seconds since the epoch\n\t\t\t");
+ fprintf(stream, "(1970-01-01) or a string value which is passed to ");
+ fprintf(stream, "the\n\t\t\t\"date\" command to parse. Any string ");
+ fprintf(stream, "value which the date\n\t\t\tcommand recognises can ");
+ fprintf(stream, "be used such as \"now\", \"last\n\t\t\tweek\", or ");
+ fprintf(stream, "\"Wed Feb 15 21:02:39 GMT 2023\"\n");
+ fprintf(stream, "-root-time <time>\tset root directory time to ");
+ fprintf(stream, "<time>. <time> can be an\n\t\t\tunsigned 32-bit ");
+ fprintf(stream, "int indicating seconds since the epoch\n\t\t\t");
+ fprintf(stream, "(1970-01-01) or a string value which is passed to ");
+ fprintf(stream, "the\n\t\t\t\"date\" command to parse. Any string ");
+ fprintf(stream, "value which the date\n\t\t\tcommand recognises can ");
+ fprintf(stream, "be used such as \"now\", \"last\n\t\t\tweek\", or ");
+ fprintf(stream, "\"Wed Feb 15 21:02:39 GMT 2023\"\n");
+ fprintf(stream, "-root-mode <mode>\tset root directory permissions ");
+ fprintf(stream, "to octal <mode>\n");
+ fprintf(stream, "-root-uid <value>\tset root directory owner to ");
+ fprintf(stream, "specified <value>,\n\t\t\t<value> can be either an ");
+ fprintf(stream, "integer uid or user name\n");
+ fprintf(stream, "-root-gid <value>\tset root directory group to ");
+ fprintf(stream, "specified <value>,\n\t\t\t<value> can be either an ");
+ fprintf(stream, "integer gid or group name\n");
+ fprintf(stream, "-all-root\t\tmake all files owned by root\n");
+ fprintf(stream, "-force-uid <value>\tset all file uids to specified ");
+ fprintf(stream, "<value>, <value> can be\n\t\t\teither an integer ");
+ fprintf(stream, "uid or user name\n");
+ fprintf(stream, "-force-gid <value>\tset all file gids to specified ");
+ fprintf(stream, "<value>, <value> can be\n\t\t\teither an integer ");
+ fprintf(stream, "gid or group name\n");
+ fprintf(stream, "-pseudo-override\tmake pseudo file uids and gids ");
+ fprintf(stream, "override -all-root,\n\t\t\t-force-uid and ");
+ fprintf(stream, "-force-gid options\n");
+ fprintf(stream, "-no-exports\t\tdo not make filesystem exportable via NFS (-tar default)\n");
+ fprintf(stream, "-exports\t\tmake filesystem exportable via NFS (default)\n");
+ fprintf(stream, "-no-sparse\t\tdo not detect sparse files\n");
+ fprintf(stream, "-no-tailends\t\tdo not pack tail ends into fragments (default)\n");
+ fprintf(stream, "-tailends\t\tpack tail ends into fragments\n");
+ fprintf(stream, "-no-fragments\t\tdo not use fragments\n");
+ fprintf(stream, "-no-duplicates\t\tdo not perform duplicate checking\n");
+ fprintf(stream, "-no-hardlinks\t\tdo not hardlink files, instead store duplicates\n");
+ fprintf(stream, "-keep-as-directory\tif one source directory is specified, ");
+ fprintf(stream, "create a root\n");
+ fprintf(stream, "\t\t\tdirectory containing that directory, rather than the\n");
+ fprintf(stream, "\t\t\tcontents of the directory\n");
+ fprintf(stream, "\nFilesystem filter options:\n");
+ fprintf(stream, "-p <pseudo-definition>\tadd pseudo file ");
+ fprintf(stream, "definition. The definition should\n");
+ fprintf(stream, "\t\t\tbe quoted\n");
+ fprintf(stream, "-pf <pseudo-file>\tadd list of pseudo file ");
+ fprintf(stream, "definitions from <pseudo-file>,\n\t\t\tuse - for ");
+ fprintf(stream, "stdin. Pseudo file definitions should not be\n");
+ fprintf(stream, "\t\t\tquoted\n");
+ fprintf(stream, "-sort <sort_file>\tsort files according to priorities in ");
+ fprintf(stream, "<sort_file>. One\n\t\t\tfile or dir with priority per ");
+ fprintf(stream, "line. Priority -32768 to\n\t\t\t32767, default priority 0\n");
+ fprintf(stream, "-ef <exclude_file>\tlist of exclude dirs/files. ");
+ fprintf(stream, "One per line\n");
+ fprintf(stream, "-wildcards\t\tallow extended shell wildcards (globbing) to be ");
+ fprintf(stream, "used in\n\t\t\texclude dirs/files\n");
+ fprintf(stream, "-regex\t\t\tallow POSIX regular expressions to be used in ");
+ fprintf(stream, "exclude\n\t\t\tdirs/files\n");
+ fprintf(stream, "-max-depth <levels>\tdescend at most <levels> of ");
+ fprintf(stream, "directories when scanning\n\t\t\tfilesystem\n");
+ fprintf(stream, "-one-file-system\tdo not cross filesystem ");
+ fprintf(stream, "boundaries. If a directory\n\t\t\tcrosses the ");
+ fprintf(stream, "boundary, create an empty directory for\n\t\t\teach ");
+ fprintf(stream, "mount point. If a file crosses the boundary\n\t\t\t");
+ fprintf(stream, "ignore it\n");
+ fprintf(stream, "-one-file-system-x\tdo not cross filesystem ");
+ fprintf(stream, "boundaries. Like\n\t\t\t-one-file-system option ");
+ fprintf(stream, "except directories are also\n\t\t\tignored if they ");
+ fprintf(stream, "cross the boundary\n");
+ fprintf(stream, "\nFilesystem extended attribute (xattrs) options:\n");
+ fprintf(stream, "-no-xattrs\t\tdo not store extended attributes" NOXOPT_STR "\n");
+ fprintf(stream, "-xattrs\t\t\tstore extended attributes" XOPT_STR "\n");
+ fprintf(stream, "-xattrs-exclude <regex>\texclude any xattr names ");
+ fprintf(stream, "matching <regex>. <regex> is a\n\t\t\tPOSIX ");
+ fprintf(stream, "regular expression, e.g. -xattrs-exclude ");
+ fprintf(stream, "'^user.'\n\t\t\texcludes xattrs from the user ");
+ fprintf(stream, "namespace\n");
+ fprintf(stream, "-xattrs-include <regex>\tinclude any xattr names ");
+ fprintf(stream, "matching <regex>. <regex> is a\n\t\t\tPOSIX ");
+ fprintf(stream, "regular expression, e.g. -xattrs-include ");
+ fprintf(stream, "'^user.'\n\t\t\tincludes xattrs from the user ");
+ fprintf(stream, "namespace\n");
+ fprintf(stream, "-xattrs-add <name=val>\tadd the xattr <name> with ");
+ fprintf(stream, "<val> to files. If an\n\t\t\tuser xattr it ");
+ fprintf(stream, "will be added to regular files and\n");
+ fprintf(stream, "\t\t\tdirectories (see man 7 xattr). Otherwise it ");
+ fprintf(stream, "will be\n\t\t\tadded to all files. <val> by ");
+ fprintf(stream, "default will be treated as\n\t\t\tbinary (i.e. an ");
+ fprintf(stream, "uninterpreted byte sequence), but it can\n\t\t\tbe ");
+ fprintf(stream, "prefixed with 0s, where it will be treated as ");
+ fprintf(stream, "base64\n\t\t\tencoded, or prefixed with 0x, where ");
+ fprintf(stream, "val will be treated\n\t\t\tas hexidecimal. ");
+ fprintf(stream, "Additionally it can be prefixed with\n\t\t\t0t ");
+ fprintf(stream, "where this encoding is similar to binary encoding,\n");
+ fprintf(stream, "\t\t\texcept backslashes are specially treated, and ");
+ fprintf(stream, "a\n\t\t\tbackslash followed by 3 octal digits can ");
+ fprintf(stream, "be used to\n\t\t\tencode any ASCII character, ");
+ fprintf(stream, "which obviously can be used\n\t\t\tto encode ");
+ fprintf(stream, "control codes. The option can be repeated\n");
+ fprintf(stream, "\t\t\tmultiple times to add multiple xattrs\n");
+ fprintf(stream, "\nMksquashfs runtime options:\n");
+ fprintf(stream, "-version\t\tprint version, licence and copyright message\n");
+ fprintf(stream, "-exit-on-error\t\ttreat normally ignored errors as fatal\n");
+ fprintf(stream, "-quiet\t\t\tno verbose output\n");
+ fprintf(stream, "-info\t\t\tprint files written to filesystem\n");
+ fprintf(stream, "-no-progress\t\tdo not display the progress bar\n");
+ fprintf(stream, "-progress\t\tdisplay progress bar when using the -info ");
+ fprintf(stream, "option\n");
+ fprintf(stream, "-percentage\t\tdisplay a percentage rather than the ");
+ fprintf(stream, "full progress bar.\n\t\t\tCan be used with dialog ");
+ fprintf(stream, "--gauge etc.\n");
+ fprintf(stream, "-throttle <percentage>\tthrottle the I/O input rate by the ");
+ fprintf(stream, "given percentage.\n\t\t\tThis can be used to reduce the I/O ");
+ fprintf(stream, "and CPU consumption\n\t\t\tof Mksquashfs\n");
+ fprintf(stream, "-limit <percentage>\tlimit the I/O input rate to the given ");
+ fprintf(stream, "percentage.\n\t\t\tThis can be used to reduce the I/O and CPU ");
+ fprintf(stream, "consumption\n\t\t\tof Mksquashfs (alternative to -throttle)\n");
+ fprintf(stream, "-processors <number>\tuse <number> processors. By default ");
+ fprintf(stream, "will use number of\n\t\t\tprocessors available\n");
+ fprintf(stream, "-mem <size>\t\tuse <size> physical memory for ");
+ fprintf(stream, "caches. Use K, M or G to\n\t\t\tspecify Kbytes,");
+ fprintf(stream, " Mbytes or Gbytes respectively\n");
+ fprintf(stream, "-mem-percent <percent>\tuse <percent> physical ");
+ fprintf(stream, "memory for caches. Default 25%%\n");
+ fprintf(stream, "-mem-default\t\tprint default memory usage in Mbytes\n");
+ fprintf(stream, "\nFilesystem append options:\n");
+ fprintf(stream, "-noappend\t\tdo not append to existing filesystem\n");
+ fprintf(stream, "-root-becomes <name>\twhen appending source ");
+ fprintf(stream, "files/directories, make the\n");
+ fprintf(stream, "\t\t\toriginal root become a subdirectory in the new root\n");
+ fprintf(stream, "\t\t\tcalled <name>, rather than adding the new source items\n");
+ fprintf(stream, "\t\t\tto the original root\n");
+ fprintf(stream, "-no-recovery\t\tdo not generate a recovery file\n");
+ fprintf(stream, "-recovery-path <name>\tuse <name> as the directory ");
+ fprintf(stream, "to store the recovery file\n");
+ fprintf(stream, "-recover <name>\t\trecover filesystem data using recovery ");
+ fprintf(stream, "file <name>\n");
+ fprintf(stream, "\nFilesystem actions options:\n");
+ fprintf(stream, "-action <action@expr>\tevaluate <expr> on every file, ");
+ fprintf(stream, "and execute <action>\n\t\t\tif it returns TRUE\n");
+ fprintf(stream, "-log-action <act@expr>\tas above, but log expression ");
+ fprintf(stream, "evaluation results and\n\t\t\tactions performed\n");
+ fprintf(stream, "-true-action <act@expr>\tas above, but only log expressions ");
+ fprintf(stream, "which return TRUE\n");
+ fprintf(stream, "-false-action <act@exp>\tas above, but only log expressions ");
+ fprintf(stream, "which return FALSE\n");
+ fprintf(stream, "-action-file <file>\tas action, but read actions ");
+ fprintf(stream, "from <file>\n");
+ fprintf(stream, "-log-action-file <file>\tas -log-action, but read ");
+ fprintf(stream, "actions from <file>\n");
+ fprintf(stream, "-true-action-file <f>\tas -true-action, but read ");
+ fprintf(stream, "actions from <f>\n");
+ fprintf(stream, "-false-action-file <f>\tas -false-action, but read ");
+ fprintf(stream, "actions from <f>\n");
+ fprintf(stream, "\nTar file only options:\n");
+ fprintf(stream, "-default-mode <mode>\ttar files often do not store ");
+ fprintf(stream, "permissions for\n\t\t\tintermediate directories. ");
+ fprintf(stream, "This option sets the default\n\t\t\tdirectory ");
+ fprintf(stream, "permissions to octal <mode>, rather than 0755.\n");
+ fprintf(stream, "\t\t\tThis also sets the root inode mode\n");
+ fprintf(stream, "-default-uid <uid>\ttar files often do not store ");
+ fprintf(stream, "uids for intermediate\n\t\t\tdirectories. This ");
+ fprintf(stream, "option sets the default directory\n\t\t\towner to ");
+ fprintf(stream, "<uid>, rather than the user running Mksquashfs.\n");
+ fprintf(stream, "\t\t\tThis also sets the root inode uid\n");
+ fprintf(stream, "-default-gid <gid>\ttar files often do not store ");
+ fprintf(stream, "gids for intermediate\n\t\t\tdirectories. This ");
+ fprintf(stream, "option sets the default directory\n\t\t\tgroup to ");
+ fprintf(stream, "<gid>, rather than the group of the user\n");
+ fprintf(stream, "\t\t\trunning Mksquashfs. This also sets the root ");
+ fprintf(stream, "inode gid\n");
+ fprintf(stream, "-ignore-zeros\t\tallow tar files to be concatenated ");
+ fprintf(stream, "together and fed to\n\t\t\tMksquashfs. Normally a ");
+ fprintf(stream, "tarfile has two consecutive 512\n\t\t\tbyte blocks ");
+ fprintf(stream, "filled with zeros which means EOF and\n");
+ fprintf(stream, "\t\t\tMksquashfs will stop reading after the first tar ");
+ fprintf(stream, "file on\n\t\t\tencountering them. This option makes ");
+ fprintf(stream, "Mksquashfs ignore\n\t\t\tthe zero filled blocks\n");
+ fprintf(stream, "\nExpert options (these may make the filesystem unmountable):\n");
+ fprintf(stream, "-nopad\t\t\tdo not pad filesystem to a multiple of 4K\n");
+ fprintf(stream, "-offset <offset>\tskip <offset> bytes at the beginning of ");
+ fprintf(stream, "FILESYSTEM.\n\t\t\tOptionally a suffix of K, M or G can be given ");
+ fprintf(stream, "to specify\n\t\t\tKbytes, Mbytes or Gbytes respectively.\n");
+ fprintf(stream, "\t\t\tDefault 0 bytes\n");
+ fprintf(stream, "-o <offset>\t\tsynonym for -offset\n");
+ fprintf(stream, "\nMiscellaneous options:\n");
+ fprintf(stream, "-fstime <time>\t\talternative name for -mkfs-time\n");
+ fprintf(stream, "-always-use-fragments\talternative name for -tailends\n");
+ fprintf(stream, "-root-owned\t\talternative name for -all-root\n");
+ fprintf(stream, "-noInodeCompression\talternative name for -noI\n");
+ fprintf(stream, "-noIdTableCompression\talternative name for -noId\n");
+ fprintf(stream, "-noDataCompression\talternative name for -noD\n");
+ fprintf(stream, "-noFragmentCompression\talternative name for -noF\n");
+ fprintf(stream, "-noXattrCompression\talternative name for -noX\n");
+ fprintf(stream, "\n-help\t\t\toutput this options text to stdout\n");
+ fprintf(stream, "-h\t\t\toutput this options text to stdout\n");
+ fprintf(stream, "\n-Xhelp\t\t\tprint compressor options for selected ");
+ fprintf(stream, "compressor\n");
+ fprintf(stream, "\nPseudo file definition format:\n");;
+ fprintf(stream, "\"filename d mode uid gid\"\t\tcreate a directory\n");
+ fprintf(stream, "\"filename m mode uid gid\"\t\tmodify filename\n");
+ fprintf(stream, "\"filename b mode uid gid major minor\"\tcreate a block device\n");
+ fprintf(stream, "\"filename c mode uid gid major minor\"\tcreate a character device\n");
+ fprintf(stream, "\"filename f mode uid gid command\"\tcreate file from stdout of command\n");
+ fprintf(stream, "\"filename s mode uid gid symlink\"\tcreate a symbolic link\n");
+ fprintf(stream, "\"filename i mode uid gid [s|f]\"\t\tcreate a socket (s) or FIFO (f)\n");
+ fprintf(stream, "\"filename x name=val\"\t\t\tcreate an extended attribute\n");
+ fprintf(stream, "\"filename l linkname\"\t\t\tcreate a hard-link to linkname\n");
+ fprintf(stream, "\"filename L pseudo_filename\"\t\tsame, but link to pseudo file\n");
+ fprintf(stream, "\"filename D time mode uid gid\"\t\tcreate a directory with timestamp time\n");
+ fprintf(stream, "\"filename M time mode uid gid\"\t\tmodify a file with timestamp time\n");
+ fprintf(stream, "\"filename B time mode uid gid major minor\"\n\t\t\t\t\tcreate block device with timestamp time\n");
+ fprintf(stream, "\"filename C time mode uid gid major minor\"\n\t\t\t\t\tcreate char device with timestamp time\n");
+ fprintf(stream, "\"filename F time mode uid gid command\"\tcreate file with timestamp time\n");
+ fprintf(stream, "\"filename S time mode uid gid symlink\"\tcreate symlink with timestamp time\n");
+ fprintf(stream, "\"filename I time mode uid gid [s|f]\"\tcreate socket/fifo with timestamp time\n");
+ fprintf(stream, "\nCompressors available and compressor specific options:\n");
+ display_compressor_usage(stream, COMP_DEFAULT);
+
+ fprintf(stream, "\nEnvironment:\n");
+ fprintf(stream, "SOURCE_DATE_EPOCH\tIf set, this is used as the ");
+ fprintf(stream, "filesystem creation\n");
+ fprintf(stream, "\t\t\ttimestamp. Also any file timestamps which are\n");
+ fprintf(stream, "\t\t\tafter SOURCE_DATE_EPOCH will be clamped to\n");
+ fprintf(stream, "\t\t\tSOURCE_DATE_EPOCH. See\n");
+ fprintf(stream, "\t\t\thttps://reproducible-builds.org/docs/source-date-epoch/\n");
+ fprintf(stream, "\t\t\tfor more information\n");
+ fprintf(stream, "\nSee also:");
+ fprintf(stream, "\nThe README for the Squashfs-tools 4.6.1 release, ");
+ fprintf(stream, "describing the new features can be\n");
+ fprintf(stream, "read here https://github.com/plougher/squashfs-tools/blob/master/README-4.6.1\n");
+
+ fprintf(stream, "\nThe Squashfs-tools USAGE guide can be read here\n");
+ fprintf(stream, "https://github.com/plougher/squashfs-tools/blob/master/USAGE-4.6\n");
+ fprintf(stream, "\nThe ACTIONS-README file describing how to use the new actions feature can be\n");
+ fprintf(stream, "read here https://github.com/plougher/squashfs-tools/blob/master/ACTIONS-README\n");
+}
+
+
+static void print_sqfstar_options(FILE *stream, char *name, int total_mem)
+{
+ fprintf(stream, "SYNTAX:%s [OPTIONS] FILESYSTEM ", name);
+ fprintf(stream, "[list of exclude dirs/files]\n");
+ fprintf(stream, "\nFilesystem compression options:\n");
+ fprintf(stream, "-b <block_size>\t\tset data block to <block_size>. Default ");
+ fprintf(stream, "128 Kbytes.\n");
+ fprintf(stream, "\t\t\tOptionally a suffix of K or M can be given to ");
+ fprintf(stream, "specify\n\t\t\tKbytes or Mbytes respectively\n");
+ fprintf(stream, "-comp <comp>\t\tselect <comp> compression\n");
+ fprintf(stream, "\t\t\tCompressors available:\n");
+ display_compressors(stream, "\t\t\t", COMP_DEFAULT);
+ fprintf(stream, "-noI\t\t\tdo not compress inode table\n");
+ fprintf(stream, "-noId\t\t\tdo not compress the uid/gid table (implied by ");
+ fprintf(stream, "-noI)\n");
+ fprintf(stream, "-noD\t\t\tdo not compress data blocks\n");
+ fprintf(stream, "-noF\t\t\tdo not compress fragment blocks\n");
+ fprintf(stream, "-noX\t\t\tdo not compress extended attributes\n");
+ fprintf(stream, "-no-compression\t\tdo not compress any of the data ");
+ fprintf(stream, "or metadata. This is\n\t\t\tequivalent to ");
+ fprintf(stream, "specifying -noI -noD -noF and -noX\n");
+ fprintf(stream, "\nFilesystem build options:\n");
+ fprintf(stream, "-reproducible\t\tbuild filesystems that are reproducible");
+ fprintf(stream, REP_STR "\n");
+ fprintf(stream, "-not-reproducible\tbuild filesystems that are not reproducible");
+ fprintf(stream, NOREP_STR "\n");
+ fprintf(stream, "-mkfs-time <time>\tset filesystem creation ");
+ fprintf(stream, "timestamp to <time>. <time> can\n\t\t\tbe an ");
+ fprintf(stream, "unsigned 32-bit int indicating seconds since the\n");
+ fprintf(stream, "\t\t\tepoch (1970-01-01) or a string value which ");
+ fprintf(stream, "is passed to\n\t\t\tthe \"date\" command to ");
+ fprintf(stream, "parse. Any string value which the\n\t\t\tdate ");
+ fprintf(stream, "command recognises can be used such as \"now\",\n");
+ fprintf(stream, "\t\t\t\"last week\", or \"Wed Feb 15 21:02:39 ");
+ fprintf(stream, "GMT 2023\"\n");
+ fprintf(stream, "-all-time <time>\tset all file timestamps to ");
+ fprintf(stream, "<time>. <time> can be an\n\t\t\tunsigned 32-bit ");
+ fprintf(stream, "int indicating seconds since the epoch\n\t\t\t");
+ fprintf(stream, "(1970-01-01) or a string value which is passed to ");
+ fprintf(stream, "the\n\t\t\t\"date\" command to parse. Any string ");
+ fprintf(stream, "value which the date\n\t\t\tcommand recognises can ");
+ fprintf(stream, "be used such as \"now\", \"last\n\t\t\tweek\", or ");
+ fprintf(stream, "\"Wed Feb 15 21:02:39 GMT 2023\"\n");
+ fprintf(stream, "-root-time <time>\tset root directory time to ");
+ fprintf(stream, "<time>. <time> can be an\n\t\t\tunsigned 32-bit ");
+ fprintf(stream, "int indicating seconds since the epoch\n\t\t\t");
+ fprintf(stream, "(1970-01-01) or a string value which is passed to ");
+ fprintf(stream, "the\n\t\t\t\"date\" command to parse. Any string ");
+ fprintf(stream, "value which the date\n\t\t\tcommand recognises can ");
+ fprintf(stream, "be used such as \"now\", \"last\n\t\t\tweek\", or ");
+ fprintf(stream, "\"Wed Feb 15 21:02:39 GMT 2023\"\n");
+ fprintf(stream, "-root-mode <mode>\tset root directory permissions to octal ");
+ fprintf(stream, "<mode>\n");
+ fprintf(stream, "-root-uid <value>\tset root directory owner to ");
+ fprintf(stream, "specified <value>,\n\t\t\t<value> can be either an ");
+ fprintf(stream, "integer uid or user name\n");
+ fprintf(stream, "-root-gid <value>\tset root directory group to ");
+ fprintf(stream, "specified <value>,\n\t\t\t<value> can be either an ");
+ fprintf(stream, "integer gid or group name\n");
+ fprintf(stream, "-all-root\t\tmake all files owned by root\n");
+ fprintf(stream, "-force-uid <value>\tset all file uids to specified ");
+ fprintf(stream, "<value>, <value> can be\n\t\t\teither an integer ");
+ fprintf(stream, "uid or user name\n");
+ fprintf(stream, "-force-gid <value>\tset all file gids to specified ");
+ fprintf(stream, "<value>, <value> can be\n\t\t\teither an integer ");
+ fprintf(stream, "gid or group name\n");
+ fprintf(stream, "-default-mode <mode>\ttar files often do not store ");
+ fprintf(stream, "permissions for\n\t\t\tintermediate directories. ");
+ fprintf(stream, "This option sets the default\n\t\t\tdirectory ");
+ fprintf(stream, "permissions to octal <mode>, rather than 0755.\n");
+ fprintf(stream, "\t\t\tThis also sets the root inode mode\n");
+ fprintf(stream, "-default-uid <uid>\ttar files often do not store ");
+ fprintf(stream, "uids for intermediate\n\t\t\tdirectories. This ");
+ fprintf(stream, "option sets the default directory\n\t\t\towner to ");
+ fprintf(stream, "<uid>, rather than the user running Sqfstar.\n");
+ fprintf(stream, "\t\t\tThis also sets the root inode uid\n");
+ fprintf(stream, "-default-gid <gid>\ttar files often do not store ");
+ fprintf(stream, "gids for intermediate\n\t\t\tdirectories. This ");
+ fprintf(stream, "option sets the default directory\n\t\t\tgroup to ");
+ fprintf(stream, "<gid>, rather than the group of the user\n");
+ fprintf(stream, "\t\t\trunning Sqfstar. This also sets the root ");
+ fprintf(stream, "inode gid\n");
+ fprintf(stream, "-pseudo-override\tmake pseudo file uids and gids ");
+ fprintf(stream, "override -all-root,\n\t\t\t-force-uid and ");
+ fprintf(stream, "-force-gid options\n");
+ fprintf(stream, "-exports\t\tmake the filesystem exportable via NFS\n");
+ fprintf(stream, "-no-sparse\t\tdo not detect sparse files\n");
+ fprintf(stream, "-no-fragments\t\tdo not use fragments\n");
+ fprintf(stream, "-no-tailends\t\tdo not pack tail ends into fragments\n");
+ fprintf(stream, "-no-duplicates\t\tdo not perform duplicate checking\n");
+ fprintf(stream, "-no-hardlinks\t\tdo not hardlink files, instead store duplicates\n");
+ fprintf(stream, "\nFilesystem filter options:\n");
+ fprintf(stream, "-p <pseudo-definition>\tadd pseudo file ");
+ fprintf(stream, "definition. The definition should\n");
+ fprintf(stream, "\t\t\tbe quoted\n");
+ fprintf(stream, "-pf <pseudo-file>\tadd list of pseudo file ");
+ fprintf(stream, "definitions. Pseudo file\n\t\t\tdefinitions in ");
+ fprintf(stream, "pseudo-files should not be quoted\n");
+ fprintf(stream, "-ef <exclude_file>\tlist of exclude dirs/files. ");
+ fprintf(stream, "One per line\n");
+ fprintf(stream, "-regex\t\t\tallow POSIX regular expressions to be used in ");
+ fprintf(stream, "exclude\n\t\t\tdirs/files\n");
+ fprintf(stream, "-ignore-zeros\t\tallow tar files to be concatenated ");
+ fprintf(stream, "together and fed to\n\t\t\tSqfstar. Normally a ");
+ fprintf(stream, "tarfile has two consecutive 512\n\t\t\tbyte blocks ");
+ fprintf(stream, "filled with zeros which means EOF and\n");
+ fprintf(stream, "\t\t\tSqfstar will stop reading after the first tar ");
+ fprintf(stream, "file on\n\t\t\tencountering them. This option makes ");
+ fprintf(stream, "Sqfstar ignore the\n\t\t\tzero filled blocks\n");
+ fprintf(stream, "\nFilesystem extended attribute (xattrs) options:\n");
+ fprintf(stream, "-no-xattrs\t\tdo not store extended attributes" NOXOPT_STR "\n");
+ fprintf(stream, "-xattrs\t\t\tstore extended attributes" XOPT_STR "\n");
+ fprintf(stream, "-xattrs-exclude <regex>\texclude any xattr names ");
+ fprintf(stream, "matching <regex>. <regex> is a\n\t\t\tPOSIX ");
+ fprintf(stream, "regular expression, e.g. -xattrs-exclude ");
+ fprintf(stream, "'^user.'\n\t\t\texcludes xattrs from the user ");
+ fprintf(stream, "namespace\n");
+ fprintf(stream, "-xattrs-include <regex>\tinclude any xattr names ");
+ fprintf(stream, "matching <regex>. <regex> is a\n\t\t\tPOSIX ");
+ fprintf(stream, "regular expression, e.g. -xattrs-include ");
+ fprintf(stream, "'^user.'\n\t\t\tincludes xattrs from the user ");
+ fprintf(stream, "namespace\n");
+ fprintf(stream, "-xattrs-add <name=val>\tadd the xattr <name> with ");
+ fprintf(stream, "<val> to files. If an\n\t\t\tuser xattr it ");
+ fprintf(stream, "will be added to regular files and\n");
+ fprintf(stream, "\t\t\tdirectories (see man 7 xattr). Otherwise it ");
+ fprintf(stream, "will be\n\t\t\tadded to all files. <val> by ");
+ fprintf(stream, "default will be treated as\n\t\t\tbinary (i.e. an ");
+ fprintf(stream, "uninterpreted byte sequence), but it can\n\t\t\tbe ");
+ fprintf(stream, "prefixed with 0s, where it will be treated as ");
+ fprintf(stream, "base64\n\t\t\tencoded, or prefixed with 0x, where ");
+ fprintf(stream, "val will be treated\n\t\t\tas hexidecimal. ");
+ fprintf(stream, "Additionally it can be prefixed with\n\t\t\t0t ");
+ fprintf(stream, "where this encoding is similar to binary encoding,\n");
+ fprintf(stream, "\t\t\texcept backslashes are specially treated, and ");
+ fprintf(stream, "a\n\t\t\tbackslash followed by 3 octal digits can ");
+ fprintf(stream, "be used to\n\t\t\tencode any ASCII character, ");
+ fprintf(stream, "which obviously can be used\n\t\t\tto encode ");
+ fprintf(stream, "control codes. The option can be repeated\n");
+ fprintf(stream, "\t\t\tmultiple times to add multiple xattrs\n");
+ fprintf(stream, "\nSqfstar runtime options:\n");
+ fprintf(stream, "-version\t\tprint version, licence and copyright message\n");
+ fprintf(stream, "-force\t\t\tforce Sqfstar to write to block device ");
+ fprintf(stream, "or file\n");
+ fprintf(stream, "-exit-on-error\t\ttreat normally ignored errors as fatal\n");
+ fprintf(stream, "-quiet\t\t\tno verbose output\n");
+ fprintf(stream, "-info\t\t\tprint files written to filesystem\n");
+ fprintf(stream, "-no-progress\t\tdo not display the progress bar\n");
+ fprintf(stream, "-progress\t\tdisplay progress bar when using the -info ");
+ fprintf(stream, "option\n");
+ fprintf(stream, "-percentage\t\tdisplay a percentage rather than the ");
+ fprintf(stream, "full progress bar.\n\t\t\tCan be used with dialog ");
+ fprintf(stream, "--gauge etc.\n");
+ fprintf(stream, "-throttle <percentage>\tthrottle the I/O input rate by the ");
+ fprintf(stream, "given percentage.\n\t\t\tThis can be used to reduce the I/O ");
+ fprintf(stream, "and CPU consumption\n\t\t\tof Sqfstar\n");
+ fprintf(stream, "-limit <percentage>\tlimit the I/O input rate to the given ");
+ fprintf(stream, "percentage.\n\t\t\tThis can be used to reduce the I/O and CPU ");
+ fprintf(stream, "consumption\n\t\t\tof Sqfstar (alternative to -throttle)\n");
+ fprintf(stream, "-processors <number>\tuse <number> processors. By default ");
+ fprintf(stream, "will use number of\n\t\t\tprocessors available\n");
+ fprintf(stream, "-mem <size>\t\tuse <size> physical memory for ");
+ fprintf(stream, "caches. Use K, M or G to\n\t\t\tspecify Kbytes,");
+ fprintf(stream, " Mbytes or Gbytes respectively\n");
+ fprintf(stream, "-mem-percent <percent>\tuse <percent> physical ");
+ fprintf(stream, "memory for caches. Default 25%%\n");
+ fprintf(stream, "-mem-default\t\tprint default memory usage in Mbytes\n");
+ fprintf(stream, "\nExpert options (these may make the filesystem unmountable):\n");
+ fprintf(stream, "-nopad\t\t\tdo not pad filesystem to a multiple of 4K\n");
+ fprintf(stream, "-offset <offset>\tskip <offset> bytes at the beginning of ");
+ fprintf(stream, "FILESYSTEM.\n\t\t\tOptionally a suffix of K, M or G can be given ");
+ fprintf(stream, "to specify\n\t\t\tKbytes, Mbytes or Gbytes respectively.\n");
+ fprintf(stream, "\t\t\tDefault 0 bytes\n");
+ fprintf(stream, "-o <offset>\t\tsynonym for -offset\n");
+ fprintf(stream, "\nMiscellaneous options:\n");
+ fprintf(stream, "-fstime <time>\t\talternative name for mkfs-time\n");
+ fprintf(stream, "-root-owned\t\talternative name for -all-root\n");
+ fprintf(stream, "-noInodeCompression\talternative name for -noI\n");
+ fprintf(stream, "-noIdTableCompression\talternative name for -noId\n");
+ fprintf(stream, "-noDataCompression\talternative name for -noD\n");
+ fprintf(stream, "-noFragmentCompression\talternative name for -noF\n");
+ fprintf(stream, "-noXattrCompression\talternative name for -noX\n");
+ fprintf(stream, "\n-help\t\t\toutput this options text to stdout\n");
+ fprintf(stream, "-h\t\t\toutput this options text to stdout\n");
+ fprintf(stream, "\n-Xhelp\t\t\tprint compressor options for selected ");
+ fprintf(stream, "compressor\n");
+ fprintf(stream, "\nPseudo file definition format:\n");;
+ fprintf(stream, "\"filename d mode uid gid\"\t\tcreate a directory\n");
+ fprintf(stream, "\"filename m mode uid gid\"\t\tmodify filename\n");
+ fprintf(stream, "\"filename b mode uid gid major minor\"\tcreate a block device\n");
+ fprintf(stream, "\"filename c mode uid gid major minor\"\tcreate a character device\n");
+ fprintf(stream, "\"filename f mode uid gid command\"\tcreate file from stdout of command\n");
+ fprintf(stream, "\"filename s mode uid gid symlink\"\tcreate a symbolic link\n");
+ fprintf(stream, "\"filename i mode uid gid [s|f]\"\t\tcreate a socket (s) or FIFO (f)\n");
+ fprintf(stream, "\"filename x name=val\"\t\t\tcreate an extended attribute\n");
+ fprintf(stream, "\"filename l linkname\"\t\t\tcreate a hard-link to linkname\n");
+ fprintf(stream, "\"filename L pseudo_filename\"\t\tsame, but link to pseudo file\n");
+ fprintf(stream, "\"filename D time mode uid gid\"\t\tcreate a directory with timestamp time\n");
+ fprintf(stream, "\"filename M time mode uid gid\"\t\tmodify a file with timestamp time\n");
+ fprintf(stream, "\"filename B time mode uid gid major minor\"\n\t\t\t\t\tcreate block device with timestamp time\n");
+ fprintf(stream, "\"filename C time mode uid gid major minor\"\n\t\t\t\t\tcreate char device with timestamp time\n");
+ fprintf(stream, "\"filename F time mode uid gid command\"\tcreate file with timestamp time\n");
+ fprintf(stream, "\"filename S time mode uid gid symlink\"\tcreate symlink with timestamp time\n");
+ fprintf(stream, "\"filename I time mode uid gid [s|f]\"\tcreate socket/fifo with timestamp time\n");
+ fprintf(stream, "\nCompressors available and compressor specific options:\n");
+ display_compressor_usage(stream, COMP_DEFAULT);
+
+ fprintf(stream, "\nEnvironment:\n");
+ fprintf(stream, "SOURCE_DATE_EPOCH\tIf set, this is used as the ");
+ fprintf(stream, "filesystem creation\n");
+ fprintf(stream, "\t\t\ttimestamp. Also any file timestamps which are\n");
+ fprintf(stream, "\t\t\tafter SOURCE_DATE_EPOCH will be clamped to\n");
+ fprintf(stream, "\t\t\tSOURCE_DATE_EPOCH. See\n");
+ fprintf(stream, "\t\t\thttps://reproducible-builds.org/docs/source-date-epoch/\n");
+ fprintf(stream, "\t\t\tfor more information\n");
+ fprintf(stream, "\nSee also:\n");
+ fprintf(stream, "The README for the Squashfs-tools 4.6.1 release, ");
+ fprintf(stream, "describing the new features can be\n");
+ fprintf(stream, "read here https://github.com/plougher/squashfs-tools/blob/master/README-4.6.1\n");
+
+ fprintf(stream, "\nThe Squashfs-tools USAGE guide can be read here\n");
+ fprintf(stream, "https://github.com/plougher/squashfs-tools/blob/master/USAGE-4.6\n");
+}
+
+
+static void print_version(char *string)
+{
+ printf("%s version " VERSION " (" DATE ")\n", string);
+ printf("copyright (C) " YEAR " Phillip Lougher ");
+ printf("<phillip@squashfs.org.uk>\n\n");
+ printf("This program is free software; you can redistribute it and/or\n");
+ printf("modify it under the terms of the GNU General Public License\n");
+ printf("as published by the Free Software Foundation; either version ");
+ printf("2,\n");
+ printf("or (at your option) any later version.\n\n");
+ printf("This program is distributed in the hope that it will be ");
+ printf("useful,\n");
+ printf("but WITHOUT ANY WARRANTY; without even the implied warranty of\n");
+ printf("MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n");
+ printf("GNU General Public License for more details.\n");
+}
+
+
+static void print_summary()
+{
+ int i;
+
+ printf("\n%sSquashfs %d.%d filesystem, %s compressed, data block size"
+ " %d\n", exportable ? "Exportable " : "", SQUASHFS_MAJOR,
+ SQUASHFS_MINOR, comp->name, block_size);
+ printf("\t%s data, %s metadata, %s fragments,\n\t%s xattrs, %s ids\n",
+ noD ? "uncompressed" : "compressed", noI ? "uncompressed" :
+ "compressed", no_fragments ? "no" : noF ? "uncompressed" :
+ "compressed", no_xattrs ? "no" : noX ? "uncompressed" :
+ "compressed", noI || noId ? "uncompressed" : "compressed");
+ printf("\tduplicates are %sremoved\n", duplicate_checking ? "" :
+ "not ");
+ printf("Filesystem size %.2f Kbytes (%.2f Mbytes)\n", bytes / 1024.0,
+ bytes / (1024.0 * 1024.0));
+ printf("\t%.2f%% of uncompressed filesystem size (%.2f Kbytes)\n",
+ ((float) bytes / total_bytes) * 100.0, total_bytes / 1024.0);
+ printf("Inode table size %lld bytes (%.2f Kbytes)\n",
+ inode_bytes, inode_bytes / 1024.0);
+ printf("\t%.2f%% of uncompressed inode table size (%lld bytes)\n",
+ ((float) inode_bytes / total_inode_bytes) * 100.0,
+ total_inode_bytes);
+ printf("Directory table size %lld bytes (%.2f Kbytes)\n",
+ directory_bytes, directory_bytes / 1024.0);
+ printf("\t%.2f%% of uncompressed directory table size (%lld bytes)\n",
+ ((float) directory_bytes / total_directory_bytes) * 100.0,
+ total_directory_bytes);
+ if(total_xattr_bytes) {
+ printf("Xattr table size %d bytes (%.2f Kbytes)\n",
+ xattr_bytes, xattr_bytes / 1024.0);
+ printf("\t%.2f%% of uncompressed xattr table size (%d bytes)\n",
+ ((float) xattr_bytes / total_xattr_bytes) * 100.0,
+ total_xattr_bytes);
+ }
+ if(duplicate_checking)
+ printf("Number of duplicate files found %u\n", file_count -
+ dup_files);
+ else
+ printf("No duplicate files removed\n");
+ printf("Number of inodes %u\n", inode_count);
+ printf("Number of files %u\n", file_count);
+ if(!no_fragments)
+ printf("Number of fragments %u\n", fragments);
+ printf("Number of symbolic links %u\n", sym_count);
+ printf("Number of device nodes %u\n", dev_count);
+ printf("Number of fifo nodes %u\n", fifo_count);
+ printf("Number of socket nodes %u\n", sock_count);
+ printf("Number of directories %u\n", dir_count);
+ printf("Number of hard-links %lld\n", hardlnk_count);
+ printf("Number of ids (unique uids + gids) %d\n", id_count);
+ printf("Number of uids %d\n", uid_count);
+
+ for(i = 0; i < id_count; i++) {
+ if(id_table[i]->flags & ISA_UID) {
+ struct passwd *user = getpwuid(id_table[i]->id);
+ printf("\t%s (%u)\n", user == NULL ? "unknown" :
+ user->pw_name, id_table[i]->id);
+ }
+ }
+
+ printf("Number of gids %d\n", guid_count);
+
+ for(i = 0; i < id_count; i++) {
+ if(id_table[i]->flags & ISA_GID) {
+ struct group *group = getgrgid(id_table[i]->id);
+ printf("\t%s (%d)\n", group == NULL ? "unknown" :
+ group->gr_name, id_table[i]->id);
+ }
+ }
+}
+
+
+int option_with_arg(char *string, char *table[])
+{
+ int i;
+
+ if(*string != '-')
+ return FALSE;
+
+ for(i = 0; table[i] != NULL; i++)
+ if(strcmp(string + 1, table[i]) == 0)
+ break;
+
+ if(table[i] != NULL)
+ return TRUE;
+
+ return compressor_option_args(comp, string);
+}
+
+
+static int get_uid_from_arg(char *arg, unsigned int *uid)
+{
+ char *last;
+ long long res;
+
+ res = strtoll(arg, &last, 10);
+ if(*last == '\0') {
+ if(res < 0 || res > (((long long) 1 << 32) - 1))
+ return -2;
+
+ *uid = res;
+ return 0;
+ } else {
+ struct passwd *id = getpwnam(arg);
+
+ if(id) {
+ *uid = id->pw_uid;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+
+static int get_gid_from_arg(char *arg, unsigned int *gid)
+{
+ char *last;
+ long long res;
+
+ res = strtoll(arg, &last, 10);
+ if(*last == '\0') {
+ if(res < 0 || res > (((long long) 1 << 32) - 1))
+ return -2;
+
+ *gid = res;
+ return 0;
+ } else {
+ struct group *id = getgrnam(arg);
+
+ if(id) {
+ *gid = id->gr_gid;
+ return 0;
+ }
+ }
+
+ return -1;
+}
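+/*
+ * For example (illustrative names), "-root-uid 1000" or "-root-gid 1000"
+ * is parsed as a numeric id, while "-root-uid daemon" falls back to a
+ * getpwnam()/getgrnam() lookup; ids outside 0..2^32 - 1 return -2 and
+ * unknown names return -1.
+ */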
+
+
+int sqfstar(int argc, char *argv[])
+{
+ struct stat buf;
+ int res, i;
+ squashfs_inode inode;
+ int readq;
+ int fragq;
+ int bwriteq;
+ int fwriteq;
+ int total_mem = get_default_phys_mem();
+ int progress = TRUE;
+ int force_progress = FALSE;
+ int dest_index;
+ struct file_buffer **fragment = NULL;
+ int size;
+ void *comp_data;
+
+ if(argc == 2 && strcmp(argv[1], "-version") == 0) {
+ print_version("sqfstar");
+ exit(0);
+ }
+
+ block_log = slog(block_size);
+ calculate_queue_sizes(total_mem, &readq, &fragq, &bwriteq, &fwriteq);
+
+ if(argc == 2 && (strcmp(argv[1], "-help") == 0 || strcmp(argv[1], "-h") == 0)) {
+ print_sqfstar_options(stdout, argv[0], total_mem);
+ exit(0);
+ }
+
+ if(argc == 2 && strcmp(argv[1], "-mem-default") == 0) {
+ printf("%d\n", total_mem);
+ exit(0);
+ }
+
+ comp = lookup_compressor(COMP_DEFAULT);
+
+ /*
+ * Scan the command line for the -comp xxx option. This must occur before
+ * any -X compressor specific options are parsed, to ensure those options
+ * are passed to the correct compressor
+ */
+ for(i = 1; i < argc; i++) {
+ if(strncmp(argv[i], "-X", 2) == 0)
+ X_opt_parsed = 1;
+
+ if(strcmp(argv[i], "-comp") == 0) {
+ struct compressor *prev_comp = comp;
+
+ if(++i == argc) {
+ ERROR("%s: -comp missing compression type\n",
+ argv[0]);
+ exit(1);
+ }
+ comp = lookup_compressor(argv[i]);
+ if(!comp->supported) {
+ ERROR("%s: Compressor \"%s\" is not supported!"
+ "\n", argv[0], argv[i]);
+ ERROR("%s: Compressors available:\n", argv[0]);
+ display_compressors(stderr, "", COMP_DEFAULT);
+ exit(1);
+ }
+ if(compressor_opt_parsed) {
+ ERROR("%s: -comp multiple conflicting -comp"
+ " options specified on command line"
+ ", previously %s, now %s\n", argv[0],
+ prev_comp->name, comp->name);
+ exit(1);
+ }
+ compressor_opt_parsed = 1;
+ if(X_opt_parsed) {
+ ERROR("%s: -comp option should be before any "
+ "-X option\n", argv[0]);
+ exit(1);
+ }
+ } else if(argv[i][0] != '-')
+ break;
+ else if(option_with_arg(argv[i], sqfstar_option_table))
+ i++;
+ }
+
+ if(i >= argc) {
+ print_sqfstar_options(stderr, argv[0], total_mem);
+ exit(1);
+ }
+
+ dest_index = i;
+ source_path = NULL;
+ source = 0;
+ old_exclude = FALSE;
+ tarfile = TRUE;
+
+ /* By default images generated from tar files are not exportable.
+ * Exportable by default is a "legacy" setting in Mksquashfs, which
+ * will cause too many problems to change now. But tarfile reading
+ * has no such issues */
+ exportable = FALSE;
+
+ /* By default images generated from tar files use tail-end packing.
+ * No tailend packing is a "legacy" setting in Mksquashfs, which
+ * will cause too many problems to change now. But tarfile reading
+ * has no such issues */
+ always_use_fragments = TRUE;
+
+ for(i = 1; i < dest_index; i++) {
+ if(strcmp(argv[i], "-ignore-zeros") == 0)
+ ignore_zeros = TRUE;
+ else if(strcmp(argv[i], "-no-hardlinks") == 0)
+ no_hardlinks = TRUE;
+ else if(strcmp(argv[i], "-throttle") == 0) {
+ if((++i == dest_index) || !parse_num(argv[i], &sleep_time)) {
+ ERROR("%s: %s missing or invalid value\n",
+ argv[0], argv[i - 1]);
+ exit(1);
+ }
+ if(sleep_time > 99) {
+ ERROR("%s: %s value should be between 0 and "
+ "99\n", argv[0], argv[i - 1]);
+ exit(1);
+ }
+ readq = 4;
+ } else if(strcmp(argv[i], "-limit") == 0) {
+ if((++i == dest_index) || !parse_num(argv[i], &sleep_time)) {
+ ERROR("%s: %s missing or invalid value\n",
+ argv[0], argv[i - 1]);
+ exit(1);
+ }
+ if(sleep_time < 1 || sleep_time > 100) {
+ ERROR("%s: %s value should be between 1 and "
+ "100\n", argv[0], argv[i - 1]);
+ exit(1);
+ }
+ sleep_time = 100 - sleep_time;
+ readq = 4;
+ } else if(strcmp(argv[i], "-mkfs-time") == 0 ||
+ strcmp(argv[i], "-fstime") == 0) {
+ if((++i == dest_index) ||
+ (!parse_num_unsigned(argv[i], &mkfs_time) &&
+ !exec_date(argv[i], &mkfs_time))) {
+ ERROR("%s: %s missing or invalid time "
+ "value\n", argv[0],
+ argv[i - 1]);
+ exit(1);
+ }
+ mkfs_time_opt = TRUE;
+ } else if(strcmp(argv[i], "-all-time") == 0) {
+ if((++i == dest_index) ||
+ (!parse_num_unsigned(argv[i], &all_time) &&
+ !exec_date(argv[i], &all_time))) {
+ ERROR("%s: %s missing or invalid time "
+ "value\n", argv[0],
+ argv[i - 1]);
+ exit(1);
+ }
+ all_time_opt = TRUE;
+ clamping = FALSE;
+ } else if(strcmp(argv[i], "-reproducible") == 0)
+ reproducible = TRUE;
+ else if(strcmp(argv[i], "-not-reproducible") == 0)
+ reproducible = FALSE;
+ else if(strcmp(argv[i], "-root-mode") == 0) {
+ if((++i == dest_index) || !parse_mode(argv[i], &root_mode)) {
+ ERROR("%s: -root-mode missing or invalid mode,"
+ " octal number <= 07777 expected\n", argv[0]);
+ exit(1);
+ }
+ root_mode_opt = TRUE;
+ } else if(strcmp(argv[i], "-root-uid") == 0) {
+ if(++i == dest_index) {
+ ERROR("%s: -root-uid missing uid or user name\n",
+ argv[0]);
+ exit(1);
+ }
+
+ res = get_uid_from_arg(argv[i], &root_uid);
+ if(res) {
+ if(res == -2)
+ ERROR("%s: -root-uid uid out of range\n",
+ argv[0]);
+ else
+ ERROR("%s: -root-uid invalid uid or "
+ "unknown user name\n", argv[0]);
+ exit(1);
+ }
+ root_uid_opt = TRUE;
+ } else if(strcmp(argv[i], "-root-gid") == 0) {
+ if(++i == dest_index) {
+ ERROR("%s: -root-gid missing gid or group name\n",
+ argv[0]);
+ exit(1);
+ }
+
+ res = get_gid_from_arg(argv[i], &root_gid);
+ if(res) {
+ if(res == -2)
+ ERROR("%s: -root-gid gid out of range\n",
+ argv[0]);
+ else
+ ERROR("%s: -root-gid invalid gid or "
+ "unknown group name\n", argv[0]);
+ exit(1);
+ }
+ root_gid_opt = TRUE;
+ } else if(strcmp(argv[i], "-root-time") == 0) {
+ if((++i == dest_index) ||
+ (!parse_num_unsigned(argv[i], &root_time) &&
+ !exec_date(argv[i], &root_time))) {
+ ERROR("%s: -root-time missing or invalid time\n",
+ argv[0]);
+ exit(1);
+ }
+ root_time_opt = TRUE;
+ } else if(strcmp(argv[i], "-default-mode") == 0) {
+ if((++i == dest_index) || !parse_mode(argv[i], &default_mode)) {
+ ERROR("%s: -default-mode missing or invalid mode,"
+ " octal number <= 07777 expected\n", argv[0]);
+ exit(1);
+ }
+ root_mode = default_mode;
+ default_mode_opt = root_mode_opt = TRUE;
+ } else if(strcmp(argv[i], "-default-uid") == 0) {
+ if((++i == dest_index) || !parse_num_unsigned(argv[i], &default_uid)) {
+ ERROR("%s: -default-uid missing or invalid uid\n",
+ argv[0]);
+ exit(1);
+ }
+ root_uid = default_uid;
+ default_uid_opt = root_uid_opt = TRUE;
+ } else if(strcmp(argv[i], "-default-gid") == 0) {
+ if((++i == dest_index) || !parse_num_unsigned(argv[i], &default_gid)) {
+ ERROR("%s: -default-gid missing or invalid gid\n",
+ argv[0]);
+ exit(1);
+ }
+ root_gid = default_gid;
+ default_gid_opt = root_gid_opt = TRUE;
+ } else if(strcmp(argv[i], "-comp") == 0)
+ /* parsed previously */
+ i++;
+ else if(strncmp(argv[i], "-X", 2) == 0) {
+ int args;
+
+ if(strcmp(argv[i] + 2, "help") == 0)
+ goto print_sqfstar_compressor_options;
+
+ args = compressor_options(comp, argv + i, dest_index - i);
+ if(args < 0) {
+ if(args == -1) {
+ ERROR("%s: Unrecognised compressor"
+ " option %s\n", argv[0],
+ argv[i]);
+ if(!compressor_opt_parsed)
+ ERROR("%s: Did you forget to"
+ " specify -comp, or "
+ "specify it after the"
+ " -X options?\n",
+ argv[0]);
+print_sqfstar_compressor_options:
+ ERROR("%s: selected compressor \"%s\""
+ ". Options supported: %s\n",
+ argv[0], comp->name,
+ comp->usage ? "" : "none");
+ if(comp->usage)
+ comp->usage(stderr);
+ }
+ exit(1);
+ }
+ i += args;
+
+ } else if(strcmp(argv[i], "-pf") == 0) {
+ if(++i == dest_index) {
+ ERROR("%s: -pf missing filename\n", argv[0]);
+ exit(1);
+ }
+ if(read_pseudo_file(argv[i], argv[dest_index]) == FALSE)
+ exit(1);
+ } else if(strcmp(argv[i], "-p") == 0) {
+ if(++i == dest_index) {
+ ERROR("%s: -p missing pseudo file definition\n",
+ argv[0]);
+ exit(1);
+ }
+ if(read_pseudo_definition(argv[i], argv[dest_index]) == FALSE)
+ exit(1);
+ } else if(strcmp(argv[i], "-regex") == 0)
+ use_regex = TRUE;
+ else if(strcmp(argv[i], "-no-sparse") == 0)
+ sparse_files = FALSE;
+ else if(strcmp(argv[i], "-no-progress") == 0)
+ progress = FALSE;
+ else if(strcmp(argv[i], "-progress") == 0)
+ force_progress = TRUE;
+ else if(strcmp(argv[i], "-exports") == 0)
+ exportable = TRUE;
+ else if(strcmp(argv[i], "-offset") == 0 ||
+ strcmp(argv[i], "-o") == 0) {
+ if((++i == dest_index) ||
+ !parse_numberll(argv[i], &start_offset, 1)) {
+ ERROR("%s: %s missing or invalid offset "
+ "size\n", argv[0], argv[i - 1]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-processors") == 0) {
+ if((++i == dest_index) || !parse_num(argv[i], &processors)) {
+ ERROR("%s: -processors missing or invalid "
+ "processor number\n", argv[0]);
+ exit(1);
+ }
+ if(processors < 1) {
+ ERROR("%s: -processors should be 1 or larger\n",
+ argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-mem") == 0) {
+ long long number;
+
+ if((++i == dest_index) ||
+ !parse_numberll(argv[i], &number, 1)) {
+ ERROR("%s: -mem missing or invalid mem size\n",
+ argv[0]);
+ exit(1);
+ }
+
+ /*
+ * convert from bytes to Mbytes, ensuring the value
+ * does not overflow a signed int
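+ * (2^51 bytes is 2^31 Mbytes, just past INT_MAX)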
+ */
+ if(number >= (1LL << 51)) {
+ ERROR("%s: -mem invalid mem size\n", argv[0]);
+ exit(1);
+ }
+
+ total_mem = number / 1048576;
+ if(total_mem < (SQUASHFS_LOWMEM / SQUASHFS_TAKE)) {
+ ERROR("%s: -mem should be %d Mbytes or "
+ "larger\n", argv[0],
+ SQUASHFS_LOWMEM / SQUASHFS_TAKE);
+ exit(1);
+ }
+ calculate_queue_sizes(total_mem, &readq, &fragq,
+ &bwriteq, &fwriteq);
+ } else if(strcmp(argv[i], "-mem-percent") == 0) {
+ int percent, phys_mem;
+
+ /*
+ * A percentage of 75% or larger is dealt with later.
+ * In the same way, a fixed mem size of more than 75%
+ * of memory is dealt with later.
+ */
+ if((++i == dest_index) ||
+ !parse_number(argv[i], &percent, 1) ||
+ (percent < 1)) {
+ ERROR("%s: -mem-percent missing or invalid "
+ "percentage: it should be 1 - 75%\n",
+ argv[0]);
+ exit(1);
+ }
+
+ phys_mem = get_physical_memory();
+
+ if(phys_mem == 0) {
+ ERROR("%s: -mem-percent unable to get physical "
+ "memory, use -mem instead\n", argv[0]);
+ exit(1);
+ }
+
+ if(multiply_overflow(phys_mem, percent)) {
+ ERROR("%s: -mem-percent requested phys mem too "
+ "large\n", argv[0]);
+ exit(1);
+ }
+
+ total_mem = phys_mem * percent / 100;
+
+ if(total_mem < (SQUASHFS_LOWMEM / SQUASHFS_TAKE)) {
+ ERROR("%s: -mem-percent mem too small, should "
+ "be %d Mbytes or larger\n", argv[0],
+ SQUASHFS_LOWMEM / SQUASHFS_TAKE);
+ exit(1);
+ }
+ calculate_queue_sizes(total_mem, &readq, &fragq,
+ &bwriteq, &fwriteq);
+ } else if(strcmp(argv[i], "-mem-default") == 0) {
+ printf("%d\n", total_mem);
+ exit(0);
+ } else if(strcmp(argv[i], "-b") == 0) {
+ if(++i == dest_index) {
+ ERROR("%s: -b missing block size\n", argv[0]);
+ exit(1);
+ }
+ if(!parse_number(argv[i], &block_size, 1)) {
+ ERROR("%s: -b invalid block size\n", argv[0]);
+ exit(1);
+ }
+ if((block_log = slog(block_size)) == 0) {
+ ERROR("%s: -b block size not power of two or "
+ "not between 4096 and 1Mbyte\n",
+ argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-ef") == 0) {
+ if(++i == dest_index) {
+ ERROR("%s: -ef missing filename\n", argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-no-duplicates") == 0)
+ duplicate_checking = FALSE;
+
+ else if(strcmp(argv[i], "-no-fragments") == 0)
+ no_fragments = TRUE;
+
+ else if(strcmp(argv[i], "-no-tailends") == 0)
+ always_use_fragments = FALSE;
+
+ else if(strcmp(argv[i], "-all-root") == 0 ||
+ strcmp(argv[i], "-root-owned") == 0) {
+ global_uid = global_gid = 0;
+ global_uid_opt = global_gid_opt = TRUE;
+ } else if(strcmp(argv[i], "-force-uid") == 0) {
+ if(++i == dest_index) {
+ ERROR("%s: -force-uid missing uid or user name\n",
+ argv[0]);
+ exit(1);
+ }
+
+ res = get_uid_from_arg(argv[i], &global_uid);
+ if(res) {
+ if(res == -2)
+ ERROR("%s: -force-uid uid out of range\n",
+ argv[0]);
+ else
+ ERROR("%s: -force-uid invalid uid or "
+ "unknown user name\n", argv[0]);
+ exit(1);
+ }
+ global_uid_opt = TRUE;
+ } else if(strcmp(argv[i], "-force-gid") == 0) {
+ if(++i == dest_index) {
+ ERROR("%s: -force-gid missing gid or group name\n",
+ argv[0]);
+ exit(1);
+ }
+
+ res = get_gid_from_arg(argv[i], &global_gid);
+ if(res) {
+ if(res == -2)
+ ERROR("%s: -force-gid gid out of range"
+ "\n", argv[0]);
+ else
+ ERROR("%s: -force-gid invalid gid or "
+ "unknown group name\n", argv[0]);
+ exit(1);
+ }
+ global_gid_opt = TRUE;
+ } else if(strcmp(argv[i], "-pseudo-override") == 0)
+ pseudo_override = TRUE;
+ else if(strcmp(argv[i], "-noI") == 0 ||
+ strcmp(argv[i], "-noInodeCompression") == 0)
+ noI = TRUE;
+
+ else if(strcmp(argv[i], "-noId") == 0 ||
+ strcmp(argv[i], "-noIdTableCompression") == 0)
+ noId = TRUE;
+
+ else if(strcmp(argv[i], "-noD") == 0 ||
+ strcmp(argv[i], "-noDataCompression") == 0)
+ noD = TRUE;
+
+ else if(strcmp(argv[i], "-noF") == 0 ||
+ strcmp(argv[i], "-noFragmentCompression") == 0)
+ noF = TRUE;
+
+ else if(strcmp(argv[i], "-noX") == 0 ||
+ strcmp(argv[i], "-noXattrCompression") == 0)
+ noX = TRUE;
+
+ else if(strcmp(argv[i], "-no-compression") == 0)
+ noI = noD = noF = noX = TRUE;
+
+ else if(strcmp(argv[i], "-no-xattrs") == 0) {
+ if(xattr_exclude_preg || xattr_include_preg ||
+ add_xattrs()) {
+ ERROR("%s: -no-xattrs should not be used in "
+ "combination with -xattrs-* options\n",
+ argv[0]);
+ exit(1);
+ }
+
+ no_xattrs = TRUE;
+
+ } else if(strcmp(argv[i], "-xattrs") == 0) {
+ if(xattrs_supported())
+ no_xattrs = FALSE;
+ else {
+ ERROR("%s: xattrs are unsupported in "
+ "this build\n", argv[0]);
+ exit(1);
+ }
+
+ } else if(strcmp(argv[i], "-xattrs-exclude") == 0) {
+ if(!xattrs_supported()) {
+ ERROR("%s: xattrs are unsupported in "
+ "this build\n", argv[0]);
+ exit(1);
+ } else if(++i == dest_index) {
+ ERROR("%s: -xattrs-exclude missing regex pattern\n",
+ argv[0]);
+ exit(1);
+ } else {
+ xattr_exclude_preg = xattr_regex(argv[i], "exclude");
+ no_xattrs = FALSE;
+ }
+ } else if(strcmp(argv[i], "-xattrs-include") == 0) {
+ if(!xattrs_supported()) {
+ ERROR("%s: xattrs are unsupported in "
+ "this build\n", argv[0]);
+ exit(1);
+ } else if(++i == dest_index) {
+ ERROR("%s: -xattrs-include missing regex pattern\n",
+ argv[0]);
+ exit(1);
+ } else {
+ xattr_include_preg = xattr_regex(argv[i], "include");
+ no_xattrs = FALSE;
+ }
+ } else if(strcmp(argv[i], "-xattrs-add") == 0) {
+ if(!xattrs_supported()) {
+ ERROR("%s: xattrs are unsupported in "
+ "this build\n", argv[0]);
+ exit(1);
+ } else if(++i == dest_index) {
+ ERROR("%s: -xattrs-add missing xattr argument\n",
+ argv[0]);
+ exit(1);
+ } else {
+ xattrs_add(argv[i]);
+ no_xattrs = FALSE;
+ }
+
+ } else if(strcmp(argv[i], "-nopad") == 0)
+ nopad = TRUE;
+
+ else if(strcmp(argv[i], "-info") == 0)
+ silent = FALSE;
+
+ else if(strcmp(argv[i], "-force") == 0)
+ appending = FALSE;
+
+ else if(strcmp(argv[i], "-quiet") == 0)
+ quiet = TRUE;
+
+ else if(strcmp(argv[i], "-exit-on-error") == 0)
+ exit_on_error = TRUE;
+
+ else if(strcmp(argv[i], "-percentage") == 0) {
+ progressbar_percentage();
+ progress = silent = TRUE;
+
+ } else if(strcmp(argv[i], "-version") == 0) {
+ print_version("sqfstar");
+ } else {
+ ERROR("%s: invalid option\n\n", argv[0]);
+ print_sqfstar_options(stderr, argv[0], total_mem);
+ exit(1);
+ }
+ }
+
+ check_env_var();
+
+ /*
+ * The -noI option implies -noId for backwards compatibility, so reset noId
+ * if both have been specified
+ */
+ if(noI && noId)
+ noId = FALSE;
+
+ /*
+ * Some compressors may need the options to be checked for validity
+ * once all the options have been processed
+ */
+ res = compressor_options_post(comp, block_size);
+ if(res)
+ EXIT_MKSQUASHFS();
+
+ /*
+ * If the -info option has been selected then disable the
+ * progress bar unless it has been explicitly enabled with
+ * the -progress option
+ */
+ if(!silent)
+ progress = force_progress;
+
+ /*
+ * Sort all the xattr-add options now they're all processed
+ */
+ sort_xattr_add_list();
+
+ /*
+ * If -pseudo-override option has been specified and there are
+ * no pseudo files then reset option. -pseudo-override relies
+ * on dir_scan2() being run, which won't be if there's no
+ * actions or pseudo files
+ */
+ if(pseudo_override && !get_pseudo())
+ pseudo_override = FALSE;
+
+#ifdef SQUASHFS_TRACE
+ /*
+ * Disable progress bar if full debug tracing is enabled.
+ * The progress bar in this case just gets in the way of the
+ * debug trace output
+ */
+ progress = FALSE;
+#endif
+
+ destination_file = argv[dest_index];
+ if(stat(destination_file, &buf) == -1) {
+ if(errno == ENOENT) { /* Does not exist */
+ appending = FALSE;
+ fd = open(destination_file, O_CREAT | O_TRUNC | O_RDWR,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ if(fd == -1) {
+ perror("Could not create destination file");
+ exit(1);
+ }
+
+ /* ensure Sqfstar doesn't try to read
+ * the destination file as input, which
+ * will result in an I/O loop */
+ if(stat(destination_file, &buf) == -1) {
+ /* disappeared after creating? */
+ perror("Could not stat destination file");
+ exit(1);
+ }
+ ADD_ENTRY(buf);
+ } else {
+ perror("Could not stat destination file");
+ exit(1);
+ }
+
+ } else {
+ if(!S_ISBLK(buf.st_mode) && !S_ISREG(buf.st_mode)) {
+ ERROR("Destination not block device or regular file\n");
+ exit(1);
+ }
+
+ if(appending) {
+ ERROR("Appending is not supported reading tar files\n");
+ ERROR("To force Sqfstar to write to this %s "
+ "use -force\n", S_ISBLK(buf.st_mode) ?
+ "block device" : "file");
+ EXIT_MKSQUASHFS();
+ }
+
+ if(S_ISBLK(buf.st_mode)) {
+ if((fd = open(destination_file, O_RDWR)) == -1) {
+ perror("Could not open block device as "
+ "destination");
+ exit(1);
+ }
+ block_device = 1;
+
+ } else {
+ fd = open(destination_file, O_TRUNC | O_RDWR);
+ if(fd == -1) {
+ perror("Could not open regular file for "
+ "writing as destination");
+ exit(1);
+ }
+ /* ensure Sqfstar doesn't try to read
+ * the destination file as input, which
+ * will result in an I/O loop */
+ ADD_ENTRY(buf);
+ }
+ }
+
+ /*
+ * process the exclude files - must be done after destination file has
+ * been possibly created
+ */
+ for(i = 1; i < dest_index; i++) {
+ if(strcmp(argv[i], "-ef") == 0)
+ /*
+ * Note presence of filename arg has already
+ * been checked
+ */
+ process_exclude_file(argv[++i]);
+ else if(option_with_arg(argv[i], sqfstar_option_table))
+ i++;
+ }
+
+ for(i = dest_index + 1; i < argc; i++)
+ add_exclude(argv[i]);
+
+ initialise_threads(readq, fragq, bwriteq, fwriteq, !appending,
+ destination_file);
+
+ res = compressor_init(comp, &stream, SQUASHFS_METADATA_SIZE, 0);
+ if(res)
+ BAD_ERROR("compressor_init failed\n");
+
+ dupl_block = malloc(1048576 * sizeof(struct file_info *));
+ if(dupl_block == NULL)
+ MEM_ERROR();
+
+ dupl_frag = malloc(block_size * sizeof(struct file_info *));
+ if(dupl_frag == NULL)
+ MEM_ERROR();
+
+ memset(dupl_block, 0, 1048576 * sizeof(struct file_info *));
+ memset(dupl_frag, 0, block_size * sizeof(struct file_info *));
+
+ comp_data = compressor_dump_options(comp, block_size, &size);
+
+ if(!quiet)
+ printf("Creating %d.%d filesystem on %s, block size %d.\n",
+ SQUASHFS_MAJOR, SQUASHFS_MINOR,
+ destination_file, block_size);
+
+ /*
+ * store any compressor specific options after the superblock,
+ * and set the COMP_OPT flag to show that the filesystem has
+ * compressor specific options
+ */
+ if(comp_data) {
+ unsigned short c_byte = size | SQUASHFS_COMPRESSED_BIT;
+
+ SQUASHFS_INSWAP_SHORTS(&c_byte, 1);
+ write_destination(fd, sizeof(struct squashfs_super_block),
+ sizeof(c_byte), &c_byte);
+ write_destination(fd, sizeof(struct squashfs_super_block) +
+ sizeof(c_byte), size, comp_data);
+ bytes = sizeof(struct squashfs_super_block) + sizeof(c_byte)
+ + size;
+ comp_opts = TRUE;
+ } else
+ bytes = sizeof(struct squashfs_super_block);
+
+ if(path)
+ paths = add_subdir(paths, path);
+
+ dump_actions();
+ dump_pseudos();
+
+ set_progressbar_state(progress);
+
+ inode = process_tar_file(progress);
+
+ sBlk.root_inode = inode;
+ sBlk.inodes = inode_count;
+ sBlk.s_magic = SQUASHFS_MAGIC;
+ sBlk.s_major = SQUASHFS_MAJOR;
+ sBlk.s_minor = SQUASHFS_MINOR;
+ sBlk.block_size = block_size;
+ sBlk.block_log = block_log;
+ sBlk.flags = SQUASHFS_MKFLAGS(noI, noD, noF, noX, noId, no_fragments,
+ always_use_fragments, duplicate_checking, exportable,
+ no_xattrs, comp_opts);
+ sBlk.mkfs_time = mkfs_time_opt ? mkfs_time : time(NULL);
+
+ disable_info();
+
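+ /* Flush any remaining queued fragments and wait for the outstanding
+ * fragment compressions to finish before telling the writer thread
+ * to complete */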
+ while((fragment = get_frag_action(fragment)))
+ write_fragment(*fragment);
+ if(!reproducible)
+ unlock_fragments();
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
+ pthread_mutex_lock(&fragment_mutex);
+ while(fragments_outstanding) {
+ pthread_mutex_unlock(&fragment_mutex);
+ pthread_testcancel();
+ sched_yield();
+ pthread_mutex_lock(&fragment_mutex);
+ }
+ pthread_cleanup_pop(1);
+
+ queue_put(to_writer, NULL);
+ if(queue_get(from_writer) != 0)
+ EXIT_MKSQUASHFS();
+
+ set_progressbar_state(FALSE);
+ write_filesystem_tables(&sBlk);
+
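+ /* Unless -nopad was specified, pad the filesystem to a multiple of
+ * 4 Kbytes */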
+ if(!nopad && (i = bytes & (4096 - 1))) {
+ char temp[4096] = {0};
+ write_destination(fd, bytes, 4096 - i, temp);
+ }
+
+ res = close(fd);
+
+ if(res == -1)
+ BAD_ERROR("Failed to close output filesystem, close returned %s\n",
+ strerror(errno));
+
+ if(recovery_file)
+ unlink(recovery_file);
+
+ if(!quiet)
+ print_summary();
+
+ if(logging)
+ fclose(log_fd);
+
+ return 0;
+}
+
+
+int main(int argc, char *argv[])
+{
+ struct stat buf, source_buf;
+ int res, i;
+ char *root_name = NULL;
+ squashfs_inode inode;
+ int readq;
+ int fragq;
+ int bwriteq;
+ int fwriteq;
+ int total_mem = get_default_phys_mem();
+ int progress = TRUE;
+ int force_progress = FALSE;
+ struct file_buffer **fragment = NULL;
+ char *command;
+
+ /* skip leading path components in invocation command */
+ for(command = argv[0] + strlen(argv[0]) - 1;
+ command >= argv[0] && command[0] != '/'; command--);
+
+ if(command < argv[0])
+ command = argv[0];
+ else
+ command++;
+
+ if(strcmp(command, "sqfstar") == 0)
+ return sqfstar(argc, argv);
+
+ if(argc > 1 && strcmp(argv[1], "-version") == 0) {
+ print_version("mksquashfs");
+ exit(0);
+ }
+
+ block_log = slog(block_size);
+ calculate_queue_sizes(total_mem, &readq, &fragq, &bwriteq, &fwriteq);
+
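+ /* Find the first option argument: everything before it is the
+ * source pathnames followed by the destination filesystem */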
+ for(i = 1; i < argc && (argv[i][0] != '-' || strcmp(argv[i], "-") == 0);
+ i++);
+
+ if(i < argc && (strcmp(argv[i], "-help") == 0 ||
+ strcmp(argv[i], "-h") == 0)) {
+ print_options(stdout, argv[0], total_mem);
+ exit(0);
+ }
+
+ if(i < argc && strcmp(argv[i], "-mem-default") == 0) {
+ printf("%d\n", total_mem);
+ exit(0);
+ }
+
+ if(i < 3) {
+ print_options(stderr, argv[0], total_mem);
+ exit(1);
+ }
+
+ option_offset = i;
+ destination_file = argv[i - 1];
+
+ if(argv[1][0] != '-') {
+ source_path = argv + 1;
+ source = i - 2;
+ } else {
+ source_path = NULL;
+ source = 0;
+ }
+
+ /*
+ * Scan the command line for -comp xxx option, this is to ensure
+ * any -X compressor specific options are passed to the
+ * correct compressor
+ */
+ for(; i < argc; i++) {
+ struct compressor *prev_comp = comp;
+
+ if(strcmp(argv[i], "-comp") == 0) {
+ if(++i == argc) {
+ ERROR("%s: -comp missing compression type\n",
+ argv[0]);
+ exit(1);
+ }
+ comp = lookup_compressor(argv[i]);
+ if(!comp->supported) {
+ ERROR("%s: Compressor \"%s\" is not supported!"
+ "\n", argv[0], argv[i]);
+ ERROR("%s: Compressors available:\n", argv[0]);
+ display_compressors(stderr, "", COMP_DEFAULT);
+ exit(1);
+ }
+ if(prev_comp != NULL && prev_comp != comp) {
+ ERROR("%s: -comp multiple conflicting -comp"
+ " options specified on command line"
+ ", previously %s, now %s\n", argv[0],
+ prev_comp->name, comp->name);
+ exit(1);
+ }
+ compressor_opt_parsed = 1;
+
+ } else if(strcmp(argv[i], "-e") == 0)
+ break;
+ else if(option_with_arg(argv[i], option_table))
+ i++;
+ }
+
+ /*
+ * if no -comp option specified lookup default compressor. Note the
+ * Makefile ensures the default compressor has been built, and so we
+ * don't need to check for failure here
+ */
+ if(comp == NULL)
+ comp = lookup_compressor(COMP_DEFAULT);
+
+ /*
+ * Scan the command line for -cpiostyle, -tar and -pf xxx options, this
+ * is to ensure only one thing is trying to read from stdin
+ */
+ for(i = option_offset; i < argc; i++) {
+ if(strcmp(argv[i], "-cpiostyle") == 0)
+ cpiostyle = TRUE;
+ else if(strcmp(argv[i], "-cpiostyle0") == 0) {
+ cpiostyle = TRUE;
+ filename_terminator = '\0';
+ } else if(strcmp(argv[i], "-tar") == 0) {
+ tarfile = TRUE;
+ always_use_fragments = TRUE;
+ } else if(strcmp(argv[i], "-pf") == 0) {
+ if(++i == argc) {
+ ERROR("%s: -pf missing filename\n", argv[0]);
+ exit(1);
+ }
+ if(strcmp(argv[i], "-") == 0)
+ pseudo_stdin = TRUE;
+ } else if(strcmp(argv[i], "-e") == 0)
+ break;
+ else if(option_with_arg(argv[i], option_table))
+ i++;
+ }
+
+ /*
+ * Only one of cpiostyle, tar and pseudo file reading from stdin can
+ * be specified
+ */
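+ /* The test below is true exactly when two or more of the three
+ * flags are set: each of the first three clauses is false only when
+ * that flag is the sole one set, and the last clause is false when
+ * none are set */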
+ if((!cpiostyle || tarfile || pseudo_stdin) &&
+ (!tarfile || cpiostyle || pseudo_stdin) &&
+ (!pseudo_stdin || cpiostyle || tarfile) &&
+ (cpiostyle || tarfile || pseudo_stdin))
+ BAD_ERROR("Only one of cpiostyle, tar file or pseudo file "
+ "reading from stdin can be specified\n");
+
+ for(i = option_offset; i < argc; i++) {
+ if(strcmp(argv[i], "-ignore-zeros") == 0)
+ ignore_zeros = TRUE;
+ if(strcmp(argv[i], "-one-file-system") == 0)
+ one_file_system = TRUE;
+ else if(strcmp(argv[i], "-one-file-system-x") == 0)
+ one_file_system = one_file_system_x = TRUE;
+ else if(strcmp(argv[i], "-recovery-path") == 0) {
+ if(++i == argc) {
+ ERROR("%s: -recovery-path missing pathname\n",
+ argv[0]);
+ exit(1);
+ }
+ recovery_pathname = argv[i];
+ } else if(strcmp(argv[i], "-help") == 0 ||
+ strcmp(argv[i], "-h") == 0) {
+ print_options(stdout, argv[0], total_mem);
+ exit(0);
+ } else if(strcmp(argv[i], "-no-hardlinks") == 0)
+ no_hardlinks = TRUE;
+ else if(strcmp(argv[i], "-no-strip") == 0 ||
+ strcmp(argv[i], "-tarstyle") == 0)
+ tarstyle = TRUE;
+ else if(strcmp(argv[i], "-max-depth") == 0) {
+ if((++i == argc) || !parse_num_unsigned(argv[i], &max_depth)) {
+ ERROR("%s: %s missing or invalid value\n",
+ argv[0], argv[i - 1]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-throttle") == 0) {
+ if((++i == argc) || !parse_num(argv[i], &sleep_time)) {
+ ERROR("%s: %s missing or invalid value\n",
+ argv[0], argv[i - 1]);
+ exit(1);
+ }
+ if(sleep_time > 99) {
+ ERROR("%s: %s value should be between 0 and "
+ "99\n", argv[0], argv[i - 1]);
+ exit(1);
+ }
+ readq = 4;
+ } else if(strcmp(argv[i], "-limit") == 0) {
+ if((++i == argc) || !parse_num(argv[i], &sleep_time)) {
+ ERROR("%s: %s missing or invalid value\n",
+ argv[0], argv[i - 1]);
+ exit(1);
+ }
+ if(sleep_time < 1 || sleep_time > 100) {
+ ERROR("%s: %s value should be between 1 and "
+ "100\n", argv[0], argv[i - 1]);
+ exit(1);
+ }
+ sleep_time = 100 - sleep_time;
+ readq = 4;
+ } else if(strcmp(argv[i], "-mkfs-time") == 0 ||
+ strcmp(argv[i], "-fstime") == 0) {
+ if((++i == argc) ||
+ (!parse_num_unsigned(argv[i], &mkfs_time) &&
+ !exec_date(argv[i], &mkfs_time))) {
+ ERROR("%s: %s missing or invalid time "
+ "value\n", argv[0],
+ argv[i - 1]);
+ exit(1);
+ }
+ mkfs_time_opt = TRUE;
+ } else if(strcmp(argv[i], "-all-time") == 0) {
+ if((++i == argc) ||
+ (!parse_num_unsigned(argv[i], &all_time) &&
+ !exec_date(argv[i], &all_time))) {
+ ERROR("%s: %s missing or invalid time "
+ "value\n", argv[0],
+ argv[i - 1]);
+ exit(1);
+ }
+ all_time_opt = TRUE;
+ clamping = FALSE;
+ } else if(strcmp(argv[i], "-reproducible") == 0)
+ reproducible = TRUE;
+ else if(strcmp(argv[i], "-not-reproducible") == 0)
+ reproducible = FALSE;
+ else if(strcmp(argv[i], "-root-mode") == 0) {
+ if((++i == argc) || !parse_mode(argv[i], &root_mode)) {
+ ERROR("%s: -root-mode missing or invalid mode,"
+ " octal number <= 07777 expected\n",
+ argv[0]);
+ exit(1);
+ }
+ root_mode_opt = TRUE;
+ } else if(strcmp(argv[i], "-root-uid") == 0) {
+ if(++i == argc) {
+ ERROR("%s: -root-uid missing uid or user name\n",
+ argv[0]);
+ exit(1);
+ }
+
+ res = get_uid_from_arg(argv[i], &root_uid);
+ if(res) {
+ if(res == -2)
+ ERROR("%s: -root-uid uid out of range\n",
+ argv[0]);
+ else
+ ERROR("%s: -root-uid invalid uid or "
+ "unknown user name\n", argv[0]);
+ exit(1);
+ }
+ root_uid_opt = TRUE;
+ } else if(strcmp(argv[i], "-root-gid") == 0) {
+ if(++i == argc) {
+ ERROR("%s: -root-gid missing gid or group name\n",
+ argv[0]);
+ exit(1);
+ }
+
+ res = get_gid_from_arg(argv[i], &root_gid);
+ if(res) {
+ if(res == -2)
+ ERROR("%s: -root-gid gid out of range\n",
+ argv[0]);
+ else
+ ERROR("%s: -root-gid invalid gid or "
+ "unknown group name\n", argv[0]);
+ exit(1);
+ }
+ root_gid_opt = TRUE;
+ } else if(strcmp(argv[i], "-root-time") == 0) {
+ if((++i == argc) ||
+ (!parse_num_unsigned(argv[i], &root_time) &&
+ !exec_date(argv[i], &root_time))) {
+ ERROR("%s: -root-time missing or invalid time\n",
+ argv[0]);
+ exit(1);
+ }
+ root_time_opt = TRUE;
+ } else if(strcmp(argv[i], "-default-mode") == 0) {
+ if((++i == argc) || !parse_mode(argv[i], &default_mode)) {
+ ERROR("%s: -default-mode missing or invalid mode,"
+ " octal number <= 07777 expected\n", argv[0]);
+ exit(1);
+ }
+ root_mode = default_mode;
+ default_mode_opt = root_mode_opt = TRUE;
+ } else if(strcmp(argv[i], "-default-uid") == 0) {
+ if((++i == argc) || !parse_num_unsigned(argv[i], &default_uid)) {
+ ERROR("%s: -default-uid missing or invalid uid\n",
+ argv[0]);
+ exit(1);
+ }
+ root_uid = default_uid;
+ default_uid_opt = root_uid_opt = TRUE;
+ } else if(strcmp(argv[i], "-default-gid") == 0) {
+ if((++i == argc) || !parse_num_unsigned(argv[i], &default_gid)) {
+ ERROR("%s: -default-gid missing or invalid gid\n",
+ argv[0]);
+ exit(1);
+ }
+ root_gid = default_gid;
+ default_gid_opt = root_gid_opt = TRUE;
+ } else if(strcmp(argv[i], "-log") == 0) {
+ if(++i == argc) {
+ ERROR("%s: %s missing log file\n",
+ argv[0], argv[i - 1]);
+ exit(1);
+ }
+ open_log_file(argv[i]);
+
+ } else if(strcmp(argv[i], "-action") == 0 ||
+ strcmp(argv[i], "-a") ==0) {
+ if(++i == argc) {
+ ERROR("%s: %s missing action\n",
+ argv[0], argv[i - 1]);
+ exit(1);
+ }
+ res = parse_action(argv[i], ACTION_LOG_NONE);
+ if(res == 0)
+ exit(1);
+
+ } else if(strcmp(argv[i], "-log-action") == 0 ||
+ strcmp(argv[i], "-va") ==0) {
+ if(++i == argc) {
+ ERROR("%s: %s missing action\n",
+ argv[0], argv[i - 1]);
+ exit(1);
+ }
+ res = parse_action(argv[i], ACTION_LOG_VERBOSE);
+ if(res == 0)
+ exit(1);
+
+ } else if(strcmp(argv[i], "-true-action") == 0 ||
+ strcmp(argv[i], "-ta") ==0) {
+ if(++i == argc) {
+ ERROR("%s: %s missing action\n",
+ argv[0], argv[i - 1]);
+ exit(1);
+ }
+ res = parse_action(argv[i], ACTION_LOG_TRUE);
+ if(res == 0)
+ exit(1);
+
+ } else if(strcmp(argv[i], "-false-action") == 0 ||
+ strcmp(argv[i], "-fa") ==0) {
+ if(++i == argc) {
+ ERROR("%s: %s missing action\n",
+ argv[0], argv[i - 1]);
+ exit(1);
+ }
+ res = parse_action(argv[i], ACTION_LOG_FALSE);
+ if(res == 0)
+ exit(1);
+
+ } else if(strcmp(argv[i], "-action-file") == 0 ||
+ strcmp(argv[i], "-af") ==0) {
+ if(++i == argc) {
+ ERROR("%s: %s missing filename\n", argv[0],
+ argv[i - 1]);
+ exit(1);
+ }
+ if(read_action_file(argv[i], ACTION_LOG_NONE) == FALSE)
+ exit(1);
+
+ } else if(strcmp(argv[i], "-log-action-file") == 0 ||
+ strcmp(argv[i], "-vaf") ==0) {
+ if(++i == argc) {
+ ERROR("%s: %s missing filename\n", argv[0],
+ argv[i - 1]);
+ exit(1);
+ }
+ if(read_action_file(argv[i], ACTION_LOG_VERBOSE) == FALSE)
+ exit(1);
+
+ } else if(strcmp(argv[i], "-true-action-file") == 0 ||
+ strcmp(argv[i], "-taf") ==0) {
+ if(++i == argc) {
+ ERROR("%s: %s missing filename\n", argv[0],
+ argv[i - 1]);
+ exit(1);
+ }
+ if(read_action_file(argv[i], ACTION_LOG_TRUE) == FALSE)
+ exit(1);
+
+ } else if(strcmp(argv[i], "-false-action-file") == 0 ||
+ strcmp(argv[i], "-faf") ==0) {
+ if(++i == argc) {
+ ERROR("%s: %s missing filename\n", argv[0],
+ argv[i - 1]);
+ exit(1);
+ }
+ if(read_action_file(argv[i], ACTION_LOG_FALSE) == FALSE)
+ exit(1);
+
+ } else if(strncmp(argv[i], "-X", 2) == 0) {
+ int args;
+
+ if(strcmp(argv[i] + 2, "help") == 0)
+ goto print_compressor_options;
+
+ args = compressor_options(comp, argv + i, argc - i);
+ if(args < 0) {
+ if(args == -1) {
+ ERROR("%s: Unrecognised compressor"
+ " option %s\n", argv[0],
+ argv[i]);
+ if(!compressor_opt_parsed)
+ ERROR("%s: Did you forget to"
+ " specify -comp?\n",
+ argv[0]);
+print_compressor_options:
+ ERROR("%s: selected compressor \"%s\""
+ ". Options supported: %s\n",
+ argv[0], comp->name,
+ comp->usage ? "" : "none");
+ if(comp->usage)
+ comp->usage(stderr);
+ }
+ exit(1);
+ }
+ i += args;
+
+ } else if(strcmp(argv[i], "-pf") == 0) {
+ if(read_pseudo_file(argv[++i], destination_file) == FALSE)
+ exit(1);
+ } else if(strcmp(argv[i], "-p") == 0) {
+ if(++i == argc) {
+ ERROR("%s: -p missing pseudo file definition\n",
+ argv[0]);
+ exit(1);
+ }
+ if(read_pseudo_definition(argv[i], destination_file) == FALSE)
+ exit(1);
+ } else if(strcmp(argv[i], "-recover") == 0) {
+ if(++i == argc) {
+ ERROR("%s: -recover missing recovery file\n",
+ argv[0]);
+ exit(1);
+ }
+ read_recovery_data(argv[i], destination_file);
+ } else if(strcmp(argv[i], "-no-recovery") == 0)
+ recover = FALSE;
+ else if(strcmp(argv[i], "-wildcards") == 0) {
+ old_exclude = FALSE;
+ use_regex = FALSE;
+ } else if(strcmp(argv[i], "-regex") == 0) {
+ old_exclude = FALSE;
+ use_regex = TRUE;
+ } else if(strcmp(argv[i], "-no-sparse") == 0)
+ sparse_files = FALSE;
+ else if(strcmp(argv[i], "-no-progress") == 0)
+ progress = FALSE;
+ else if(strcmp(argv[i], "-progress") == 0)
+ force_progress = TRUE;
+ else if(strcmp(argv[i], "-exports") == 0)
+ exportable = TRUE;
+ else if(strcmp(argv[i], "-no-exports") == 0)
+ exportable = FALSE;
+ else if(strcmp(argv[i], "-offset") == 0 ||
+ strcmp(argv[i], "-o") == 0) {
+ if((++i == argc) ||
+ !parse_numberll(argv[i], &start_offset, 1)) {
+ ERROR("%s: %s missing or invalid offset "
+ "size\n", argv[0], argv[i - 1]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-processors") == 0) {
+ if((++i == argc) || !parse_num(argv[i], &processors)) {
+ ERROR("%s: -processors missing or invalid "
+ "processor number\n", argv[0]);
+ exit(1);
+ }
+ if(processors < 1) {
+ ERROR("%s: -processors should be 1 or larger\n",
+ argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-read-queue") == 0) {
+ if((++i == argc) || !parse_num(argv[i], &readq)) {
+ ERROR("%s: -read-queue missing or invalid "
+ "queue size\n", argv[0]);
+ exit(1);
+ }
+ if(readq < 1) {
+ ERROR("%s: -read-queue should be 1 megabyte or "
+ "larger\n", argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-write-queue") == 0) {
+ if((++i == argc) || !parse_num(argv[i], &bwriteq)) {
+ ERROR("%s: -write-queue missing or invalid "
+ "queue size\n", argv[0]);
+ exit(1);
+ }
+ if(bwriteq < 2) {
+ ERROR("%s: -write-queue should be 2 megabytes "
+ "or larger\n", argv[0]);
+ exit(1);
+ }
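+ /* split the requested write queue between the block writer and
+ * fragment writer queues */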
+ fwriteq = bwriteq >> 1;
+ bwriteq -= fwriteq;
+ } else if(strcmp(argv[i], "-fragment-queue") == 0) {
+ if((++i == argc) || !parse_num(argv[i], &fragq)) {
+ ERROR("%s: -fragment-queue missing or invalid "
+ "queue size\n", argv[0]);
+ exit(1);
+ }
+ if(fragq < 1) {
+ ERROR("%s: -fragment-queue should be 1 "
+ "megabyte or larger\n", argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-mem") == 0) {
+ long long number;
+
+ if((++i == argc) ||
+ !parse_numberll(argv[i], &number, 1)) {
+ ERROR("%s: -mem missing or invalid mem size\n",
+ argv[0]);
+ exit(1);
+ }
+
+ /*
+ * convert from bytes to Mbytes, ensuring the value
+ * does not overflow a signed int
+ */
+ if(number >= (1LL << 51)) {
+ ERROR("%s: -mem invalid mem size\n", argv[0]);
+ exit(1);
+ }
+
+ total_mem = number / 1048576;
+ if(total_mem < (SQUASHFS_LOWMEM / SQUASHFS_TAKE)) {
+ ERROR("%s: -mem should be %d Mbytes or "
+ "larger\n", argv[0],
+ SQUASHFS_LOWMEM / SQUASHFS_TAKE);
+ exit(1);
+ }
+ calculate_queue_sizes(total_mem, &readq, &fragq,
+ &bwriteq, &fwriteq);
+ } else if(strcmp(argv[i], "-mem-percent") == 0) {
+ int percent, phys_mem;
+
+ /*
+ * A percentage of 75% or larger is dealt with later.
+ * In the same way, a fixed mem size of more than 75%
+ * of memory is dealt with later.
+ */
+ if((++i == argc) ||
+ !parse_number(argv[i], &percent, 1) ||
+ (percent < 1)) {
+ ERROR("%s: -mem-percent missing or invalid "
+ "percentage: it should be 1 - 75%\n",
+ argv[0]);
+ exit(1);
+ }
+
+ phys_mem = get_physical_memory();
+
+ if(phys_mem == 0) {
+ ERROR("%s: -mem-percent unable to get physical "
+ "memory, use -mem instead\n", argv[0]);
+ exit(1);
+ }
+
+ if(multiply_overflow(phys_mem, percent)) {
+ ERROR("%s: -mem-percent requested phys mem too "
+ "large\n", argv[0]);
+ exit(1);
+ }
+
+ total_mem = phys_mem * percent / 100;
+
+ if(total_mem < (SQUASHFS_LOWMEM / SQUASHFS_TAKE)) {
+ ERROR("%s: -mem-percent mem too small, should "
+ "be %d Mbytes or larger\n", argv[0],
+ SQUASHFS_LOWMEM / SQUASHFS_TAKE);
+ exit(1);
+ }
+
+ calculate_queue_sizes(total_mem, &readq, &fragq,
+ &bwriteq, &fwriteq);
+ } else if(strcmp(argv[i], "-mem-default") == 0) {
+ printf("%d\n", total_mem);
+ exit(0);
+ } else if(strcmp(argv[i], "-b") == 0) {
+ if(++i == argc) {
+ ERROR("%s: -b missing block size\n", argv[0]);
+ exit(1);
+ }
+ if(!parse_number(argv[i], &block_size, 1)) {
+ ERROR("%s: -b invalid block size\n", argv[0]);
+ exit(1);
+ }
+ if((block_log = slog(block_size)) == 0) {
+ ERROR("%s: -b block size not power of two or "
+ "not between 4096 and 1Mbyte\n",
+ argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-ef") == 0) {
+ if(++i == argc) {
+ ERROR("%s: -ef missing filename\n", argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-no-duplicates") == 0)
+ duplicate_checking = FALSE;
+
+ else if(strcmp(argv[i], "-no-fragments") == 0)
+ no_fragments = TRUE;
+
+ else if(strcmp(argv[i], "-tailends") == 0 ||
+ strcmp(argv[i], "-always-use-fragments") == 0)
+ always_use_fragments = TRUE;
+
+ else if(strcmp(argv[i], "-no-tailends") == 0)
+ always_use_fragments = FALSE;
+
+ else if(strcmp(argv[i], "-sort") == 0) {
+ if(++i == argc) {
+ ERROR("%s: -sort missing filename\n", argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-all-root") == 0 ||
+ strcmp(argv[i], "-root-owned") == 0) {
+ global_uid = global_gid = 0;
+ global_uid_opt = global_gid_opt = TRUE;
+ } else if(strcmp(argv[i], "-force-uid") == 0) {
+ if(++i == argc) {
+ ERROR("%s: -force-uid missing uid or user name\n",
+ argv[0]);
+ exit(1);
+ }
+
+ res = get_uid_from_arg(argv[i], &global_uid);
+ if(res) {
+ if(res == -2)
+ ERROR("%s: -force-uid uid out of range\n",
+ argv[0]);
+ else
+ ERROR("%s: -force-uid invalid uid or "
+ "unknown user name\n", argv[0]);
+ exit(1);
+ }
+ global_uid_opt = TRUE;
+ } else if(strcmp(argv[i], "-force-gid") == 0) {
+ if(++i == argc) {
+ ERROR("%s: -force-gid missing gid or group name\n",
+ argv[0]);
+ exit(1);
+ }
+
+ res = get_gid_from_arg(argv[i], &global_gid);
+ if(res) {
+ if(res == -2)
+ ERROR("%s: -force-gid gid out of range"
+ "\n", argv[0]);
+ else
+ ERROR("%s: -force-gid invalid gid or "
+ "unknown group name\n", argv[0]);
+ exit(1);
+ }
+ global_gid_opt = TRUE;
+ } else if(strcmp(argv[i], "-pseudo-override") == 0)
+ pseudo_override = TRUE;
+ else if(strcmp(argv[i], "-noI") == 0 ||
+ strcmp(argv[i], "-noInodeCompression") == 0)
+ noI = TRUE;
+
+ else if(strcmp(argv[i], "-noId") == 0 ||
+ strcmp(argv[i], "-noIdTableCompression") == 0)
+ noId = TRUE;
+
+ else if(strcmp(argv[i], "-noD") == 0 ||
+ strcmp(argv[i], "-noDataCompression") == 0)
+ noD = TRUE;
+
+ else if(strcmp(argv[i], "-noF") == 0 ||
+ strcmp(argv[i], "-noFragmentCompression") == 0)
+ noF = TRUE;
+
+ else if(strcmp(argv[i], "-noX") == 0 ||
+ strcmp(argv[i], "-noXattrCompression") == 0)
+ noX = TRUE;
+
+ else if(strcmp(argv[i], "-no-compression") == 0)
+ noI = noD = noF = noX = TRUE;
+
+ else if(strcmp(argv[i], "-no-xattrs") == 0) {
+ if(xattr_exclude_preg || xattr_include_preg ||
+ add_xattrs()) {
+ ERROR("%s: -no-xattrs should not be used in "
+ "combination with -xattrs-* options\n",
+ argv[0]);
+ exit(1);
+ }
+
+ no_xattrs = TRUE;
+
+ } else if(strcmp(argv[i], "-xattrs") == 0) {
+ if(xattrs_supported())
+ no_xattrs = FALSE;
+ else {
+ ERROR("%s: xattrs are unsupported in "
+ "this build\n", argv[0]);
+ exit(1);
+ }
+
+ } else if(strcmp(argv[i], "-xattrs-exclude") == 0) {
+ if(!xattrs_supported()) {
+ ERROR("%s: xattrs are unsupported in "
+ "this build\n", argv[0]);
+ exit(1);
+ } else if(++i == argc) {
+ ERROR("%s: -xattrs-exclude missing regex pattern\n",
+ argv[0]);
+ exit(1);
+ } else {
+ xattr_exclude_preg = xattr_regex(argv[i], "exclude");
+ no_xattrs = FALSE;
+ }
+
+ } else if(strcmp(argv[i], "-xattrs-include") == 0) {
+ if(!xattrs_supported()) {
+ ERROR("%s: xattrs are unsupported in "
+ "this build\n", argv[0]);
+ exit(1);
+ } else if(++i == argc) {
+ ERROR("%s: -xattrs-include missing regex pattern\n",
+ argv[0]);
+ exit(1);
+ } else {
+ xattr_include_preg = xattr_regex(argv[i], "include");
+ no_xattrs = FALSE;
+ }
+ } else if(strcmp(argv[i], "-xattrs-add") == 0) {
+ if(!xattrs_supported()) {
+ ERROR("%s: xattrs are unsupported in "
+ "this build\n", argv[0]);
+ exit(1);
+ } else if(++i == argc) {
+ ERROR("%s: -xattrs-add missing xattr argument\n",
+ argv[0]);
+ exit(1);
+ } else {
+ xattrs_add(argv[i]);
+ no_xattrs = FALSE;
+ }
+ } else if(strcmp(argv[i], "-nopad") == 0)
+ nopad = TRUE;
+
+ else if(strcmp(argv[i], "-info") == 0)
+ silent = FALSE;
+
+ else if(strcmp(argv[i], "-e") == 0)
+ break;
+
+ else if(strcmp(argv[i], "-noappend") == 0)
+ appending = FALSE;
+
+ else if(strcmp(argv[i], "-quiet") == 0)
+ quiet = TRUE;
+
+ else if(strcmp(argv[i], "-keep-as-directory") == 0)
+ keep_as_directory = TRUE;
+
+ else if(strcmp(argv[i], "-exit-on-error") == 0)
+ exit_on_error = TRUE;
+
+ else if(strcmp(argv[i], "-root-becomes") == 0) {
+ if(++i == argc) {
+ ERROR("%s: -root-becomes: missing name\n",
+ argv[0]);
+ exit(1);
+ }
+ root_name = argv[i];
+ } else if(strcmp(argv[i], "-percentage") == 0) {
+ progressbar_percentage();
+ progress = silent = TRUE;
+ } else if(strcmp(argv[i], "-version") == 0) {
+ print_version("mksquashfs");
+ } else if(strcmp(argv[i], "-cpiostyle") == 0 ||
+ strcmp(argv[i], "-cpiostyle0") == 0 ||
+ strcmp(argv[i], "-tar") == 0) {
+ /* parsed previously */
+ } else if(strcmp(argv[i], "-comp") == 0) {
+ /* parsed previously */
+ i++;
+ } else {
+ ERROR("%s: invalid option\n\n", argv[0]);
+ print_options(stderr, argv[0], total_mem);
+ exit(1);
+ }
+ }
+
+ check_env_var();
+
+ /* If cpiostyle is set, then file names will be read in
+ * from standard input. We do not expect to have any sources
+ * specified on the command line */
+ if(cpiostyle && source)
+ BAD_ERROR("Sources on the command line should be -, "
+ "when using -cpiostyle[0] options\n");
+
+ /* If the -tar option is set, then files will be read in
+ * from standard input. We do not expect to have any sources
+ * specified on the command line */
+ if(tarfile && source)
+ BAD_ERROR("Sources on the command line should be -, "
+ "when using -tar option\n");
+
+ /* If -tar option is set, then check that actions have not been
+ * specified, which are unsupported with tar file reading
+ */
+ if(tarfile && any_actions())
+ BAD_ERROR("Actions are unsupported when reading tar files\n");
+
+ /*
+ * The -noI option implies -noId for backwards compatibility, so reset
+ * noId if both have been specified
+ */
+ if(noI && noId)
+ noId = FALSE;
+
+ /*
+ * Some compressors may need the options to be checked for validity
+ * once all the options have been processed
+ */
+ res = compressor_options_post(comp, block_size);
+ if(res)
+ EXIT_MKSQUASHFS();
+
+ /*
+ * If the -info option has been selected then disable the
+ * progress bar unless it has been explicitly enabled with
+ * the -progress option
+ */
+ if(!silent)
+ progress = force_progress;
+
+ /*
+ * Sort all the xattr-add options now they're all processed
+ */
+ sort_xattr_add_list();
+
+ /*
+ * If -pseudo-override option has been specified and there are
+ * no pseudo files then reset option. -pseudo-override relies
+ * on dir_scan2() being run, which won't be if there's no
+ * actions or pseudo files
+ */
+ if(pseudo_override && !get_pseudo())
+ pseudo_override = FALSE;
+
+#ifdef SQUASHFS_TRACE
+ /*
+ * Disable progress bar if full debug tracing is enabled.
+ * The progress bar in this case just gets in the way of the
+ * debug trace output
+ */
+ progress = FALSE;
+#endif
+
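+ /* With -one-file-system record each source's device number, so the
+ * directory scan can avoid descending into a different filesystem */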
+ if(one_file_system && source > 1) {
+ source_dev = malloc(source * sizeof(dev_t));
+ if(source_dev == NULL)
+ MEM_ERROR();
+ }
+
+ for(i = 0; i < source; i++) {
+ if(lstat(source_path[i], &source_buf) == -1) {
+ fprintf(stderr, "Cannot stat source directory \"%s\" "
+ "because %s\n", source_path[i],
+ strerror(errno));
+ EXIT_MKSQUASHFS();
+ }
+
+ if(one_file_system) {
+ if(source > 1)
+ source_dev[i] = source_buf.st_dev;
+ else
+ cur_dev = source_buf.st_dev;
+ }
+ }
+
+ if(stat(destination_file, &buf) == -1) {
+ if(errno == ENOENT) { /* Does not exist */
+ appending = FALSE;
+ fd = open(destination_file, O_CREAT | O_TRUNC | O_RDWR,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ if(fd == -1) {
+ perror("Could not create destination file");
+ exit(1);
+ }
+
+ /* ensure Mksquashfs doesn't try to read
+ * the destination file as input, which
+ * will result in an I/O loop */
+ if(stat(destination_file, &buf) == -1) {
+ /* disappeared after creating? */
+ perror("Could not stat destination file");
+ exit(1);
+ }
+ ADD_ENTRY(buf);
+ } else {
+ perror("Could not stat destination file");
+ exit(1);
+ }
+
+ } else {
+ if(!S_ISBLK(buf.st_mode) && !S_ISREG(buf.st_mode)) {
+ ERROR("Destination not block device or regular file\n");
+ exit(1);
+ }
+
+ if(tarfile && appending) {
+ ERROR("Appending is not supported reading tar files\n");
+ ERROR("To force Mksquashfs to write to this %s "
+ "use -noappend\n", S_ISBLK(buf.st_mode) ?
+ "block device" : "file");
+ EXIT_MKSQUASHFS();
+ }
+
+ if(S_ISBLK(buf.st_mode)) {
+ if((fd = open(destination_file, O_RDWR)) == -1) {
+ perror("Could not open block device as "
+ "destination");
+ exit(1);
+ }
+ block_device = 1;
+
+ } else {
+ fd = open(destination_file, (!appending ? O_TRUNC : 0) |
+ O_RDWR);
+ if(fd == -1) {
+ perror("Could not open regular file for "
+ "writing as destination");
+ exit(1);
+ }
+ /* ensure Mksquashfs doesn't try to read
+ * the destination file as input, which
+ * will result in an I/O loop */
+ ADD_ENTRY(buf);
+ }
+ }
+
+ /*
+ * process the exclude files - must be done after destination file has
+ * been possibly created
+ */
+ for(i = option_offset; i < argc; i++)
+ if(strcmp(argv[i], "-ef") == 0)
+ /*
+ * Note presence of filename arg has already
+ * been checked
+ */
+ process_exclude_file(argv[++i]);
+ else if(strcmp(argv[i], "-e") == 0)
+ break;
+ else if(option_with_arg(argv[i], option_table))
+ i++;
+
+ if(i != argc) {
+ if(++i == argc) {
+ ERROR("%s: -e missing arguments\n", argv[0]);
+ EXIT_MKSQUASHFS();
+ }
+ while(i < argc)
+ if(old_exclude)
+ old_add_exclude(argv[i++]);
+ else
+ add_exclude(argv[i++]);
+ }
+
+ /* process the sort files - must be done afer the exclude files */
+ for(i = option_offset; i < argc; i++)
+ if(strcmp(argv[i], "-sort") == 0) {
+ if(tarfile)
+ BAD_ERROR("Sorting files is unsupported when "
+ "reading tar files\n");
+
+ res = read_sort_file(argv[++i], source, source_path);
+ if(res == FALSE)
+ BAD_ERROR("Failed to read sort file\n");
+ sorted ++;
+ } else if(strcmp(argv[i], "-e") == 0)
+ break;
+ else if(option_with_arg(argv[i], option_table))
+ i++;
+
+ if(appending) {
+ comp = read_super(fd, &sBlk, destination_file);
+ if(comp == NULL) {
+ ERROR("Failed to read existing filesystem - will not "
+ "overwrite - ABORTING!\n");
+ ERROR("To force Mksquashfs to write to this %s "
+ "use -noappend\n", block_device ?
+ "block device" : "file");
+ EXIT_MKSQUASHFS();
+ }
+
+ block_log = slog(block_size = sBlk.block_size);
+ noI = SQUASHFS_UNCOMPRESSED_INODES(sBlk.flags);
+ noD = SQUASHFS_UNCOMPRESSED_DATA(sBlk.flags);
+ noF = SQUASHFS_UNCOMPRESSED_FRAGMENTS(sBlk.flags);
+ noX = SQUASHFS_UNCOMPRESSED_XATTRS(sBlk.flags);
+ noId = SQUASHFS_UNCOMPRESSED_IDS(sBlk.flags);
+ no_fragments = SQUASHFS_NO_FRAGMENTS(sBlk.flags);
+ always_use_fragments = SQUASHFS_ALWAYS_FRAGMENTS(sBlk.flags);
+ duplicate_checking = SQUASHFS_DUPLICATES(sBlk.flags);
+ exportable = SQUASHFS_EXPORTABLE(sBlk.flags);
+ no_xattrs = SQUASHFS_NO_XATTRS(sBlk.flags);
+ comp_opts = SQUASHFS_COMP_OPTS(sBlk.flags);
+ }
+
+ initialise_threads(readq, fragq, bwriteq, fwriteq, !appending,
+ destination_file);
+
+ res = compressor_init(comp, &stream, SQUASHFS_METADATA_SIZE, 0);
+ if(res)
+ BAD_ERROR("compressor_init failed\n");
+
+ dupl_block = malloc(1048576 * sizeof(struct file_info *));
+ if(dupl_block == NULL)
+ MEM_ERROR();
+
+ dupl_frag = malloc(block_size * sizeof(struct file_info *));
+ if(dupl_frag == NULL)
+ MEM_ERROR();
+
+ memset(dupl_block, 0, 1048576 * sizeof(struct file_info *));
+ memset(dupl_frag, 0, block_size * sizeof(struct file_info *));
+
+ if(!appending) {
+ int size;
+ void *comp_data = compressor_dump_options(comp, block_size,
+ &size);
+
+ if(!quiet)
+ printf("Creating %d.%d filesystem on %s, block size %d.\n",
+ SQUASHFS_MAJOR, SQUASHFS_MINOR,
+ destination_file, block_size);
+
+ /*
+ * store any compressor specific options after the superblock,
+ * and set the COMP_OPT flag to show that the filesystem has
+ * compressor specific options
+ */
+ if(comp_data) {
+ unsigned short c_byte = size | SQUASHFS_COMPRESSED_BIT;
+
+ SQUASHFS_INSWAP_SHORTS(&c_byte, 1);
+ write_destination(fd, sizeof(struct squashfs_super_block),
+ sizeof(c_byte), &c_byte);
+ write_destination(fd, sizeof(struct squashfs_super_block) +
+ sizeof(c_byte), size, comp_data);
+ bytes = sizeof(struct squashfs_super_block) + sizeof(c_byte)
+ + size;
+ comp_opts = TRUE;
+ } else
+ bytes = sizeof(struct squashfs_super_block);
+ } else {
+ unsigned int last_directory_block, inode_dir_file_size,
+ root_inode_size, inode_dir_start_block,
+ compressed_data, inode_dir_inode_number,
+ inode_dir_parent_inode;
+ unsigned int root_inode_start =
+ SQUASHFS_INODE_BLK(sBlk.root_inode),
+ root_inode_offset =
+ SQUASHFS_INODE_OFFSET(sBlk.root_inode);
+ int inode_dir_offset, uncompressed_data;
+
+ if((bytes = read_filesystem(root_name, fd, &sBlk, &inode_table,
+ &data_cache, &directory_table,
+ &directory_data_cache, &last_directory_block,
+ &inode_dir_offset, &inode_dir_file_size,
+ &root_inode_size, &inode_dir_start_block,
+ &file_count, &sym_count, &dev_count, &dir_count,
+ &fifo_count, &sock_count, &total_bytes,
+ &total_inode_bytes, &total_directory_bytes,
+ &inode_dir_inode_number,
+ &inode_dir_parent_inode, add_old_root_entry,
+ &fragment_table, &inode_lookup_table)) == 0) {
+ ERROR("Failed to read existing filesystem - will not "
+ "overwrite - ABORTING!\n");
+ ERROR("To force Mksquashfs to write to this block "
+ "device or file use -noappend\n");
+ EXIT_MKSQUASHFS();
+ }
+ if((fragments = sBlk.fragments)) {
+ fragment_table = realloc((char *) fragment_table,
+ ((fragments + FRAG_SIZE - 1) & ~(FRAG_SIZE - 1))
+ * sizeof(struct squashfs_fragment_entry));
+ if(fragment_table == NULL)
+ BAD_ERROR("Out of memory in save filesystem state\n");
+ }
+
+ if(!quiet) {
+ printf("Appending to existing %d.%d filesystem on "
+ "%s, block size %d\n", SQUASHFS_MAJOR,
+ SQUASHFS_MINOR, destination_file, block_size);
+ printf("All -b, -noI, -noD, -noF, -noX, -noId, "
+ "-no-duplicates, -no-fragments,\n"
+ "-always-use-fragments, -exportable and "
+ "-comp options ignored\n");
+ printf("\nIf appending is not wanted, please re-run "
+ "with -noappend specified!\n\n");
+ }
+
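+ /* Split the end of the existing directory table at a metadata block
+ * boundary: data below it is already compressed on disk, and the
+ * remainder is re-created in the uncompressed directory cache */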
+ compressed_data = ((long long) inode_dir_offset +
+ inode_dir_file_size) & ~(SQUASHFS_METADATA_SIZE - 1);
+ uncompressed_data = ((long long) inode_dir_offset +
+ inode_dir_file_size) & (SQUASHFS_METADATA_SIZE - 1);
+
+ /* save original filesystem state for restoring ... */
+ sfragments = fragments;
+ sbytes = bytes;
+ sinode_count = sBlk.inodes;
+ scache_bytes = root_inode_offset + root_inode_size;
+ sdirectory_cache_bytes = uncompressed_data;
+ sdata_cache = malloc(scache_bytes);
+ if(sdata_cache == NULL)
+ BAD_ERROR("Out of memory in save filesystem state\n");
+ sdirectory_data_cache = malloc(sdirectory_cache_bytes);
+ if(sdirectory_data_cache == NULL)
+ BAD_ERROR("Out of memory in save filesystem state\n");
+ memcpy(sdata_cache, data_cache, scache_bytes);
+ memcpy(sdirectory_data_cache, directory_data_cache +
+ compressed_data, sdirectory_cache_bytes);
+ sinode_bytes = root_inode_start;
+ stotal_bytes = total_bytes;
+ stotal_inode_bytes = total_inode_bytes;
+ stotal_directory_bytes = total_directory_bytes +
+ compressed_data;
+ sfile_count = file_count;
+ ssym_count = sym_count;
+ sdev_count = dev_count;
+ sdir_count = dir_count + 1;
+ sfifo_count = fifo_count;
+ ssock_count = sock_count;
+ sdup_files = dup_files;
+ sid_count = id_count;
+ write_recovery_data(&sBlk);
+ save_xattrs();
+
+ /*
+ * set the filesystem state up to be able to append to the
+ * original filesystem. The filesystem state differs depending
+ * on whether we're appending to the original root directory, or
+ * if the original root directory becomes a sub-directory
+ * (root-becomes specified on command line, here root_name !=
+ * NULL)
+ */
+ inode_bytes = inode_size = root_inode_start;
+ directory_size = last_directory_block;
+ cache_size = root_inode_offset + root_inode_size;
+ directory_cache_size = inode_dir_offset + inode_dir_file_size;
+ if(root_name) {
+ sdirectory_bytes = last_directory_block;
+ sdirectory_compressed_bytes = 0;
+ root_inode_number = inode_dir_parent_inode;
+ inode_no = sBlk.inodes + 2;
+ directory_bytes = last_directory_block;
+ directory_cache_bytes = uncompressed_data;
+ memmove(directory_data_cache, directory_data_cache +
+ compressed_data, uncompressed_data);
+ cache_bytes = root_inode_offset + root_inode_size;
+ add_old_root_entry(root_name, sBlk.root_inode,
+ inode_dir_inode_number, SQUASHFS_DIR_TYPE);
+ total_directory_bytes += compressed_data;
+ dir_count ++;
+ } else {
+ sdirectory_compressed_bytes = last_directory_block -
+ inode_dir_start_block;
+ sdirectory_compressed =
+ malloc(sdirectory_compressed_bytes);
+ if(sdirectory_compressed == NULL)
+ BAD_ERROR("Out of memory in save filesystem "
+ "state\n");
+ memcpy(sdirectory_compressed, directory_table +
+ inode_dir_start_block,
+ sdirectory_compressed_bytes);
+ sdirectory_bytes = inode_dir_start_block;
+ root_inode_number = inode_dir_inode_number;
+ inode_no = sBlk.inodes + 1;
+ directory_bytes = inode_dir_start_block;
+ directory_cache_bytes = inode_dir_offset;
+ cache_bytes = root_inode_offset;
+ }
+
+ inode_count = file_count + dir_count + sym_count + dev_count +
+ fifo_count + sock_count;
+ }
+
+ if(path)
+ paths = add_subdir(paths, path);
+
+ dump_actions();
+ dump_pseudos();
+
+ set_progressbar_state(progress);
+
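+ /* Build the in-memory filesystem: from a tar file, via
+ * process_source() when -no-strip/-cpiostyle is in use, as an empty
+ * filesystem when there are no sources, or by recursively scanning
+ * the source directories */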
+ if(tarfile)
+ inode = process_tar_file(progress);
+ else if(tarstyle || cpiostyle)
+ inode = process_source(progress);
+ else if(!source)
+ inode = no_sources(progress);
+ else
+ inode = dir_scan(S_ISDIR(source_buf.st_mode), progress);
+
+ sBlk.root_inode = inode;
+ sBlk.inodes = inode_count;
+ sBlk.s_magic = SQUASHFS_MAGIC;
+ sBlk.s_major = SQUASHFS_MAJOR;
+ sBlk.s_minor = SQUASHFS_MINOR;
+ sBlk.block_size = block_size;
+ sBlk.block_log = block_log;
+ sBlk.flags = SQUASHFS_MKFLAGS(noI, noD, noF, noX, noId, no_fragments,
+ always_use_fragments, duplicate_checking, exportable,
+ no_xattrs, comp_opts);
+ sBlk.mkfs_time = mkfs_time_opt ? mkfs_time : time(NULL);
+
+ disable_info();
+
+ while((fragment = get_frag_action(fragment)))
+ write_fragment(*fragment);
+ if(!reproducible)
+ unlock_fragments();
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
+ pthread_mutex_lock(&fragment_mutex);
+ while(fragments_outstanding) {
+ pthread_mutex_unlock(&fragment_mutex);
+ pthread_testcancel();
+ sched_yield();
+ pthread_mutex_lock(&fragment_mutex);
+ }
+ pthread_cleanup_pop(1);
+
+ queue_put(to_writer, NULL);
+ if(queue_get(from_writer) != 0)
+ EXIT_MKSQUASHFS();
+
+ set_progressbar_state(FALSE);
+ write_filesystem_tables(&sBlk);
+
+ if(!nopad && (i = bytes & (4096 - 1))) {
+ char temp[4096] = {0};
+ write_destination(fd, bytes, 4096 - i, temp);
+ }
+
+ res = close(fd);
+
+ if(res == -1)
+ BAD_ERROR("Failed to close output filesystem, close returned %s\n",
+ strerror(errno));
+
+ if(recovery_file)
+ unlink(recovery_file);
+
+ if(!quiet)
+ print_summary();
+
+ if(logging)
+ fclose(log_fd);
+
+ return 0;
+}
diff --git a/squashfs-tools/mksquashfs.h b/squashfs-tools/mksquashfs.h
new file mode 100644
index 0000000..10d31ff
--- /dev/null
+++ b/squashfs-tools/mksquashfs.h
@@ -0,0 +1,285 @@
+#ifndef MKSQUASHFS_H
+#define MKSQUASHFS_H
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
+ * 2012, 2013, 2014, 2019, 2021, 2022, 2023
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * mksquashfs.h
+ *
+ */
+
+struct dir_info {
+ char *pathname;
+ char *subpath;
+ unsigned int count;
+ unsigned int directory_count;
+ unsigned int depth;
+ unsigned int excluded;
+ char dir_is_ldir;
+ struct dir_ent *dir_ent;
+ struct dir_ent *list;
+ DIR *linuxdir;
+};
+
+struct dir_ent {
+ char *name;
+ char *source_name;
+ char *nonstandard_pathname;
+ struct inode_info *inode;
+ struct dir_info *dir;
+ struct dir_info *our_dir;
+ struct dir_ent *next;
+};
+
+struct inode_info {
+ struct stat buf;
+ struct inode_info *next;
+ struct pseudo_dev *pseudo;
+ struct tar_file *tar_file;
+ struct pseudo_xattr *xattr;
+ squashfs_inode inode;
+ unsigned int inode_number;
+ unsigned int nlink;
+ char dummy_root_dir;
+ char type;
+ char read;
+ char root_entry;
+ char no_fragments;
+ char always_use_fragments;
+ char noD;
+ char noF;
+ char tarfile;
+ char symlink[0];
+};
+
+
+/* in memory file info */
+struct file_info {
+ long long file_size;
+ long long bytes;
+ long long start;
+ long long sparse;
+ unsigned int *block_list;
+ struct file_info *frag_next;
+ struct file_info *block_next;
+ struct fragment *fragment;
+ struct dup_info *dup;
+ unsigned int blocks;
+ unsigned short checksum;
+ unsigned short fragment_checksum;
+ char have_frag_checksum;
+ char have_checksum;
+};
+
+
+struct dup_info {
+ struct file_info *file;
+ struct file_info *frag;
+ struct dup_info *next;
+};
+
+
+/* fragment block data structures */
+struct fragment {
+ unsigned int index;
+ int offset;
+ int size;
+};
+
+/* in memory uid tables */
+#define ID_ENTRIES 256
+#define ID_HASH(id) (id & (ID_ENTRIES - 1))
+#define ISA_UID 1
+#define ISA_GID 2
+
+struct id {
+ unsigned int id;
+ int index;
+ char flags;
+ struct id *next;
+};
+
+/* fragment to file mapping used when appending */
+struct append_file {
+ struct file_info *file;
+ struct append_file *next;
+};
+
+/*
+ * Amount of physical memory to use by default, and the default queue
+ * ratios
+ */
+#define SQUASHFS_TAKE 4
+#define SQUASHFS_READQ_MEM 4
+#define SQUASHFS_BWRITEQ_MEM 4
+#define SQUASHFS_FWRITEQ_MEM 4
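+/*
+ * i.e. by default Mksquashfs uses a quarter of the physical memory;
+ * the *_MEM values are presumably the per-queue divisors applied by
+ * calculate_queue_sizes()
+ */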
+
+/*
+ * Lowest amount of physical memory considered viable for Mksquashfs
+ * to run, in Mbytes
+ */
+#define SQUASHFS_LOWMEM 64
+
+/* offset of data in compressed metadata blocks (allowing room for
+ * compressed size) */
+#define BLOCK_OFFSET 2
+
+#ifdef REPRODUCIBLE_DEFAULT
+#define NOREP_STR
+#define REP_STR " (default)"
+#define REP_DEF 1
+#else
+#define NOREP_STR " (default)"
+#define REP_STR
+#define REP_DEF 0
+#endif
+
+/* in memory directory data */
+#define I_COUNT_SIZE 128
+#define DIR_ENTRIES 32
+#define INODE_HASH_SIZE 65536
+#define INODE_HASH_MASK (INODE_HASH_SIZE - 1)
+#define INODE_HASH(dev, ino) (ino & INODE_HASH_MASK)
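+/* note the dev argument is currently unused, inodes are hashed on
+ * inode number alone */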
+
+struct cached_dir_index {
+ struct squashfs_dir_index index;
+ char *name;
+};
+
+struct directory {
+ unsigned int start_block;
+ unsigned int size;
+ unsigned char *buff;
+ unsigned char *p;
+ unsigned int entry_count;
+ unsigned char *entry_count_p;
+ unsigned int i_count;
+ unsigned int i_size;
+ struct cached_dir_index *index;
+ unsigned char *index_count_p;
+ unsigned int inode_number;
+};
+
+/* exclude file handling */
+/* list of exclude dirs/files */
+struct exclude_info {
+ dev_t st_dev;
+ ino_t st_ino;
+};
+
+#define EXCLUDE_SIZE 8192
+
+struct pathname {
+ int names;
+ struct path_entry *name;
+};
+
+struct pathnames {
+ int count;
+ struct pathname *path[0];
+};
+#define PATHS_ALLOC_SIZE 10
+
+#define FRAG_SIZE 32768
+
+struct old_root_entry_info {
+ char *name;
+ struct inode_info inode;
+};
+
+#define ALLOC_SIZE 128
+
+/* Maximum transfer size for Linux read() call on both 32-bit and 64-bit systems.
+ * See READ(2) */
+#define MAXIMUM_READ_SIZE 0x7ffff000
+
+extern int sleep_time;
+extern struct cache *reader_buffer, *fragment_buffer, *reserve_cache;
+extern struct cache *bwriter_buffer, *fwriter_buffer;
+extern struct queue *to_reader, *to_deflate, *to_writer, *from_writer,
+ *to_frag, *locked_fragment, *to_process_frag;
+extern struct append_file **file_mapping;
+extern struct seq_queue *to_main, *to_order;
+extern pthread_mutex_t fragment_mutex, dup_mutex;
+extern struct squashfs_fragment_entry *fragment_table;
+extern struct compressor *comp;
+extern int block_size;
+extern int block_log;
+extern int sorted;
+extern int noF;
+extern int noD;
+extern int old_exclude;
+extern int no_fragments;
+extern int always_use_fragments;
+extern struct file_info **dupl_frag;
+extern int duplicate_checking;
+extern int no_hardlinks;
+extern struct dir_info *root_dir;
+extern struct pathnames *paths;
+extern int tarfile;
+extern int root_mode_opt;
+extern mode_t root_mode;
+extern int root_time_opt;
+extern unsigned int root_time;
+extern int root_uid_opt;
+extern unsigned int root_uid;
+extern int root_gid_opt;
+extern unsigned int root_gid;
+extern struct inode_info *inode_info[INODE_HASH_SIZE];
+extern int quiet;
+extern int sequence_count;
+extern int pseudo_override;
+extern int global_uid_opt;
+extern unsigned int global_uid;
+extern int global_gid_opt;
+extern unsigned int global_gid;
+extern int sleep_time;
+
+extern int read_fs_bytes(int, long long, long long, void *);
+extern void add_file(long long, long long, long long, unsigned int *, int,
+ unsigned int, int, int);
+extern struct id *create_id(unsigned int);
+extern unsigned int get_uid(unsigned int);
+extern unsigned int get_guid(unsigned int);
+extern long long read_bytes(int, void *, long long);
+extern unsigned short get_checksum_mem(char *, int);
+extern int reproducible;
+extern void *reader(void *arg);
+extern squashfs_inode create_inode(struct dir_info *dir_info,
+ struct dir_ent *dir_ent, int type, long long byte_size,
+ long long start_block, unsigned int offset, unsigned int *block_list,
+ struct fragment *fragment, struct directory *dir_in, long long sparse);
+extern void free_fragment(struct fragment *fragment);
+extern struct file_info *write_file(struct dir_ent *dir_ent, int *dup);
+extern int excluded(char *name, struct pathnames *paths, struct pathnames **new);
+extern struct dir_ent *lookup_name(struct dir_info *dir, char *name);
+extern struct dir_ent *create_dir_entry(char *name, char *source_name,
+ char *nonstandard_pathname, struct dir_info *dir);
+extern void add_dir_entry(struct dir_ent *dir_ent, struct dir_info *sub_dir,
+ struct inode_info *inode_info);
+extern void free_dir_entry(struct dir_ent *dir_ent);
+extern void free_dir(struct dir_info *dir);
+extern struct dir_info *create_dir(char *pathname, char *subpath, unsigned int depth);
+extern char *subpathname(struct dir_ent *dir_ent);
+extern struct dir_info *scan1_opendir(char *pathname, char *subpath, unsigned int depth);
+extern squashfs_inode do_directory_scans(struct dir_ent *dir_ent, int progress);
+extern struct inode_info *lookup_inode(struct stat *buf);
+extern int exec_date(char *, unsigned int *);
+#endif
diff --git a/squashfs-tools/mksquashfs_error.h b/squashfs-tools/mksquashfs_error.h
new file mode 100644
index 0000000..5f861b5
--- /dev/null
+++ b/squashfs-tools/mksquashfs_error.h
@@ -0,0 +1,74 @@
+#ifndef MKSQUASHFS_ERROR_H
+#define MKSQUASHFS_ERROR_H
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * mksquashfs_error.h
+ */
+
+#include "error.h"
+
+extern int exit_on_error;
+extern void prep_exit();
+
+#define INFO(s, args...) \
+ do {\
+ if(!silent)\
+ progressbar_info(s, ## args);\
+ } while(0)
+
+
+#define ERROR_START(s, args...) \
+ do { \
+ disable_progress_bar(); \
+ fprintf(stderr, s, ## args); \
+ } while(0)
+
+#define ERROR_EXIT(s, args...) \
+ do {\
+ if (exit_on_error) { \
+ fprintf(stderr, "\n"); \
+ EXIT_MKSQUASHFS(); \
+ } else { \
+ fprintf(stderr, s, ## args); \
+ enable_progress_bar(); \
+ } \
+ } while(0)
+
+#define EXIT_MKSQUASHFS() \
+ do {\
+ prep_exit();\
+ exit(1);\
+ } while(0)
+
+#define BAD_ERROR(s, args...) \
+ do {\
+ progressbar_error("FATAL ERROR: " s, ##args); \
+ EXIT_MKSQUASHFS();\
+ } while(0)
+
+#define MEM_ERROR() \
+ do {\
+ progressbar_error("FATAL ERROR: Out of memory (%s)\n", \
+ __func__); \
+ EXIT_MKSQUASHFS();\
+ } while(0)
+#endif
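The macros above are designed to pair up: ERROR_START prints the first half of a message with the progress bar disabled, and ERROR_EXIT either finishes the message and re-enables the bar or, when exit_on_error is set, aborts; BAD_ERROR and MEM_ERROR are unconditionally fatal. A hypothetical call site, purely for illustration:

/* Hypothetical caller showing the intended macro pairing */
static void check_block_size(int block_size, char *filename)
{
	if(block_size <= 0)
		BAD_ERROR("Invalid block size %d\n", block_size);

	if(block_size < 4096) {
		ERROR_START("Block size %d is unusually small for %s.",
			block_size, filename);
		ERROR_EXIT("  Continuing anyway!\n");
	}
}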
diff --git a/squashfs-tools/process_fragments.c b/squashfs-tools/process_fragments.c
new file mode 100644
index 0000000..5e78e9d
--- /dev/null
+++ b/squashfs-tools/process_fragments.c
@@ -0,0 +1,373 @@
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2014, 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * process_fragments.c
+ */
+
+#include <pthread.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <string.h>
+#include <stdio.h>
+#include <math.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <dirent.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include "caches-queues-lists.h"
+#include "squashfs_fs.h"
+#include "mksquashfs.h"
+#include "mksquashfs_error.h"
+#include "progressbar.h"
+#include "info.h"
+#include "compressor.h"
+#include "process_fragments.h"
+
+#define FALSE 0
+#define TRUE 1
+
+extern struct queue *to_process_frag;
+extern struct seq_queue *to_main;
+extern int sparse_files;
+extern long long start_offset;
+
+/*
+ * Compute 16 bit BSD checksum over the data, and check for sparseness
+ */
+static int checksum_sparse(struct file_buffer *file_buffer)
+{
+ unsigned char *b = (unsigned char *) file_buffer->data;
+ unsigned short chksum = 0;
+ int bytes = file_buffer->size, sparse = TRUE, value;
+
+ while(bytes --) {
+ chksum = (chksum & 1) ? (chksum >> 1) | 0x8000 : chksum >> 1;
+ value = *b++;
+ if(value) {
+ sparse = FALSE;
+ chksum += value;
+ }
+ }
+
+ file_buffer->checksum = chksum;
+ return sparse;
+}
+
+
+static int read_filesystem(int fd, long long byte, int bytes, void *buff)
+{
+ off_t off = byte;
+
+ TRACE("read_filesystem: reading from position 0x%llx, bytes %d\n",
+ byte, bytes);
+
+ if(lseek(fd, start_offset + off, SEEK_SET) == -1) {
+ ERROR("read_filesystem: Lseek on destination failed because %s, "
+ "offset=0x%llx\n", strerror(errno), start_offset + off);
+ return 0;
+ } else if(read_bytes(fd, buff, bytes) < bytes) {
+ ERROR("Read on destination failed\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+
+static struct file_buffer *get_fragment(struct fragment *fragment,
+ char *data_buffer, int fd)
+{
+ struct squashfs_fragment_entry *disk_fragment;
+ struct file_buffer *buffer, *compressed_buffer;
+ long long start_block;
+ int res, size, index = fragment->index, compressed;
+ char locked;
+
+ /*
+ * Lookup fragment block in cache.
+ * If the fragment block doesn't exist, then get the compressed version
+ * from the writer cache or off disk, and decompress it.
+ *
+ * This routine has two things which complicate the code:
+ *
+ * 1. Multiple threads can simultaneously lookup/create the
+ * same buffer. This means a buffer needs to be "locked"
+ * when it is being filled in, to prevent other threads from
+ * using it when it is not ready. This is because we now do
+ * fragment duplicate checking in parallel.
+ * 2. We have two caches which need to be checked for the
+ * presence of fragment blocks: the normal fragment cache
+ * and a "reserve" cache. The reserve cache is used to
+ * prevent an unnecessary pipeline stall when the fragment cache
+ * is full of fragments waiting to be compressed.
+ */
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex);
+ pthread_mutex_lock(&dup_mutex);
+
+again:
+ buffer = cache_lookup_nowait(fragment_buffer, index, &locked);
+ if(buffer) {
+ pthread_mutex_unlock(&dup_mutex);
+ if(locked)
+ /* got a buffer being filled in. Wait for it */
+ cache_wait_unlock(buffer);
+ goto finished;
+ }
+
+ /* not in fragment cache, is it in the reserve cache? */
+ buffer = cache_lookup_nowait(reserve_cache, index, &locked);
+ if(buffer) {
+ pthread_mutex_unlock(&dup_mutex);
+ if(locked)
+ /* got a buffer being filled in. Wait for it */
+ cache_wait_unlock(buffer);
+ goto finished;
+ }
+
+ /* in neither cache, try to get it from the fragment cache */
+ buffer = cache_get_nowait(fragment_buffer, index);
+ if(!buffer) {
+ /*
+ * no room, so get it from the reserve cache; this is
+ * dimensioned so it will always have space (no more than
+ * processors + 1 can have an outstanding reserve buffer)
+ */
+ buffer = cache_get_nowait(reserve_cache, index);
+ if(!buffer) {
+ /* failsafe */
+ ERROR("no space in reserve cache\n");
+ goto again;
+ }
+ }
+
+ pthread_mutex_unlock(&dup_mutex);
+
+ compressed_buffer = cache_lookup(fwriter_buffer, index);
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex);
+ pthread_mutex_lock(&fragment_mutex);
+ disk_fragment = &fragment_table[index];
+ size = SQUASHFS_COMPRESSED_SIZE_BLOCK(disk_fragment->size);
+ compressed = SQUASHFS_COMPRESSED_BLOCK(disk_fragment->size);
+ start_block = disk_fragment->start_block;
+ pthread_cleanup_pop(1);
+
+ if(compressed) {
+ int error;
+ char *data;
+
+ if(compressed_buffer)
+ data = compressed_buffer->data;
+ else {
+ res = read_filesystem(fd, start_block, size, data_buffer);
+ if(res == 0) {
+ ERROR("Failed to read fragment from output"
+ " filesystem\n");
+ BAD_ERROR("Output filesystem corrupted?\n");
+ }
+ data = data_buffer;
+ }
+
+ res = compressor_uncompress(comp, buffer->data, data, size,
+ block_size, &error);
+ if(res == -1)
+ BAD_ERROR("%s uncompress failed with error code %d\n",
+ comp->name, error);
+ } else if(compressed_buffer)
+ memcpy(buffer->data, compressed_buffer->data, size);
+ else {
+ res = read_filesystem(fd, start_block, size, buffer->data);
+ if(res == 0) {
+ ERROR("Failed to read fragment from output "
+ "filesystem\n");
+ BAD_ERROR("Output filesystem corrupted?\n");
+ }
+ }
+
+ cache_unlock(buffer);
+ cache_block_put(compressed_buffer);
+
+finished:
+ pthread_cleanup_pop(0);
+
+ return buffer;
+}
+
+
+struct file_buffer *get_fragment_cksum(struct file_info *file,
+ char *data_buffer, int fd, unsigned short *checksum)
+{
+ struct file_buffer *frag_buffer;
+ struct append_file *append;
+ int index = file->fragment->index;
+
+ frag_buffer = get_fragment(file->fragment, data_buffer, fd);
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex);
+
+ for(append = file_mapping[index]; append; append = append->next) {
+ int offset = append->file->fragment->offset;
+ int size = append->file->fragment->size;
+ char *data = frag_buffer->data + offset;
+ unsigned short cksum = get_checksum_mem(data, size);
+
+ if(file == append->file)
+ *checksum = cksum;
+
+ pthread_mutex_lock(&dup_mutex);
+ append->file->fragment_checksum = cksum;
+ append->file->have_frag_checksum = TRUE;
+ pthread_mutex_unlock(&dup_mutex);
+ }
+
+ pthread_cleanup_pop(0);
+
+ return frag_buffer;
+}
+
+
+void *frag_thrd(void *destination_file)
+{
+ sigset_t sigmask, old_mask;
+ char *data_buffer;
+ int fd;
+
+ sigemptyset(&sigmask);
+ sigaddset(&sigmask, SIGINT);
+ sigaddset(&sigmask, SIGTERM);
+ sigaddset(&sigmask, SIGUSR1);
+ pthread_sigmask(SIG_BLOCK, &sigmask, &old_mask);
+
+ fd = open(destination_file, O_RDONLY);
+ if(fd == -1)
+ BAD_ERROR("frag_thrd: can't open destination for reading\n");
+
+ data_buffer = malloc(SQUASHFS_FILE_MAX_SIZE);
+ if(data_buffer == NULL)
+ MEM_ERROR();
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex);
+
+ while(1) {
+ struct file_buffer *file_buffer = queue_get(to_process_frag);
+ struct file_buffer *buffer;
+ int sparse = checksum_sparse(file_buffer);
+ struct file_info *dupl_ptr;
+ long long file_size;
+ unsigned short checksum;
+ char flag;
+ int res;
+
+ if(sparse_files && sparse) {
+ file_buffer->c_byte = 0;
+ file_buffer->fragment = FALSE;
+ } else
+ file_buffer->c_byte = file_buffer->size;
+
+ /*
+ * Speculatively pull into the fragment cache any fragment blocks
+ * which contain fragments that *this* fragment may be
+ * a duplicate of.
+ *
+ * Ensuring the fragment block is in cache ahead of time
+ * should eliminate the parallelisation stall when the
+ * main thread needs to read the fragment block to do a
+ * duplicate check on it.
+ *
+ * If this is a fragment belonging to a larger file
+ * (with additional blocks) then ignore it. Here we're
+ * interested in the "low hanging fruit" of files which
+ * consist of only a fragment.
+ */
+ if(file_buffer->file_size != file_buffer->size) {
+ seq_queue_put(to_main, file_buffer);
+ continue;
+ }
+
+ file_size = file_buffer->file_size;
+
+ pthread_mutex_lock(&dup_mutex);
+ dupl_ptr = dupl_frag[file_size];
+ pthread_mutex_unlock(&dup_mutex);
+
+ file_buffer->dupl_start = dupl_ptr;
+ file_buffer->duplicate = FALSE;
+
+ for(; dupl_ptr; dupl_ptr = dupl_ptr->frag_next) {
+ if(file_size != dupl_ptr->fragment->size)
+ continue;
+
+ pthread_mutex_lock(&dup_mutex);
+ flag = dupl_ptr->have_frag_checksum;
+ checksum = dupl_ptr->fragment_checksum;
+ pthread_mutex_unlock(&dup_mutex);
+
+ /*
+ * If we have the checksum and it matches then
+ * read in the fragment block.
+ *
+ * If we *don't* have the checksum, then we are
+ * appending, and the fragment block is on the
+ * "old" filesystem. Read it in and checksum
+ * the entire fragment buffer
+ */
+ if(!flag) {
+ buffer = get_fragment_cksum(dupl_ptr,
+ data_buffer, fd, &checksum);
+ if(checksum != file_buffer->checksum) {
+ cache_block_put(buffer);
+ continue;
+ }
+ } else if(checksum == file_buffer->checksum)
+ buffer = get_fragment(dupl_ptr->fragment,
+ data_buffer, fd);
+ else
+ continue;
+
+ res = memcmp(file_buffer->data, buffer->data +
+ dupl_ptr->fragment->offset, file_size);
+ cache_block_put(buffer);
+ if(res == 0) {
+ struct file_buffer *dup = malloc(sizeof(*dup));
+ if(dup == NULL)
+ MEM_ERROR();
+ memcpy(dup, file_buffer, sizeof(*dup));
+ cache_block_put(file_buffer);
+ dup->dupl_start = dupl_ptr;
+ dup->duplicate = TRUE;
+ dup->cache = NULL;
+ file_buffer = dup;
+ break;
+ }
+ }
+
+ seq_queue_put(to_main, file_buffer);
+ }
+
+ pthread_cleanup_pop(0);
+ return NULL;
+}
diff --git a/squashfs-tools/process_fragments.h b/squashfs-tools/process_fragments.h
new file mode 100644
index 0000000..2a01d66
--- /dev/null
+++ b/squashfs-tools/process_fragments.h
@@ -0,0 +1,28 @@
+#ifndef PROCESS_FRAGMENTS_H
+#define PROCESS_FRAGMENTS_H
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2014, 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * process_fragments.h
+ */
+
+extern void *frag_thrd(void *);
+#endif
diff --git a/squashfs-tools/progressbar.c b/squashfs-tools/progressbar.c
new file mode 100644
index 0000000..19ad807
--- /dev/null
+++ b/squashfs-tools/progressbar.c
@@ -0,0 +1,300 @@
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2012, 2013, 2014, 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * progressbar.c
+ */
+
+#include <pthread.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <stdio.h>
+#include <math.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdlib.h>
+
+#include "mksquashfs_error.h"
+
+#define FALSE 0
+#define TRUE 1
+
+/* flag whether progressbar display is enabled or not */
+static int display_progress_bar = FALSE;
+
+/* flag whether the progress bar is temporarily disabled */
+static int temp_disabled = FALSE;
+
+/* flag whether to display full progress bar or just a percentage */
+static int percent = FALSE;
+
+/* flag whether we need to output a newline before printing
+ * a line - this is because progressbar printing does *not*
+ * output a newline */
+static int need_nl = FALSE;
+
+static int rotate = 0;
+static long long cur_uncompressed = 0, estimated_uncompressed = 0;
+static int columns;
+
+static pthread_t progress_thread;
+static pthread_mutex_t progress_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t size_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+
+static void sigwinch_handler(int arg)
+{
+ struct winsize winsize;
+
+ if(ioctl(1, TIOCGWINSZ, &winsize) == -1) {
+ if(isatty(STDOUT_FILENO))
+ ERROR("TIOCGWINSZ ioctl failed, defaulting to 80 "
+ "columns\n");
+ columns = 80;
+ } else
+ columns = winsize.ws_col;
+}
+
+
+void progressbar_percentage()
+{
+ percent = TRUE;
+}
+
+
+void inc_progress_bar()
+{
+ cur_uncompressed ++;
+}
+
+
+void dec_progress_bar(int count)
+{
+ cur_uncompressed -= count;
+}
+
+
+void progress_bar_size(int count)
+{
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &size_mutex);
+ pthread_mutex_lock(&size_mutex);
+ estimated_uncompressed += count;
+ pthread_cleanup_pop(1);
+}
+
+
+static void progressbar(long long current, long long max, int columns)
+{
+ char rotate_list[] = { '|', '/', '-', '\\' };
+ int max_digits, used, hashes, spaces, percentage;
+ static int tty = -1;
+
+ if(max == 0) {
+ max_digits = 1;
+ used = 13;
+ hashes = 0;
+ spaces = columns - 13;
+ percentage = 100;
+ } else {
+ max_digits = floor(log10(max)) + 1;
+ used = max_digits * 2 + 11;
+ hashes = (current * (columns - used)) / max;
+ spaces = columns - used - hashes;
+ percentage = current * 100 / max;
+ }
+
+ if((current > max) || (columns - used < 0))
+ return;
+
+ if(tty == -1)
+ tty = isatty(STDOUT_FILENO);
+ if(!tty) {
+ static long long previous = -1;
+
+ /* Updating much more frequently than this results in huge
+ * log files. */
+ if((current % 100) != 0 && current != max)
+ return;
+ /* Don't update just to rotate the spinner. */
+ if(current == previous)
+ return;
+ previous = current;
+ }
+
+ printf("\r[");
+
+ while (hashes --)
+ putchar('=');
+
+ putchar(rotate_list[rotate]);
+
+ while(spaces --)
+ putchar(' ');
+
+ printf("] %*lld/%*lld", max_digits, current, max_digits, max);
+ printf(" %3d%%", percentage);
+ fflush(stdout);
+}
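To make the layout arithmetic above concrete, consider assumed values of max = 2500 and an 80-column terminal: max_digits = floor(log10(2500)) + 1 = 4, used = 4 * 2 + 11 = 19, leaving 61 columns for the bar; at current = 1250 that gives hashes = 1250 * 61 / 2500 = 30, spaces = 80 - 19 - 30 = 31 and percentage = 50.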
+
+
+static void display_percentage(long long current, long long max)
+{
+ int percentage = max == 0 ? 100 : current * 100 / max;
+ static int previous = -1;
+
+ if(percentage != previous) {
+ printf("%d\n", percentage);
+ fflush(stdout);
+ previous = percentage;
+ }
+}
+
+
+static void progress_bar(long long current, long long max, int columns)
+{
+ if(percent)
+ display_percentage(current, max);
+ else
+ progressbar(current, max, columns);
+}
+
+
+void enable_progress_bar()
+{
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &progress_mutex);
+ pthread_mutex_lock(&progress_mutex);
+ if(display_progress_bar)
+ progress_bar(cur_uncompressed, estimated_uncompressed, columns);
+ temp_disabled = FALSE;
+ pthread_cleanup_pop(1);
+}
+
+
+void disable_progress_bar()
+{
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &progress_mutex);
+ pthread_mutex_lock(&progress_mutex);
+ if(need_nl) {
+ printf("\n");
+ need_nl = FALSE;
+ }
+ temp_disabled = TRUE;
+ pthread_cleanup_pop(1);
+}
+
+
+void set_progressbar_state(int state)
+{
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &progress_mutex);
+ pthread_mutex_lock(&progress_mutex);
+ if(display_progress_bar != state) {
+ if(display_progress_bar && !temp_disabled) {
+ progress_bar(cur_uncompressed, estimated_uncompressed,
+ columns);
+ printf("\n");
+ need_nl = FALSE;
+ }
+ display_progress_bar = state;
+ }
+ pthread_cleanup_pop(1);
+}
+
+
+static void *progress_thrd(void *arg)
+{
+ struct timespec requested_time, remaining;
+ struct winsize winsize;
+
+ if(ioctl(1, TIOCGWINSZ, &winsize) == -1) {
+ if(isatty(STDOUT_FILENO))
+ ERROR("TIOCGWINSZ ioctl failed, defaulting to 80 "
+ "columns\n");
+ columns = 80;
+ } else
+ columns = winsize.ws_col;
+ signal(SIGWINCH, sigwinch_handler);
+
+ requested_time.tv_sec = 0;
+ requested_time.tv_nsec = 250000000;
+
+ while(1) {
+ int res = nanosleep(&requested_time, &remaining);
+
+ if(res == -1 && errno != EINTR)
+ BAD_ERROR("nanosleep failed in progress thread\n");
+
+ pthread_mutex_lock(&progress_mutex);
+ rotate = (rotate + 1) % 4;
+ if(display_progress_bar && !temp_disabled) {
+ progress_bar(cur_uncompressed, estimated_uncompressed, columns);
+ need_nl = TRUE;
+ }
+ pthread_mutex_unlock(&progress_mutex);
+ }
+}
+
+
+void init_progress_bar()
+{
+ pthread_create(&progress_thread, NULL, progress_thrd, NULL);
+}
+
+
+void progressbar_error(char *fmt, ...)
+{
+ va_list ap;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &progress_mutex);
+ pthread_mutex_lock(&progress_mutex);
+
+ if(need_nl) {
+ printf("\n");
+ need_nl = FALSE;
+ }
+
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+
+ pthread_cleanup_pop(1);
+}
+
+
+void progressbar_info(char *fmt, ...)
+{
+ va_list ap;
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &progress_mutex);
+ pthread_mutex_lock(&progress_mutex);
+
+ if(need_nl) {
+ printf("\n");
+ need_nl = FALSE;
+ }
+
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+
+ pthread_cleanup_pop(1);
+}
+
diff --git a/squashfs-tools/progressbar.h b/squashfs-tools/progressbar.h
new file mode 100644
index 0000000..a97ff37
--- /dev/null
+++ b/squashfs-tools/progressbar.h
@@ -0,0 +1,35 @@
+#ifndef PROGRESSBAR_H
+#define PROGRESSBAR_H
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2012, 2013, 2014, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * progressbar.h
+ */
+
+extern void inc_progress_bar();
+extern void dec_progress_bar(int count);
+extern void progress_bar_size(int count);
+extern void enable_progress_bar();
+extern void disable_progress_bar();
+extern void init_progress_bar();
+extern void set_progressbar_state(int);
+extern void progressbar_percentage();
+#endif
diff --git a/squashfs-tools/pseudo.c b/squashfs-tools/pseudo.c
new file mode 100644
index 0000000..2542173
--- /dev/null
+++ b/squashfs-tools/pseudo.c
@@ -0,0 +1,1376 @@
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2009, 2010, 2012, 2014, 2017, 2019, 2021, 2022, 2023
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * pseudo.c
+ */
+
+#include <pwd.h>
+#include <grp.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <ctype.h>
+#include <time.h>
+#include <ctype.h>
+#include <regex.h>
+#include <dirent.h>
+#include <sys/types.h>
+
+#include "pseudo.h"
+#include "mksquashfs_error.h"
+#include "progressbar.h"
+#include "squashfs_fs.h"
+#include "mksquashfs.h"
+#include "xattr.h"
+
+#define TRUE 1
+#define FALSE 0
+#define MAX_LINE 16384
+
+struct pseudo *pseudo = NULL;
+
+char *get_element(char *target, char **targname)
+{
+ char *start;
+
+ start = target;
+ while(*target != '/' && *target != '\0')
+ target ++;
+
+ *targname = strndup(start, target - start);
+
+ while(*target == '/')
+ target ++;
+
+ return target;
+}
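get_element() peels one path component off the front of target, returning a pointer past any trailing slashes and handing back a strndup()ed copy of the component. A usage sketch (hypothetical helper, relying on the stdio/stdlib headers pseudo.c already includes):

/* Hypothetical walk over e.g. "usr/bin/tool": prints "usr", "bin", "tool" */
static void walk_components(char *target)
{
	char *targname;

	while(*target != '\0') {
		target = get_element(target, &targname);
		printf("component: %s\n", targname);
		free(targname);
	}
}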
+
+
+/*
+ * Add pseudo device target to the set of pseudo devices. Pseudo_dev
+ * describes the pseudo device attributes.
+ */
+static struct pseudo *add_pseudo(struct pseudo *pseudo, struct pseudo_dev *pseudo_dev,
+ char *target, char *alltarget)
+{
+ char *targname;
+ int i;
+
+ target = get_element(target, &targname);
+
+ if(pseudo == NULL) {
+ pseudo = malloc(sizeof(struct pseudo));
+ if(pseudo == NULL)
+ MEM_ERROR();
+
+ pseudo->names = 0;
+ pseudo->count = 0;
+ pseudo->name = NULL;
+ }
+
+ for(i = 0; i < pseudo->names; i++)
+ if(strcmp(pseudo->name[i].name, targname) == 0)
+ break;
+
+ if(i == pseudo->names) {
+ /* allocate new name entry */
+ pseudo->names ++;
+ pseudo->name = realloc(pseudo->name, (i + 1) *
+ sizeof(struct pseudo_entry));
+ if(pseudo->name == NULL)
+ MEM_ERROR();
+ pseudo->name[i].name = targname;
+ pseudo->name[i].xattr = NULL;
+
+ if(target[0] == '\0') {
+ /* at leaf pathname component */
+ pseudo->name[i].pseudo = NULL;
+ pseudo->name[i].pathname = strdup(alltarget);
+ pseudo->name[i].dev = pseudo_dev;
+ } else {
+ /* recurse adding child components */
+ pseudo->name[i].dev = NULL;
+ pseudo->name[i].pseudo = add_pseudo(NULL, pseudo_dev,
+ target, alltarget);
+ }
+ } else {
+ /* existing matching entry */
+ free(targname);
+
+ if(pseudo->name[i].pseudo == NULL) {
+ /* No sub-directory, which means this is the leaf
+ * component; this may or may not be a pre-existing
+ * pseudo file.
+ */
+ if(target[0] != '\0') {
+ /*
+ * entry must exist as either a 'd' type or
+ * 'm' type pseudo file, or not exist at all
+ */
+ if(pseudo->name[i].dev == NULL ||
+ pseudo->name[i].dev->type == 'd' ||
+ pseudo->name[i].dev->type == 'm')
+ /* recurse adding child components */
+ pseudo->name[i].pseudo =
+ add_pseudo(NULL, pseudo_dev,
+ target, alltarget);
+ else {
+ ERROR_START("%s already exists as a "
+ "non directory.",
+ pseudo->name[i].name);
+ ERROR_EXIT(". Ignoring %s!\n",
+ alltarget);
+ }
+ } else if(pseudo->name[i].dev == NULL) {
+ /* add this pseudo definition */
+ pseudo->name[i].pathname = strdup(alltarget);
+ pseudo->name[i].dev = pseudo_dev;
+ } else if(memcmp(pseudo_dev, pseudo->name[i].dev,
+ sizeof(struct pseudo_dev)) != 0) {
+ ERROR_START("%s already exists as a different "
+ "pseudo definition.", alltarget);
+ ERROR_EXIT(" Ignoring!\n");
+ } else {
+ ERROR_START("%s already exists as an identical "
+ "pseudo definition!", alltarget);
+ ERROR_EXIT(" Ignoring!\n");
+ }
+ } else {
+ if(target[0] == '\0') {
+ /*
+ * sub-directory exists, which means we can only
+ * add a pseudo file of type 'd' or type 'm'
+ */
+ if(pseudo->name[i].dev == NULL &&
+ (pseudo_dev->type == 'd' ||
+ pseudo_dev->type == 'm')) {
+ pseudo->name[i].pathname =
+ strdup(alltarget);
+ pseudo->name[i].dev = pseudo_dev;
+ } else {
+ ERROR_START("%s already exists as a "
+ "different pseudo definition.",
+ pseudo->name[i].name);
+ ERROR_EXIT(" Ignoring %s!\n",
+ alltarget);
+ }
+ } else
+ /* recurse adding child components */
+ add_pseudo(pseudo->name[i].pseudo, pseudo_dev,
+ target, alltarget);
+ }
+ }
+
+ return pseudo;
+}
+
+
+static struct pseudo *add_pseudo_definition(struct pseudo *pseudo, struct pseudo_dev *pseudo_dev,
+ char *target, char *alltarget)
+{
+ /* special case if a root pseudo definition is being added */
+ if(strcmp(target, "/") == 0) {
+ /* type must be 'd' */
+ if(pseudo_dev->type != 'd') {
+ ERROR("Pseudo definition / is not a directory. Ignoring!\n");
+ return pseudo;
+ }
+
+ /* if already have a root pseudo just replace */
+ if(pseudo && pseudo->names == 1 && strcmp(pseudo->name[0].name, "/") == 0) {
+ pseudo->name[0].dev = pseudo_dev;
+ return pseudo;
+ } else {
+ struct pseudo *new = malloc(sizeof(struct pseudo));
+ if(new == NULL)
+ MEM_ERROR();
+
+ new->names = 1;
+ new->count = 0;
+ new->name = malloc(sizeof(struct pseudo_entry));
+ if(new->name == NULL)
+ MEM_ERROR();
+
+ new->name[0].name = "/";
+ new->name[0].pseudo = pseudo;
+ new->name[0].pathname = "/";
+ new->name[0].dev = pseudo_dev;
+ new->name[0].xattr = NULL;
+ return new;
+ }
+ }
+
+ /* if there's a root pseudo definition, skip it before walking target */
+ if(pseudo && pseudo->names == 1 && strcmp(pseudo->name[0].name, "/") == 0) {
+ pseudo->name[0].pseudo = add_pseudo(pseudo->name[0].pseudo, pseudo_dev, target, alltarget);
+ return pseudo;
+ } else
+ return add_pseudo(pseudo, pseudo_dev, target, alltarget);
+}
+
+
+/*
+ * Find subdirectory in pseudo directory referenced by pseudo, matching
+ * filename. If filename doesn't exist or if filename is a leaf file
+ * return NULL
+ */
+struct pseudo *pseudo_subdir(char *filename, struct pseudo *pseudo)
+{
+ int i;
+
+ if(pseudo == NULL)
+ return NULL;
+
+ for(i = 0; i < pseudo->names; i++)
+ if(strcmp(filename, pseudo->name[i].name) == 0)
+ return pseudo->name[i].pseudo;
+
+ return NULL;
+}
+
+
+struct pseudo_entry *pseudo_readdir(struct pseudo *pseudo)
+{
+ if(pseudo == NULL)
+ return NULL;
+
+ while(pseudo->count < pseudo->names)
+ return &pseudo->name[pseudo->count++];
+
+ return NULL;
+}
+
+
+int pseudo_exec_file(struct pseudo_dev *dev, int *child)
+{
+ int res, pipefd[2];
+
+ res = pipe(pipefd);
+ if(res == -1) {
+ ERROR("Executing dynamic pseudo file, pipe failed\n");
+ return 0;
+ }
+
+ *child = fork();
+ if(*child == -1) {
+ ERROR("Executing dynamic pseudo file, fork failed\n");
+ goto failed;
+ }
+
+ if(*child == 0) {
+ close(pipefd[0]);
+ close(STDOUT_FILENO);
+ res = dup(pipefd[1]);
+ if(res == -1)
+ exit(EXIT_FAILURE);
+
+ execl("/bin/sh", "sh", "-c", dev->command, (char *) NULL);
+ exit(EXIT_FAILURE);
+ }
+
+ close(pipefd[1]);
+ return pipefd[0];
+
+failed:
+ close(pipefd[0]);
+ close(pipefd[1]);
+ return 0;
+}
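pseudo_exec_file() returns the read end of a pipe connected to the command's stdout and fills in the child pid, returning 0 on failure. A hedged sketch of how a caller might drain it (hypothetical; the real consumer is the dynamic pseudo file handling in mksquashfs.c, and the headers used here are already included by pseudo.c):

/* Hypothetical caller: read everything the command writes, then reap it */
static long long drain_pseudo_command(struct pseudo_dev *dev)
{
	char buffer[4096];
	long long total = 0;
	int child, status;
	ssize_t bytes;
	int fd = pseudo_exec_file(dev, &child);

	if(fd == 0)
		return -1;	/* pipe or fork failed, already reported */

	while((bytes = read(fd, buffer, sizeof(buffer))) > 0)
		total += bytes;		/* a real caller would store the data */

	close(fd);
	waitpid(child, &status, 0);

	return bytes == -1 ? -1 : total;
}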
+
+
+static struct pseudo_entry *pseudo_lookup(struct pseudo *pseudo, char *target)
+{
+ char *targname;
+ int i;
+
+ if(pseudo == NULL)
+ return NULL;
+
+ target = get_element(target, &targname);
+
+ for(i = 0; i < pseudo->names; i++)
+ if(strcmp(pseudo->name[i].name, targname) == 0)
+ break;
+
+ free(targname);
+
+ if(i == pseudo->names)
+ return NULL;
+
+ if(target[0] == '\0')
+ return &pseudo->name[i];
+
+ if(pseudo->name[i].pseudo == NULL)
+ return NULL;
+
+ return pseudo_lookup(pseudo->name[i].pseudo, target);
+}
+
+
+void print_definitions()
+{
+ ERROR("Pseudo definitions should be of the format\n");
+ ERROR("\tfilename d mode uid gid\n");
+ ERROR("\tfilename m mode uid gid\n");
+ ERROR("\tfilename b mode uid gid major minor\n");
+ ERROR("\tfilename c mode uid gid major minor\n");
+ ERROR("\tfilename f mode uid gid command\n");
+ ERROR("\tfilename s mode uid gid symlink\n");
+ ERROR("\tfilename i mode uid gid [s|f]\n");
+ ERROR("\tfilename x name=value\n");
+ ERROR("\tfilename l filename\n");
+ ERROR("\tfilename L pseudo_filename\n");
+ ERROR("\tfilename D time mode uid gid\n");
+ ERROR("\tfilename M time mode uid gid\n");
+ ERROR("\tfilename B time mode uid gid major minor\n");
+ ERROR("\tfilename C time mode uid gid major minor\n");
+ ERROR("\tfilename F time mode uid gid command\n");
+ ERROR("\tfilename S time mode uid gid symlink\n");
+ ERROR("\tfilename I time mode uid gid [s|f]\n");
+ ERROR("\tfilename R time mode uid gid length offset sparse\n");
+}
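For illustration, a few made-up definitions in the lower-case syntax listed above; filenames containing spaces may be double quoted or have the spaces backslash escaped, and lines starting with # are comments when read from a pseudo file:

# a world-writable directory and a character device node
tmp d 1777 0 0
dev/console c 600 0 0 5 1
# a dynamic file whose contents are the output of the command
build/info f 444 0 0 date
# a symlink, and a quoted filename containing a space
lib64 s 777 0 0 lib
"my file" m 644 0 0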
+
+
+static int read_pseudo_def_pseudo_link(char *orig_def, char *filename, char *name, char *def)
+{
+ char *linkname, *link;
+ int quoted = FALSE;
+ struct pseudo_entry *pseudo_ent;
+
+ /*
+ * Scan for filename, don't use sscanf() and "%s" because
+ * that can't handle filenames with spaces.
+ *
+ * Filenames with spaces should either escape (backslash) the
+ * space or use double quotes.
+ */
+ linkname = malloc(strlen(def) + 1);
+ if(linkname == NULL)
+ MEM_ERROR();
+
+ for(link = linkname; (quoted || !isspace(*def)) && *def != '\0';) {
+ if(*def == '"') {
+ quoted = !quoted;
+ def ++;
+ continue;
+ }
+
+ if(*def == '\\') {
+ def ++;
+ if (*def == '\0')
+ break;
+ }
+ *link ++ = *def ++;
+ }
+ *link = '\0';
+
+ /* Skip any leading slashes (/) */
+ for(link = linkname; *link == '/'; link ++);
+
+ if(*link == '\0') {
+ ERROR("Not enough or invalid arguments in pseudo LINK file "
+ "definition \"%s\"\n", orig_def);
+ goto error;
+ }
+
+ /* Lookup linkname in pseudo definition tree */
+ /* if there's a root pseudo definition, skip it before walking target */
+ if(pseudo && pseudo->names == 1 && strcmp(pseudo->name[0].name, "/") == 0)
+ pseudo_ent = pseudo_lookup(pseudo->name[0].pseudo, link);
+ else
+ pseudo_ent = pseudo_lookup(pseudo, link);
+
+ if(pseudo_ent == NULL || pseudo_ent->dev == NULL) {
+ ERROR("Pseudo LINK file %s doesn't exist\n", linkname);
+ goto error;
+ }
+
+ if(pseudo_ent->dev->type == 'd' || pseudo_ent->dev->type == 'm') {
+ ERROR("Cannot hardlink to a pseudo directory ('d') or modify ('m') definition\n");
+ goto error;
+ }
+
+ pseudo = add_pseudo_definition(pseudo, pseudo_ent->dev, name, name);
+
+ free(filename);
+ free(linkname);
+ return TRUE;
+
+error:
+ print_definitions();
+ free(filename);
+ free(linkname);
+ return FALSE;
+}
+
+
+static int read_pseudo_def_link(char *orig_def, char *filename, char *name, char *def, char *destination)
+{
+ char *linkname, *link;
+ int quoted = FALSE;
+ struct pseudo_dev *dev = NULL;
+ static struct stat *dest_buf = NULL;
+
+ /*
+ * Stat destination file. We need to do this to prevent people
+ * from creating a circular loop, connecting the output to the
+ * input (only needed for appending, otherwise the destination
+ * file will not exist).
+ */
+ if(dest_buf == NULL) {
+ dest_buf = malloc(sizeof(struct stat));
+ if(dest_buf == NULL)
+ MEM_ERROR();
+
+ memset(dest_buf, 0, sizeof(struct stat));
+ lstat(destination, dest_buf);
+ }
+
+
+ /*
+ * Scan for filename, don't use sscanf() and "%s" because
+ * that can't handle filenames with spaces.
+ *
+ * Filenames with spaces should either escape (backslash) the
+ * space or use double quotes.
+ */
+ linkname = malloc(strlen(def) + 1);
+ if(linkname == NULL)
+ MEM_ERROR();
+
+ for(link = linkname; (quoted || !isspace(*def)) && *def != '\0';) {
+ if(*def == '"') {
+ quoted = !quoted;
+ def ++;
+ continue;
+ }
+
+ if(*def == '\\') {
+ def ++;
+ if (*def == '\0')
+ break;
+ }
+ *link ++ = *def ++;
+ }
+ *link = '\0';
+
+ if(*linkname == '\0') {
+ ERROR("Not enough or invalid arguments in pseudo link file "
+ "definition \"%s\"\n", orig_def);
+ goto error;
+ }
+
+ dev = malloc(sizeof(struct pseudo_dev));
+ if(dev == NULL)
+ MEM_ERROR();
+
+ memset(dev, 0, sizeof(struct pseudo_dev));
+
+ dev->linkbuf = malloc(sizeof(struct stat));
+ if(dev->linkbuf == NULL)
+ MEM_ERROR();
+
+ if(lstat(linkname, dev->linkbuf) == -1) {
+ ERROR("Cannot stat pseudo link file %s because %s\n",
+ linkname, strerror(errno));
+ goto error;
+ }
+
+ if(S_ISDIR(dev->linkbuf->st_mode)) {
+ ERROR("Pseudo link file %s is a directory, ", linkname);
+ ERROR("which cannot be hardlinked to\n");
+ goto error;
+ }
+
+ if(S_ISREG(dev->linkbuf->st_mode)) {
+ /*
+ * Check we're not trying to create a circular loop,
+ * connecting the output destination file to the
+ * input
+ */
+ if(memcmp(dev->linkbuf, dest_buf, sizeof(struct stat)) == 0) {
+ ERROR("Pseudo link file %s is the ", linkname);
+ ERROR("destination output file, which cannot be linked to\n");
+ goto error;
+ }
+ }
+
+ dev->type = 'l';
+ dev->pseudo_type = PSEUDO_FILE_OTHER;
+ dev->linkname = strdup(linkname);
+
+ pseudo = add_pseudo_definition(pseudo, dev, name, name);
+
+ free(filename);
+ free(linkname);
+ return TRUE;
+
+error:
+ print_definitions();
+ if(dev)
+ free(dev->linkbuf);
+ free(dev);
+ free(filename);
+ free(linkname);
+ return FALSE;
+}
+
+
+static int read_pseudo_def_extended(char type, char *orig_def, char *filename,
+ char *name, char *def, char *pseudo_file, struct pseudo_file **file)
+{
+ int n, bytes;
+ int quoted = FALSE;
+ unsigned int major = 0, minor = 0, mode, mtime;
+ char *ptr, *str, *string, *command = NULL, *symlink = NULL;
+ char suid[100], sgid[100]; /* overflow safe */
+ char ipc_type;
+ long long uid, gid;
+ struct pseudo_dev *dev;
+ static int pseudo_ino = 1;
+ long long file_length, pseudo_offset;
+ int sparse;
+
+ n = sscanf(def, "%u %o %n", &mtime, &mode, &bytes);
+
+ if(n < 2) {
+ /*
+ * Couldn't match date and mode. Date may not be quoted
+ * and is instead using backslashed spaces (i.e. 1\ jan\ 1980)
+ * where the "1" matched for the integer, but, jan didn't for
+ * the octal number.
+ *
+ * Scan for date string, don't use sscanf() and "%s" because
+ * that can't handle strings with spaces.
+ *
+ * Strings with spaces should either escape (backslash) the
+ * space or use double quotes.
+ */
+ string = malloc(strlen(def) + 1);
+ if(string == NULL)
+ MEM_ERROR();
+
+ for(str = string; (quoted || !isspace(*def)) && *def != '\0';) {
+ if(*def == '"') {
+ quoted = !quoted;
+ def ++;
+ continue;
+ }
+
+ if(*def == '\\') {
+ def ++;
+ if (*def == '\0')
+ break;
+ }
+ *str++ = *def ++;
+ }
+ *str = '\0';
+
+ if(string[0] == '\0') {
+ ERROR("Not enough or invalid arguments in pseudo file "
+ "definition \"%s\"\n", orig_def);
+ free(string);
+ goto error;
+ }
+
+ n = exec_date(string, &mtime);
+ if(n == FALSE) {
+ ERROR("Couldn't parse time: date string or "
+ "unsigned decimal integer "
+ "expected\n");
+ free(string);
+ goto error;
+ }
+
+ free(string);
+
+ n = sscanf(def, "%o %99s %99s %n", &mode, suid, sgid, &bytes);
+ def += bytes;
+ if(n < 3) {
+ ERROR("Not enough or invalid arguments in pseudo file "
+ "definition \"%s\"\n", orig_def);
+ switch(n) {
+ case -1:
+ /* FALLTHROUGH */
+ case 0:
+ ERROR("Couldn't parse mode, octal integer expected\n");
+ break;
+ case 1:
+ ERROR("Read filename, type, time and mode, but failed to "
+ "read or match uid\n");
+ break;
+ default:
+ ERROR("Read filename, type, time, mode and uid, but failed "
+ "to read or match gid\n");
+ break;
+ }
+ goto error;
+ }
+ } else {
+ def += bytes;
+ n = sscanf(def, "%99s %99s %n", suid, sgid, &bytes);
+ def += bytes;
+
+ if(n < 2) {
+ ERROR("Not enough or invalid arguments in pseudo file "
+ "definition \"%s\"\n", orig_def);
+ switch(n) {
+ case -1:
+ /* FALLTHROUGH */
+ case 0:
+ ERROR("Read filename, type, time and mode, but failed to "
+ "read or match uid\n");
+ break;
+ default:
+ ERROR("Read filename, type, time, mode and uid, but failed "
+ "to read or match gid\n");
+ break;
+ }
+ goto error;
+ }
+ }
+
+ switch(type) {
+ case 'B':
+ /* FALLTHROUGH */
+ case 'C':
+ n = sscanf(def, "%u %u %n", &major, &minor, &bytes);
+ def += bytes;
+
+ if(n < 2) {
+ ERROR("Not enough or invalid arguments in %s device "
+ "pseudo file definition \"%s\"\n", type == 'B' ?
+ "block" : "character", orig_def);
+ if(n < 1)
+ ERROR("Read filename, type, time, mode, uid and "
+ "gid, but failed to read or match major\n");
+ else
+ ERROR("Read filename, type, time, mode, uid, gid "
+ "and major, but failed to read or "
+ "match minor\n");
+ goto error;
+ }
+
+ if(major > 0xfff) {
+ ERROR("Major %d out of range\n", major);
+ goto error;
+ }
+
+ if(minor > 0xfffff) {
+ ERROR("Minor %d out of range\n", minor);
+ goto error;
+ }
+ break;
+ case 'I':
+ n = sscanf(def, "%c %n", &ipc_type, &bytes);
+ def += bytes;
+
+ if(n < 1) {
+ ERROR("Not enough or invalid arguments in ipc "
+ "pseudo file definition \"%s\"\n", orig_def);
+ ERROR("Read filename, type, time, mode, uid and gid, "
+ "but failed to read or match ipc_type\n");
+ goto error;
+ }
+
+ if(ipc_type != 's' && ipc_type != 'f') {
+ ERROR("Ipc_type should be s or f\n");
+ goto error;
+ }
+ break;
+ case 'R':
+ if(pseudo_file == NULL) {
+ ERROR("'R' definition can only be used in a Pseudo file\n");
+ goto error;
+ }
+
+ n = sscanf(def, "%lld %lld %d %n", &file_length, &pseudo_offset,
+ &sparse, &bytes);
+ def += bytes;
+
+ if(n < 3) {
+ ERROR("Not enough or invalid arguments in inline read "
+ "pseudo file definition \"%s\"\n", orig_def);
+ ERROR("Read filename, type, time, mode, uid and gid, "
+ "but failed to read or match file length, "
+ "offset or sparse\n");
+ goto error;
+ }
+ break;
+ case 'D':
+ case 'M':
+ break;
+ case 'F':
+ if(def[0] == '\0') {
+ ERROR("Not enough arguments in dynamic file pseudo "
+ "definition \"%s\"\n", orig_def);
+ ERROR("Expected command, which can be an executable "
+ "or a piece of shell script\n");
+ goto error;
+ }
+ command = def;
+ def += strlen(def);
+ break;
+ case 'S':
+ if(def[0] == '\0') {
+ ERROR("Not enough arguments in symlink pseudo "
+ "definition \"%s\"\n", orig_def);
+ ERROR("Expected symlink\n");
+ goto error;
+ }
+
+ if(strlen(def) > 65535) {
+ ERROR("Symlink pseudo definition %s is greater than 65535"
+ " bytes!\n", def);
+ goto error;
+ }
+ symlink = def;
+ def += strlen(def);
+ break;
+ default:
+ ERROR("Unsupported type %c\n", type);
+ goto error;
+ }
+
+ /*
+ * Check for trailing junk after expected arguments
+ */
+ if(def[0] != '\0') {
+ ERROR("Unexpected trailing characters in pseudo file "
+ "definition \"%s\"\n", orig_def);
+ goto error;
+ }
+
+ if(mode > 07777) {
+ ERROR("Mode %o out of range\n", mode);
+ goto error;
+ }
+
+ uid = strtoll(suid, &ptr, 10);
+ if(*ptr == '\0') {
+ if(uid < 0 || uid > ((1LL << 32) - 1)) {
+ ERROR("Uid %s out of range\n", suid);
+ goto error;
+ }
+ } else {
+ struct passwd *pwuid = getpwnam(suid);
+ if(pwuid)
+ uid = pwuid->pw_uid;
+ else {
+ ERROR("Uid %s invalid uid or unknown user\n", suid);
+ goto error;
+ }
+ }
+
+ gid = strtoll(sgid, &ptr, 10);
+ if(*ptr == '\0') {
+ if(gid < 0 || gid > ((1LL << 32) - 1)) {
+ ERROR("Gid %s out of range\n", sgid);
+ goto error;
+ }
+ } else {
+ struct group *grgid = getgrnam(sgid);
+ if(grgid)
+ gid = grgid->gr_gid;
+ else {
+ ERROR("Gid %s invalid gid or unknown group\n", sgid);
+ goto error;
+ }
+ }
+
+ switch(type) {
+ case 'B':
+ mode |= S_IFBLK;
+ break;
+ case 'C':
+ mode |= S_IFCHR;
+ break;
+ case 'I':
+ if(ipc_type == 's')
+ mode |= S_IFSOCK;
+ else
+ mode |= S_IFIFO;
+ break;
+ case 'D':
+ mode |= S_IFDIR;
+ break;
+ case 'F':
+ case 'R':
+ mode |= S_IFREG;
+ break;
+ case 'S':
+ /* permissions on symlinks are always rwxrwxrwx */
+ mode = 0777 | S_IFLNK;
+ break;
+ }
+
+ dev = malloc(sizeof(struct pseudo_dev));
+ if(dev == NULL)
+ MEM_ERROR();
+
+ dev->buf = malloc(sizeof(struct pseudo_stat));
+ if(dev->buf == NULL)
+ MEM_ERROR();
+
+ dev->type = type == 'M' ? 'M' : tolower(type);
+ dev->buf->mode = mode;
+ dev->buf->uid = uid;
+ dev->buf->gid = gid;
+ dev->buf->major = major;
+ dev->buf->minor = minor;
+ dev->buf->mtime = mtime;
+ dev->buf->ino = pseudo_ino ++;
+
+ if(type == 'R') {
+ if(*file == NULL) {
+ *file = malloc(sizeof(struct pseudo_file));
+ if(*file == NULL)
+ MEM_ERROR();
+
+ (*file)->filename = strdup(pseudo_file);
+ (*file)->fd = -1;
+ }
+
+ dev->data = malloc(sizeof(struct pseudo_data));
+ if(dev->data == NULL)
+ MEM_ERROR();
+
+ dev->pseudo_type = PSEUDO_FILE_DATA;
+ dev->data->file = *file;
+ dev->data->length = file_length;
+ dev->data->offset = pseudo_offset;
+ dev->data->sparse = sparse;
+ } else if(type == 'F') {
+ dev->pseudo_type = PSEUDO_FILE_PROCESS;
+ dev->command = strdup(command);
+ } else
+ dev->pseudo_type = PSEUDO_FILE_OTHER;
+
+ if(type == 'S')
+ dev->symlink = strdup(symlink);
+
+ pseudo = add_pseudo_definition(pseudo, dev, name, name);
+
+ free(filename);
+ return TRUE;
+
+error:
+ print_definitions();
+ free(filename);
+ return FALSE;
+}
+
+
+static int read_pseudo_def_original(char type, char *orig_def, char *filename, char *name, char *def)
+{
+ int n, bytes;
+ unsigned int major = 0, minor = 0, mode;
+ char *ptr, *command = NULL, *symlink = NULL;
+ char suid[100], sgid[100]; /* overflow safe */
+ char ipc_type;
+ long long uid, gid;
+ struct pseudo_dev *dev;
+ static int pseudo_ino = 1;
+
+ n = sscanf(def, "%o %99s %99s %n", &mode, suid, sgid, &bytes);
+ def += bytes;
+
+ if(n < 3) {
+ ERROR("Not enough or invalid arguments in pseudo file "
+ "definition \"%s\"\n", orig_def);
+ switch(n) {
+ case -1:
+ /* FALLTHROUGH */
+ case 0:
+ /* FALLTHROUGH */
+ case 1:
+ ERROR("Couldn't parse filename, type or octal mode\n");
+ ERROR("If the filename has spaces, either quote it, or "
+ "backslash the spaces\n");
+ break;
+ case 2:
+ ERROR("Read filename, type and mode, but failed to "
+ "read or match uid\n");
+ break;
+ default:
+ ERROR("Read filename, type, mode and uid, but failed "
+ "to read or match gid\n");
+ break;
+ }
+ goto error;
+ }
+
+ switch(type) {
+ case 'b':
+ /* FALLTHROUGH */
+ case 'c':
+ n = sscanf(def, "%u %u %n", &major, &minor, &bytes);
+ def += bytes;
+
+ if(n < 2) {
+ ERROR("Not enough or invalid arguments in %s device "
+ "pseudo file definition \"%s\"\n", type == 'b' ?
+ "block" : "character", orig_def);
+ if(n < 1)
+ ERROR("Read filename, type, mode, uid and gid, "
+ "but failed to read or match major\n");
+ else
+ ERROR("Read filename, type, mode, uid, gid "
+ "and major, but failed to read or "
+ "match minor\n");
+ goto error;
+ }
+
+ if(major > 0xfff) {
+ ERROR("Major %d out of range\n", major);
+ goto error;
+ }
+
+ if(minor > 0xfffff) {
+ ERROR("Minor %d out of range\n", minor);
+ goto error;
+ }
+ break;
+ case 'i':
+ n = sscanf(def, "%c %n", &ipc_type, &bytes);
+ def += bytes;
+
+ if(n < 1) {
+ ERROR("Not enough or invalid arguments in ipc "
+ "pseudo file definition \"%s\"\n", orig_def);
+ ERROR("Read filename, type, mode, uid and gid, "
+ "but failed to read or match ipc_type\n");
+ goto error;
+ }
+
+ if(ipc_type != 's' && ipc_type != 'f') {
+ ERROR("Ipc_type should be s or f\n");
+ goto error;
+ }
+ break;
+ case 'd':
+ case 'm':
+ break;
+ case 'f':
+ if(def[0] == '\0') {
+ ERROR("Not enough arguments in dynamic file pseudo "
+ "definition \"%s\"\n", orig_def);
+ ERROR("Expected command, which can be an executable "
+ "or a piece of shell script\n");
+ goto error;
+ }
+ command = def;
+ def += strlen(def);
+ break;
+ case 's':
+ if(def[0] == '\0') {
+ ERROR("Not enough arguments in symlink pseudo "
+ "definition \"%s\"\n", orig_def);
+ ERROR("Expected symlink\n");
+ goto error;
+ }
+
+ if(strlen(def) > 65535) {
+ ERROR("Symlink pseudo definition %s is greater than 65535"
+ " bytes!\n", def);
+ goto error;
+ }
+ symlink = def;
+ def += strlen(def);
+ break;
+ default:
+ ERROR("Unsupported type %c\n", type);
+ goto error;
+ }
+
+ /*
+ * Check for trailing junk after expected arguments
+ */
+ if(def[0] != '\0') {
+ ERROR("Unexpected trailing characters in pseudo file "
+ "definition \"%s\"\n", orig_def);
+ goto error;
+ }
+
+ if(mode > 07777) {
+ ERROR("Mode %o out of range\n", mode);
+ goto error;
+ }
+
+ uid = strtoll(suid, &ptr, 10);
+ if(*ptr == '\0') {
+ if(uid < 0 || uid > ((1LL << 32) - 1)) {
+ ERROR("Uid %s out of range\n", suid);
+ goto error;
+ }
+ } else {
+ struct passwd *pwuid = getpwnam(suid);
+ if(pwuid)
+ uid = pwuid->pw_uid;
+ else {
+ ERROR("Uid %s invalid uid or unknown user\n", suid);
+ goto error;
+ }
+ }
+
+ gid = strtoll(sgid, &ptr, 10);
+ if(*ptr == '\0') {
+ if(gid < 0 || gid > ((1LL << 32) - 1)) {
+ ERROR("Gid %s out of range\n", sgid);
+ goto error;
+ }
+ } else {
+ struct group *grgid = getgrnam(sgid);
+ if(grgid)
+ gid = grgid->gr_gid;
+ else {
+ ERROR("Gid %s invalid gid or unknown group\n", sgid);
+ goto error;
+ }
+ }
+
+ switch(type) {
+ case 'b':
+ mode |= S_IFBLK;
+ break;
+ case 'c':
+ mode |= S_IFCHR;
+ break;
+ case 'i':
+ if(ipc_type == 's')
+ mode |= S_IFSOCK;
+ else
+ mode |= S_IFIFO;
+ break;
+ case 'd':
+ mode |= S_IFDIR;
+ break;
+ case 'f':
+ mode |= S_IFREG;
+ break;
+ case 's':
+ /* permissions on symlinks are always rwxrwxrwx */
+ mode = 0777 | S_IFLNK;
+ break;
+ }
+
+ dev = malloc(sizeof(struct pseudo_dev));
+ if(dev == NULL)
+ MEM_ERROR();
+
+ dev->buf = malloc(sizeof(struct pseudo_stat));
+ if(dev->buf == NULL)
+ MEM_ERROR();
+
+ dev->type = type;
+ dev->buf->mode = mode;
+ dev->buf->uid = uid;
+ dev->buf->gid = gid;
+ dev->buf->major = major;
+ dev->buf->minor = minor;
+ dev->buf->mtime = time(NULL);
+ dev->buf->ino = pseudo_ino ++;
+
+ if(type == 'f') {
+ dev->pseudo_type = PSEUDO_FILE_PROCESS;
+ dev->command = strdup(command);
+ } else
+ dev->pseudo_type = PSEUDO_FILE_OTHER;
+
+ if(type == 's')
+ dev->symlink = strdup(symlink);
+
+ pseudo = add_pseudo_definition(pseudo, dev, name, name);
+
+ free(filename);
+ return TRUE;
+
+error:
+ print_definitions();
+ free(filename);
+ return FALSE;
+}
+
+
+static int read_pseudo_def(char *def, char *destination, char *pseudo_file, struct pseudo_file **file)
+{
+ int n, bytes;
+ int quoted = 0;
+ char type;
+ char *filename, *name;
+ char *orig_def = def;
+
+ /*
+ * Scan for filename, don't use sscanf() and "%s" because
+ * that can't handle filenames with spaces.
+ *
+ * Filenames with spaces should either escape (backslash) the
+ * space or use double quotes.
+ */
+ filename = malloc(strlen(def) + 1);
+ if(filename == NULL)
+ MEM_ERROR();
+
+ for(name = filename; (quoted || !isspace(*def)) && *def != '\0';) {
+ if(*def == '"') {
+ quoted = !quoted;
+ def ++;
+ continue;
+ }
+
+ if(*def == '\\') {
+ def ++;
+ if (*def == '\0')
+ break;
+ }
+ *name ++ = *def ++;
+ }
+ *name = '\0';
+
+ /* Skip any leading slashes (/) */
+ for(name = filename; *name == '/'; name ++);
+
+ if(*name == '\0') {
+ strcpy(filename, "/");
+ name = filename;
+ }
+
+ n = sscanf(def, " %c %n", &type, &bytes);
+ def += bytes;
+
+ if(n < 1) {
+ ERROR("Not enough or invalid arguments in pseudo file "
+ "definition \"%s\"\n", orig_def);
+ goto error;
+ }
+
+ if(type == 'x')
+ return read_pseudo_xattr(orig_def, filename, name, def);
+ else if(type == 'l')
+ return read_pseudo_def_link(orig_def, filename, name, def, destination);
+ else if(type == 'L')
+ return read_pseudo_def_pseudo_link(orig_def, filename, name, def);
+ else if(isupper(type))
+ return read_pseudo_def_extended(type, orig_def, filename, name, def, pseudo_file, file);
+ else
+ return read_pseudo_def_original(type, orig_def, filename, name, def);
+
+error:
+ print_definitions();
+ free(filename);
+ return FALSE;
+}
+
+
+int read_pseudo_definition(char *filename, char *destination)
+{
+ return read_pseudo_def(filename, destination, NULL, NULL);
+}
+
+
+int read_pseudo_file(char *filename, char *destination)
+{
+ FILE *fd;
+ char *def, *err, *line = NULL;
+ int res, size = 0;
+ struct pseudo_file *file = NULL;
+ long long bytes = 0;
+ int pseudo_stdin = strcmp(filename, "-") == 0;
+
+ if(pseudo_stdin)
+ fd = stdin;
+ else {
+ fd = fopen(filename, "r");
+ if(fd == NULL) {
+ ERROR("Could not open pseudo device file \"%s\" "
+ "because %s\n", filename, strerror(errno));
+ return FALSE;
+ }
+ }
+
+ while(1) {
+ int total = 0;
+
+ while(1) {
+ int len;
+
+ if(total + (MAX_LINE + 1) > size) {
+ line = realloc(line, size += (MAX_LINE + 1));
+ if(line == NULL)
+ MEM_ERROR();
+ }
+
+ err = fgets(line + total, MAX_LINE + 1, fd);
+ if(err == NULL)
+ break;
+
+ len = strlen(line + total);
+ total += len;
+ bytes += len;
+
+ if(len == MAX_LINE && line[total - 1] != '\n') {
+ /* line too large */
+ ERROR("Line too long when reading "
+ "pseudo file \"%s\", larger than "
+ "%d bytes\n", filename, MAX_LINE);
+ goto failed;
+ }
+
+ /*
+ * Remove '\n' terminator if it exists (the last line
+ * in the file may not be '\n' terminated)
+ */
+ if(len && line[total - 1] == '\n') {
+ line[-- total] = '\0';
+ len --;
+ }
+
+ /*
+ * If no line continuation then jump out to
+ * process line. Note, we have to be careful to
+ * check for "\\" (backslashed backslash) and to
+ * ensure we don't look at the previous line
+ */
+ if(len == 0 || line[total - 1] != '\\' || (len >= 2 &&
+ strcmp(line + total - 2, "\\\\") == 0))
+ break;
+ else
+ total --;
+ }
+
+ if(err == NULL) {
+ if(ferror(fd)) {
+ ERROR("Reading pseudo file \"%s\" failed "
+ "because %s\n", filename,
+ strerror(errno));
+ goto failed;
+ }
+
+ /*
+ * At EOF we are normally finished, but we have to
+ * check for the special case where we had "\" line
+ * continuation and then hit EOF immediately afterwards
+ */
+ if(total == 0)
+ break;
+ else
+ line[total] = '\0';
+ }
+
+ /* Skip any leading whitespace */
+ for(def = line; isspace(*def); def ++);
+
+ /* if line is now empty after skipping characters, skip it */
+ if(*def == '\0')
+ continue;
+
+ /* if it is a comment line, skip it. But we also have to check
+ * whether it is the start-of-data marker */
+ if(*def == '#') {
+ if(strcmp(def, "# START OF DATA - DO NOT MODIFY") == 0) {
+ if(file) {
+ file->start = bytes + 2;
+ file->current = 0;
+ file->fd = pseudo_stdin ? 0 : -1;
+ fgetc(fd);
+ fgetc(fd);
+ }
+ if(!pseudo_stdin)
+ fclose(fd);
+ free(line);
+ return TRUE;
+ } else
+ continue;
+ }
+
+ res = read_pseudo_def(def, destination, filename, &file);
+ if(res == FALSE)
+ goto failed;
+ }
+
+ if(file) {
+ /* No start-of-data marker found */
+ ERROR("No START OF DATA marker found in pseudo file %s\n", filename);
+ goto failed;
+ }
+
+ if(!pseudo_stdin)
+ fclose(fd);
+ free(line);
+ return TRUE;
+
+failed:
+ if(!pseudo_stdin)
+ fclose(fd);
+ free(line);
+ return FALSE;
+}
+
+
+struct pseudo *get_pseudo()
+{
+ return pseudo;
+}
+
+
+#ifdef SQUASHFS_TRACE
+static void dump_pseudo(struct pseudo *pseudo, char *string)
+{
+ int i, res;
+ char *path;
+
+ for(i = 0; i < pseudo->names; i++) {
+ struct pseudo_entry *entry = &pseudo->name[i];
+ if(string) {
+ res = asprintf(&path, "%s/%s", string, entry->name);
+ if(res == -1)
+ BAD_ERROR("asprintf failed in dump_pseudo\n");
+ } else
+ path = entry->name;
+ if(entry->dev)
+ ERROR("%s %c 0%o %d %d %d %d\n", path, entry->dev->type,
+ entry->dev->buf->mode & ~S_IFMT, entry->dev->buf->uid,
+ entry->dev->buf->gid, entry->dev->buf->major,
+ entry->dev->buf->minor);
+ if(entry->pseudo)
+ dump_pseudo(entry->pseudo, path);
+ if(string)
+ free(path);
+ }
+}
+
+
+void dump_pseudos()
+{
+ if (pseudo)
+ dump_pseudo(pseudo, NULL);
+}
+#else
+void dump_pseudos()
+{
+}
+#endif
diff --git a/squashfs-tools/pseudo.h b/squashfs-tools/pseudo.h
new file mode 100644
index 0000000..8372861
--- /dev/null
+++ b/squashfs-tools/pseudo.h
@@ -0,0 +1,107 @@
+#ifndef PSEUDO_H
+#define PSEUDO_H
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2009, 2010, 2014, 2017, 2021, 2022, 2023
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * pseudo.h
+ */
+
+#define PSEUDO_FILE_OTHER 1
+#define PSEUDO_FILE_PROCESS 2
+#define PSEUDO_FILE_DATA 4
+
+#define IS_PSEUDO(a) ((a)->pseudo)
+#define IS_PSEUDO_PROCESS(a) ((a)->pseudo && ((a)->pseudo->pseudo_type & PSEUDO_FILE_PROCESS))
+#define IS_PSEUDO_OTHER(a) ((a)->pseudo && ((a)->pseudo->pseudo_type & PSEUDO_FILE_OTHER))
+#define IS_PSEUDO_DATA(a) ((a)->pseudo && ((a)->pseudo->pseudo_type & PSEUDO_FILE_DATA))
+
+struct pseudo_stat {
+ unsigned int mode;
+ unsigned int uid;
+ unsigned int gid;
+ unsigned int major;
+ unsigned int minor;
+ time_t mtime;
+ int ino;
+};
+
+struct pseudo_file {
+ char *filename;
+ long long start;
+ long long current;
+ int fd;
+};
+
+struct pseudo_data {
+ struct pseudo_file *file;
+ long long offset;
+ long long length;
+ int sparse;
+};
+
+struct pseudo_dev {
+ char type;
+ int pseudo_type;
+ union {
+ struct pseudo_stat *buf;
+ struct stat *linkbuf;
+ };
+ union {
+ struct pseudo_data *data;
+ char *command;
+ char *symlink;
+ char *linkname;
+ };
+};
+
+struct pseudo_entry {
+ char *name;
+ char *pathname;
+ struct pseudo *pseudo;
+ struct pseudo_dev *dev;
+ struct pseudo_xattr *xattr;
+};
+
+struct pseudo {
+ int names;
+ int count;
+ struct pseudo_entry *name;
+};
+
+struct pseudo_xattr {
+ int count;
+ struct xattr_add *xattr;
+};
+
+extern struct pseudo *pseudo;
+
+extern long long read_bytes(int, void *, long long);
+extern int read_pseudo_definition(char *, char *);
+extern int read_pseudo_file(char *, char *);
+extern struct pseudo *pseudo_subdir(char *, struct pseudo *);
+extern struct pseudo_entry *pseudo_readdir(struct pseudo *);
+extern struct pseudo_dev *get_pseudo_file(int);
+extern int pseudo_exec_file(struct pseudo_dev *, int *);
+extern struct pseudo *get_pseudo();
+extern void dump_pseudos();
+extern char *get_element(char *target, char **targname);
+extern void print_definitions();
+#endif
diff --git a/squashfs-tools/pseudo_xattr.c b/squashfs-tools/pseudo_xattr.c
new file mode 100644
index 0000000..454e8e8
--- /dev/null
+++ b/squashfs-tools/pseudo_xattr.c
@@ -0,0 +1,176 @@
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * pseudo_xattr.c
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <regex.h>
+#include <dirent.h>
+
+#include "pseudo.h"
+#include "mksquashfs_error.h"
+#include "squashfs_fs.h"
+#include "xattr.h"
+
+#define TRUE 1
+#define FALSE 0
+
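+/*
+ * Prepend an xattr entry to the pseudo_xattr list for a pseudo
+ * definition, allocating the list head on first use.
+ */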
+static void add_xattr(struct pseudo_xattr **xattr, struct xattr_add *entry)
+{
+ if(*xattr == NULL) {
+ *xattr = malloc(sizeof(struct pseudo_xattr));
+ if(*xattr == NULL)
+ MEM_ERROR();
+
+ (*xattr)->xattr = entry;
+ entry->next = NULL;
+ (*xattr)->count = 1;
+ } else {
+ entry->next = (*xattr)->xattr;
+ (*xattr)->xattr = entry;
+ (*xattr)->count ++;
+ }
+}
+
+
+/*
+ * Add pseudo xattr to the set of pseudo definitions.
+ */
+static struct pseudo *add_pseudo_xattr(struct pseudo *pseudo, struct xattr_add *xattr,
+ char *target, char *alltarget)
+{
+ char *targname;
+ int i;
+
+ target = get_element(target, &targname);
+
+ if(pseudo == NULL) {
+ pseudo = malloc(sizeof(struct pseudo));
+ if(pseudo == NULL)
+ MEM_ERROR();
+
+ pseudo->names = 0;
+ pseudo->count = 0;
+ pseudo->name = NULL;
+ }
+
+ for(i = 0; i < pseudo->names; i++)
+ if(strcmp(pseudo->name[i].name, targname) == 0)
+ break;
+
+ if(i == pseudo->names) {
+ /* allocate new name entry */
+ pseudo->names ++;
+ pseudo->name = realloc(pseudo->name, (i + 1) *
+ sizeof(struct pseudo_entry));
+ if(pseudo->name == NULL)
+ MEM_ERROR();
+ pseudo->name[i].name = targname;
+ pseudo->name[i].pathname = NULL;
+ pseudo->name[i].dev = NULL;
+ pseudo->name[i].xattr = NULL;
+
+ if(target[0] == '\0') {
+ /* at leaf pathname component */
+ pseudo->name[i].pathname = strdup(alltarget);
+ pseudo->name[i].pseudo = NULL;
+ add_xattr(&pseudo->name[i].xattr, xattr);
+ } else {
+ /* recurse adding child components */
+ pseudo->name[i].pseudo = add_pseudo_xattr(NULL, xattr,
+ target, alltarget);
+ }
+ } else {
+ /* existing matching entry */
+
+ free(targname);
+
+ if(target[0] == '\0') {
+ /* Add xattr to this entry */
+ pseudo->name[i].pathname = strdup(alltarget);
+ add_xattr(&pseudo->name[i].xattr, xattr);
+ } else {
+ /* recurse adding child components */
+ pseudo->name[i].pseudo = add_pseudo_xattr(pseudo->name[i].pseudo, xattr, target, alltarget);
+ }
+ }
+
+ return pseudo;
+}
+
+
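+/*
+ * Add an xattr pseudo definition for the given target pathname,
+ * handling the special case where the target is the root directory
+ * ("/").
+ */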
+static struct pseudo *add_pseudo_xattr_definition(struct pseudo *pseudo,
+ struct xattr_add *xattr, char *target, char *alltarget)
+{
+ /* special case if a root pseudo definition is being added */
+ if(strcmp(target, "/") == 0) {
+ /* if already have a root pseudo just add xattr */
+ if(pseudo && pseudo->names == 1 && strcmp(pseudo->name[0].name, "/") == 0) {
+ add_xattr(&pseudo->name[0].xattr, xattr);
+ return pseudo;
+ } else {
+ struct pseudo *new = malloc(sizeof(struct pseudo));
+ if(new == NULL)
+ MEM_ERROR();
+
+ new->names = 1;
+ new->count = 0;
+ new->name = malloc(sizeof(struct pseudo_entry));
+ if(new->name == NULL)
+ MEM_ERROR();
+
+ new->name[0].name = "/";
+ new->name[0].pseudo = pseudo;
+ new->name[0].pathname = "/";
+ new->name[0].dev = NULL;
+ new->name[0].xattr = NULL;
+ add_xattr(&new->name[0].xattr, xattr);
+ return new;
+ }
+ }
+
+ /* if there's a root pseudo definition, skip it before walking target */
+ if(pseudo && pseudo->names == 1 && strcmp(pseudo->name[0].name, "/") == 0) {
+ pseudo->name[0].pseudo = add_pseudo_xattr(pseudo->name[0].pseudo, xattr, target, alltarget);
+ return pseudo;
+ } else
+ return add_pseudo_xattr(pseudo, xattr, target, alltarget);
+}
+
+
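+/*
+ * Parse an xattr pseudo definition and add it to the pseudo
+ * definitions.  Returns TRUE on success, or FALSE if the xattr
+ * specification could not be parsed.
+ */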
+int read_pseudo_xattr(char *orig_def, char *filename, char *name, char *def)
+{
+ struct xattr_add *xattr = xattr_parse(def, "", "pseudo xattr");
+
+ if(xattr == NULL) {
+ print_definitions();
+ free(filename);
+ return FALSE;
+ }
+
+ pseudo = add_pseudo_xattr_definition(pseudo, xattr, name, name);
+
+ free(filename);
+ return TRUE;
+}
diff --git a/squashfs-tools/read_fs.c b/squashfs-tools/read_fs.c
new file mode 100644
index 0000000..f9976c0
--- /dev/null
+++ b/squashfs-tools/read_fs.c
@@ -0,0 +1,1090 @@
+/*
+ * Read a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+ * 2012, 2013, 2014, 2019, 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * read_fs.c
+ */
+
+#define TRUE 1
+#define FALSE 0
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <limits.h>
+#include <dirent.h>
+#include <stdlib.h>
+#include <regex.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_swap.h"
+#include "compressor.h"
+#include "mksquashfs.h"
+#include "xattr.h"
+#include "mksquashfs_error.h"
+
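+/*
+ * Read a metadata block from the filesystem at byte position start.
+ * The two byte block header gives the on-disk length and whether the
+ * block is compressed; compressed blocks are decompressed into the
+ * caller supplied buffer.  If next is non-NULL it is set to the start
+ * of the following block.  Returns the number of uncompressed bytes,
+ * or 0 on failure (including when expected is non-zero and the
+ * uncompressed size does not match it).
+ */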
+int read_block(int fd, long long start, long long *next, int expected,
+ void *block)
+{
+ unsigned short c_byte;
+ int res, compressed;
+ int outlen = expected ? expected : SQUASHFS_METADATA_SIZE;
+
+ /* Read block size */
+ res = read_fs_bytes(fd, start, 2, &c_byte);
+ if(res == 0)
+ return 0;
+
+ SQUASHFS_INSWAP_SHORTS(&c_byte, 1);
+ compressed = SQUASHFS_COMPRESSED(c_byte);
+ c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte);
+
+ /*
+ * The block size should not be larger than
+ * the uncompressed size (or max uncompressed size if
+ * expected is 0)
+ */
+ if (c_byte > outlen)
+ return 0;
+
+ if(compressed) {
+ char buffer[c_byte];
+ int error;
+
+ res = read_fs_bytes(fd, start + 2, c_byte, buffer);
+ if(res == 0)
+ return 0;
+
+ res = compressor_uncompress(comp, block, buffer, c_byte,
+ outlen, &error);
+ if(res == -1) {
+ ERROR("%s uncompress failed with error code %d\n",
+ comp->name, error);
+ return 0;
+ }
+ } else {
+ res = read_fs_bytes(fd, start + 2, c_byte, block);
+ if(res == 0)
+ return 0;
+ res = c_byte;
+ }
+
+ if(next)
+ *next = start + 2 + c_byte;
+
+ /*
+ * if expected, then check the (uncompressed) return data
+ * is of the expected size
+ */
+ if(expected && expected != res)
+ return 0;
+ else
+ return res;
+}
+
+
+#define NO_BYTES(SIZE) \
+ (bytes - (cur_ptr - inode_table) < (SIZE))
+
+#define NO_INODE_BYTES(INODE) NO_BYTES(sizeof(struct INODE))
+
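+/*
+ * Read and scan the inode table of the filesystem being appended to.
+ * The table is read into memory a metadata block at a time, the root
+ * directory inode is located and returned in dir_inode, and every
+ * inode is walked to count files, directories, symlinks, devices,
+ * fifos and sockets, to register the uids/gids in use, and to record
+ * the blocks and fragments of regular files via add_file().  Returns
+ * the uncompressed inode table, or NULL if corruption is detected.
+ */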
+unsigned char *scan_inode_table(int fd, long long start, long long end,
+ long long root_inode_start, int root_inode_offset,
+ struct squashfs_super_block *sBlk, union squashfs_inode_header
+ *dir_inode, long long *root_inode_block, unsigned int
+ *root_inode_size, long long *uncompressed_file, long long
+ *uncompressed_directory, unsigned int *file_count, unsigned int *sym_count,
+ unsigned int *dev_count, unsigned int *dir_count, unsigned int *fifo_count,
+ unsigned int *sock_count, unsigned int *id_table)
+{
+ unsigned char *cur_ptr;
+ unsigned char *inode_table = NULL;
+ int byte, files = 0;
+ unsigned int directory_start_block;
+ struct squashfs_base_inode_header base;
+ long long alloc_size, bytes = 0, size = 0;
+
+ TRACE("scan_inode_table: start 0x%llx, end 0x%llx, root_inode_start "
+ "0x%llx\n", start, end, root_inode_start);
+
+ /*
+ * Use the size of the compressed inode table as an initial
+ * memory allocation value, and the reallocation value, if
+ * this is too small.
+ *
+ * With a 50% compression ratio, this should require 2 alloc calls
+ * With a 25% compression ratio, this should require 4 alloc calls
+ * With a 12.5% compression ratio, this should require 8 alloc calls
+ *
+ * Always round to a multiple of SQUASHFS_METADATA_SIZE
+ */
+ alloc_size = ((end - start) + SQUASHFS_METADATA_SIZE) & ~(SQUASHFS_METADATA_SIZE - 1);
+
+ /* Rogue value used to check if it was found */
+ *root_inode_block = -1LL;
+ while(start < end) {
+ if(start == root_inode_start) {
+ TRACE("scan_inode_table: read compressed block 0x%llx "
+ "containing root inode\n", start);
+ *root_inode_block = bytes;
+ }
+ if(size - bytes < SQUASHFS_METADATA_SIZE) {
+ inode_table = realloc(inode_table, size += alloc_size);
+ if(inode_table == NULL)
+ MEM_ERROR();
+ }
+ TRACE("scan_inode_table: reading block 0x%llx\n", start);
+ byte = read_block(fd, start, &start, 0, inode_table + bytes);
+ if(byte == 0)
+ goto corrupted;
+
+ bytes += byte;
+
+ /* If this is not the last metadata block in the inode table
+ * then it should be SQUASHFS_METADATA_SIZE in size.
+ * Note, we can't use expected in read_block() above for this
+ * because we don't know if this is the last block until
+ * after reading.
+ */
+ if(start != end && byte != SQUASHFS_METADATA_SIZE)
+ goto corrupted;
+ }
+
+ /*
+ * We expect to have found the metadata block containing the
+ * root inode in the above inode_table metadata block scan. If it
+ * hasn't been found then the filesystem is corrupted
+ */
+ if(*root_inode_block == -1LL)
+ goto corrupted;
+
+ /*
+ * The number of bytes available after the root inode metadata block
+ * should be at least the root inode offset + the size of a
+ * regular directory inode, if not the filesystem is corrupted
+ *
+ * +-----------------------+-----------------------+
+ * | | directory |
+ * | | inode |
+ * +-----------------------+-----------------------+
+ * ^ ^ ^
+ * *root_inode_block root_inode_offset bytes
+ */
+ if((bytes - *root_inode_block) < (root_inode_offset +
+ sizeof(struct squashfs_dir_inode_header)))
+ goto corrupted;
+
+ /*
+ * Read the last inode in the inode table, which is the root directory
+ * inode, and get the directory start block. This is used when
+ * calculating the uncompressed directory size. The directory
+ * bytes in the last block will be counted as normal.
+ *
+ * Note, the previous check ensures the following calculation won't
+ * underflow, and we won't access beyond the buffer
+ */
+ *root_inode_size = bytes - (*root_inode_block + root_inode_offset);
+ bytes = *root_inode_block + root_inode_offset;
+ SQUASHFS_SWAP_DIR_INODE_HEADER(inode_table + bytes, &dir_inode->dir);
+
+ if(dir_inode->base.inode_type == SQUASHFS_DIR_TYPE) {
+ directory_start_block = dir_inode->dir.start_block;
+ if(*root_inode_size < sizeof(struct squashfs_dir_inode_header))
+ /* corrupted filesystem */
+ goto corrupted;
+ } else if(dir_inode->base.inode_type == SQUASHFS_LDIR_TYPE) {
+ if(*root_inode_size < sizeof(struct squashfs_ldir_inode_header))
+ /* corrupted filesystem */
+ goto corrupted;
+ SQUASHFS_SWAP_LDIR_INODE_HEADER(inode_table + bytes,
+ &dir_inode->ldir);
+ directory_start_block = dir_inode->ldir.start_block;
+ } else
+ /* bad type, corrupted filesystem */
+ goto corrupted;
+
+ if(dir_inode->base.uid >= sBlk->no_ids) {
+ ERROR("File system corrupted - uid index in inode too large (uid: %d)\n", dir_inode->base.uid);
+ goto corrupted2;
+ }
+
+ if(dir_inode->base.guid >= sBlk->no_ids) {
+ ERROR("File system corrupted - gid index in inode too large (gid: %d)\n", dir_inode->base.guid);
+ goto corrupted2;
+ }
+
+ get_uid(id_table[dir_inode->base.uid]);
+ get_guid(id_table[dir_inode->base.guid]);
+
+ /* allocate fragment to file mapping table */
+ file_mapping = calloc(sBlk->fragments, sizeof(struct append_file *));
+ if(file_mapping == NULL)
+ MEM_ERROR();
+
+ for(cur_ptr = inode_table; cur_ptr < inode_table + bytes; files ++) {
+ /*
+ * There should always be enough bytes to read the base
+ * inode header
+ */
+ if(NO_INODE_BYTES(squashfs_base_inode_header))
+ /* corrupted filesystem */
+ goto corrupted;
+
+ SQUASHFS_SWAP_BASE_INODE_HEADER(cur_ptr, &base);
+
+ TRACE("scan_inode_table: processing inode @ byte position "
+ "0x%x, type 0x%x\n",
+ (unsigned int) (cur_ptr - inode_table),
+ base.inode_type);
+
+ if(base.uid >= sBlk->no_ids) {
+ ERROR("File system corrupted - uid index in inode too large (uid: %d)\n", base.uid);
+ goto corrupted2;
+ }
+
+ if(base.guid >= sBlk->no_ids) {
+ ERROR("File system corrupted - gid index in inode too large (gid: %d)\n", base.guid);
+ goto corrupted2;
+ }
+
+ get_uid(id_table[base.uid]);
+ get_guid(id_table[base.guid]);
+
+ switch(base.inode_type) {
+ case SQUASHFS_FILE_TYPE: {
+ struct squashfs_reg_inode_header inode;
+ int frag_bytes, blocks, i;
+ long long start, file_bytes = 0;
+ unsigned int *block_list;
+
+ /*
+ * There should always be enough bytes to read an
+ * inode of the expected type
+ */
+ if(NO_INODE_BYTES(squashfs_reg_inode_header))
+ /* corrupted filesystem */
+ goto corrupted;
+
+ SQUASHFS_SWAP_REG_INODE_HEADER(cur_ptr, &inode);
+
+ frag_bytes = inode.fragment == SQUASHFS_INVALID_FRAG ?
+ 0 : inode.file_size % sBlk->block_size;
+ blocks = inode.fragment == SQUASHFS_INVALID_FRAG ?
+ (inode.file_size + sBlk->block_size - 1) >>
+ sBlk->block_log : inode.file_size >>
+ sBlk->block_log;
+ start = inode.start_block;
+
+ TRACE("scan_inode_table: regular file, file_size %d, "
+ "blocks %d\n", inode.file_size, blocks);
+
+ cur_ptr += sizeof(inode);
+
+ if(NO_BYTES(blocks * sizeof(unsigned int)))
+ /* corrupted filesystem */
+ goto corrupted;
+
+ block_list = malloc(blocks * sizeof(unsigned int));
+ if(block_list == NULL)
+ MEM_ERROR();
+
+ SQUASHFS_SWAP_INTS(cur_ptr, block_list, blocks);
+
+ *uncompressed_file += inode.file_size;
+ (*file_count) ++;
+
+ for(i = 0; i < blocks; i++)
+ file_bytes +=
+ SQUASHFS_COMPRESSED_SIZE_BLOCK
+ (block_list[i]);
+
+ if(inode.fragment != SQUASHFS_INVALID_FRAG &&
+ inode.fragment >= sBlk->fragments) {
+ free(block_list);
+ goto corrupted;
+ }
+
+ add_file(start, inode.file_size, file_bytes,
+ block_list, blocks, inode.fragment,
+ inode.offset, frag_bytes);
+
+ cur_ptr += blocks * sizeof(unsigned int);
+ break;
+ }
+ case SQUASHFS_LREG_TYPE: {
+ struct squashfs_lreg_inode_header inode;
+ int frag_bytes, blocks, i;
+ long long start, file_bytes = 0;
+ unsigned int *block_list;
+
+ /*
+ * There should always be enough bytes to read an
+ * inode of the expected type
+ */
+ if(NO_INODE_BYTES(squashfs_lreg_inode_header))
+ /* corrupted filesystem */
+ goto corrupted;
+
+ SQUASHFS_SWAP_LREG_INODE_HEADER(cur_ptr, &inode);
+
+ frag_bytes = inode.fragment == SQUASHFS_INVALID_FRAG ?
+ 0 : inode.file_size % sBlk->block_size;
+ blocks = inode.fragment == SQUASHFS_INVALID_FRAG ?
+ (inode.file_size + sBlk->block_size - 1) >>
+ sBlk->block_log : inode.file_size >>
+ sBlk->block_log;
+ start = inode.start_block;
+
+ TRACE("scan_inode_table: extended regular "
+ "file, file_size %lld, blocks %d\n",
+ inode.file_size, blocks);
+
+ cur_ptr += sizeof(inode);
+
+ if(NO_BYTES(blocks * sizeof(unsigned int)))
+ /* corrupted filesystem */
+ goto corrupted;
+
+ block_list = malloc(blocks * sizeof(unsigned int));
+ if(block_list == NULL)
+ MEM_ERROR();
+
+ SQUASHFS_SWAP_INTS(cur_ptr, block_list, blocks);
+
+ *uncompressed_file += inode.file_size;
+ (*file_count) ++;
+
+ for(i = 0; i < blocks; i++)
+ file_bytes +=
+ SQUASHFS_COMPRESSED_SIZE_BLOCK
+ (block_list[i]);
+
+ if(inode.fragment != SQUASHFS_INVALID_FRAG &&
+ inode.fragment >= sBlk->fragments) {
+ free(block_list);
+ goto corrupted;
+ }
+
+ add_file(start, inode.file_size, file_bytes,
+ block_list, blocks, inode.fragment,
+ inode.offset, frag_bytes);
+
+ cur_ptr += blocks * sizeof(unsigned int);
+ break;
+ }
+ case SQUASHFS_SYMLINK_TYPE:
+ case SQUASHFS_LSYMLINK_TYPE: {
+ struct squashfs_symlink_inode_header inode;
+
+ /*
+ * There should always be enough bytes to read an
+ * inode of the expected type
+ */
+ if(NO_INODE_BYTES(squashfs_symlink_inode_header))
+ /* corrupted filesystem */
+ goto corrupted;
+
+ SQUASHFS_SWAP_SYMLINK_INODE_HEADER(cur_ptr, &inode);
+
+ (*sym_count) ++;
+
+ cur_ptr += sizeof(inode);
+
+ if (inode.inode_type == SQUASHFS_LSYMLINK_TYPE) {
+ if(NO_BYTES(inode.symlink_size +
+ sizeof(unsigned int)))
+ /* corrupted filesystem */
+ goto corrupted;
+ cur_ptr += inode.symlink_size +
+ sizeof(unsigned int);
+ } else {
+ if(NO_BYTES(inode.symlink_size))
+ /* corrupted filesystem */
+ goto corrupted;
+ cur_ptr += inode.symlink_size;
+ }
+ break;
+ }
+ case SQUASHFS_DIR_TYPE: {
+ struct squashfs_dir_inode_header dir_inode;
+
+ /*
+ * There should always be enough bytes to read an
+ * inode of the expected type
+ */
+ if(NO_INODE_BYTES(squashfs_dir_inode_header))
+ /* corrupted filesystem */
+ goto corrupted;
+
+ SQUASHFS_SWAP_DIR_INODE_HEADER(cur_ptr, &dir_inode);
+
+ if(dir_inode.start_block < directory_start_block)
+ *uncompressed_directory += dir_inode.file_size;
+
+ (*dir_count) ++;
+ cur_ptr += sizeof(struct squashfs_dir_inode_header);
+ break;
+ }
+ case SQUASHFS_LDIR_TYPE: {
+ struct squashfs_ldir_inode_header dir_inode;
+ int i;
+
+ /*
+ * There should always be enough bytes to read an
+ * inode of the expected type
+ */
+ if(NO_INODE_BYTES(squashfs_ldir_inode_header))
+ /* corrupted filesystem */
+ goto corrupted;
+
+ SQUASHFS_SWAP_LDIR_INODE_HEADER(cur_ptr, &dir_inode);
+
+ if(dir_inode.start_block < directory_start_block)
+ *uncompressed_directory += dir_inode.file_size;
+
+ (*dir_count) ++;
+ cur_ptr += sizeof(struct squashfs_ldir_inode_header);
+
+ /*
+ * Read and check the directory index for correctness
+ */
+ for(i = 0; i < dir_inode.i_count; i++) {
+ struct squashfs_dir_index index;
+
+ if(NO_BYTES(sizeof(index)))
+ /* corrupted filesystem */
+ goto corrupted;
+
+ SQUASHFS_SWAP_DIR_INDEX(cur_ptr, &index);
+
+ cur_ptr += sizeof(index);
+
+ if(NO_BYTES(index.size + 1))
+ /* corrupted filesystem */
+ goto corrupted;
+
+ cur_ptr += index.size + 1;
+ }
+ break;
+ }
+ case SQUASHFS_BLKDEV_TYPE:
+ case SQUASHFS_CHRDEV_TYPE:
+ /*
+ * There should always be enough bytes to read an
+ * inode of the expected type
+ */
+ if(NO_INODE_BYTES(squashfs_dev_inode_header))
+ /* corrupted filesystem */
+ goto corrupted;
+
+ (*dev_count) ++;
+ cur_ptr += sizeof(struct squashfs_dev_inode_header);
+ break;
+ case SQUASHFS_LBLKDEV_TYPE:
+ case SQUASHFS_LCHRDEV_TYPE:
+ /*
+ * There should always be enough bytes to read an
+ * inode of the expected type
+ */
+ if(NO_INODE_BYTES(squashfs_ldev_inode_header))
+ /* corrupted filesystem */
+ goto corrupted;
+
+ (*dev_count) ++;
+ cur_ptr += sizeof(struct squashfs_ldev_inode_header);
+ break;
+ case SQUASHFS_FIFO_TYPE:
+ /*
+ * There should always be enough bytes to read an
+ * inode of the expected type
+ */
+ if(NO_INODE_BYTES(squashfs_ipc_inode_header))
+ /* corrupted filesystem */
+ goto corrupted;
+
+ (*fifo_count) ++;
+ cur_ptr += sizeof(struct squashfs_ipc_inode_header);
+ break;
+ case SQUASHFS_LFIFO_TYPE:
+ /*
+ * There should always be enough bytes to read an
+ * inode of the expected type
+ */
+ if(NO_INODE_BYTES(squashfs_lipc_inode_header))
+ /* corrupted filesystem */
+ goto corrupted;
+
+ (*fifo_count) ++;
+ cur_ptr += sizeof(struct squashfs_lipc_inode_header);
+ break;
+ case SQUASHFS_SOCKET_TYPE:
+ /*
+ * There should always be enough bytes to read an
+ * inode of the expected type
+ */
+ if(NO_INODE_BYTES(squashfs_ipc_inode_header))
+ /* corrupted filesystem */
+ goto corrupted;
+
+ (*sock_count) ++;
+ cur_ptr += sizeof(struct squashfs_ipc_inode_header);
+ break;
+ case SQUASHFS_LSOCKET_TYPE:
+ /*
+ * There should always be enough bytes to read an
+ * inode of the expected type
+ */
+ if(NO_INODE_BYTES(squashfs_lipc_inode_header))
+ /* corrupted filesystem */
+ goto corrupted;
+
+ (*sock_count) ++;
+ cur_ptr += sizeof(struct squashfs_lipc_inode_header);
+ break;
+ default:
+ ERROR("Unknown inode type %d in scan_inode_table!\n",
+ base.inode_type);
+ goto corrupted;
+ }
+ }
+
+ if(!quiet)
+ printf("Read existing filesystem, %d inodes scanned\n", files);
+
+ return inode_table;
+
+corrupted:
+ ERROR("scan_inode_table: filesystem corruption detected in "
+ "scanning metadata\n");
+corrupted2:
+ free(inode_table);
+ return NULL;
+}
+
+
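+/*
+ * Read and validate the superblock of the filesystem being appended
+ * to, checking the magic number, the major/minor version and the
+ * compression type, and reading any compressor specific options
+ * stored on disk.  Unless quiet is set a summary of the filesystem is
+ * printed.  Returns the compressor in use, or NULL on failure.
+ */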
+struct compressor *read_super(int fd, struct squashfs_super_block *sBlk, char *source)
+{
+ int res, bytes = 0;
+ char buffer[SQUASHFS_METADATA_SIZE] __attribute__ ((aligned));
+
+ res = read_fs_bytes(fd, SQUASHFS_START, sizeof(struct squashfs_super_block),
+ sBlk);
+ if(res == 0) {
+ ERROR("Can't find a SQUASHFS superblock on %s\n",
+ source);
+ ERROR("Wrong filesystem or filesystem is corrupted!\n");
+ goto failed_mount;
+ }
+
+ SQUASHFS_INSWAP_SUPER_BLOCK(sBlk);
+
+ if(sBlk->s_magic != SQUASHFS_MAGIC) {
+ if(sBlk->s_magic == SQUASHFS_MAGIC_SWAP)
+ ERROR("Pre 4.0 big-endian filesystem on %s, appending"
+ " to this is unsupported\n", source);
+ else {
+ ERROR("Can't find a SQUASHFS superblock on %s\n",
+ source);
+ ERROR("Wrong filesystem or filesystem is corrupted!\n");
+ }
+ goto failed_mount;
+ }
+
+ /* Check the MAJOR & MINOR versions */
+ if(sBlk->s_major != SQUASHFS_MAJOR || sBlk->s_minor > SQUASHFS_MINOR) {
+ if(sBlk->s_major < 4)
+ ERROR("Filesystem on %s is a SQUASHFS %d.%d filesystem."
+ " Appending\nto SQUASHFS %d.%d filesystems is "
+ "not supported. Please convert it to a "
+ "SQUASHFS 4 filesystem\n", source,
+ sBlk->s_major,
+ sBlk->s_minor, sBlk->s_major, sBlk->s_minor);
+ else
+ ERROR("Filesystem on %s is %d.%d, which is a later "
+ "filesystem version than I support\n",
+ source, sBlk->s_major, sBlk->s_minor);
+ goto failed_mount;
+ }
+
+ /* Check the compression type */
+ comp = lookup_compressor_id(sBlk->compression);
+ if(!comp->supported) {
+ ERROR("Filesystem on %s uses %s compression, this is "
+ "unsupported by this version\n", source, comp->name);
+ ERROR("Compressors available:\n");
+ display_compressors(stderr, "", "");
+ goto failed_mount;
+ }
+
+ /*
+ * Read extended superblock information from disk.
+ *
+ * Read compressor specific options from disk if present, and pass
+ * to compressor to set compressor options.
+ *
+ * Note, if there's no compressor options present, the compressor
+ * is still called to set the default options (the defaults may have
+ * been changed by the user specifying options on the command
+	 * line which need to be overridden).
+ *
+ * Compressor_extract_options is also used to ensure that
+ * we know how to decompress a filesystem compressed with these
+ * compression options.
+ */
+ if(SQUASHFS_COMP_OPTS(sBlk->flags)) {
+ bytes = read_block(fd, sizeof(*sBlk), NULL, 0, buffer);
+
+ if(bytes == 0) {
+ ERROR("Failed to read compressor options from append "
+ "filesystem\n");
+ ERROR("Filesystem corrupted?\n");
+ goto failed_mount;
+ }
+ }
+
+ res = compressor_extract_options(comp, sBlk->block_size, buffer, bytes);
+ if(res == -1) {
+ ERROR("Compressor failed to set compressor options\n");
+ goto failed_mount;
+ }
+
+ if(quiet)
+ return comp;
+
+ printf("Found a valid %sSQUASHFS superblock on %s.\n",
+ SQUASHFS_EXPORTABLE(sBlk->flags) ? "exportable " : "", source);
+ printf("\tCompression used %s\n", comp->name);
+ printf("\tInodes are %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_INODES(sBlk->flags) ? "un" : "");
+ printf("\tData is %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_DATA(sBlk->flags) ? "un" : "");
+ printf("\tFragments are %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_FRAGMENTS(sBlk->flags) ? "un" : "");
+ printf("\tXattrs are %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_XATTRS(sBlk->flags) ? "un" : "");
+ printf("\tFragments are %spresent in the filesystem\n",
+ SQUASHFS_NO_FRAGMENTS(sBlk->flags) ? "not " : "");
+ printf("\tAlways-use-fragments option is %sspecified\n",
+ SQUASHFS_ALWAYS_FRAGMENTS(sBlk->flags) ? "" : "not ");
+ printf("\tDuplicates are %sremoved\n",
+ SQUASHFS_DUPLICATES(sBlk->flags) ? "" : "not ");
+ printf("\tXattrs are %sstored\n",
+ SQUASHFS_NO_XATTRS(sBlk->flags) ? "not " : "");
+ printf("\tFilesystem size %.2f Kbytes (%.2f Mbytes)\n",
+ sBlk->bytes_used / 1024.0, sBlk->bytes_used
+ / (1024.0 * 1024.0));
+ printf("\tBlock size %d\n", sBlk->block_size);
+ printf("\tNumber of fragments %u\n", sBlk->fragments);
+ printf("\tNumber of inodes %d\n", sBlk->inodes);
+ printf("\tNumber of ids %d\n", sBlk->no_ids);
+ TRACE("sBlk->inode_table_start %llx\n", sBlk->inode_table_start);
+ TRACE("sBlk->directory_table_start %llx\n",
+ sBlk->directory_table_start);
+ TRACE("sBlk->id_table_start %llx\n", sBlk->id_table_start);
+ TRACE("sBlk->fragment_table_start %llx\n", sBlk->fragment_table_start);
+ TRACE("sBlk->lookup_table_start %llx\n", sBlk->lookup_table_start);
+ TRACE("sBlk->xattr_id_table_start %llx\n", sBlk->xattr_id_table_start);
+ printf("\n");
+
+ return comp;
+
+failed_mount:
+ return NULL;
+}
+
+
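+/*
+ * Read the metadata blocks containing the root directory of the
+ * filesystem being appended to.  If root_entries is set, the
+ * directory headers and entries are parsed and each entry is passed
+ * to push_directory_entry().  On success the uncompressed directory
+ * table is returned and *last_directory_block is set to the start of
+ * the last metadata block read, relative to the directory table
+ * start; NULL is returned on failure.
+ */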
+static unsigned char *squashfs_readdir(int fd, int root_entries,
+ unsigned int directory_start_block, int offset, unsigned int dir_size,
+ unsigned int *last_directory_block, struct squashfs_super_block *sBlk,
+ void (push_directory_entry)(char *, squashfs_inode, unsigned int, int))
+{
+ struct squashfs_dir_header dirh;
+ char buffer[sizeof(struct squashfs_dir_entry) + SQUASHFS_NAME_LEN + 1]
+ __attribute__ ((aligned));
+ struct squashfs_dir_entry *dire = (struct squashfs_dir_entry *) buffer;
+ unsigned char *directory_table = NULL;
+ int byte, dir_count;
+ long long start = sBlk->directory_table_start + directory_start_block;
+ long long last_start_block = start, size = dir_size, bytes = 0;
+
+ size += offset;
+ directory_table = malloc((size + SQUASHFS_METADATA_SIZE * 2 - 1) &
+ ~(SQUASHFS_METADATA_SIZE - 1));
+ if(directory_table == NULL)
+ MEM_ERROR();
+
+ while(bytes < size) {
+ int expected = (size - bytes) >= SQUASHFS_METADATA_SIZE ?
+ SQUASHFS_METADATA_SIZE : 0;
+
+ TRACE("squashfs_readdir: reading block 0x%llx, bytes read so "
+ "far %lld\n", start, bytes);
+
+ last_start_block = start;
+ byte = read_block(fd, start, &start, expected, directory_table + bytes);
+ if(byte == 0) {
+ ERROR("Failed to read directory\n");
+ ERROR("Filesystem corrupted?\n");
+ free(directory_table);
+ return NULL;
+ }
+ bytes += byte;
+ }
+
+ if(!root_entries)
+ goto all_done;
+
+ bytes = offset;
+ while(bytes < size) {
+ SQUASHFS_SWAP_DIR_HEADER(directory_table + bytes, &dirh);
+
+ dir_count = dirh.count + 1;
+
+ /* dir_count should never be larger than SQUASHFS_DIR_COUNT */
+ if(dir_count > SQUASHFS_DIR_COUNT) {
+ ERROR("File system corrupted: too many entries in directory\n");
+ free(directory_table);
+ return NULL;
+ }
+
+ TRACE("squashfs_readdir: Read directory header @ byte position "
+ "0x%llx, 0x%x directory entries\n", bytes, dir_count);
+ bytes += sizeof(dirh);
+
+ while(dir_count--) {
+ SQUASHFS_SWAP_DIR_ENTRY(directory_table + bytes, dire);
+ bytes += sizeof(*dire);
+
+ /* size should never be SQUASHFS_NAME_LEN or larger */
+ if(dire->size >= SQUASHFS_NAME_LEN) {
+ ERROR("File system corrupted: filename too long\n");
+ free(directory_table);
+ return NULL;
+ }
+
+ memcpy(dire->name, directory_table + bytes,
+ dire->size + 1);
+ dire->name[dire->size + 1] = '\0';
+ TRACE("squashfs_readdir: pushing directory entry %s, "
+ "inode %x:%x, type 0x%x\n", dire->name,
+ dirh.start_block, dire->offset, dire->type);
+ push_directory_entry(dire->name,
+ SQUASHFS_MKINODE(dirh.start_block,
+ dire->offset), dirh.inode_number +
+ dire->inode_number, dire->type);
+ bytes += dire->size + 1;
+ }
+ }
+
+all_done:
+ *last_directory_block = (unsigned int) last_start_block -
+ sBlk->directory_table_start;
+ return directory_table;
+}
+
+
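+/*
+ * Read the uid/gid lookup table, decompressing each metadata block
+ * referenced by its index, and add every id to the in-memory id
+ * tables via create_id().  Returns the id table, or NULL on failure.
+ */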
+static unsigned int *read_id_table(int fd, struct squashfs_super_block *sBlk)
+{
+ int indexes = SQUASHFS_ID_BLOCKS(sBlk->no_ids);
+ long long index[indexes];
+ int bytes = SQUASHFS_ID_BYTES(sBlk->no_ids);
+ unsigned int *id_table;
+ int res, i;
+
+ id_table = malloc(bytes);
+ if(id_table == NULL)
+ MEM_ERROR();
+
+ res = read_fs_bytes(fd, sBlk->id_table_start,
+ SQUASHFS_ID_BLOCK_BYTES(sBlk->no_ids), index);
+ if(res == 0) {
+ ERROR("Failed to read id table index\n");
+ ERROR("Filesystem corrupted?\n");
+ free(id_table);
+ return NULL;
+ }
+
+ SQUASHFS_INSWAP_ID_BLOCKS(index, indexes);
+
+ for(i = 0; i < indexes; i++) {
+ int expected = (i + 1) != indexes ? SQUASHFS_METADATA_SIZE :
+ bytes & (SQUASHFS_METADATA_SIZE - 1);
+ int length = read_block(fd, index[i], NULL, expected,
+ ((unsigned char *) id_table) +
+ (i * SQUASHFS_METADATA_SIZE));
+ TRACE("Read id table block %d, from 0x%llx, length %d\n", i,
+ index[i], length);
+ if(length == 0) {
+ ERROR("Failed to read id table block %d, from 0x%llx, "
+ "length %d\n", i, index[i], length);
+ ERROR("Filesystem corrupted?\n");
+ free(id_table);
+ return NULL;
+ }
+ }
+
+ SQUASHFS_INSWAP_INTS(id_table, sBlk->no_ids);
+
+ for(i = 0; i < sBlk->no_ids; i++) {
+ TRACE("Adding id %d to id tables\n", id_table[i]);
+ create_id(id_table[i]);
+ }
+
+ return id_table;
+}
+
+
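+/*
+ * Read the fragment table, decompressing each metadata block
+ * referenced by the fragment index, and byte-swap the entries if
+ * necessary.  Returns the fragment table, or NULL on failure.
+ */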
+static struct squashfs_fragment_entry *read_fragment_table(int fd, struct squashfs_super_block *sBlk)
+{
+ int res;
+ unsigned int i;
+ long long bytes = SQUASHFS_FRAGMENT_BYTES(sBlk->fragments);
+ int indexes = SQUASHFS_FRAGMENT_INDEXES(sBlk->fragments);
+ long long fragment_table_index[indexes];
+ struct squashfs_fragment_entry *fragment_table;
+
+ TRACE("read_fragment_table: %u fragments, reading %d fragment indexes "
+ "from 0x%llx\n", sBlk->fragments, indexes,
+ sBlk->fragment_table_start);
+
+ fragment_table = malloc(bytes);
+ if(fragment_table == NULL)
+ MEM_ERROR();
+
+ res = read_fs_bytes(fd, sBlk->fragment_table_start,
+ SQUASHFS_FRAGMENT_INDEX_BYTES(sBlk->fragments),
+ fragment_table_index);
+ if(res == 0) {
+ ERROR("Failed to read fragment table index\n");
+ ERROR("Filesystem corrupted?\n");
+ free(fragment_table);
+ return NULL;
+ }
+
+ SQUASHFS_INSWAP_FRAGMENT_INDEXES(fragment_table_index, indexes);
+
+ for(i = 0; i < indexes; i++) {
+ int expected = (i + 1) != indexes ? SQUASHFS_METADATA_SIZE :
+ bytes & (SQUASHFS_METADATA_SIZE - 1);
+ int length = read_block(fd, fragment_table_index[i], NULL,
+ expected, ((unsigned char *) fragment_table) +
+ (i * SQUASHFS_METADATA_SIZE));
+ TRACE("Read fragment table block %d, from 0x%llx, length %d\n",
+ i, fragment_table_index[i], length);
+ if(length == 0) {
+ ERROR("Failed to read fragment table block %d, from "
+ "0x%llx, length %d\n", i,
+ fragment_table_index[i], length);
+ ERROR("Filesystem corrupted?\n");
+ free(fragment_table);
+ return NULL;
+ }
+ }
+
+ for(i = 0; i < sBlk->fragments; i++)
+ SQUASHFS_INSWAP_FRAGMENT_ENTRY(&fragment_table[i]);
+
+ return fragment_table;
+}
+
+
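+/*
+ * Read the inode lookup (export) table, decompressing each metadata
+ * block referenced by its index.  Returns the lookup table, or NULL
+ * on failure.
+ */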
+static squashfs_inode *read_inode_lookup_table(int fd, struct squashfs_super_block *sBlk)
+{
+ int lookup_bytes = SQUASHFS_LOOKUP_BYTES(sBlk->inodes);
+ int indexes = SQUASHFS_LOOKUP_BLOCKS(sBlk->inodes);
+ long long index[indexes];
+ int res, i;
+ squashfs_inode *inode_lookup_table;
+
+ inode_lookup_table = malloc(lookup_bytes);
+ if(inode_lookup_table == NULL)
+ MEM_ERROR();
+
+ res = read_fs_bytes(fd, sBlk->lookup_table_start,
+ SQUASHFS_LOOKUP_BLOCK_BYTES(sBlk->inodes), index);
+ if(res == 0) {
+ ERROR("Failed to read inode lookup table index\n");
+ ERROR("Filesystem corrupted?\n");
+ free(inode_lookup_table);
+ return NULL;
+ }
+
+ SQUASHFS_INSWAP_LONG_LONGS(index, indexes);
+
+ for(i = 0; i < indexes; i++) {
+ int expected = (i + 1) != indexes ? SQUASHFS_METADATA_SIZE :
+ lookup_bytes & (SQUASHFS_METADATA_SIZE - 1);
+ int length = read_block(fd, index[i], NULL, expected,
+ ((unsigned char *) inode_lookup_table) +
+ (i * SQUASHFS_METADATA_SIZE));
+ TRACE("Read inode lookup table block %d, from 0x%llx, length "
+ "%d\n", i, index[i], length);
+ if(length == 0) {
+ ERROR("Failed to read inode lookup table block %d, "
+ "from 0x%llx, length %d\n", i, index[i],
+ length);
+ ERROR("Filesystem corrupted?\n");
+ free(inode_lookup_table);
+ return NULL;
+ }
+ }
+
+ SQUASHFS_INSWAP_LONG_LONGS(inode_lookup_table, sBlk->inodes);
+
+ return inode_lookup_table;
+}
+
+
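+/*
+ * Top-level function for reading the filesystem being appended to.
+ * It reads the xattr, fragment, inode-lookup and id tables, scans the
+ * inode table and the root directory, and returns copies of the
+ * compressed inode and directory tables together with the
+ * uncompressed caches needed to continue writing from the append
+ * point.  Returns the start of the inode table on success, or 0 on
+ * failure.
+ */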
+long long read_filesystem(char *root_name, int fd, struct squashfs_super_block *sBlk,
+ char **cinode_table, char **data_cache, char **cdirectory_table,
+ char **directory_data_cache, unsigned int *last_directory_block,
+ int *inode_dir_offset, unsigned int *inode_dir_file_size,
+ unsigned int *root_inode_size, unsigned int *inode_dir_start_block,
+ unsigned int *file_count, unsigned int *sym_count, unsigned int *dev_count,
+ unsigned int *dir_count, unsigned int *fifo_count, unsigned int *sock_count,
+ long long *uncompressed_file, long long *uncompressed_inode,
+ long long *uncompressed_directory, unsigned int *inode_dir_inode_number,
+ unsigned int *inode_dir_parent_inode,
+ void (push_directory_entry)(char *, squashfs_inode, unsigned int, int),
+ struct squashfs_fragment_entry **fragment_table,
+ squashfs_inode **inode_lookup_table)
+{
+ unsigned char *inode_table = NULL, *directory_table = NULL;
+ long long start = sBlk->inode_table_start;
+ long long end = sBlk->directory_table_start;
+ long long root_inode_start = start +
+ SQUASHFS_INODE_BLK(sBlk->root_inode);
+ unsigned int root_inode_offset =
+ SQUASHFS_INODE_OFFSET(sBlk->root_inode);
+ long long root_inode_block;
+ union squashfs_inode_header inode;
+ unsigned int *id_table = NULL;
+ int res;
+
+ if(!quiet)
+ printf("Scanning existing filesystem...\n");
+
+ if(get_xattrs(fd, sBlk) == 0)
+ goto error;
+
+ if(sBlk->fragments > 0) {
+ *fragment_table = read_fragment_table(fd, sBlk);
+ if(*fragment_table == NULL)
+ goto error;
+ }
+
+ if(sBlk->lookup_table_start != SQUASHFS_INVALID_BLK) {
+ *inode_lookup_table = read_inode_lookup_table(fd, sBlk);
+ if(*inode_lookup_table == NULL)
+ goto error;
+ }
+
+ id_table = read_id_table(fd, sBlk);
+ if(id_table == NULL)
+ goto error;
+
+ inode_table = scan_inode_table(fd, start, end, root_inode_start,
+ root_inode_offset, sBlk, &inode, &root_inode_block,
+ root_inode_size, uncompressed_file, uncompressed_directory,
+ file_count, sym_count, dev_count, dir_count, fifo_count,
+ sock_count, id_table);
+ if(inode_table == NULL)
+ goto error;
+
+ *uncompressed_inode = root_inode_block;
+
+ if(inode.base.inode_type == SQUASHFS_DIR_TYPE ||
+ inode.base.inode_type == SQUASHFS_LDIR_TYPE) {
+ if(inode.base.inode_type == SQUASHFS_DIR_TYPE) {
+ *inode_dir_start_block = inode.dir.start_block;
+ *inode_dir_offset = inode.dir.offset;
+ *inode_dir_file_size = inode.dir.file_size - 3;
+ *inode_dir_inode_number = inode.dir.inode_number;
+ *inode_dir_parent_inode = inode.dir.parent_inode;
+ } else {
+ *inode_dir_start_block = inode.ldir.start_block;
+ *inode_dir_offset = inode.ldir.offset;
+ *inode_dir_file_size = inode.ldir.file_size - 3;
+ *inode_dir_inode_number = inode.ldir.inode_number;
+ *inode_dir_parent_inode = inode.ldir.parent_inode;
+ }
+
+ directory_table = squashfs_readdir(fd, !root_name,
+ *inode_dir_start_block, *inode_dir_offset,
+ *inode_dir_file_size, last_directory_block, sBlk,
+ push_directory_entry);
+ if(directory_table == NULL)
+ goto error;
+
+ root_inode_start -= start;
+ *cinode_table = malloc(root_inode_start);
+ if(*cinode_table == NULL)
+ MEM_ERROR();
+
+ res = read_fs_bytes(fd, start, root_inode_start, *cinode_table);
+ if(res == 0) {
+ ERROR("Failed to read inode table\n");
+ ERROR("Filesystem corrupted?\n");
+ goto error;
+ }
+
+ *cdirectory_table = malloc(*last_directory_block);
+ if(*cdirectory_table == NULL)
+ MEM_ERROR();
+
+ res = read_fs_bytes(fd, sBlk->directory_table_start,
+ *last_directory_block, *cdirectory_table);
+ if(res == 0) {
+ ERROR("Failed to read directory table\n");
+ ERROR("Filesystem corrupted?\n");
+ goto error;
+ }
+
+ *data_cache = malloc(root_inode_offset + *root_inode_size);
+ if(*data_cache == NULL)
+ MEM_ERROR();
+
+ memcpy(*data_cache, inode_table + root_inode_block,
+ root_inode_offset + *root_inode_size);
+
+ *directory_data_cache = malloc(*inode_dir_offset +
+ *inode_dir_file_size);
+ if(*directory_data_cache == NULL)
+ MEM_ERROR();
+
+ memcpy(*directory_data_cache, directory_table,
+ *inode_dir_offset + *inode_dir_file_size);
+
+ free(id_table);
+ free(inode_table);
+ free(directory_table);
+ return sBlk->inode_table_start;
+ }
+
+error:
+ free(id_table);
+ free(inode_table);
+ free(directory_table);
+ return 0;
+}
diff --git a/squashfs-tools/read_fs.h b/squashfs-tools/read_fs.h
new file mode 100644
index 0000000..1c94742
--- /dev/null
+++ b/squashfs-tools/read_fs.h
@@ -0,0 +1,36 @@
+#ifndef READ_FS_H
+#define READ_FS_H
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2013,
+ * 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * read_fs.h
+ *
+ */
+extern struct compressor *read_super(int, struct squashfs_super_block *,
+ char *);
+extern long long read_filesystem(char *, int, struct squashfs_super_block *,
+char **, char **, char **, char **, unsigned int *, int *, unsigned int *,
+unsigned int *, unsigned int *, unsigned int *, unsigned int *, unsigned int *,
+unsigned int *, unsigned int *, unsigned int *, long long *, long long *,
+long long *, unsigned int *, unsigned int *, void (push_directory_entry)
+(char *, squashfs_inode, unsigned int, int), struct squashfs_fragment_entry **,
+squashfs_inode **);
+#endif
diff --git a/squashfs-tools/read_xattrs.c b/squashfs-tools/read_xattrs.c
new file mode 100644
index 0000000..a9d044f
--- /dev/null
+++ b/squashfs-tools/read_xattrs.c
@@ -0,0 +1,454 @@
+/*
+ * Read a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2010, 2012, 2013, 2019, 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * read_xattrs.c
+ */
+
+/*
+ * Common xattr read code shared between mksquashfs and unsquashfs
+ */
+
+#define TRUE 1
+#define FALSE 0
+#include <stdio.h>
+#include <string.h>
+#include <regex.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_swap.h"
+#include "xattr.h"
+#include "error.h"
+
+#include <stdlib.h>
+
+extern int read_fs_bytes(int, long long, long long, void *);
+extern int read_block(int, long long, long long *, int, void *);
+
+static struct hash_entry {
+ long long start;
+ long long offset;
+ struct hash_entry *next;
+} *hash_table[65536];
+
+static struct squashfs_xattr_id *xattr_ids;
+static void *xattrs = NULL;
+static long long xattr_table_start;
+
+/*
+ * Prefix lookup table, storing mapping to/from prefix string and prefix id
+ */
+struct prefix prefix_table[] = {
+ { "user.", SQUASHFS_XATTR_USER },
+ { "trusted.", SQUASHFS_XATTR_TRUSTED },
+ { "security.", SQUASHFS_XATTR_SECURITY },
+ { "", -1 }
+};
+
+/*
+ * store mapping from location of compressed block in fs ->
+ * location of uncompressed block in memory
+ */
+static int save_xattr_block(long long start, long long offset)
+{
+ struct hash_entry *hash_entry = malloc(sizeof(*hash_entry));
+ int hash = start & 0xffff;
+
+	TRACE("save_xattr_block: start %lld, offset %lld\n", start, offset);
+
+ if(hash_entry == NULL)
+ return FALSE;
+
+ hash_entry->start = start;
+ hash_entry->offset = offset;
+ hash_entry->next = hash_table[hash];
+ hash_table[hash] = hash_entry;
+
+ return TRUE;
+}
+
+
+/*
+ * map from location of compressed block in fs ->
+ * location of uncompressed block in memory
+ */
+static long long get_xattr_block(long long start)
+{
+ int hash = start & 0xffff;
+ struct hash_entry *hash_entry = hash_table[hash];
+
+ for(; hash_entry; hash_entry = hash_entry->next)
+ if(hash_entry->start == start)
+ break;
+
+	TRACE("get_xattr_block: start %lld, offset %lld\n", start,
+		hash_entry ? hash_entry->offset : -1);
+
+ return hash_entry ? hash_entry->offset : -1;
+}
+
+
+/*
+ * construct the xattr_list entry from the fs xattr, including
+ * mapping name and prefix into a full name
+ */
+static int read_xattr_entry(struct xattr_list *xattr,
+ struct squashfs_xattr_entry *entry, void *name)
+{
+ int i, len, type = entry->type & XATTR_PREFIX_MASK;
+
+ for(i = 0; prefix_table[i].type != -1; i++)
+ if(prefix_table[i].type == type)
+ break;
+
+ if(prefix_table[i].type == -1) {
+ ERROR("read_xattr_entry: Unrecognised xattr type %d\n", type);
+ return 0;
+ }
+
+ len = strlen(prefix_table[i].prefix);
+ xattr->full_name = malloc(len + entry->size + 1);
+ if(xattr->full_name == NULL) {
+ ERROR("FATAL ERROR: Out of memory (%s)\n", __func__);
+ return -1;
+ }
+
+ memcpy(xattr->full_name, prefix_table[i].prefix, len);
+ memcpy(xattr->full_name + len, name, entry->size);
+ xattr->full_name[len + entry->size] = '\0';
+ xattr->name = xattr->full_name + len;
+ xattr->size = entry->size;
+ xattr->type = type;
+
+ return 1;
+}
+
+
+/*
+ * Read and decompress the xattr id table and the xattr metadata.
+ * This is cached in memory for later use by get_xattr()
+ */
+unsigned int read_xattrs_from_disk(int fd, struct squashfs_super_block *sBlk, int sanity_only, long long *table_start)
+{
+ /*
+ * Note on overflow limits:
+ * Size of ids (id_table.xattr_ids) is 2^32 (unsigned int)
+ * Max size of bytes is 2^32*16 or 2^36
+ * Max indexes is (2^32*16)/8K or 2^23
+ * Max index_bytes is ((2^32*16)/8K)*8 or 2^26 or 64M
+ */
+ int res, i, indexes, index_bytes;
+ unsigned int ids;
+ long long bytes;
+ long long *index, start, end;
+ struct squashfs_xattr_table id_table;
+
+ TRACE("read_xattrs_from_disk\n");
+
+ /*
+ * Read xattr id table, containing start of xattr metadata and the
+ * number of xattrs in the file system
+ */
+ res = read_fs_bytes(fd, sBlk->xattr_id_table_start, sizeof(id_table),
+ &id_table);
+ if(res == 0)
+ goto failed;
+
+ SQUASHFS_INSWAP_XATTR_TABLE(&id_table);
+
+ /*
+ * Compute index table values
+ */
+ ids = id_table.xattr_ids;
+ if(ids == 0) {
+ ERROR("FATAL ERROR: File system corrupted - xattr_ids is 0 in xattr table\n");
+ goto failed;
+ }
+
+ xattr_table_start = id_table.xattr_table_start;
+ index_bytes = SQUASHFS_XATTR_BLOCK_BYTES(ids);
+ indexes = SQUASHFS_XATTR_BLOCKS(ids);
+
+ /*
+ * The size of the index table (index_bytes) should match the
+ * table start and end points
+ */
+ if(index_bytes != (sBlk->bytes_used - (sBlk->xattr_id_table_start + sizeof(id_table)))) {
+ ERROR("FATAL ERROR: File system corrupted - Bad xattr_ids count in super block\n");
+ goto failed;
+ }
+
+ /*
+ * id_table.xattr_table_start stores the start of the compressed xattr
+ * metadata blocks. This by definition is also the end of the previous
+ * filesystem table - the id lookup table.
+ */
+ if(table_start != NULL)
+ *table_start = id_table.xattr_table_start;
+
+ /*
+ * If sanity_only is set then return once we've read the above
+ * table_start. That value is necessary for sanity checking,
+ * but we don't actually want to extract the xattrs, and so
+ * stop here.
+ */
+ if(sanity_only)
+ return id_table.xattr_ids;
+
+ /*
+ * Allocate and read the index to the xattr id table metadata
+ * blocks
+ */
+ index = malloc(index_bytes);
+ if(index == NULL) {
+ ERROR("FATAL ERROR: Out of memory (%s)\n", __func__);
+ goto failed;
+ }
+
+ res = read_fs_bytes(fd, sBlk->xattr_id_table_start + sizeof(id_table),
+ index_bytes, index);
+	if(res == 0)
+ goto failed1;
+
+ SQUASHFS_INSWAP_LONG_LONGS(index, indexes);
+
+ /*
+ * Allocate enough space for the uncompressed xattr id table, and
+ * read and decompress it
+ */
+ bytes = SQUASHFS_XATTR_BYTES(ids);
+ xattr_ids = malloc(bytes);
+ if(xattr_ids == NULL) {
+ ERROR("FATAL ERROR: Out of memory (%s)\n", __func__);
+ goto failed1;
+ }
+
+ for(i = 0; i < indexes; i++) {
+ int expected = (i + 1) != indexes ? SQUASHFS_METADATA_SIZE :
+ bytes & (SQUASHFS_METADATA_SIZE - 1);
+ int length = read_block(fd, index[i], NULL, expected,
+ ((unsigned char *) xattr_ids) +
+ ((long long) i * SQUASHFS_METADATA_SIZE));
+ TRACE("Read xattr id table block %d, from 0x%llx, length "
+ "%d\n", i, index[i], length);
+ if(length == 0) {
+ ERROR("FATAL ERROR - Failed to read xattr id table block %d, "
+ "from 0x%llx, length %d. File system corrupted?\n", i, index[i],
+ length);
+ goto failed2;
+ }
+ }
+
+ /*
+ * Read and decompress the xattr metadata
+ *
+ * Note the first xattr id table metadata block is immediately after
+ * the last xattr metadata block, so we can use index[0] to work out
+ * the end of the xattr metadata
+ */
+ start = xattr_table_start;
+ end = index[0];
+ for(i = 0; start < end; i++) {
+ int length, res;
+ xattrs = realloc(xattrs, (i + 1) * SQUASHFS_METADATA_SIZE);
+ if(xattrs == NULL) {
+ ERROR("FATAL ERROR: Out of memory (%s)\n", __func__);
+ goto failed3;
+ }
+
+ /* store mapping from location of compressed block in fs ->
+ * location of uncompressed block in memory */
+ res = save_xattr_block(start, i * SQUASHFS_METADATA_SIZE);
+ if (res == FALSE) {
+ ERROR("FATAL ERROR: Out of memory (%s)\n", __func__);
+ goto failed3;
+ }
+
+ length = read_block(fd, start, &start, 0,
+ ((unsigned char *) xattrs) +
+ (i * SQUASHFS_METADATA_SIZE));
+ TRACE("Read xattr block %d, length %d\n", i, length);
+ if(length == 0) {
+ ERROR("FATAL ERROR - Failed to read xattr block %d. File system corrupted?\n", i);
+ goto failed3;
+ }
+
+ /*
+ * If this is not the last metadata block in the xattr metadata
+ * then it should be SQUASHFS_METADATA_SIZE in size.
+ * Note, we can't use expected in read_block() above for this
+ * because we don't know if this is the last block until
+ * after reading.
+ */
+ if(start != end && length != SQUASHFS_METADATA_SIZE) {
+ ERROR("FATAL ERROR: Xattr block %d should be %d bytes in length, "
+ "it is %d bytes. File system corrupted?\n", i, SQUASHFS_METADATA_SIZE,
+ length);
+ goto failed3;
+ }
+ }
+
+ /* swap if necessary the xattr id entries */
+ for(i = 0; i < ids; i++)
+ SQUASHFS_INSWAP_XATTR_ID(&xattr_ids[i]);
+
+ free(index);
+
+ return ids;
+
+failed3:
+ free(xattrs);
+failed2:
+ free(xattr_ids);
+failed1:
+ free(index);
+
+failed:
+ return FALSE;
+}
+
+
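+/*
+ * Free an xattr_list array and the full names it references.
+ */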
+void free_xattr(struct xattr_list *xattr_list, int count)
+{
+ int i;
+
+ for(i = 0; i < count; i++)
+ free(xattr_list[i].full_name);
+
+ free(xattr_list);
+}
+
+
+/*
+ * Construct and return the list of xattr name:value pairs for the passed xattr
+ * id
+ *
+ * There are two users of get_xattr(): Mksquashfs uses it to read the
+ * xattrs from the filesystem on appending, and Unsquashfs uses it
+ * to retrieve the xattrs for writing to disk.
+ *
+ * Unfortunately, the two users disagree on what to do with unknown
+ * xattr prefixes.  Mksquashfs wants to treat this as fatal, because
+ * otherwise xattrs will be lost on appending.  Unsquashfs, on the
+ * other hand, wants to retrieve the xattrs which are known and to
+ * ignore the rest.  This allows Unsquashfs to cope more gracefully
+ * with future versions which may have unknown xattrs: as long as the
+ * general xattr structure is adhered to, Unsquashfs should be able to
+ * safely ignore unknown xattrs and write the ones it knows about,
+ * which is better than refusing to retrieve any of the xattrs.
+ *
+ * So return an error flag if any unrecognised types were found.
+ */
+struct xattr_list *get_xattr(int i, unsigned int *count, int *failed)
+{
+ long long start, xptr_offset;
+ struct xattr_list *xattr_list = NULL;
+ unsigned int offset;
+ void *xptr;
+ int j, n, res = 1;
+
+ TRACE("get_xattr\n");
+
+ if(xattr_ids[i].count == 0) {
+ ERROR("get_xattr: xattr count unexpectedly 0 - corrupt fs?\n");
+ *failed = TRUE;
+ *count = 0;
+ return NULL;
+ } else
+ *failed = FALSE;
+
+ start = SQUASHFS_XATTR_BLK(xattr_ids[i].xattr) + xattr_table_start;
+ offset = SQUASHFS_XATTR_OFFSET(xattr_ids[i].xattr);
+ xptr_offset = get_xattr_block(start);
+
+ if(xptr_offset == -1) {
+ ERROR("FATAL ERROR: file system is corrupt - incorrect xattr value in metadata\n");
+ *failed = FALSE;
+ return NULL;
+ }
+
+
+ xptr = xattrs + xptr_offset + offset;
+
+ TRACE("get_xattr: xattr_id %d, count %d, start %lld, offset %d\n", i,
+ xattr_ids[i].count, start, offset);
+
+ for(j = 0, n = 0; n < xattr_ids[i].count; n++) {
+ struct squashfs_xattr_entry entry;
+ struct squashfs_xattr_val val;
+
+ if(res != 0) {
+ xattr_list = realloc(xattr_list, (j + 1) *
+ sizeof(struct xattr_list));
+ if(xattr_list == NULL) {
+ ERROR("FATAL ERROR: Out of memory (%s)\n", __func__);
+ *failed = FALSE;
+ return NULL;
+ }
+ }
+
+ SQUASHFS_SWAP_XATTR_ENTRY(xptr, &entry);
+ xptr += sizeof(entry);
+
+ res = read_xattr_entry(&xattr_list[j], &entry, xptr);
+ if(res == 0) {
+ /* unknown type, skip, and set error flag */
+ xptr += entry.size;
+ SQUASHFS_SWAP_XATTR_VAL(xptr, &val);
+ xptr += sizeof(val) + val.vsize;
+ *failed = TRUE;
+ continue;
+ } else if(res == -1) {
+ ERROR("FATAL ERROR: Out of memory (%s)\n", __func__);
+ *failed = FALSE;
+ return NULL;
+ }
+
+ xptr += entry.size;
+
+ TRACE("get_xattr: xattr %d, type %d, size %d, name %s\n", j,
+ entry.type, entry.size, xattr_list[j].full_name);
+
+ if(entry.type & SQUASHFS_XATTR_VALUE_OOL) {
+ long long xattr;
+ void *ool_xptr;
+
+ xptr += sizeof(val);
+ SQUASHFS_SWAP_LONG_LONGS(xptr, &xattr, 1);
+ xptr += sizeof(xattr);
+ start = SQUASHFS_XATTR_BLK(xattr) + xattr_table_start;
+ offset = SQUASHFS_XATTR_OFFSET(xattr);
+ ool_xptr = xattrs + get_xattr_block(start) + offset;
+ SQUASHFS_SWAP_XATTR_VAL(ool_xptr, &val);
+ xattr_list[j].value = ool_xptr + sizeof(val);
+ } else {
+ SQUASHFS_SWAP_XATTR_VAL(xptr, &val);
+ xattr_list[j].value = xptr + sizeof(val);
+ xptr += sizeof(val) + val.vsize;
+ }
+
+ TRACE("get_xattr: xattr %d, vsize %d\n", j, val.vsize);
+
+ xattr_list[j++].vsize = val.vsize;
+ }
+
+ *count = j;
+ return xattr_list;
+}
diff --git a/squashfs-tools/reader.c b/squashfs-tools/reader.c
new file mode 100644
index 0000000..5954a76
--- /dev/null
+++ b/squashfs-tools/reader.c
@@ -0,0 +1,715 @@
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * reader.c
+ */
+
+/* if throttling I/O, time to sleep between reads (in tenths of a second) */
+int sleep_time;
+
+#define TRUE 1
+#define FALSE 0
+
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <dirent.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <pthread.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+#include "squashfs_fs.h"
+#include "mksquashfs.h"
+#include "caches-queues-lists.h"
+#include "progressbar.h"
+#include "mksquashfs_error.h"
+#include "pseudo.h"
+#include "sort.h"
+#include "tar.h"
+#include "reader.h"
+
+static struct readahead **readahead_table = NULL;
+
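+/*
+ * Signal handler used when throttling I/O: each time it runs the
+ * reader sleeps for sleep_time tenths of a second.
+ */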
+static void sigalrm_handler(int arg)
+{
+ struct timespec requested_time, remaining;
+
+ requested_time.tv_sec = sleep_time / 10;
+ requested_time.tv_nsec = (sleep_time % 10) * 100000000;
+
+ nanosleep(&requested_time, &remaining);
+}
+
+
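+/*
+ * Return the full pathname of a directory entry.  A non-standard
+ * pathname is returned as-is, otherwise the pathname is built from
+ * the parent directory pathname and the entry (source) name in a
+ * static buffer, which is grown as necessary.
+ */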
+static char *pathname(struct dir_ent *dir_ent)
+{
+ static char *pathname = NULL;
+ static int size = ALLOC_SIZE;
+
+ if (dir_ent->nonstandard_pathname)
+ return dir_ent->nonstandard_pathname;
+
+ if(pathname == NULL) {
+ pathname = malloc(ALLOC_SIZE);
+ if(pathname == NULL)
+ MEM_ERROR();
+ }
+
+ for(;;) {
+ int res = snprintf(pathname, size, "%s/%s",
+ dir_ent->our_dir->pathname,
+ dir_ent->source_name ? : dir_ent->name);
+
+ if(res < 0)
+ BAD_ERROR("snprintf failed in pathname\n");
+ else if(res >= size) {
+ /*
+ * pathname is too small to contain the result, so
+ * increase it and try again
+ */
+ size = (res + ALLOC_SIZE) & ~(ALLOC_SIZE - 1);
+ pathname = realloc(pathname, size);
+ if(pathname == NULL)
+ MEM_ERROR();
+ } else
+ break;
+ }
+
+ return pathname;
+}
+
+
+static inline int is_fragment(struct inode_info *inode)
+{
+ off_t file_size = inode->buf.st_size;
+
+ /*
+ * If this block is to be compressed differently to the
+ * fragment compression then it cannot be a fragment
+ */
+ if(inode->noF != noF)
+ return FALSE;
+
+ return !inode->no_fragments && file_size && (file_size < block_size ||
+ (inode->always_use_fragments && file_size & (block_size - 1)));
+}
+
+
+static void put_file_buffer(struct file_buffer *file_buffer)
+{
+ /*
+ * Decide where to send the file buffer:
+ * - compressible non-fragment blocks go to the deflate threads,
+ * - fragments go to the process fragment threads,
+ * - all others go directly to the main thread
+ */
+ if(file_buffer->error) {
+ file_buffer->fragment = 0;
+ seq_queue_put(to_main, file_buffer);
+ } else if (file_buffer->file_size == 0)
+ seq_queue_put(to_main, file_buffer);
+ else if(file_buffer->fragment)
+ queue_put(to_process_frag, file_buffer);
+ else
+ queue_put(to_deflate, file_buffer);
+}
+
+
+static void reader_read_process(struct dir_ent *dir_ent)
+{
+ long long bytes = 0;
+ struct inode_info *inode = dir_ent->inode;
+ struct file_buffer *prev_buffer = NULL, *file_buffer;
+ int status, byte, res, child;
+ int file;
+
+ if(inode->read)
+ return;
+
+ inode->read = TRUE;
+
+ file = pseudo_exec_file(inode->pseudo, &child);
+ if(!file) {
+ file_buffer = cache_get_nohash(reader_buffer);
+ file_buffer->sequence = sequence_count ++;
+ goto read_err;
+ }
+
+ while(1) {
+ file_buffer = cache_get_nohash(reader_buffer);
+ file_buffer->sequence = sequence_count ++;
+ file_buffer->noD = inode->noD;
+
+ byte = read_bytes(file, file_buffer->data, block_size);
+ if(byte == -1)
+ goto read_err2;
+
+ file_buffer->size = byte;
+ file_buffer->file_size = -1;
+ file_buffer->error = FALSE;
+ file_buffer->fragment = FALSE;
+ bytes += byte;
+
+ if(byte == 0)
+ break;
+
+ /*
+ * Update progress bar size. This is done
+ * on every block rather than waiting for all blocks to be
+ * read in case write_file_process() is running in parallel
+ * with this. Otherwise the current progress bar position
+ * may get ahead of the progress bar size.
+ */
+ progress_bar_size(1);
+
+ if(prev_buffer)
+ put_file_buffer(prev_buffer);
+ prev_buffer = file_buffer;
+ }
+
+ /*
+ * Update inode file size now that the size of the dynamic pseudo file
+ * is known. This is needed for the -info option.
+ */
+ inode->buf.st_size = bytes;
+
+ while(1) {
+ res = waitpid(child, &status, 0);
+ if(res != -1)
+ break;
+ else if(errno != EINTR)
+ BAD_ERROR("read process: waitpid returned %d\n", errno);
+ }
+
+ close(file);
+
+ if(res == -1 || !WIFEXITED(status) || WEXITSTATUS(status) != 0)
+ goto read_err;
+
+ if(prev_buffer == NULL)
+ prev_buffer = file_buffer;
+ else {
+ cache_block_put(file_buffer);
+ sequence_count --;
+ }
+ prev_buffer->file_size = bytes;
+ prev_buffer->fragment = is_fragment(inode);
+ put_file_buffer(prev_buffer);
+
+ return;
+
+read_err2:
+ close(file);
+read_err:
+ if(prev_buffer) {
+ cache_block_put(file_buffer);
+ sequence_count --;
+ file_buffer = prev_buffer;
+ }
+ file_buffer->error = TRUE;
+ put_file_buffer(file_buffer);
+}
+
+
+static void reader_read_file(struct dir_ent *dir_ent)
+{
+ struct stat *buf = &dir_ent->inode->buf, buf2;
+ struct file_buffer *file_buffer;
+ int blocks, file, res;
+ long long bytes, read_size;
+ struct inode_info *inode = dir_ent->inode;
+
+ if(inode->read)
+ return;
+
+ inode->read = TRUE;
+again:
+ bytes = 0;
+ read_size = buf->st_size;
+ blocks = (read_size + block_size - 1) >> block_log;
+
+ while(1) {
+ file = open(pathname(dir_ent), O_RDONLY);
+ if(file != -1 || errno != EINTR)
+ break;
+ }
+
+ if(file == -1) {
+ file_buffer = cache_get_nohash(reader_buffer);
+ file_buffer->sequence = sequence_count ++;
+ goto read_err2;
+ }
+
+ do {
+ file_buffer = cache_get_nohash(reader_buffer);
+ file_buffer->file_size = read_size;
+ file_buffer->sequence = sequence_count ++;
+ file_buffer->noD = inode->noD;
+ file_buffer->error = FALSE;
+
+ /*
+ * Always try to read block_size bytes from the file rather
+ * than expected bytes (which will be less than the block_size
+ * at the file tail) to check that the file hasn't grown
+ * since it was stat(2)ed. If it is longer (or shorter) than
+ * expected, then restat, and try again. Note the special
+ * case where the file is an exact multiple of the block_size
+ * is dealt with later.
+ */
+ file_buffer->size = read_bytes(file, file_buffer->data,
+ block_size);
+ if(file_buffer->size == -1)
+ goto read_err;
+
+ bytes += file_buffer->size;
+
+ if(blocks > 1) {
+ /* non-tail block should be exactly block_size */
+ if(file_buffer->size < block_size)
+ goto restat;
+
+ file_buffer->fragment = FALSE;
+ put_file_buffer(file_buffer);
+ }
+ } while(-- blocks > 0);
+
+ /* Overall size including tail should match */
+ if(read_size != bytes)
+ goto restat;
+
+ if(read_size && read_size % block_size == 0) {
+ /*
+ * Special case where we've not tried to read past the end of
+ * the file. We expect to get EOF, i.e. the file isn't larger
+ * than we expect.
+ */
+ char buffer;
+ int res;
+
+ res = read_bytes(file, &buffer, 1);
+ if(res == -1)
+ goto read_err;
+
+ if(res != 0)
+ goto restat;
+ }
+
+ file_buffer->fragment = is_fragment(inode);
+ put_file_buffer(file_buffer);
+
+ close(file);
+
+ return;
+
+restat:
+ res = fstat(file, &buf2);
+ if(res == -1) {
+ ERROR("Cannot stat dir/file %s because %s\n",
+ pathname(dir_ent), strerror(errno));
+ goto read_err;
+ }
+
+ if(read_size != buf2.st_size) {
+ close(file);
+ memcpy(buf, &buf2, sizeof(struct stat));
+ file_buffer->error = 2;
+ put_file_buffer(file_buffer);
+ goto again;
+ }
+read_err:
+ close(file);
+read_err2:
+ file_buffer->error = TRUE;
+ put_file_buffer(file_buffer);
+}
+
+
+static void remove_readahead(int index, struct readahead *prev, struct readahead *new)
+{
+ if(prev)
+ prev->next = new->next;
+ else
+ readahead_table[index] = new->next;
+}
+
+
+static void add_readahead(struct readahead *new)
+{
+ int index = READAHEAD_INDEX(new->start);
+
+ new->next = readahead_table[index];
+ readahead_table[index] = new;
+}
+
+
+static int get_bytes(char *data, int size)
+{
+ int res = fread(data, 1, size, stdin);
+
+ if(res == size)
+ return res;
+
+ return feof(stdin) ? 0 : -1;
+}
+
+
+static int get_readahead(struct pseudo_file *file, long long current,
+ struct file_buffer *file_buffer, int size)
+{
+ int count = size;
+ char *dest = file_buffer->data;
+
+ if(readahead_table == NULL)
+ return -1;
+
+ while(size) {
+ int index = READAHEAD_INDEX(current);
+ struct readahead *buffer = readahead_table[index], *prev = NULL;
+
+ for(; buffer; prev = buffer, buffer = buffer->next) {
+ if(buffer->start <= current && buffer->start + buffer->size > current) {
+ int offset = READAHEAD_OFFSET(current);
+ int buffer_offset = READAHEAD_OFFSET(buffer->start);
+
+ /*
+ * Four possibilities:
+ * 1. Wanted data is whole of buffer
+ * 2. Wanted data is at start of buffer
+ * 3. Wanted data is at end of buffer
+ * 4. Wanted data is in middle of buffer
+ */
+ if(offset == buffer_offset && size >= buffer->size) {
+ memcpy(dest, buffer->src, buffer->size);
+ dest += buffer->size;
+ size -= buffer->size;
+ current += buffer->size;
+
+ remove_readahead(index, prev, buffer);
+ free(buffer);
+ break;
+ } else if(offset == buffer_offset) {
+ memcpy(dest, buffer->src, size);
+ buffer->start += size;
+ buffer->src += size;
+ buffer->size -= size;
+
+ remove_readahead(index, prev, buffer);
+ add_readahead(buffer);
+
+ goto finished;
+ } else if(buffer_offset + buffer->size <= offset + size) {
+ int bytes = buffer_offset + buffer->size - offset;
+
+ memcpy(dest, buffer->src + offset - buffer_offset, bytes);
+ buffer->size -= bytes;
+ dest += bytes;
+ size -= bytes;
+ current += bytes;
+ break;
+ } else {
+ struct readahead *left, *right;
+ int left_size = offset - buffer_offset;
+ int right_size = buffer->size - (offset + size);
+
+ memcpy(dest, buffer->src + offset - buffer_offset, size);
+
+ /* Split buffer into two */
+ left = malloc(sizeof(struct readahead) + left_size);
+ right = malloc(sizeof(struct readahead) + right_size);
+
+ if(left == NULL || right == NULL)
+ MEM_ERROR();
+
+ left->start = buffer->start;
+ left->size = left_size;
+ left->src = left->data;
+ memcpy(left->data, buffer->src, left_size);
+
+ right->start = current + size;
+ right->size = right_size;
+ right->src = right->data;
+ memcpy(right->data, buffer->src + offset + size, right_size);
+
+ remove_readahead(index, prev, buffer);
+ free(buffer);
+
+ add_readahead(left);
+ add_readahead(right);
+ goto finished;
+ }
+ }
+ }
+
+ if(buffer == NULL)
+ return -1;
+ }
+
+finished:
+ return count;
+}
+
+
+static int do_readahead(struct pseudo_file *file, long long current,
+ struct file_buffer *file_buffer, int size)
+{
+ int res;
+ long long readahead = current - file->current;
+
+ if(readahead_table == NULL) {
+ readahead_table = malloc(READAHEAD_ALLOC);
+ if(readahead_table == NULL)
+ MEM_ERROR();
+
+ memset(readahead_table, 0, READAHEAD_ALLOC);
+ }
+
+ while(readahead) {
+ int offset = READAHEAD_OFFSET(file->current);
+ int bytes = READAHEAD_SIZE - offset < readahead ? READAHEAD_SIZE - offset : readahead;
+ struct readahead *buffer = malloc(sizeof(struct readahead) + bytes);
+
+ if(buffer == NULL)
+ MEM_ERROR();
+
+ res = get_bytes(buffer->data, bytes);
+
+ if(res == -1) {
+ free(buffer);
+ return res;
+ }
+
+ buffer->start = file->current;
+ buffer->size = bytes;
+ buffer->src = buffer->data;
+ add_readahead(buffer);
+
+ file->current += bytes;
+ readahead -= bytes;
+ }
+
+ res = get_bytes(file_buffer->data, size);
+
+ if(res != -1)
+ file->current += size;
+
+ return res;
+}
+
+
+static int read_data(struct pseudo_file *file, long long current,
+ struct file_buffer *file_buffer, int size)
+{
+ int res;
+
+ if(file->fd != STDIN_FILENO) {
+ if(current != file->current) {
+ /*
+ * File data reading is not in the same order as stored
+ * in the pseudo file. As this is not stdin, we can
+ * lseek() to the wanted data
+ */
+ res = lseek(file->fd, current + file->start, SEEK_SET);
+ if(res == -1)
+ BAD_ERROR("Lseek on pseudo file %s failed because %s\n",
+ file->filename, strerror(errno));
+
+ file->current = current;
+ }
+
+ res = read_bytes(file->fd, file_buffer->data, size);
+
+ if(res != -1)
+ file->current += size;
+
+ return res;
+ }
+
+ /*
+ * Reading from stdin. Three possibilities
+ * 1. We are at the current place in stdin, so just read data
+ * 2. Data we want has already been read and buffered (readahead).
+ * 3. Data is later in the file, readahead and buffer data to that point
+ */
+
+ if(current == file->current) {
+ res = get_bytes(file_buffer->data, size);
+
+ if(res != -1)
+ file->current += size;
+
+ return res;
+ } else if(current < file->current)
+ return get_readahead(file, current, file_buffer, size);
+ else
+ return do_readahead(file, current, file_buffer, size);
+}
+
+
+static void reader_read_data(struct dir_ent *dir_ent)
+{
+ struct file_buffer *file_buffer;
+ int blocks;
+ long long bytes, read_size, current;
+ struct inode_info *inode = dir_ent->inode;
+ static struct pseudo_file *file = NULL;
+
+ if(inode->read)
+ return;
+
+ inode->read = TRUE;
+ bytes = 0;
+ read_size = inode->pseudo->data->length;
+ blocks = (read_size + block_size - 1) >> block_log;
+
+ if(inode->pseudo->data->file != file) {
+ /* Reading the first or a different pseudo file. If it is
+ * a different one, first close the previous pseudo
+ * file, unless it is stdin */
+ if(file && file->fd > 0) {
+ close(file->fd);
+ file->fd = -1;
+ }
+
+ file = inode->pseudo->data->file;
+
+ if(file->fd == -1) {
+ while(1) {
+ file->fd = open(file->filename, O_RDONLY);
+ if(file->fd != -1 || errno != EINTR)
+ break;
+ }
+
+ if(file->fd == -1)
+ BAD_ERROR("Could not open pseudo file %s "
+ "because %s\n", file->filename,
+ strerror(errno));
+
+ file->current = -file->start;
+ }
+ }
+
+ current = inode->pseudo->data->offset;
+
+ do {
+ file_buffer = cache_get_nohash(reader_buffer);
+ file_buffer->file_size = read_size;
+ file_buffer->sequence = sequence_count ++;
+ file_buffer->noD = inode->noD;
+ file_buffer->error = FALSE;
+
+ if(blocks > 1) {
+ /* non-tail block should be exactly block_size */
+ file_buffer->size = read_data(file, current, file_buffer, block_size);
+ if(file_buffer->size != block_size)
+ BAD_ERROR("Failed to read pseudo file %s, it appears to be truncated or corrupted\n", file->filename);
+
+ current += file_buffer->size;
+ bytes += file_buffer->size;
+
+ file_buffer->fragment = FALSE;
+ put_file_buffer(file_buffer);
+ } else {
+ int expected = read_size - bytes;
+
+ file_buffer->size = read_data(file, current, file_buffer, expected);
+ if(file_buffer->size != expected)
+ BAD_ERROR("Failed to read pseudo file %s, it appears to be truncated or corrupted\n", file->filename);
+
+ current += file_buffer->size;
+ }
+ } while(-- blocks > 0);
+
+ file_buffer->fragment = is_fragment(inode);
+ put_file_buffer(file_buffer);
+}
+
+
+void reader_scan(struct dir_info *dir)
+{
+ struct dir_ent *dir_ent = dir->list;
+
+ for(; dir_ent; dir_ent = dir_ent->next) {
+ struct stat *buf = &dir_ent->inode->buf;
+
+ if(dir_ent->inode->root_entry || IS_TARFILE(dir_ent->inode))
+ continue;
+
+ if(IS_PSEUDO_PROCESS(dir_ent->inode)) {
+ reader_read_process(dir_ent);
+ continue;
+ }
+
+ if(IS_PSEUDO_DATA(dir_ent->inode)) {
+ reader_read_data(dir_ent);
+ continue;
+ }
+
+ switch(buf->st_mode & S_IFMT) {
+ case S_IFREG:
+ reader_read_file(dir_ent);
+ break;
+ case S_IFDIR:
+ reader_scan(dir_ent->dir);
+ break;
+ }
+ }
+}
+
+
+void *reader(void *arg)
+{
+ struct itimerval itimerval;
+ struct dir_info *dir = queue_get(to_reader);
+
+ if(sleep_time) {
+ signal(SIGALRM, sigalrm_handler);
+
+ itimerval.it_value.tv_sec = 0;
+ itimerval.it_value.tv_usec = 100000;
+ itimerval.it_interval.tv_sec = 10;
+ itimerval.it_interval.tv_usec = 0;
+ setitimer(ITIMER_REAL, &itimerval, NULL);
+ }
+
+ if(tarfile) {
+ read_tar_file();
+ dir = queue_get(to_reader);
+ }
+
+ if(!sorted)
+ reader_scan(dir);
+ else {
+ int i;
+ struct priority_entry *entry;
+
+ for(i = 65535; i >= 0; i--)
+ for(entry = priority_list[i]; entry;
+ entry = entry->next)
+ reader_read_file(entry->dir);
+ }
+
+ pthread_exit(NULL);
+}
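
The throttling in reader() is the classic interval-timer pattern: setitimer(ITIMER_REAL, ...) delivers SIGALRM periodically, and sigalrm_handler() simply parks the reader thread in nanosleep() for sleep_time tenths of a second, pacing disk reads without touching the read loops themselves. A self-contained sketch of the same pattern, assuming nothing from mksquashfs; the timer periods and the pause length below are illustrative, not the values used above:

#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

static int pause_tenths = 2;	/* sleep per SIGALRM, in tenths of a second */

static void on_alarm(int sig)
{
	struct timespec req = {
		.tv_sec = pause_tenths / 10,
		.tv_nsec = (pause_tenths % 10) * 100000000L
	};

	(void) sig;
	nanosleep(&req, NULL);	/* pause whatever this thread was doing */
}

int main(void)
{
	struct itimerval it;
	struct timespec tick = { .tv_sec = 0, .tv_nsec = 300000000L };
	int i;

	signal(SIGALRM, on_alarm);

	it.it_value.tv_sec = 0;
	it.it_value.tv_usec = 100000;	/* first tick after 0.1 seconds */
	it.it_interval.tv_sec = 1;	/* then once a second */
	it.it_interval.tv_usec = 0;
	setitimer(ITIMER_REAL, &it, NULL);

	for(i = 0; i < 10; i++) {
		printf("read block %d\n", i);	/* stand-in for a block read */
		nanosleep(&tick, NULL);
	}
	return 0;
}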
diff --git a/squashfs-tools/reader.h b/squashfs-tools/reader.h
new file mode 100644
index 0000000..dd79865
--- /dev/null
+++ b/squashfs-tools/reader.h
@@ -0,0 +1,39 @@
+#ifndef READER_H
+#define READER_H
+
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * reader.h
+ */
+
+#define READAHEAD_SIZE 8192
+#define READAHEAD_ALLOC (0x100000 * sizeof(struct readahead *))
+#define READAHEAD_INDEX(A) ((A >> 13) & 0xfffff)
+#define READAHEAD_OFFSET(A) (A % READAHEAD_SIZE)
+
+struct readahead {
+ long long start;
+ int size;
+ struct readahead *next;
+ char *src;
+ char data[0] __attribute__((aligned));
+};
+#endif
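
The readahead cache declared above is a hash table of 8192-byte windows: READAHEAD_INDEX() takes bits 13 and up of a file offset (masked to the 0x100000 buckets the table is allocated with), and READAHEAD_OFFSET() is the position within the 8 KiB window. A quick numeric illustration using the constants copied from this header (the sample offsets are arbitrary):

#include <stdio.h>

#define READAHEAD_SIZE		8192
#define READAHEAD_INDEX(A)	((A >> 13) & 0xfffff)
#define READAHEAD_OFFSET(A)	(A % READAHEAD_SIZE)

int main(void)
{
	long long offsets[] = { 0, 8191, 8192, 1000000, 0x200000000LL };
	int i;

	for(i = 0; i < 5; i++)
		printf("byte offset %lld -> bucket %lld, offset in window %lld\n",
			offsets[i],
			(long long) READAHEAD_INDEX(offsets[i]),
			(long long) READAHEAD_OFFSET(offsets[i]));
	return 0;
}

Offsets 8 GiB apart alias to the same bucket, which is why get_readahead() also checks buffer->start against the wanted offset rather than trusting the bucket alone.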
diff --git a/squashfs-tools/restore.c b/squashfs-tools/restore.c
new file mode 100644
index 0000000..cec5ce9
--- /dev/null
+++ b/squashfs-tools/restore.c
@@ -0,0 +1,168 @@
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2013, 2014, 2019, 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * restore.c
+ */
+
+#include <pthread.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <stdio.h>
+#include <math.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <dirent.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "caches-queues-lists.h"
+#include "squashfs_fs.h"
+#include "mksquashfs.h"
+#include "mksquashfs_error.h"
+#include "progressbar.h"
+#include "info.h"
+
+#define FALSE 0
+#define TRUE 1
+
+extern pthread_t reader_thread, writer_thread, main_thread, order_thread;
+extern pthread_t *deflator_thread, *frag_deflator_thread, *frag_thread;
+extern struct queue *to_deflate, *to_writer, *to_frag, *to_process_frag;
+extern struct seq_queue *to_main, *to_order;
+extern void restorefs();
+extern int processors;
+extern int reproducible;
+
+static int interrupted = 0;
+static pthread_t restore_thread;
+
+void *restore_thrd(void *arg)
+{
+ sigset_t sigmask, old_mask;
+ int i, sig;
+
+ sigemptyset(&sigmask);
+ sigaddset(&sigmask, SIGINT);
+ sigaddset(&sigmask, SIGTERM);
+ sigaddset(&sigmask, SIGUSR1);
+ pthread_sigmask(SIG_BLOCK, &sigmask, &old_mask);
+
+ while(1) {
+ sigwait(&sigmask, &sig);
+
+ if((sig == SIGINT || sig == SIGTERM) && !interrupted) {
+ ERROR("Interrupting will restore original "
+ "filesystem!\n");
+ ERROR("Interrupt again to quit\n");
+ interrupted = TRUE;
+ continue;
+ }
+
+ /* kill main thread/worker threads and restore */
+ set_progressbar_state(FALSE);
+ disable_info();
+
+ /* first kill the reader thread */
+ pthread_cancel(reader_thread);
+ pthread_join(reader_thread, NULL);
+
+ /*
+ * then flush the reader to deflator thread(s) output queue.
+ * The deflator thread(s) will idle
+ */
+ queue_flush(to_deflate);
+
+ /* now kill the deflator thread(s) */
+ for(i = 0; i < processors; i++)
+ pthread_cancel(deflator_thread[i]);
+ for(i = 0; i < processors; i++)
+ pthread_join(deflator_thread[i], NULL);
+
+ /*
+ * then flush the reader to process fragment thread(s) output
+ * queue. The process fragment thread(s) will idle
+ */
+ queue_flush(to_process_frag);
+
+ /* now kill the process fragment thread(s) */
+ for(i = 0; i < processors; i++)
+ pthread_cancel(frag_thread[i]);
+ for(i = 0; i < processors; i++)
+ pthread_join(frag_thread[i], NULL);
+
+ /*
+ * then flush the reader/deflator/process fragment to main
+ * thread output queue. The main thread will idle
+ */
+ seq_queue_flush(to_main);
+
+ /* now kill the main thread */
+ pthread_cancel(main_thread);
+ pthread_join(main_thread, NULL);
+
+ /* then flush the main thread to fragment deflator thread(s)
+ * queue. The fragment deflator thread(s) will idle
+ */
+ queue_flush(to_frag);
+
+ /* now kill the fragment deflator thread(s) */
+ for(i = 0; i < processors; i++)
+ pthread_cancel(frag_deflator_thread[i]);
+ for(i = 0; i < processors; i++)
+ pthread_join(frag_deflator_thread[i], NULL);
+
+ if(reproducible) {
+ /* then flush the fragment deflator thread(s)
+ * to frag orderer thread. The frag orderer
+ * thread will idle
+ */
+ seq_queue_flush(to_order);
+
+ /* now kill the frag orderer thread */
+ pthread_cancel(order_thread);
+ pthread_join(order_thread, NULL);
+ }
+
+ /*
+ * then flush the main thread/fragment deflator thread(s)
+ * to writer thread queue. The writer thread will idle
+ */
+ queue_flush(to_writer);
+
+ /* now kill the writer thread */
+ pthread_cancel(writer_thread);
+ pthread_join(writer_thread, NULL);
+
+ TRACE("All threads cancelled\n");
+
+ restorefs();
+ }
+}
+
+
+pthread_t *init_restore_thread()
+{
+ pthread_create(&restore_thread, NULL, restore_thrd, NULL);
+ return &restore_thread;
+}
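
restore_thrd() is the dedicated signal-handling thread pattern: the interesting signals are blocked with pthread_sigmask() and then collected synchronously with sigwait(), so no worker thread ever runs an asynchronous handler, and the first SIGINT/SIGTERM only warns while the second triggers the tear-down above. A minimal sketch of that pattern on its own, with the two-interrupt policy copied from the code and everything else (names, messages) invented for illustration:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void *signal_thread(void *arg)
{
	sigset_t *set = arg;
	int sig, interrupted = 0;

	while(1) {
		sigwait(set, &sig);

		if((sig == SIGINT || sig == SIGTERM) && !interrupted) {
			fprintf(stderr, "Interrupt again to quit\n");
			interrupted = 1;
			continue;
		}

		/* second interrupt: clean up and exit */
		fprintf(stderr, "Exiting\n");
		exit(1);
	}
	return NULL;
}

int main(void)
{
	static sigset_t set;
	pthread_t thread;

	/* block the signals before creating any threads, so every
	 * thread inherits the mask and only sigwait() sees them */
	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigaddset(&set, SIGTERM);
	pthread_sigmask(SIG_BLOCK, &set, NULL);

	pthread_create(&thread, NULL, signal_thread, &set);

	/* worker threads would run here, never seeing SIGINT directly */
	pause();
	return 0;
}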
diff --git a/squashfs-tools/restore.h b/squashfs-tools/restore.h
new file mode 100644
index 0000000..35129f0
--- /dev/null
+++ b/squashfs-tools/restore.h
@@ -0,0 +1,28 @@
+#ifndef RESTORE_H
+#define RESTORE_H
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2013, 2014
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * restore.h
+ */
+
+extern pthread_t *init_restore_thread();
+#endif
diff --git a/squashfs-tools/signals.h b/squashfs-tools/signals.h
new file mode 100644
index 0000000..5418448
--- /dev/null
+++ b/squashfs-tools/signals.h
@@ -0,0 +1,54 @@
+#ifndef SIGNALS_H
+#define SIGNALS_H
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2023
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * signals.h
+ */
+
+static inline int wait_for_signal(sigset_t *sigmask, int *waiting)
+{
+ int sig;
+
+#if defined(__APPLE__) && defined(__MACH__)
+ sigwait(sigmask, &sig);
+ *waiting = 0;
+#else
+ struct timespec timespec = { .tv_sec = 1, .tv_nsec = 0 };
+
+ while(1) {
+ if(*waiting)
+ sig = sigtimedwait(sigmask, NULL, &timespec);
+ else
+ sig = sigwaitinfo(sigmask, NULL);
+
+ if(sig != -1)
+ break;
+
+ if(errno == EAGAIN)
+ *waiting = 0;
+ else if(errno != EINTR)
+ BAD_ERROR("sigtimedwait/sigwaitinfo failed because %s\n", strerror(errno));
+ }
+#endif
+ return sig;
+}
+#endif
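
wait_for_signal() alternates between sigwaitinfo(), which blocks indefinitely, and sigtimedwait() with a one-second timeout, controlled by the *waiting flag, so a caller can stop polling once the timed wait returns EAGAIN; on Mac OS X, which lacks sigtimedwait(), it falls back to a plain sigwait(). A stand-alone restatement of the same loop with a toy caller (it deliberately does not include signals.h, whose BAD_ERROR() comes from mksquashfs_error.h; the wait_sig() name and the signal choice are illustrative):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* same idea as wait_for_signal(): block until a signal arrives, but if
 * "waiting" is set, poll with a 1-second timeout so the caller notices
 * when whatever it was waiting for has timed out */
static int wait_sig(sigset_t *mask, int *waiting)
{
	struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };
	int sig;

	while(1) {
		if(*waiting)
			sig = sigtimedwait(mask, NULL, &timeout);
		else
			sig = sigwaitinfo(mask, NULL);

		if(sig != -1)
			return sig;

		if(errno == EAGAIN)
			*waiting = 0;	/* timed out, stop polling */
		else if(errno != EINTR) {
			perror("sigtimedwait/sigwaitinfo");
			return -1;
		}
	}
}

int main(void)
{
	sigset_t mask;
	int waiting = 0, sig;

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	sigaddset(&mask, SIGUSR1);
	sigprocmask(SIG_BLOCK, &mask, NULL);

	printf("send SIGUSR1 to pid %d, or press ^C to exit\n", (int) getpid());

	while((sig = wait_sig(&mask, &waiting)) == SIGUSR1)
		printf("got SIGUSR1\n");

	return 0;
}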
diff --git a/squashfs-tools/sort.c b/squashfs-tools/sort.c
new file mode 100644
index 0000000..8814d95
--- /dev/null
+++ b/squashfs-tools/sort.c
@@ -0,0 +1,373 @@
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2012,
+ * 2013, 2014, 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * sort.c
+ */
+
+#define TRUE 1
+#define FALSE 0
+#define MAX_LINE 16384
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <dirent.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+
+#include "squashfs_fs.h"
+#include "mksquashfs.h"
+#include "sort.h"
+#include "mksquashfs_error.h"
+#include "progressbar.h"
+
+static int mkisofs_style = -1;
+
+struct sort_info {
+ dev_t st_dev;
+ ino_t st_ino;
+ int priority;
+ struct sort_info *next;
+};
+
+static struct sort_info *sort_info_list[65536];
+
+struct priority_entry *priority_list[65536];
+
+extern int silent;
+extern char *pathname(struct dir_ent *dir_ent);
+extern long long hardlnk_count;
+
+static void add_priority_list(struct dir_ent *dir, int priority)
+{
+ struct priority_entry *new_priority_entry;
+
+ priority += 32768;
+ new_priority_entry = malloc(sizeof(struct priority_entry));
+ if(new_priority_entry == NULL)
+ MEM_ERROR();
+
+ new_priority_entry->dir = dir;
+ new_priority_entry->next = priority_list[priority];
+ priority_list[priority] = new_priority_entry;
+}
+
+
+static int get_priority(char *filename, struct stat *buf, int priority)
+{
+ int hash = buf->st_ino & 0xffff;
+ struct sort_info *s;
+
+ for(s = sort_info_list[hash]; s; s = s->next)
+ if((s->st_dev == buf->st_dev) && (s->st_ino == buf->st_ino)) {
+ TRACE("returning priority %d (%s)\n", s->priority,
+ filename);
+ return s->priority;
+ }
+ TRACE("returning priority %d (%s)\n", priority, filename);
+ return priority;
+}
+
+
+#define ADD_ENTRY(buf, priority) {\
+ int hash = buf.st_ino & 0xffff;\
+ struct sort_info *s;\
+ if((s = malloc(sizeof(struct sort_info))) == NULL) \
+ MEM_ERROR(); \
+ s->st_dev = buf.st_dev;\
+ s->st_ino = buf.st_ino;\
+ s->priority = priority;\
+ s->next = sort_info_list[hash];\
+ sort_info_list[hash] = s;\
+ }
+static int add_sort_list(char *path, int priority, int source, char *source_path[])
+{
+ int i, n;
+ struct stat buf;
+
+ TRACE("add_sort_list: filename %s, priority %d\n", path, priority);
+ if(strlen(path) > 1 && strcmp(path + strlen(path) - 2, "/*") == 0)
+ path[strlen(path) - 2] = '\0';
+
+ TRACE("add_sort_list: filename %s, priority %d\n", path, priority);
+re_read:
+ if(path[0] == '/' || strncmp(path, "./", 2) == 0 ||
+ strncmp(path, "../", 3) == 0 || mkisofs_style == 1) {
+ if(lstat(path, &buf) == -1)
+ goto error;
+ TRACE("adding filename %s, priority %d, st_dev %d, st_ino "
+ "%lld\n", path, priority, (int) buf.st_dev,
+ (long long) buf.st_ino);
+ ADD_ENTRY(buf, priority);
+ return TRUE;
+ }
+
+ for(i = 0, n = 0; i < source; i++) {
+ char *filename;
+ int res = asprintf(&filename, "%s/%s", source_path[i], path);
+ if(res == -1)
+ BAD_ERROR("asprintf failed in add_sort_list\n");
+ res = lstat(filename, &buf);
+ free(filename);
+ if(res == -1) {
+ if(!(errno == ENOENT || errno == ENOTDIR))
+ goto error;
+ continue;
+ }
+ ADD_ENTRY(buf, priority);
+ n ++;
+ }
+
+ if(n == 0 && mkisofs_style == -1 && lstat(path, &buf) != -1) {
+ ERROR("WARNING: Mkisofs style sortlist detected! This is "
+ "supported but please\n");
+ ERROR("convert to mksquashfs style sortlist! A sortlist entry");
+ ERROR(" should be\neither absolute (starting with ");
+ ERROR("'/') start with './' or '../' (taken to be\nrelative to "
+ "$PWD), otherwise it ");
+ ERROR("is assumed the entry is relative to one\nof the source "
+ "directories, i.e. with ");
+ ERROR("\"mksquashfs test test.sqsh\",\nthe sortlist ");
+ ERROR("entry \"file\" is assumed to be inside the directory "
+ "test.\n\n");
+ mkisofs_style = 1;
+ goto re_read;
+ }
+
+ mkisofs_style = 0;
+
+ if(n == 1)
+ return TRUE;
+ if(n > 1) {
+ ERROR(" Ambiguous sortlist entry \"%s\"\n\nIt maps to more "
+ "than one source entry! Please use an absolute path."
+ "\n", path);
+ return FALSE;
+ }
+
+error:
+ ERROR_START("Cannot stat sortlist entry \"%s\"\n", path);
+ ERROR("This is probably because you're using the wrong file\n");
+ ERROR("path relative to the source directories.");
+ ERROR_EXIT(" Ignoring\n");
+ /*
+ * Historical note
+ * Failure to stat a sortlist entry is deliberately ignored, even
+ * though it is an error. Squashfs release 2.2 changed the behaviour
+ * to treat it as a fatal error, but it was changed back to
+ * the original behaviour to ignore it in release 2.2-r2 following
+ * feedback from users at the time.
+ */
+ return TRUE;
+}
+
+
+void generate_file_priorities(struct dir_info *dir, int priority,
+ struct stat *buf)
+{
+ struct dir_ent *dir_ent = dir->list;
+
+ priority = get_priority(dir->pathname, buf, priority);
+
+ for(; dir_ent; dir_ent = dir_ent->next) {
+ struct stat *buf = &dir_ent->inode->buf;
+ if(dir_ent->inode->root_entry)
+ continue;
+
+ switch(buf->st_mode & S_IFMT) {
+ case S_IFREG:
+ add_priority_list(dir_ent,
+ get_priority(pathname(dir_ent), buf,
+ priority));
+ break;
+ case S_IFDIR:
+ generate_file_priorities(dir_ent->dir,
+ priority, buf);
+ break;
+ }
+ }
+}
+
+
+int read_sort_file(char *filename, int source, char *source_path[])
+{
+ FILE *fd;
+ char line_buffer[MAX_LINE + 1]; /* overflow safe */
+ char sort_filename[MAX_LINE + 1]; /* overflow safe */
+ char *line, *name;
+ int n, priority, res;
+
+ if((fd = fopen(filename, "r")) == NULL) {
+ ERROR("Failed to open sort file \"%s\" because %s\n",
+ filename, strerror(errno));
+ return FALSE;
+ }
+
+ while(fgets(line = line_buffer, MAX_LINE + 1, fd) != NULL) {
+ int len = strlen(line);
+
+ if(len == MAX_LINE && line[len - 1] != '\n') {
+ /* line too large */
+ ERROR("Line too long when reading "
+ "sort file \"%s\", larger than %d "
+ "bytes\n", filename, MAX_LINE);
+ goto failed;
+ }
+
+ /*
+ * Remove '\n' terminator if it exists (the last line
+ * in the file may not be '\n' terminated)
+ */
+ if(len && line[len - 1] == '\n')
+ line[len - 1] = '\0';
+
+ /* Skip any leading whitespace */
+ while(isspace(*line))
+ line ++;
+
+ /* if comment line, skip */
+ if(*line == '#')
+ continue;
+
+ /*
+ * Scan for filename, don't use sscanf() and "%s" because
+ * that can't handle filenames with spaces
+ */
+ for(name = sort_filename; !isspace(*line) && *line != '\0';) {
+ if(*line == '\\') {
+ line ++;
+ if (*line == '\0')
+ break;
+ }
+ *name ++ = *line ++;
+ }
+ *name = '\0';
+
+ /*
+ * if the filename is empty, the line contained nothing but
+ * whitespace or a backslash character. Skip empty lines
+ */
+ if(sort_filename[0] == '\0')
+ continue;
+
+ /*
+ * Scan the rest of the line, we expect a decimal number
+ * which is the filename priority
+ */
+ errno = 0;
+ res = sscanf(line, "%d%n", &priority, &n);
+
+ if((res < 1 || errno) && errno != ERANGE) {
+ if(errno == 0)
+ /* No error, assume EOL or match failure */
+ ERROR("Sort file \"%s\", can't find priority "
+ "in entry \"%s\", EOL or match "
+ "failure\n", filename, line_buffer);
+ else
+ /* Some other failure not ERANGE */
+ ERROR("Sscanf failed reading sort file \"%s\" "
+ "because %s\n", filename,
+ strerror(errno));
+ goto failed;
+ } else if((errno == ERANGE) ||
+ (priority < -32768 || priority > 32767)) {
+ ERROR("Sort file \"%s\", entry \"%s\" has priority "
+ "outside range of -32767:32768.\n", filename,
+ line_buffer);
+ goto failed;
+ }
+
+ /* Skip any trailing whitespace */
+ line += n;
+ while(isspace(*line))
+ line ++;
+
+ if(*line != '\0') {
+ ERROR("Sort file \"%s\", trailing characters after "
+ "priority in entry \"%s\"\n", filename,
+ line_buffer);
+ goto failed;
+ }
+
+ res = add_sort_list(sort_filename, priority, source,
+ source_path);
+ if(res == FALSE)
+ goto failed;
+ }
+
+ if(ferror(fd)) {
+ ERROR("Reading sort file \"%s\" failed because %s\n", filename,
+ strerror(errno));
+ goto failed;
+ }
+
+ fclose(fd);
+ return TRUE;
+
+failed:
+ fclose(fd);
+ return FALSE;
+}
+
+
+void sort_files_and_write(struct dir_info *dir)
+{
+ int i;
+ struct priority_entry *entry;
+ squashfs_inode inode;
+ int duplicate_file;
+ struct file_info *file;
+
+ for(i = 65535; i >= 0; i--)
+ for(entry = priority_list[i]; entry; entry = entry->next) {
+ TRACE("%d: %s\n", i - 32768, pathname(entry->dir));
+ if(entry->dir->inode->inode == SQUASHFS_INVALID_BLK) {
+ file = write_file(entry->dir, &duplicate_file);
+ inode = create_inode(NULL, entry->dir,
+ SQUASHFS_FILE_TYPE, file->file_size,
+ file->start, file->blocks,
+ file->block_list,
+ file->fragment, NULL,
+ file->sparse);
+ if(duplicate_checking == FALSE) {
+ free_fragment(file->fragment);
+ free(file->block_list);
+ }
+ INFO("file %s, uncompressed size %lld bytes %s"
+ "\n", pathname(entry->dir),
+ (long long)
+ entry->dir->inode->buf.st_size,
+ duplicate_file ? "DUPLICATE" : "");
+ entry->dir->inode->inode = inode;
+ entry->dir->inode->type = SQUASHFS_FILE_TYPE;
+ hardlnk_count --;
+ } else
+ INFO("file %s, uncompressed size %lld bytes "
+ "LINK\n", pathname(entry->dir),
+ (long long)
+ entry->dir->inode->buf.st_size);
+ }
+}
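
A sort file read by read_sort_file() is a sequence of "<filename> <priority>" lines, where the filename may escape spaces with a backslash, '#' starts a comment, and the priority must lie in -32768..32767 (it is biased by 32768 into the priority_list[] index). The least obvious part is the sscanf("%d%n") parse followed by a trailing-whitespace-only check, which is what rejects entries such as "file 10x". A stand-alone sketch of just that check (the sample strings are invented):

#include <ctype.h>
#include <stdio.h>

/* returns 1 and fills *priority if the string is a valid priority with
 * nothing but whitespace after it, mirroring the check in read_sort_file() */
static int parse_priority(const char *line, int *priority)
{
	int n = 0;

	if(sscanf(line, "%d%n", priority, &n) < 1)
		return 0;

	for(line += n; isspace((unsigned char) *line); line++)
		;

	return *line == '\0' && *priority >= -32768 && *priority <= 32767;
}

int main(void)
{
	const char *samples[] = { " 10", "-32768", "10x", "99999", "" };
	int i, priority;

	for(i = 0; i < 5; i++)
		printf("\"%s\" -> %s\n", samples[i],
			parse_priority(samples[i], &priority) ? "ok" : "rejected");
	return 0;
}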
diff --git a/squashfs-tools/sort.h b/squashfs-tools/sort.h
new file mode 100644
index 0000000..98db62c
--- /dev/null
+++ b/squashfs-tools/sort.h
@@ -0,0 +1,37 @@
+#ifndef SORT_H
+#define SORT_H
+
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2013
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * sort.h
+ */
+
+struct priority_entry {
+ struct dir_ent *dir;
+ struct priority_entry *next;
+};
+
+extern int read_sort_file(char *, int, char *[]);
+extern void sort_files_and_write(struct dir_info *);
+extern void generate_file_priorities(struct dir_info *, int priority,
+ struct stat *);
+extern struct priority_entry *priority_list[65536];
+#endif
diff --git a/squashfs-tools/squashfs_compat.h b/squashfs-tools/squashfs_compat.h
new file mode 100644
index 0000000..1f58266
--- /dev/null
+++ b/squashfs-tools/squashfs_compat.h
@@ -0,0 +1,833 @@
+#ifndef SQUASHFS_COMPAT
+#define SQUASHFS_COMPAT
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2014, 2019
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * squashfs_compat.h
+ */
+
+/*
+ * definitions for structures on disk - layout 3.x
+ */
+
+#define SQUASHFS_CHECK 2
+#define SQUASHFS_CHECK_DATA(flags) SQUASHFS_BIT(flags, SQUASHFS_CHECK)
+
+/* Max number of uids and gids */
+#define SQUASHFS_UIDS 256
+#define SQUASHFS_GUIDS 255
+
+struct squashfs_super_block_3 {
+ unsigned int s_magic;
+ unsigned int inodes;
+ unsigned int bytes_used_2;
+ unsigned int uid_start_2;
+ unsigned int guid_start_2;
+ unsigned int inode_table_start_2;
+ unsigned int directory_table_start_2;
+ unsigned int s_major:16;
+ unsigned int s_minor:16;
+ unsigned int block_size_1:16;
+ unsigned int block_log:16;
+ unsigned int flags:8;
+ unsigned int no_uids:8;
+ unsigned int no_guids:8;
+ int mkfs_time /* time of filesystem creation */;
+ squashfs_inode root_inode;
+ unsigned int block_size;
+ unsigned int fragments;
+ unsigned int fragment_table_start_2;
+ long long bytes_used;
+ long long uid_start;
+ long long guid_start;
+ long long inode_table_start;
+ long long directory_table_start;
+ long long fragment_table_start;
+ long long lookup_table_start;
+} __attribute__ ((packed));
+
+struct squashfs_dir_index_3 {
+ unsigned int index;
+ unsigned int start_block;
+ unsigned char size;
+ unsigned char name[0];
+} __attribute__ ((packed));
+
+struct squashfs_base_inode_header_3 {
+ unsigned int inode_type:4;
+ unsigned int mode:12;
+ unsigned int uid:8;
+ unsigned int guid:8;
+ int mtime;
+ unsigned int inode_number;
+} __attribute__ ((packed));
+
+struct squashfs_ipc_inode_header_3 {
+ unsigned int inode_type:4;
+ unsigned int mode:12;
+ unsigned int uid:8;
+ unsigned int guid:8;
+ int mtime;
+ unsigned int inode_number;
+ unsigned int nlink;
+} __attribute__ ((packed));
+
+struct squashfs_dev_inode_header_3 {
+ unsigned int inode_type:4;
+ unsigned int mode:12;
+ unsigned int uid:8;
+ unsigned int guid:8;
+ int mtime;
+ unsigned int inode_number;
+ unsigned int nlink;
+ unsigned short rdev;
+} __attribute__ ((packed));
+
+struct squashfs_symlink_inode_header_3 {
+ unsigned int inode_type:4;
+ unsigned int mode:12;
+ unsigned int uid:8;
+ unsigned int guid:8;
+ int mtime;
+ unsigned int inode_number;
+ unsigned int nlink;
+ unsigned short symlink_size;
+ char symlink[0];
+} __attribute__ ((packed));
+
+struct squashfs_reg_inode_header_3 {
+ unsigned int inode_type:4;
+ unsigned int mode:12;
+ unsigned int uid:8;
+ unsigned int guid:8;
+ int mtime;
+ unsigned int inode_number;
+ squashfs_block start_block;
+ unsigned int fragment;
+ unsigned int offset;
+ unsigned int file_size;
+ unsigned short block_list[0];
+} __attribute__ ((packed));
+
+struct squashfs_lreg_inode_header_3 {
+ unsigned int inode_type:4;
+ unsigned int mode:12;
+ unsigned int uid:8;
+ unsigned int guid:8;
+ int mtime;
+ unsigned int inode_number;
+ unsigned int nlink;
+ squashfs_block start_block;
+ unsigned int fragment;
+ unsigned int offset;
+ long long file_size;
+ unsigned short block_list[0];
+} __attribute__ ((packed));
+
+struct squashfs_dir_inode_header_3 {
+ unsigned int inode_type:4;
+ unsigned int mode:12;
+ unsigned int uid:8;
+ unsigned int guid:8;
+ int mtime;
+ unsigned int inode_number;
+ unsigned int nlink;
+ unsigned int file_size:19;
+ unsigned int offset:13;
+ unsigned int start_block;
+ unsigned int parent_inode;
+} __attribute__ ((packed));
+
+struct squashfs_ldir_inode_header_3 {
+ unsigned int inode_type:4;
+ unsigned int mode:12;
+ unsigned int uid:8;
+ unsigned int guid:8;
+ int mtime;
+ unsigned int inode_number;
+ unsigned int nlink;
+ unsigned int file_size:27;
+ unsigned int offset:13;
+ unsigned int start_block;
+ unsigned int i_count:16;
+ unsigned int parent_inode;
+ struct squashfs_dir_index_3 index[0];
+} __attribute__ ((packed));
+
+union squashfs_inode_header_3 {
+ struct squashfs_base_inode_header_3 base;
+ struct squashfs_dev_inode_header_3 dev;
+ struct squashfs_symlink_inode_header_3 symlink;
+ struct squashfs_reg_inode_header_3 reg;
+ struct squashfs_lreg_inode_header_3 lreg;
+ struct squashfs_dir_inode_header_3 dir;
+ struct squashfs_ldir_inode_header_3 ldir;
+ struct squashfs_ipc_inode_header_3 ipc;
+};
+
+struct squashfs_dir_entry_3 {
+ unsigned int offset:13;
+ unsigned int type:3;
+ unsigned int size:8;
+ int inode_number:16;
+ char name[0];
+} __attribute__ ((packed));
+
+struct squashfs_dir_header_3 {
+ unsigned int count:8;
+ unsigned int start_block;
+ unsigned int inode_number;
+} __attribute__ ((packed));
+
+struct squashfs_fragment_entry_3 {
+ long long start_block;
+ unsigned int size;
+ unsigned int pending;
+} __attribute__ ((packed));
+
+
+typedef struct squashfs_super_block_3 squashfs_super_block_3;
+typedef struct squashfs_dir_index_3 squashfs_dir_index_3;
+typedef struct squashfs_base_inode_header_3 squashfs_base_inode_header_3;
+typedef struct squashfs_ipc_inode_header_3 squashfs_ipc_inode_header_3;
+typedef struct squashfs_dev_inode_header_3 squashfs_dev_inode_header_3;
+typedef struct squashfs_symlink_inode_header_3 squashfs_symlink_inode_header_3;
+typedef struct squashfs_reg_inode_header_3 squashfs_reg_inode_header_3;
+typedef struct squashfs_lreg_inode_header_3 squashfs_lreg_inode_header_3;
+typedef struct squashfs_dir_inode_header_3 squashfs_dir_inode_header_3;
+typedef struct squashfs_ldir_inode_header_3 squashfs_ldir_inode_header_3;
+typedef struct squashfs_dir_entry_3 squashfs_dir_entry_3;
+typedef struct squashfs_dir_header_3 squashfs_dir_header_3;
+typedef struct squashfs_fragment_entry_3 squashfs_fragment_entry_3;
+
+/*
+ * macros to convert each packed bitfield structure from little endian to big
+ * endian and vice versa. These are needed when creating or using a filesystem
+ * on a machine with different byte ordering to the target architecture.
+ *
+ */
+
+#define SQUASHFS_SWAP_START \
+ int bits;\
+ int b_pos;\
+ unsigned long long val;\
+ unsigned char *s;\
+ unsigned char *d;
+
+#define SQUASHFS_SWAP_SUPER_BLOCK_3(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_super_block_3));\
+ SQUASHFS_SWAP((s)->s_magic, d, 0, 32);\
+ SQUASHFS_SWAP((s)->inodes, d, 32, 32);\
+ SQUASHFS_SWAP((s)->bytes_used_2, d, 64, 32);\
+ SQUASHFS_SWAP((s)->uid_start_2, d, 96, 32);\
+ SQUASHFS_SWAP((s)->guid_start_2, d, 128, 32);\
+ SQUASHFS_SWAP((s)->inode_table_start_2, d, 160, 32);\
+ SQUASHFS_SWAP((s)->directory_table_start_2, d, 192, 32);\
+ SQUASHFS_SWAP((s)->s_major, d, 224, 16);\
+ SQUASHFS_SWAP((s)->s_minor, d, 240, 16);\
+ SQUASHFS_SWAP((s)->block_size_1, d, 256, 16);\
+ SQUASHFS_SWAP((s)->block_log, d, 272, 16);\
+ SQUASHFS_SWAP((s)->flags, d, 288, 8);\
+ SQUASHFS_SWAP((s)->no_uids, d, 296, 8);\
+ SQUASHFS_SWAP((s)->no_guids, d, 304, 8);\
+ SQUASHFS_SWAP((s)->mkfs_time, d, 312, 32);\
+ SQUASHFS_SWAP((s)->root_inode, d, 344, 64);\
+ SQUASHFS_SWAP((s)->block_size, d, 408, 32);\
+ SQUASHFS_SWAP((s)->fragments, d, 440, 32);\
+ SQUASHFS_SWAP((s)->fragment_table_start_2, d, 472, 32);\
+ SQUASHFS_SWAP((s)->bytes_used, d, 504, 64);\
+ SQUASHFS_SWAP((s)->uid_start, d, 568, 64);\
+ SQUASHFS_SWAP((s)->guid_start, d, 632, 64);\
+ SQUASHFS_SWAP((s)->inode_table_start, d, 696, 64);\
+ SQUASHFS_SWAP((s)->directory_table_start, d, 760, 64);\
+ SQUASHFS_SWAP((s)->fragment_table_start, d, 824, 64);\
+ SQUASHFS_SWAP((s)->lookup_table_start, d, 888, 64);\
+}
+
+#define SQUASHFS_SWAP_BASE_INODE_CORE_3(s, d, n)\
+ SQUASHFS_MEMSET(s, d, n);\
+ SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
+ SQUASHFS_SWAP((s)->mode, d, 4, 12);\
+ SQUASHFS_SWAP((s)->uid, d, 16, 8);\
+ SQUASHFS_SWAP((s)->guid, d, 24, 8);\
+ SQUASHFS_SWAP((s)->mtime, d, 32, 32);\
+ SQUASHFS_SWAP((s)->inode_number, d, 64, 32);
+
+#define SQUASHFS_SWAP_BASE_INODE_HEADER_3(s, d, n) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_3(s, d, n)\
+}
+
+#define SQUASHFS_SWAP_IPC_INODE_HEADER_3(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_3(s, d, \
+ sizeof(struct squashfs_ipc_inode_header_3))\
+ SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
+}
+
+#define SQUASHFS_SWAP_DEV_INODE_HEADER_3(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_3(s, d, \
+ sizeof(struct squashfs_dev_inode_header_3)); \
+ SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
+ SQUASHFS_SWAP((s)->rdev, d, 128, 16);\
+}
+
+#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER_3(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_3(s, d, \
+ sizeof(struct squashfs_symlink_inode_header_3));\
+ SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
+ SQUASHFS_SWAP((s)->symlink_size, d, 128, 16);\
+}
+
+#define SQUASHFS_SWAP_REG_INODE_HEADER_3(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_3(s, d, \
+ sizeof(struct squashfs_reg_inode_header_3));\
+ SQUASHFS_SWAP((s)->start_block, d, 96, 64);\
+ SQUASHFS_SWAP((s)->fragment, d, 160, 32);\
+ SQUASHFS_SWAP((s)->offset, d, 192, 32);\
+ SQUASHFS_SWAP((s)->file_size, d, 224, 32);\
+}
+
+#define SQUASHFS_SWAP_LREG_INODE_HEADER_3(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_3(s, d, \
+ sizeof(struct squashfs_lreg_inode_header_3));\
+ SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
+ SQUASHFS_SWAP((s)->start_block, d, 128, 64);\
+ SQUASHFS_SWAP((s)->fragment, d, 192, 32);\
+ SQUASHFS_SWAP((s)->offset, d, 224, 32);\
+ SQUASHFS_SWAP((s)->file_size, d, 256, 64);\
+}
+
+#define SQUASHFS_SWAP_DIR_INODE_HEADER_3(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_3(s, d, \
+ sizeof(struct squashfs_dir_inode_header_3));\
+ SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
+ SQUASHFS_SWAP((s)->file_size, d, 128, 19);\
+ SQUASHFS_SWAP((s)->offset, d, 147, 13);\
+ SQUASHFS_SWAP((s)->start_block, d, 160, 32);\
+ SQUASHFS_SWAP((s)->parent_inode, d, 192, 32);\
+}
+
+#define SQUASHFS_SWAP_LDIR_INODE_HEADER_3(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_3(s, d, \
+ sizeof(struct squashfs_ldir_inode_header_3));\
+ SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
+ SQUASHFS_SWAP((s)->file_size, d, 128, 27);\
+ SQUASHFS_SWAP((s)->offset, d, 155, 13);\
+ SQUASHFS_SWAP((s)->start_block, d, 168, 32);\
+ SQUASHFS_SWAP((s)->i_count, d, 200, 16);\
+ SQUASHFS_SWAP((s)->parent_inode, d, 216, 32);\
+}
+
+#define SQUASHFS_SWAP_DIR_INDEX_3(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_index_3));\
+ SQUASHFS_SWAP((s)->index, d, 0, 32);\
+ SQUASHFS_SWAP((s)->start_block, d, 32, 32);\
+ SQUASHFS_SWAP((s)->size, d, 64, 8);\
+}
+
+#define SQUASHFS_SWAP_DIR_HEADER_3(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_header_3));\
+ SQUASHFS_SWAP((s)->count, d, 0, 8);\
+ SQUASHFS_SWAP((s)->start_block, d, 8, 32);\
+ SQUASHFS_SWAP((s)->inode_number, d, 40, 32);\
+}
+
+#define SQUASHFS_SWAP_DIR_ENTRY_3(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_entry_3));\
+ SQUASHFS_SWAP((s)->offset, d, 0, 13);\
+ SQUASHFS_SWAP((s)->type, d, 13, 3);\
+ SQUASHFS_SWAP((s)->size, d, 16, 8);\
+ SQUASHFS_SWAP((s)->inode_number, d, 24, 16);\
+}
+
+#define SQUASHFS_SWAP_INODE_T_3(s, d) SQUASHFS_SWAP_LONG_LONGS_3(s, d, 1)
+
+#define SQUASHFS_SWAP_SHORTS_3(s, d, n) {\
+ int entry;\
+ int bit_position;\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_MEMSET(s, d, n * 2);\
+ for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
+ 16)\
+ SQUASHFS_SWAP(s[entry], d, bit_position, 16);\
+}
+
+#define SQUASHFS_SWAP_INTS_3(s, d, n) {\
+ int entry;\
+ int bit_position;\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_MEMSET(s, d, n * 4);\
+ for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
+ 32)\
+ SQUASHFS_SWAP(s[entry], d, bit_position, 32);\
+}
+
+#define SQUASHFS_SWAP_LONG_LONGS_3(s, d, n) {\
+ int entry;\
+ int bit_position;\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_MEMSET(s, d, n * 8);\
+ for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
+ 64)\
+ SQUASHFS_SWAP(s[entry], d, bit_position, 64);\
+}
+
+#define SQUASHFS_SWAP_DATA(s, d, n, bits) {\
+ int entry;\
+ int bit_position;\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_MEMSET(s, d, n * bits / 8);\
+ for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
+ bits)\
+ SQUASHFS_SWAP(s[entry], d, bit_position, bits);\
+}
+
+#define SQUASHFS_SWAP_FRAGMENT_INDEXES_3(s, d, n) SQUASHFS_SWAP_LONG_LONGS_3(s, d, n)
+#define SQUASHFS_SWAP_LOOKUP_BLOCKS_3(s, d, n) SQUASHFS_SWAP_LONG_LONGS_3(s, d, n)
+
+#define SQUASHFS_SWAP_FRAGMENT_ENTRY_3(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_fragment_entry_3));\
+ SQUASHFS_SWAP((s)->start_block, d, 0, 64);\
+ SQUASHFS_SWAP((s)->size, d, 64, 32);\
+}
+
+/* fragment and fragment table defines */
+#define SQUASHFS_FRAGMENT_BYTES_3(A) ((A) * sizeof(struct squashfs_fragment_entry_3))
+
+#define SQUASHFS_FRAGMENT_INDEX_3(A) (SQUASHFS_FRAGMENT_BYTES_3(A) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEX_OFFSET_3(A) (SQUASHFS_FRAGMENT_BYTES_3(A) % \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEXES_3(A) ((SQUASHFS_FRAGMENT_BYTES_3(A) + \
+ SQUASHFS_METADATA_SIZE - 1) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEX_BYTES_3(A) (SQUASHFS_FRAGMENT_INDEXES_3(A) *\
+ sizeof(long long))
+
+/* inode lookup table defines */
+#define SQUASHFS_LOOKUP_BYTES_3(A) ((A) * sizeof(squashfs_inode))
+
+#define SQUASHFS_LOOKUP_BLOCK_3(A) (SQUASHFS_LOOKUP_BYTES_3(A) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCK_OFFSET_3(A) (SQUASHFS_LOOKUP_BYTES_3(A) % \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCKS_3(A) ((SQUASHFS_LOOKUP_BYTES_3(A) + \
+ SQUASHFS_METADATA_SIZE - 1) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCK_BYTES_3(A) (SQUASHFS_LOOKUP_BLOCKS(A) *\
+ sizeof(long long))
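
The fragment and lookup table helpers above are straightforward arithmetic: an entry count or index becomes a byte count, which is then split into a metadata-block number and an offset within that block, plus a rounded-up block count for sizing the index. A quick numeric check, assuming SQUASHFS_METADATA_SIZE is 8192 (as defined in squashfs_fs.h) and a packed 16-byte squashfs_fragment_entry_3:

#include <stdio.h>

#define METADATA_SIZE	8192	/* assumed value of SQUASHFS_METADATA_SIZE */
#define ENTRY_SIZE	16	/* packed sizeof(struct squashfs_fragment_entry_3) */

int main(void)
{
	unsigned int i = 1000;	/* fragment number / fragment count */
	unsigned long long bytes = (unsigned long long) i * ENTRY_SIZE;

	printf("fragment %u starts at byte %llu of the table\n", i, bytes);
	printf(" -> metadata block %llu, offset %llu\n",
		bytes / METADATA_SIZE, bytes % METADATA_SIZE);
	printf("a table of %u fragments needs %llu metadata blocks\n",
		i, (bytes + METADATA_SIZE - 1) / METADATA_SIZE);
	return 0;
}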
+
+/*
+ * definitions for structures on disk - layout 1.x
+ */
+#define SQUASHFS_TYPES 5
+#define SQUASHFS_IPC_TYPE 0
+
+struct squashfs_base_inode_header_1 {
+ unsigned int inode_type:4;
+ unsigned int mode:12; /* protection */
+ unsigned int uid:4; /* index into uid table */
+ unsigned int guid:4; /* index into guid table */
+} __attribute__ ((packed));
+
+struct squashfs_ipc_inode_header_1 {
+ unsigned int inode_type:4;
+ unsigned int mode:12; /* protection */
+ unsigned int uid:4; /* index into uid table */
+ unsigned int guid:4; /* index into guid table */
+ unsigned int type:4;
+ unsigned int offset:4;
+} __attribute__ ((packed));
+
+struct squashfs_dev_inode_header_1 {
+ unsigned int inode_type:4;
+ unsigned int mode:12; /* protection */
+ unsigned int uid:4; /* index into uid table */
+ unsigned int guid:4; /* index into guid table */
+ unsigned short rdev;
+} __attribute__ ((packed));
+
+struct squashfs_symlink_inode_header_1 {
+ unsigned int inode_type:4;
+ unsigned int mode:12; /* protection */
+ unsigned int uid:4; /* index into uid table */
+ unsigned int guid:4; /* index into guid table */
+ unsigned short symlink_size;
+ char symlink[0];
+} __attribute__ ((packed));
+
+struct squashfs_reg_inode_header_1 {
+ unsigned int inode_type:4;
+ unsigned int mode:12; /* protection */
+ unsigned int uid:4; /* index into uid table */
+ unsigned int guid:4; /* index into guid table */
+ int mtime;
+ unsigned int start_block;
+ unsigned int file_size:32;
+ unsigned short block_list[0];
+} __attribute__ ((packed));
+
+struct squashfs_dir_inode_header_1 {
+ unsigned int inode_type:4;
+ unsigned int mode:12; /* protection */
+ unsigned int uid:4; /* index into uid table */
+ unsigned int guid:4; /* index into guid table */
+ unsigned int file_size:19;
+ unsigned int offset:13;
+ int mtime;
+ unsigned int start_block:24;
+} __attribute__ ((packed));
+
+union squashfs_inode_header_1 {
+ struct squashfs_base_inode_header_1 base;
+ struct squashfs_dev_inode_header_1 dev;
+ struct squashfs_symlink_inode_header_1 symlink;
+ struct squashfs_reg_inode_header_1 reg;
+ struct squashfs_dir_inode_header_1 dir;
+ struct squashfs_ipc_inode_header_1 ipc;
+};
+
+typedef struct squashfs_dir_index_1 squashfs_dir_index_1;
+typedef struct squashfs_base_inode_header_1 squashfs_base_inode_header_1;
+typedef struct squashfs_ipc_inode_header_1 squashfs_ipc_inode_header_1;
+typedef struct squashfs_dev_inode_header_1 squashfs_dev_inode_header_1;
+typedef struct squashfs_symlink_inode_header_1 squashfs_symlink_inode_header_1;
+typedef struct squashfs_reg_inode_header_1 squashfs_reg_inode_header_1;
+typedef struct squashfs_dir_inode_header_1 squashfs_dir_inode_header_1;
+
+#define SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, n) \
+ SQUASHFS_MEMSET(s, d, n);\
+ SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
+ SQUASHFS_SWAP((s)->mode, d, 4, 12);\
+ SQUASHFS_SWAP((s)->uid, d, 16, 4);\
+ SQUASHFS_SWAP((s)->guid, d, 20, 4);
+
+#define SQUASHFS_SWAP_BASE_INODE_HEADER_1(s, d, n) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, n)\
+}
+
+#define SQUASHFS_SWAP_IPC_INODE_HEADER_1(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
+ sizeof(struct squashfs_ipc_inode_header_1));\
+ SQUASHFS_SWAP((s)->type, d, 24, 4);\
+ SQUASHFS_SWAP((s)->offset, d, 28, 4);\
+}
+
+#define SQUASHFS_SWAP_DEV_INODE_HEADER_1(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
+ sizeof(struct squashfs_dev_inode_header_1));\
+ SQUASHFS_SWAP((s)->rdev, d, 24, 16);\
+}
+
+#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER_1(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
+ sizeof(struct squashfs_symlink_inode_header_1));\
+ SQUASHFS_SWAP((s)->symlink_size, d, 24, 16);\
+}
+
+#define SQUASHFS_SWAP_REG_INODE_HEADER_1(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
+ sizeof(struct squashfs_reg_inode_header_1));\
+ SQUASHFS_SWAP((s)->mtime, d, 24, 32);\
+ SQUASHFS_SWAP((s)->start_block, d, 56, 32);\
+ SQUASHFS_SWAP((s)->file_size, d, 88, 32);\
+}
+
+#define SQUASHFS_SWAP_DIR_INODE_HEADER_1(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
+ sizeof(struct squashfs_dir_inode_header_1));\
+ SQUASHFS_SWAP((s)->file_size, d, 24, 19);\
+ SQUASHFS_SWAP((s)->offset, d, 43, 13);\
+ SQUASHFS_SWAP((s)->mtime, d, 56, 32);\
+ SQUASHFS_SWAP((s)->start_block, d, 88, 24);\
+}
+
+/*
+ * definitions for structures on disk - layout 2.x
+ */
+struct squashfs_dir_index_2 {
+ unsigned int index:27;
+ unsigned int start_block:29;
+ unsigned char size;
+ unsigned char name[0];
+} __attribute__ ((packed));
+
+struct squashfs_base_inode_header_2 {
+ unsigned int inode_type:4;
+ unsigned int mode:12; /* protection */
+ unsigned int uid:8; /* index into uid table */
+ unsigned int guid:8; /* index into guid table */
+} __attribute__ ((packed));
+
+struct squashfs_ipc_inode_header_2 {
+ unsigned int inode_type:4;
+ unsigned int mode:12; /* protection */
+ unsigned int uid:8; /* index into uid table */
+ unsigned int guid:8; /* index into guid table */
+} __attribute__ ((packed));
+
+struct squashfs_dev_inode_header_2 {
+ unsigned int inode_type:4;
+ unsigned int mode:12; /* protection */
+ unsigned int uid:8; /* index into uid table */
+ unsigned int guid:8; /* index into guid table */
+ unsigned short rdev;
+} __attribute__ ((packed));
+
+struct squashfs_symlink_inode_header_2 {
+ unsigned int inode_type:4;
+ unsigned int mode:12; /* protection */
+ unsigned int uid:8; /* index into uid table */
+ unsigned int guid:8; /* index into guid table */
+ unsigned short symlink_size;
+ char symlink[0];
+} __attribute__ ((packed));
+
+struct squashfs_reg_inode_header_2 {
+ unsigned int inode_type:4;
+ unsigned int mode:12; /* protection */
+ unsigned int uid:8; /* index into uid table */
+ unsigned int guid:8; /* index into guid table */
+ int mtime;
+ unsigned int start_block;
+ unsigned int fragment;
+ unsigned int offset;
+ unsigned int file_size:32;
+ unsigned short block_list[0];
+} __attribute__ ((packed));
+
+struct squashfs_dir_inode_header_2 {
+ unsigned int inode_type:4;
+ unsigned int mode:12; /* protection */
+ unsigned int uid:8; /* index into uid table */
+ unsigned int guid:8; /* index into guid table */
+ unsigned int file_size:19;
+ unsigned int offset:13;
+ int mtime;
+ unsigned int start_block:24;
+} __attribute__ ((packed));
+
+struct squashfs_ldir_inode_header_2 {
+ unsigned int inode_type:4;
+ unsigned int mode:12; /* protection */
+ unsigned int uid:8; /* index into uid table */
+ unsigned int guid:8; /* index into guid table */
+ unsigned int file_size:27;
+ unsigned int offset:13;
+ int mtime;
+ unsigned int start_block:24;
+ unsigned int i_count:16;
+ struct squashfs_dir_index_2 index[0];
+} __attribute__ ((packed));
+
+union squashfs_inode_header_2 {
+ struct squashfs_base_inode_header_2 base;
+ struct squashfs_dev_inode_header_2 dev;
+ struct squashfs_symlink_inode_header_2 symlink;
+ struct squashfs_reg_inode_header_2 reg;
+ struct squashfs_dir_inode_header_2 dir;
+ struct squashfs_ldir_inode_header_2 ldir;
+ struct squashfs_ipc_inode_header_2 ipc;
+};
+
+struct squashfs_dir_header_2 {
+ unsigned int count:8;
+ unsigned int start_block:24;
+} __attribute__ ((packed));
+
+struct squashfs_dir_entry_2 {
+ unsigned int offset:13;
+ unsigned int type:3;
+ unsigned int size:8;
+ char name[0];
+} __attribute__ ((packed));
+
+struct squashfs_fragment_entry_2 {
+ unsigned int start_block;
+ unsigned int size;
+} __attribute__ ((packed));
+
+typedef struct squashfs_dir_index_2 squashfs_dir_index_2;
+typedef struct squashfs_base_inode_header_2 squashfs_base_inode_header_2;
+typedef struct squashfs_ipc_inode_header_2 squashfs_ipc_inode_header_2;
+typedef struct squashfs_dev_inode_header_2 squashfs_dev_inode_header_2;
+typedef struct squashfs_symlink_inode_header_2 squashfs_symlink_inode_header_2;
+typedef struct squashfs_reg_inode_header_2 squashfs_reg_inode_header_2;
+typedef struct squashfs_lreg_inode_header_2 squashfs_lreg_inode_header_2;
+typedef struct squashfs_dir_inode_header_2 squashfs_dir_inode_header_2;
+typedef struct squashfs_ldir_inode_header_2 squashfs_ldir_inode_header_2;
+typedef struct squashfs_dir_entry_2 squashfs_dir_entry_2;
+typedef struct squashfs_dir_header_2 squashfs_dir_header_2;
+typedef struct squashfs_fragment_entry_2 squashfs_fragment_entry_2;
+
+#define SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, n)\
+ SQUASHFS_MEMSET(s, d, n);\
+ SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
+ SQUASHFS_SWAP((s)->mode, d, 4, 12);\
+ SQUASHFS_SWAP((s)->uid, d, 16, 8);\
+ SQUASHFS_SWAP((s)->guid, d, 24, 8);\
+
+#define SQUASHFS_SWAP_BASE_INODE_HEADER_2(s, d, n) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, n)\
+}
+
+#define SQUASHFS_SWAP_IPC_INODE_HEADER_2(s, d) \
+ SQUASHFS_SWAP_BASE_INODE_HEADER_2(s, d, sizeof(struct squashfs_ipc_inode_header_2))
+
+#define SQUASHFS_SWAP_DEV_INODE_HEADER_2(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
+ sizeof(struct squashfs_dev_inode_header_2)); \
+ SQUASHFS_SWAP((s)->rdev, d, 32, 16);\
+}
+
+#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER_2(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
+ sizeof(struct squashfs_symlink_inode_header_2));\
+ SQUASHFS_SWAP((s)->symlink_size, d, 32, 16);\
+}
+
+#define SQUASHFS_SWAP_REG_INODE_HEADER_2(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
+ sizeof(struct squashfs_reg_inode_header_2));\
+ SQUASHFS_SWAP((s)->mtime, d, 32, 32);\
+ SQUASHFS_SWAP((s)->start_block, d, 64, 32);\
+ SQUASHFS_SWAP((s)->fragment, d, 96, 32);\
+ SQUASHFS_SWAP((s)->offset, d, 128, 32);\
+ SQUASHFS_SWAP((s)->file_size, d, 160, 32);\
+}
+
+#define SQUASHFS_SWAP_DIR_INODE_HEADER_2(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
+ sizeof(struct squashfs_dir_inode_header_2));\
+ SQUASHFS_SWAP((s)->file_size, d, 32, 19);\
+ SQUASHFS_SWAP((s)->offset, d, 51, 13);\
+ SQUASHFS_SWAP((s)->mtime, d, 64, 32);\
+ SQUASHFS_SWAP((s)->start_block, d, 96, 24);\
+}
+
+#define SQUASHFS_SWAP_LDIR_INODE_HEADER_2(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
+ sizeof(struct squashfs_ldir_inode_header_2));\
+ SQUASHFS_SWAP((s)->file_size, d, 32, 27);\
+ SQUASHFS_SWAP((s)->offset, d, 59, 13);\
+ SQUASHFS_SWAP((s)->mtime, d, 72, 32);\
+ SQUASHFS_SWAP((s)->start_block, d, 104, 24);\
+ SQUASHFS_SWAP((s)->i_count, d, 128, 16);\
+}
+
+#define SQUASHFS_SWAP_DIR_INDEX_2(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_index_2));\
+ SQUASHFS_SWAP((s)->index, d, 0, 27);\
+ SQUASHFS_SWAP((s)->start_block, d, 27, 29);\
+ SQUASHFS_SWAP((s)->size, d, 56, 8);\
+}
+
+#define SQUASHFS_SWAP_DIR_HEADER_2(s, d) {\

+ SQUASHFS_SWAP_START\
+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_header_2));\
+ SQUASHFS_SWAP((s)->count, d, 0, 8);\
+ SQUASHFS_SWAP((s)->start_block, d, 8, 24);\
+}
+
+#define SQUASHFS_SWAP_DIR_ENTRY_2(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_entry_2));\
+ SQUASHFS_SWAP((s)->offset, d, 0, 13);\
+ SQUASHFS_SWAP((s)->type, d, 13, 3);\
+ SQUASHFS_SWAP((s)->size, d, 16, 8);\
+}
+
+#define SQUASHFS_SWAP_FRAGMENT_ENTRY_2(s, d) {\
+ SQUASHFS_SWAP_START\
+ SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_fragment_entry_2));\
+ SQUASHFS_SWAP((s)->start_block, d, 0, 32);\
+ SQUASHFS_SWAP((s)->size, d, 32, 32);\
+}
+
+#define SQUASHFS_SWAP_FRAGMENT_INDEXES_2(s, d, n) SQUASHFS_SWAP_INTS_3(s, d, n)
+
+/* fragment and fragment table defines */
+#define SQUASHFS_FRAGMENT_BYTES_2(A) ((A) * sizeof(struct squashfs_fragment_entry_2))
+
+#define SQUASHFS_FRAGMENT_INDEX_2(A) (SQUASHFS_FRAGMENT_BYTES_2(A) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEX_OFFSET_2(A) (SQUASHFS_FRAGMENT_BYTES_2(A) % \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEXES_2(A) ((SQUASHFS_FRAGMENT_BYTES_2(A) + \
+ SQUASHFS_METADATA_SIZE - 1) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEX_BYTES_2(A) (SQUASHFS_FRAGMENT_INDEXES_2(A) *\
+ sizeof(int))
+/*
+ * macros used to swap each structure entry, taking into account
+ * bitfields and different bitfield placing conventions on differing architectures
+ */
+#if __BYTE_ORDER == __BIG_ENDIAN
+ /* convert from little endian to big endian */
+#define SQUASHFS_SWAP(value, p, pos, tbits) _SQUASHFS_SWAP(value, p, pos, tbits, b_pos)
+#else
+ /* convert from big endian to little endian */
+#define SQUASHFS_SWAP(value, p, pos, tbits) _SQUASHFS_SWAP(value, p, pos, tbits, 64 - tbits - b_pos)
+#endif
+
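+/* _SQUASHFS_SWAP copies the bytes covering the (pos, tbits) bitfield into a
+ * 64-bit scratch value in byte-reversed order, then shifts the result down
+ * by an amount that depends on the host byte order (see SQUASHFS_SWAP above)
+ * before assigning it to the destination bitfield. The scratch variables
+ * (bits, b_pos, val, s, d) are presumably declared by SQUASHFS_SWAP_START. */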
+#define _SQUASHFS_SWAP(value, p, pos, tbits, SHIFT) {\
+ b_pos = pos % 8;\
+ val = 0;\
+ s = (unsigned char *)p + (pos / 8);\
+ d = ((unsigned char *) &val) + 7;\
+ for(bits = 0; bits < (tbits + b_pos); bits += 8) \
+ *d-- = *s++;\
+ value = (val >> (SHIFT));\
+}
+#define SQUASHFS_MEMSET(s, d, n) memset(s, 0, n);
+#endif
diff --git a/squashfs-tools/squashfs_fs.h b/squashfs-tools/squashfs_fs.h
new file mode 100644
index 0000000..659f17c
--- /dev/null
+++ b/squashfs-tools/squashfs_fs.h
@@ -0,0 +1,502 @@
+#ifndef SQUASHFS_FS
+#define SQUASHFS_FS
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2012,
+ * 2013, 2014, 2017, 2019, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * squashfs_fs.h
+ */
+
+#define SQUASHFS_CACHED_FRAGMENTS CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE
+#define SQUASHFS_MAJOR 4
+#define SQUASHFS_MINOR 0
+#define SQUASHFS_MAGIC 0x73717368
+#define SQUASHFS_MAGIC_SWAP 0x68737173
+#define SQUASHFS_START 0
+
+/* size of metadata (inode and directory) blocks */
+#define SQUASHFS_METADATA_SIZE 8192
+#define SQUASHFS_METADATA_LOG 13
+
+/* default size of data blocks */
+#define SQUASHFS_FILE_SIZE 131072
+
+#define SQUASHFS_FILE_MAX_SIZE 1048576
+#define SQUASHFS_FILE_MAX_LOG 20
+
+/* Max number of uids and gids */
+#define SQUASHFS_IDS 65536
+
+/* Max length of filename (not 255) */
+#define SQUASHFS_NAME_LEN 256
+
+/* Max value for directory header count */
+#define SQUASHFS_DIR_COUNT 256
+
+/* Max length of a symbolic link */
+#define SQUASHFS_SYMLINK_MAX 65535
+
+#define SQUASHFS_INVALID ((long long) 0xffffffffffff)
+#define SQUASHFS_INVALID_FRAG ((unsigned int) 0xffffffff)
+#define SQUASHFS_INVALID_XATTR ((unsigned int) 0xffffffff)
+#define SQUASHFS_INVALID_BLK ((long long) -1)
+#define SQUASHFS_USED_BLK ((long long) -2)
+
+/* Filesystem flags */
+#define SQUASHFS_NOI 0
+#define SQUASHFS_NOD 1
+#define SQUASHFS_CHECK 2
+#define SQUASHFS_NOF 3
+#define SQUASHFS_NO_FRAG 4
+#define SQUASHFS_ALWAYS_FRAG 5
+#define SQUASHFS_DUPLICATE 6
+#define SQUASHFS_EXPORT 7
+#define SQUASHFS_NOX 8
+#define SQUASHFS_NO_XATTR 9
+#define SQUASHFS_COMP_OPT 10
+#define SQUASHFS_NOID 11
+
+#define SQUASHFS_BIT(flag, bit) ((flag >> bit) & 1)
+
+#define SQUASHFS_UNCOMPRESSED_INODES(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_NOI)
+
+#define SQUASHFS_UNCOMPRESSED_DATA(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_NOD)
+
+#define SQUASHFS_UNCOMPRESSED_FRAGMENTS(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_NOF)
+
+#define SQUASHFS_NO_FRAGMENTS(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_NO_FRAG)
+
+#define SQUASHFS_ALWAYS_FRAGMENTS(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_ALWAYS_FRAG)
+
+#define SQUASHFS_DUPLICATES(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_DUPLICATE)
+
+#define SQUASHFS_EXPORTABLE(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_EXPORT)
+
+#define SQUASHFS_UNCOMPRESSED_XATTRS(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_NOX)
+
+#define SQUASHFS_NO_XATTRS(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_NO_XATTR)
+
+#define SQUASHFS_COMP_OPTS(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_COMP_OPT)
+
+#define SQUASHFS_UNCOMPRESSED_IDS(flags) SQUASHFS_BIT(flags, \
+ SQUASHFS_NOID)
+
+#define SQUASHFS_MKFLAGS(noi, nod, nof, nox, noid, no_frag, always_frag, \
+ duplicate_checking, exportable, no_xattr, comp_opt) (noi | \
+ (nod << 1) | (nof << 3) | (no_frag << 4) | \
+ (always_frag << 5) | (duplicate_checking << 6) | \
+ (exportable << 7) | (nox << 8) | (no_xattr << 9) | \
+ (comp_opt << 10) | (noid << 11))
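+
+/* Note that SQUASHFS_MKFLAGS leaves bit 2 (SQUASHFS_CHECK) clear; every
+ * other argument is shifted to the matching flag bit defined above */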
+
+/* Max number of types and file types */
+#define SQUASHFS_DIR_TYPE 1
+#define SQUASHFS_FILE_TYPE 2
+#define SQUASHFS_SYMLINK_TYPE 3
+#define SQUASHFS_BLKDEV_TYPE 4
+#define SQUASHFS_CHRDEV_TYPE 5
+#define SQUASHFS_FIFO_TYPE 6
+#define SQUASHFS_SOCKET_TYPE 7
+#define SQUASHFS_LDIR_TYPE 8
+#define SQUASHFS_LREG_TYPE 9
+#define SQUASHFS_LSYMLINK_TYPE 10
+#define SQUASHFS_LBLKDEV_TYPE 11
+#define SQUASHFS_LCHRDEV_TYPE 12
+#define SQUASHFS_LFIFO_TYPE 13
+#define SQUASHFS_LSOCKET_TYPE 14
+
+/* Xattr types */
+#define SQUASHFS_XATTR_USER 0
+#define SQUASHFS_XATTR_TRUSTED 1
+#define SQUASHFS_XATTR_SECURITY 2
+#define SQUASHFS_XATTR_VALUE_OOL 256
+#define SQUASHFS_XATTR_PREFIX_MASK 0xff
+
+/* Flag whether block is compressed or uncompressed, bit is set if block is
+ * uncompressed */
+#define SQUASHFS_COMPRESSED_BIT (1 << 15)
+
+#define SQUASHFS_COMPRESSED_SIZE(B) (((B) & ~SQUASHFS_COMPRESSED_BIT) ? \
+ (B) & ~SQUASHFS_COMPRESSED_BIT : SQUASHFS_COMPRESSED_BIT)
+
+#define SQUASHFS_COMPRESSED(B) (!((B) & SQUASHFS_COMPRESSED_BIT))
+
+#define SQUASHFS_COMPRESSED_BIT_BLOCK (1 << 24)
+
+#define SQUASHFS_COMPRESSED_SIZE_BLOCK(B) ((B) & \
+ ~SQUASHFS_COMPRESSED_BIT_BLOCK)
+
+#define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
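+
+/* For example, a metadata length field of 0x8123 has SQUASHFS_COMPRESSED_BIT
+ * set, i.e. the block is stored uncompressed: SQUASHFS_COMPRESSED(0x8123)
+ * is 0 and SQUASHFS_COMPRESSED_SIZE(0x8123) is 0x123 */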
+
+/*
+ * Inode number ops. Inodes consist of a compressed block number, and an
+ * uncompressed offset within that block
+ */
+#define SQUASHFS_INODE_BLK(a) ((unsigned int) ((a) >> 16))
+
+#define SQUASHFS_INODE_OFFSET(a) ((unsigned int) ((a) & 0xffff))
+
+#define SQUASHFS_MKINODE(A, B) ((squashfs_inode)(((squashfs_inode) (A)\
+ << 16) + (B)))
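+
+/* For example, SQUASHFS_MKINODE(0x10, 0x20) packs to 0x100020, and
+ * SQUASHFS_INODE_BLK() / SQUASHFS_INODE_OFFSET() recover 0x10 and 0x20 */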
+
+/* Compute 32 bit VFS inode number from squashfs inode number */
+#define SQUASHFS_MK_VFS_INODE(a, b) ((unsigned int) (((a) << 8) + \
+ ((b) >> 2) + 1))
+
+/* Translate between VFS mode and squashfs mode */
+#define SQUASHFS_MODE(a) ((a) & 0xfff)
+
+/* fragment and fragment table defines */
+#define SQUASHFS_FRAGMENT_BYTES(A) ((A) * \
+ sizeof(struct squashfs_fragment_entry))
+
+#define SQUASHFS_FRAGMENT_INDEX(A) (SQUASHFS_FRAGMENT_BYTES(A) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEX_OFFSET(A) (SQUASHFS_FRAGMENT_BYTES(A) % \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEXES(A) ((SQUASHFS_FRAGMENT_BYTES(A) + \
+ SQUASHFS_METADATA_SIZE - 1) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEX_BYTES(A) (SQUASHFS_FRAGMENT_INDEXES(A) *\
+ sizeof(long long))
+
+/* inode lookup table defines */
+#define SQUASHFS_LOOKUP_BYTES(A) ((A) * sizeof(squashfs_inode))
+
+#define SQUASHFS_LOOKUP_BLOCK(A) (SQUASHFS_LOOKUP_BYTES(A) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCK_OFFSET(A) (SQUASHFS_LOOKUP_BYTES(A) % \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCKS(A) ((SQUASHFS_LOOKUP_BYTES(A) + \
+ SQUASHFS_METADATA_SIZE - 1) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCK_BYTES(A) (SQUASHFS_LOOKUP_BLOCKS(A) *\
+ sizeof(long long))
+
+/* uid lookup table defines */
+#define SQUASHFS_ID_BYTES(A) ((A) * sizeof(unsigned int))
+
+#define SQUASHFS_ID_BLOCK(A) (SQUASHFS_ID_BYTES(A) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_ID_BLOCK_OFFSET(A) (SQUASHFS_ID_BYTES(A) % \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_ID_BLOCKS(A) ((SQUASHFS_ID_BYTES(A) + \
+ SQUASHFS_METADATA_SIZE - 1) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_ID_BLOCK_BYTES(A) (SQUASHFS_ID_BLOCKS(A) *\
+ sizeof(long long))
+
+/* xattr id lookup table defines */
+#define SQUASHFS_XATTR_BYTES(A) (((long long) (A)) * sizeof(struct squashfs_xattr_id))
+
+#define SQUASHFS_XATTR_BLOCK(A) (SQUASHFS_XATTR_BYTES(A) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_XATTR_BLOCK_OFFSET(A) (SQUASHFS_XATTR_BYTES(A) % \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_XATTR_BLOCKS(A) ((SQUASHFS_XATTR_BYTES(A) + \
+ SQUASHFS_METADATA_SIZE - 1) / \
+ SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_XATTR_BLOCK_BYTES(A) (SQUASHFS_XATTR_BLOCKS(A) *\
+ sizeof(long long))
+
+#define SQUASHFS_XATTR_BLK(A) ((unsigned int) ((A) >> 16))
+
+#define SQUASHFS_XATTR_OFFSET(A) ((unsigned int) ((A) & 0xffff))
+
+/* cached data constants for filesystem */
+#define SQUASHFS_CACHED_BLKS 8
+
+#define SQUASHFS_MAX_FILE_SIZE_LOG 64
+
+#define SQUASHFS_MAX_FILE_SIZE ((long long) 1 << \
+ (SQUASHFS_MAX_FILE_SIZE_LOG - 2))
+
+#define SQUASHFS_MARKER_BYTE 0xff
+
+/* meta index cache */
+#define SQUASHFS_META_INDEXES (SQUASHFS_METADATA_SIZE / sizeof(unsigned int))
+#define SQUASHFS_META_ENTRIES 31
+#define SQUASHFS_META_NUMBER 8
+#define SQUASHFS_SLOTS 4
+
+struct meta_entry {
+ long long data_block;
+ unsigned int index_block;
+ unsigned short offset;
+ unsigned short pad;
+};
+
+struct meta_index {
+ unsigned int inode_number;
+ unsigned int offset;
+ unsigned short entries;
+ unsigned short skip;
+ unsigned short locked;
+ unsigned short pad;
+ struct meta_entry meta_entry[SQUASHFS_META_ENTRIES];
+};
+
+
+/*
+ * definitions for structures on disk
+ */
+
+typedef long long squashfs_block;
+typedef long long squashfs_inode;
+
+#define ZLIB_COMPRESSION 1
+#define LZMA_COMPRESSION 2
+#define LZO_COMPRESSION 3
+#define XZ_COMPRESSION 4
+#define LZ4_COMPRESSION 5
+#define ZSTD_COMPRESSION 6
+
+struct squashfs_super_block {
+ unsigned int s_magic;
+ unsigned int inodes;
+ unsigned int mkfs_time /* time of filesystem creation */;
+ unsigned int block_size;
+ unsigned int fragments;
+ unsigned short compression;
+ unsigned short block_log;
+ unsigned short flags;
+ unsigned short no_ids;
+ unsigned short s_major;
+ unsigned short s_minor;
+ squashfs_inode root_inode;
+ long long bytes_used;
+ long long id_table_start;
+ long long xattr_id_table_start;
+ long long inode_table_start;
+ long long directory_table_start;
+ long long fragment_table_start;
+ long long lookup_table_start;
+};
+
+struct squashfs_dir_index {
+ unsigned int index;
+ unsigned int start_block;
+ unsigned int size;
+ unsigned char name[0];
+};
+
+struct squashfs_base_inode_header {
+ unsigned short inode_type;
+ unsigned short mode;
+ unsigned short uid;
+ unsigned short guid;
+ unsigned int mtime;
+ unsigned int inode_number;
+};
+
+struct squashfs_ipc_inode_header {
+ unsigned short inode_type;
+ unsigned short mode;
+ unsigned short uid;
+ unsigned short guid;
+ unsigned int mtime;
+ unsigned int inode_number;
+ unsigned int nlink;
+};
+
+struct squashfs_lipc_inode_header {
+ unsigned short inode_type;
+ unsigned short mode;
+ unsigned short uid;
+ unsigned short guid;
+ unsigned int mtime;
+ unsigned int inode_number;
+ unsigned int nlink;
+ unsigned int xattr;
+};
+
+struct squashfs_dev_inode_header {
+ unsigned short inode_type;
+ unsigned short mode;
+ unsigned short uid;
+ unsigned short guid;
+ unsigned int mtime;
+ unsigned int inode_number;
+ unsigned int nlink;
+ unsigned int rdev;
+};
+
+struct squashfs_ldev_inode_header {
+ unsigned short inode_type;
+ unsigned short mode;
+ unsigned short uid;
+ unsigned short guid;
+ unsigned int mtime;
+ unsigned int inode_number;
+ unsigned int nlink;
+ unsigned int rdev;
+ unsigned int xattr;
+};
+
+struct squashfs_symlink_inode_header {
+ unsigned short inode_type;
+ unsigned short mode;
+ unsigned short uid;
+ unsigned short guid;
+ unsigned int mtime;
+ unsigned int inode_number;
+ unsigned int nlink;
+ unsigned int symlink_size;
+ char symlink[0];
+};
+
+struct squashfs_reg_inode_header {
+ unsigned short inode_type;
+ unsigned short mode;
+ unsigned short uid;
+ unsigned short guid;
+ unsigned int mtime;
+ unsigned int inode_number;
+ unsigned int start_block;
+ unsigned int fragment;
+ unsigned int offset;
+ unsigned int file_size;
+ unsigned int block_list[0];
+};
+
+struct squashfs_lreg_inode_header {
+ unsigned short inode_type;
+ unsigned short mode;
+ unsigned short uid;
+ unsigned short guid;
+ unsigned int mtime;
+ unsigned int inode_number;
+ squashfs_block start_block;
+ long long file_size;
+ long long sparse;
+ unsigned int nlink;
+ unsigned int fragment;
+ unsigned int offset;
+ unsigned int xattr;
+ unsigned int block_list[0];
+};
+
+struct squashfs_dir_inode_header {
+ unsigned short inode_type;
+ unsigned short mode;
+ unsigned short uid;
+ unsigned short guid;
+ unsigned int mtime;
+ unsigned int inode_number;
+ unsigned int start_block;
+ unsigned int nlink;
+ unsigned short file_size;
+ unsigned short offset;
+ unsigned int parent_inode;
+};
+
+struct squashfs_ldir_inode_header {
+ unsigned short inode_type;
+ unsigned short mode;
+ unsigned short uid;
+ unsigned short guid;
+ unsigned int mtime;
+ unsigned int inode_number;
+ unsigned int nlink;
+ unsigned int file_size;
+ unsigned int start_block;
+ unsigned int parent_inode;
+ unsigned short i_count;
+ unsigned short offset;
+ unsigned int xattr;
+ struct squashfs_dir_index index[0];
+};
+
+union squashfs_inode_header {
+ struct squashfs_base_inode_header base;
+ struct squashfs_dev_inode_header dev;
+ struct squashfs_ldev_inode_header ldev;
+ struct squashfs_symlink_inode_header symlink;
+ struct squashfs_reg_inode_header reg;
+ struct squashfs_lreg_inode_header lreg;
+ struct squashfs_dir_inode_header dir;
+ struct squashfs_ldir_inode_header ldir;
+ struct squashfs_ipc_inode_header ipc;
+ struct squashfs_lipc_inode_header lipc;
+};
+
+struct squashfs_dir_entry {
+ unsigned short offset;
+ short inode_number;
+ unsigned short type;
+ unsigned short size;
+ char name[0];
+};
+
+struct squashfs_dir_header {
+ unsigned int count;
+ unsigned int start_block;
+ unsigned int inode_number;
+};
+
+struct squashfs_fragment_entry {
+ long long start_block;
+ unsigned int size;
+ unsigned int unused;
+};
+
+struct squashfs_xattr_entry {
+ unsigned short type;
+ unsigned short size;
+};
+
+struct squashfs_xattr_val {
+ unsigned int vsize;
+};
+
+struct squashfs_xattr_id {
+ long long xattr;
+ unsigned int count;
+ unsigned int size;
+};
+
+struct squashfs_xattr_table {
+ long long xattr_table_start;
+ unsigned int xattr_ids;
+ unsigned int unused;
+};
+
+#endif
diff --git a/squashfs-tools/squashfs_swap.h b/squashfs-tools/squashfs_swap.h
new file mode 100644
index 0000000..edaf029
--- /dev/null
+++ b/squashfs-tools/squashfs_swap.h
@@ -0,0 +1,425 @@
+#ifndef SQUASHFS_SWAP_H
+#define SQUASHFS_SWAP_H
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2008, 2009, 2010, 2013, 2019, 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * squashfs_swap.h
+ */
+
+/*
+ * macros to convert each structure from big endian to little endian
+ */
+
+#include "endian_compat.h"
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+#include <stddef.h>
+extern void swap_le16(void *, void *);
+extern void swap_le32(void *, void *);
+extern void swap_le64(void *, void *);
+extern void swap_le16_num(void *, void *, int);
+extern void swap_le32_num(void *, void *, int);
+extern void swap_le64_num(void *, void *, int);
+extern unsigned short inswap_le16(unsigned short);
+extern unsigned int inswap_le32(unsigned int);
+extern long long inswap_le64(long long);
+extern void inswap_le16_num(unsigned short *, int);
+extern void inswap_le32_num(unsigned int *, int);
+extern void inswap_le64_num(long long *, int);
+
+#define _SQUASHFS_SWAP_SUPER_BLOCK(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(32, s, d, s_magic, struct squashfs_super_block);\
+ SWAP_FUNC(32, s, d, inodes, struct squashfs_super_block);\
+ SWAP_FUNC(32, s, d, mkfs_time, struct squashfs_super_block);\
+ SWAP_FUNC(32, s, d, block_size, struct squashfs_super_block);\
+ SWAP_FUNC(32, s, d, fragments, struct squashfs_super_block);\
+ SWAP_FUNC(16, s, d, compression, struct squashfs_super_block);\
+ SWAP_FUNC(16, s, d, block_log, struct squashfs_super_block);\
+ SWAP_FUNC(16, s, d, flags, struct squashfs_super_block);\
+ SWAP_FUNC(16, s, d, no_ids, struct squashfs_super_block);\
+ SWAP_FUNC(16, s, d, s_major, struct squashfs_super_block);\
+ SWAP_FUNC(16, s, d, s_minor, struct squashfs_super_block);\
+ SWAP_FUNC(64, s, d, root_inode, struct squashfs_super_block);\
+ SWAP_FUNC(64, s, d, bytes_used, struct squashfs_super_block);\
+ SWAP_FUNC(64, s, d, id_table_start, struct squashfs_super_block);\
+ SWAP_FUNC(64, s, d, xattr_id_table_start, struct squashfs_super_block);\
+ SWAP_FUNC(64, s, d, inode_table_start, struct squashfs_super_block);\
+ SWAP_FUNC(64, s, d, directory_table_start, struct squashfs_super_block);\
+ SWAP_FUNC(64, s, d, fragment_table_start, struct squashfs_super_block);\
+ SWAP_FUNC(64, s, d, lookup_table_start, struct squashfs_super_block);\
+}
+
+#define _SQUASHFS_SWAP_DIR_INDEX(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(32, s, d, index, struct squashfs_dir_index);\
+ SWAP_FUNC(32, s, d, start_block, struct squashfs_dir_index);\
+ SWAP_FUNC(32, s, d, size, struct squashfs_dir_index);\
+}
+
+#define _SQUASHFS_SWAP_BASE_INODE_HEADER(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(16, s, d, inode_type, struct squashfs_base_inode_header);\
+ SWAP_FUNC(16, s, d, mode, struct squashfs_base_inode_header);\
+ SWAP_FUNC(16, s, d, uid, struct squashfs_base_inode_header);\
+ SWAP_FUNC(16, s, d, guid, struct squashfs_base_inode_header);\
+ SWAP_FUNC(32, s, d, mtime, struct squashfs_base_inode_header);\
+ SWAP_FUNC(32, s, d, inode_number, struct squashfs_base_inode_header);\
+}
+
+#define _SQUASHFS_SWAP_IPC_INODE_HEADER(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(16, s, d, inode_type, struct squashfs_ipc_inode_header);\
+ SWAP_FUNC(16, s, d, mode, struct squashfs_ipc_inode_header);\
+ SWAP_FUNC(16, s, d, uid, struct squashfs_ipc_inode_header);\
+ SWAP_FUNC(16, s, d, guid, struct squashfs_ipc_inode_header);\
+ SWAP_FUNC(32, s, d, mtime, struct squashfs_ipc_inode_header);\
+ SWAP_FUNC(32, s, d, inode_number, struct squashfs_ipc_inode_header);\
+ SWAP_FUNC(32, s, d, nlink, struct squashfs_ipc_inode_header);\
+}
+
+#define _SQUASHFS_SWAP_LIPC_INODE_HEADER(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(16, s, d, inode_type, struct squashfs_lipc_inode_header);\
+ SWAP_FUNC(16, s, d, mode, struct squashfs_lipc_inode_header);\
+ SWAP_FUNC(16, s, d, uid, struct squashfs_lipc_inode_header);\
+ SWAP_FUNC(16, s, d, guid, struct squashfs_lipc_inode_header);\
+ SWAP_FUNC(32, s, d, mtime, struct squashfs_lipc_inode_header);\
+ SWAP_FUNC(32, s, d, inode_number, struct squashfs_lipc_inode_header);\
+ SWAP_FUNC(32, s, d, nlink, struct squashfs_lipc_inode_header);\
+ SWAP_FUNC(32, s, d, xattr, struct squashfs_lipc_inode_header);\
+}
+
+#define _SQUASHFS_SWAP_DEV_INODE_HEADER(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(16, s, d, inode_type, struct squashfs_dev_inode_header);\
+ SWAP_FUNC(16, s, d, mode, struct squashfs_dev_inode_header);\
+ SWAP_FUNC(16, s, d, uid, struct squashfs_dev_inode_header);\
+ SWAP_FUNC(16, s, d, guid, struct squashfs_dev_inode_header);\
+ SWAP_FUNC(32, s, d, mtime, struct squashfs_dev_inode_header);\
+ SWAP_FUNC(32, s, d, inode_number, struct squashfs_dev_inode_header);\
+ SWAP_FUNC(32, s, d, nlink, struct squashfs_dev_inode_header);\
+ SWAP_FUNC(32, s, d, rdev, struct squashfs_dev_inode_header);\
+}
+
+#define _SQUASHFS_SWAP_LDEV_INODE_HEADER(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(16, s, d, inode_type, struct squashfs_ldev_inode_header);\
+ SWAP_FUNC(16, s, d, mode, struct squashfs_ldev_inode_header);\
+ SWAP_FUNC(16, s, d, uid, struct squashfs_ldev_inode_header);\
+ SWAP_FUNC(16, s, d, guid, struct squashfs_ldev_inode_header);\
+ SWAP_FUNC(32, s, d, mtime, struct squashfs_ldev_inode_header);\
+ SWAP_FUNC(32, s, d, inode_number, struct squashfs_ldev_inode_header);\
+ SWAP_FUNC(32, s, d, nlink, struct squashfs_ldev_inode_header);\
+ SWAP_FUNC(32, s, d, rdev, struct squashfs_ldev_inode_header);\
+ SWAP_FUNC(32, s, d, xattr, struct squashfs_ldev_inode_header);\
+}
+
+#define _SQUASHFS_SWAP_SYMLINK_INODE_HEADER(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(16, s, d, inode_type, struct squashfs_symlink_inode_header);\
+ SWAP_FUNC(16, s, d, mode, struct squashfs_symlink_inode_header);\
+ SWAP_FUNC(16, s, d, uid, struct squashfs_symlink_inode_header);\
+ SWAP_FUNC(16, s, d, guid, struct squashfs_symlink_inode_header);\
+ SWAP_FUNC(32, s, d, mtime, struct squashfs_symlink_inode_header);\
+ SWAP_FUNC(32, s, d, inode_number, struct squashfs_symlink_inode_header);\
+ SWAP_FUNC(32, s, d, nlink, struct squashfs_symlink_inode_header);\
+ SWAP_FUNC(32, s, d, symlink_size, struct squashfs_symlink_inode_header);\
+}
+
+#define _SQUASHFS_SWAP_REG_INODE_HEADER(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(16, s, d, inode_type, struct squashfs_reg_inode_header);\
+ SWAP_FUNC(16, s, d, mode, struct squashfs_reg_inode_header);\
+ SWAP_FUNC(16, s, d, uid, struct squashfs_reg_inode_header);\
+ SWAP_FUNC(16, s, d, guid, struct squashfs_reg_inode_header);\
+ SWAP_FUNC(32, s, d, mtime, struct squashfs_reg_inode_header);\
+ SWAP_FUNC(32, s, d, inode_number, struct squashfs_reg_inode_header);\
+ SWAP_FUNC(32, s, d, start_block, struct squashfs_reg_inode_header);\
+ SWAP_FUNC(32, s, d, fragment, struct squashfs_reg_inode_header);\
+ SWAP_FUNC(32, s, d, offset, struct squashfs_reg_inode_header);\
+ SWAP_FUNC(32, s, d, file_size, struct squashfs_reg_inode_header);\
+}
+
+#define _SQUASHFS_SWAP_LREG_INODE_HEADER(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(16, s, d, inode_type, struct squashfs_lreg_inode_header);\
+ SWAP_FUNC(16, s, d, mode, struct squashfs_lreg_inode_header);\
+ SWAP_FUNC(16, s, d, uid, struct squashfs_lreg_inode_header);\
+ SWAP_FUNC(16, s, d, guid, struct squashfs_lreg_inode_header);\
+ SWAP_FUNC(32, s, d, mtime, struct squashfs_lreg_inode_header);\
+ SWAP_FUNC(32, s, d, inode_number, struct squashfs_lreg_inode_header);\
+ SWAP_FUNC(64, s, d, start_block, struct squashfs_lreg_inode_header);\
+ SWAP_FUNC(64, s, d, file_size, struct squashfs_lreg_inode_header);\
+ SWAP_FUNC(64, s, d, sparse, struct squashfs_lreg_inode_header);\
+ SWAP_FUNC(32, s, d, nlink, struct squashfs_lreg_inode_header);\
+ SWAP_FUNC(32, s, d, fragment, struct squashfs_lreg_inode_header);\
+ SWAP_FUNC(32, s, d, offset, struct squashfs_lreg_inode_header);\
+ SWAP_FUNC(32, s, d, xattr, struct squashfs_lreg_inode_header);\
+}
+
+#define _SQUASHFS_SWAP_DIR_INODE_HEADER(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(16, s, d, inode_type, struct squashfs_dir_inode_header);\
+ SWAP_FUNC(16, s, d, mode, struct squashfs_dir_inode_header);\
+ SWAP_FUNC(16, s, d, uid, struct squashfs_dir_inode_header);\
+ SWAP_FUNC(16, s, d, guid, struct squashfs_dir_inode_header);\
+ SWAP_FUNC(32, s, d, mtime, struct squashfs_dir_inode_header);\
+ SWAP_FUNC(32, s, d, inode_number, struct squashfs_dir_inode_header);\
+ SWAP_FUNC(32, s, d, start_block, struct squashfs_dir_inode_header);\
+ SWAP_FUNC(32, s, d, nlink, struct squashfs_dir_inode_header);\
+ SWAP_FUNC(16, s, d, file_size, struct squashfs_dir_inode_header);\
+ SWAP_FUNC(16, s, d, offset, struct squashfs_dir_inode_header);\
+ SWAP_FUNC(32, s, d, parent_inode, struct squashfs_dir_inode_header);\
+}
+
+#define _SQUASHFS_SWAP_LDIR_INODE_HEADER(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(16, s, d, inode_type, struct squashfs_ldir_inode_header);\
+ SWAP_FUNC(16, s, d, mode, struct squashfs_ldir_inode_header);\
+ SWAP_FUNC(16, s, d, uid, struct squashfs_ldir_inode_header);\
+ SWAP_FUNC(16, s, d, guid, struct squashfs_ldir_inode_header);\
+ SWAP_FUNC(32, s, d, mtime, struct squashfs_ldir_inode_header);\
+ SWAP_FUNC(32, s, d, inode_number, struct squashfs_ldir_inode_header);\
+ SWAP_FUNC(32, s, d, nlink, struct squashfs_ldir_inode_header);\
+ SWAP_FUNC(32, s, d, file_size, struct squashfs_ldir_inode_header);\
+ SWAP_FUNC(32, s, d, start_block, struct squashfs_ldir_inode_header);\
+ SWAP_FUNC(32, s, d, parent_inode, struct squashfs_ldir_inode_header);\
+ SWAP_FUNC(16, s, d, i_count, struct squashfs_ldir_inode_header);\
+ SWAP_FUNC(16, s, d, offset, struct squashfs_ldir_inode_header);\
+ SWAP_FUNC(32, s, d, xattr, struct squashfs_ldir_inode_header);\
+}
+
+#define _SQUASHFS_SWAP_DIR_ENTRY(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(16, s, d, offset, struct squashfs_dir_entry);\
+ SWAP_FUNC##S(16, s, d, inode_number, struct squashfs_dir_entry);\
+ SWAP_FUNC(16, s, d, type, struct squashfs_dir_entry);\
+ SWAP_FUNC(16, s, d, size, struct squashfs_dir_entry);\
+}
+
+#define _SQUASHFS_SWAP_DIR_HEADER(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(32, s, d, count, struct squashfs_dir_header);\
+ SWAP_FUNC(32, s, d, start_block, struct squashfs_dir_header);\
+ SWAP_FUNC(32, s, d, inode_number, struct squashfs_dir_header);\
+}
+
+#define _SQUASHFS_SWAP_FRAGMENT_ENTRY(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(64, s, d, start_block, struct squashfs_fragment_entry);\
+ SWAP_FUNC(32, s, d, size, struct squashfs_fragment_entry);\
+}
+
+#define _SQUASHFS_SWAP_XATTR_ENTRY(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(16, s, d, type, struct squashfs_xattr_entry);\
+ SWAP_FUNC(16, s, d, size, struct squashfs_xattr_entry);\
+}
+
+#define _SQUASHFS_SWAP_XATTR_VAL(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(32, s, d, vsize, struct squashfs_xattr_val);\
+}
+
+#define _SQUASHFS_SWAP_XATTR_ID(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(64, s, d, xattr, struct squashfs_xattr_id);\
+ SWAP_FUNC(32, s, d, count, struct squashfs_xattr_id);\
+ SWAP_FUNC(32, s, d, size, struct squashfs_xattr_id);\
+}
+
+#define _SQUASHFS_SWAP_XATTR_TABLE(s, d, SWAP_FUNC) {\
+ SWAP_FUNC(64, s, d, xattr_table_start, struct squashfs_xattr_table);\
+ SWAP_FUNC(32, s, d, xattr_ids, struct squashfs_xattr_table);\
+}
+
+/* big endian architecture copy and swap macros */
+#define SQUASHFS_SWAP_SUPER_BLOCK(s, d) \
+ _SQUASHFS_SWAP_SUPER_BLOCK(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_DIR_INDEX(s, d) \
+ _SQUASHFS_SWAP_DIR_INDEX(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_BASE_INODE_HEADER(s, d) \
+ _SQUASHFS_SWAP_BASE_INODE_HEADER(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_IPC_INODE_HEADER(s, d) \
+ _SQUASHFS_SWAP_IPC_INODE_HEADER(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_LIPC_INODE_HEADER(s, d) \
+ _SQUASHFS_SWAP_LIPC_INODE_HEADER(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_DEV_INODE_HEADER(s, d) \
+ _SQUASHFS_SWAP_DEV_INODE_HEADER(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_LDEV_INODE_HEADER(s, d) \
+ _SQUASHFS_SWAP_LDEV_INODE_HEADER(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER(s, d) \
+ _SQUASHFS_SWAP_SYMLINK_INODE_HEADER(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_REG_INODE_HEADER(s, d) \
+ _SQUASHFS_SWAP_REG_INODE_HEADER(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_LREG_INODE_HEADER(s, d) \
+ _SQUASHFS_SWAP_LREG_INODE_HEADER(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_DIR_INODE_HEADER(s, d) \
+ _SQUASHFS_SWAP_DIR_INODE_HEADER(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_LDIR_INODE_HEADER(s, d) \
+ _SQUASHFS_SWAP_LDIR_INODE_HEADER(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_DIR_ENTRY(s, d) \
+ _SQUASHFS_SWAP_DIR_ENTRY(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_DIR_HEADER(s, d) \
+ _SQUASHFS_SWAP_DIR_HEADER(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_FRAGMENT_ENTRY(s, d) \
+ _SQUASHFS_SWAP_FRAGMENT_ENTRY(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_XATTR_ENTRY(s, d) \
+ _SQUASHFS_SWAP_XATTR_ENTRY(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_XATTR_VAL(s, d) \
+ _SQUASHFS_SWAP_XATTR_VAL(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_XATTR_ID(s, d) \
+ _SQUASHFS_SWAP_XATTR_ID(s, d, SWAP_LE)
+#define SQUASHFS_SWAP_XATTR_TABLE(s, d) \
+ _SQUASHFS_SWAP_XATTR_TABLE(s, d, SWAP_LE)
+#define SWAP_LE(bits, s, d, field, type) \
+ SWAP_LE##bits(((void *)(s)) + offsetof(type, field), \
+ ((void *)(d)) + offsetof(type, field))
+#define SWAP_LES(bits, s, d, field, type) \
+ SWAP_LE(bits, s, d, field, type)
+#define SQUASHFS_SWAP_INODE_T(s, d) SQUASHFS_SWAP_LONG_LONGS(s, d, 1)
+#define SQUASHFS_SWAP_FRAGMENT_INDEXES(s, d, n) \
+ SQUASHFS_SWAP_LONG_LONGS(s, d, n)
+#define SQUASHFS_SWAP_LOOKUP_BLOCKS(s, d, n) SQUASHFS_SWAP_LONG_LONGS(s, d, n)
+#define SQUASHFS_SWAP_ID_BLOCKS(s, d, n) SQUASHFS_SWAP_LONG_LONGS(s, d, n)
+
+#define SQUASHFS_SWAP_SHORTS(s, d, n) swap_le16_num(s, d, n)
+#define SQUASHFS_SWAP_INTS(s, d, n) swap_le32_num(s, d, n)
+#define SQUASHFS_SWAP_LONG_LONGS(s, d, n) swap_le64_num(s, d, n)
+
+#define SWAP_LE16(s, d) swap_le16(s, d)
+#define SWAP_LE32(s, d) swap_le32(s, d)
+#define SWAP_LE64(s, d) swap_le64(s, d)
+
+/* big endian architecture swap in-place macros */
+#define SQUASHFS_INSWAP_SUPER_BLOCK(s) \
+ _SQUASHFS_SWAP_SUPER_BLOCK(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_DIR_INDEX(s) \
+ _SQUASHFS_SWAP_DIR_INDEX(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_BASE_INODE_HEADER(s) \
+ _SQUASHFS_SWAP_BASE_INODE_HEADER(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_IPC_INODE_HEADER(s) \
+ _SQUASHFS_SWAP_IPC_INODE_HEADER(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_LIPC_INODE_HEADER(s) \
+ _SQUASHFS_SWAP_LIPC_INODE_HEADER(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_DEV_INODE_HEADER(s) \
+ _SQUASHFS_SWAP_DEV_INODE_HEADER(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_LDEV_INODE_HEADER(s) \
+ _SQUASHFS_SWAP_LDEV_INODE_HEADER(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_SYMLINK_INODE_HEADER(s) \
+ _SQUASHFS_SWAP_SYMLINK_INODE_HEADER(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_REG_INODE_HEADER(s) \
+ _SQUASHFS_SWAP_REG_INODE_HEADER(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_LREG_INODE_HEADER(s) \
+ _SQUASHFS_SWAP_LREG_INODE_HEADER(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_DIR_INODE_HEADER(s) \
+ _SQUASHFS_SWAP_DIR_INODE_HEADER(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_LDIR_INODE_HEADER(s) \
+ _SQUASHFS_SWAP_LDIR_INODE_HEADER(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_DIR_ENTRY(s) \
+ _SQUASHFS_SWAP_DIR_ENTRY(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_DIR_HEADER(s) \
+ _SQUASHFS_SWAP_DIR_HEADER(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_FRAGMENT_ENTRY(s) \
+ _SQUASHFS_SWAP_FRAGMENT_ENTRY(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_XATTR_ENTRY(s) \
+ _SQUASHFS_SWAP_XATTR_ENTRY(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_XATTR_VAL(s) \
+ _SQUASHFS_SWAP_XATTR_VAL(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_XATTR_ID(s) \
+ _SQUASHFS_SWAP_XATTR_ID(s, s, INSWAP_LE)
+#define SQUASHFS_INSWAP_XATTR_TABLE(s) \
+ _SQUASHFS_SWAP_XATTR_TABLE(s, s, INSWAP_LE)
+#define INSWAP_LE(bits, s, d, field, type) \
+ (s)->field = inswap_le##bits((s)->field)
+#define INSWAP_LES(bits, s, d, field, type) \
+ (s)->field = (short) inswap_le##bits((unsigned short) \
+ (s)->field)
+#define SQUASHFS_INSWAP_INODE_T(s) s = inswap_le64(s)
+#define SQUASHFS_INSWAP_FRAGMENT_INDEXES(s, n) inswap_le64_num(s, n)
+#define SQUASHFS_INSWAP_LOOKUP_BLOCKS(s, n) inswap_le64_num(s, n)
+#define SQUASHFS_INSWAP_ID_BLOCKS(s, n) inswap_le64_num(s, n)
+#define SQUASHFS_INSWAP_SHORTS(s, n) inswap_le16_num(s, n)
+#define SQUASHFS_INSWAP_INTS(s, n) inswap_le32_num(s, n)
+#define SQUASHFS_INSWAP_LONG_LONGS(s, n) inswap_le64_num(s, n)
+#else
+/* little endian architecture, just copy */
+#define SQUASHFS_SWAP_SUPER_BLOCK(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_super_block))
+#define SQUASHFS_SWAP_DIR_INDEX(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_dir_index))
+#define SQUASHFS_SWAP_BASE_INODE_HEADER(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_base_inode_header))
+#define SQUASHFS_SWAP_IPC_INODE_HEADER(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_ipc_inode_header))
+#define SQUASHFS_SWAP_LIPC_INODE_HEADER(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_lipc_inode_header))
+#define SQUASHFS_SWAP_DEV_INODE_HEADER(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_dev_inode_header))
+#define SQUASHFS_SWAP_LDEV_INODE_HEADER(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_ldev_inode_header))
+#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_symlink_inode_header))
+#define SQUASHFS_SWAP_REG_INODE_HEADER(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_reg_inode_header))
+#define SQUASHFS_SWAP_LREG_INODE_HEADER(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_lreg_inode_header))
+#define SQUASHFS_SWAP_DIR_INODE_HEADER(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_dir_inode_header))
+#define SQUASHFS_SWAP_LDIR_INODE_HEADER(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_ldir_inode_header))
+#define SQUASHFS_SWAP_DIR_ENTRY(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_dir_entry))
+#define SQUASHFS_SWAP_DIR_HEADER(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_dir_header))
+#define SQUASHFS_SWAP_FRAGMENT_ENTRY(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_fragment_entry))
+#define SQUASHFS_SWAP_XATTR_ENTRY(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_xattr_entry))
+#define SQUASHFS_SWAP_XATTR_VAL(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_xattr_val))
+#define SQUASHFS_SWAP_XATTR_ID(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_xattr_id))
+#define SQUASHFS_SWAP_XATTR_TABLE(s, d) \
+ SQUASHFS_MEMCPY(s, d, sizeof(struct squashfs_xattr_table))
+#define SQUASHFS_SWAP_INODE_T(s, d) SQUASHFS_SWAP_LONG_LONGS(s, d, 1)
+#define SQUASHFS_SWAP_FRAGMENT_INDEXES(s, d, n) \
+ SQUASHFS_SWAP_LONG_LONGS(s, d, n)
+#define SQUASHFS_SWAP_LOOKUP_BLOCKS(s, d, n) SQUASHFS_SWAP_LONG_LONGS(s, d, n)
+#define SQUASHFS_SWAP_ID_BLOCKS(s, d, n) SQUASHFS_SWAP_LONG_LONGS(s, d, n)
+
+#define SQUASHFS_MEMCPY(s, d, n) memcpy(d, s, n)
+#define SQUASHFS_SWAP_SHORTS(s, d, n) memcpy(d, s, n * sizeof(short))
+#define SQUASHFS_SWAP_INTS(s, d, n) memcpy(d, s, n * sizeof(int))
+#define SQUASHFS_SWAP_LONG_LONGS(s, d, n) \
+ memcpy(d, s, n * sizeof(long long))
+
+/* little endian architecture, data already in place so do nothing */
+#define SQUASHFS_INSWAP_SUPER_BLOCK(s)
+#define SQUASHFS_INSWAP_DIR_INDEX(s)
+#define SQUASHFS_INSWAP_BASE_INODE_HEADER(s)
+#define SQUASHFS_INSWAP_IPC_INODE_HEADER(s)
+#define SQUASHFS_INSWAP_LIPC_INODE_HEADER(s)
+#define SQUASHFS_INSWAP_DEV_INODE_HEADER(s)
+#define SQUASHFS_INSWAP_LDEV_INODE_HEADER(s)
+#define SQUASHFS_INSWAP_SYMLINK_INODE_HEADER(s)
+#define SQUASHFS_INSWAP_REG_INODE_HEADER(s)
+#define SQUASHFS_INSWAP_LREG_INODE_HEADER(s)
+#define SQUASHFS_INSWAP_DIR_INODE_HEADER(s)
+#define SQUASHFS_INSWAP_LDIR_INODE_HEADER(s)
+#define SQUASHFS_INSWAP_DIR_ENTRY(s)
+#define SQUASHFS_INSWAP_DIR_HEADER(s)
+#define SQUASHFS_INSWAP_FRAGMENT_ENTRY(s)
+#define SQUASHFS_INSWAP_XATTR_ENTRY(s)
+#define SQUASHFS_INSWAP_XATTR_VAL(s)
+#define SQUASHFS_INSWAP_XATTR_ID(s)
+#define SQUASHFS_INSWAP_XATTR_TABLE(s)
+#define SQUASHFS_INSWAP_INODE_T(s)
+#define SQUASHFS_INSWAP_FRAGMENT_INDEXES(s, n)
+#define SQUASHFS_INSWAP_LOOKUP_BLOCKS(s, n)
+#define SQUASHFS_INSWAP_ID_BLOCKS(s, n)
+#define SQUASHFS_INSWAP_SHORTS(s, n)
+#define SQUASHFS_INSWAP_INTS(s, n)
+#define SQUASHFS_INSWAP_LONG_LONGS(s, n)
+#endif
+#endif
diff --git a/squashfs-tools/swap.c b/squashfs-tools/swap.c
new file mode 100644
index 0000000..eb0f326
--- /dev/null
+++ b/squashfs-tools/swap.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2009, 2010, 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * swap.c
+ */
+
+#include "endian_compat.h"
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+void swap_le16(void *src, void *dest)
+{
+ unsigned char *s = src;
+ unsigned char *d = dest;
+
+ d[0] = s[1];
+ d[1] = s[0];
+}
+
+
+void swap_le32(void *src, void *dest)
+{
+ unsigned char *s = src;
+ unsigned char *d = dest;
+
+ d[0] = s[3];
+ d[1] = s[2];
+ d[2] = s[1];
+ d[3] = s[0];
+}
+
+
+void swap_le64(void *src, void *dest)
+{
+ unsigned char *s = src;
+ unsigned char *d = dest;
+
+ d[0] = s[7];
+ d[1] = s[6];
+ d[2] = s[5];
+ d[3] = s[4];
+ d[4] = s[3];
+ d[5] = s[2];
+ d[6] = s[1];
+ d[7] = s[0];
+}
+
+
+unsigned short inswap_le16(unsigned short num)
+{
+ return (num >> 8) |
+ ((num & 0xff) << 8);
+}
+
+
+unsigned int inswap_le32(unsigned int num)
+{
+ return (num >> 24) |
+ ((num & 0xff0000) >> 8) |
+ ((num & 0xff00) << 8) |
+ ((num & 0xff) << 24);
+}
+
+
+long long inswap_le64(long long n)
+{
+ unsigned long long num = n;
+
+ return (num >> 56) |
+ ((num & 0xff000000000000LL) >> 40) |
+ ((num & 0xff0000000000LL) >> 24) |
+ ((num & 0xff00000000LL) >> 8) |
+ ((num & 0xff000000) << 8) |
+ ((num & 0xff0000) << 24) |
+ ((num & 0xff00) << 40) |
+ ((num & 0xff) << 56);
+}
+
+
+#define SWAP_LE_NUM(BITS) \
+void swap_le##BITS##_num(void *s, void *d, int n) \
+{\
+ int i;\
+ for(i = 0; i < n; i++, s += BITS / 8, d += BITS / 8)\
+ swap_le##BITS(s, d);\
+}
+
+SWAP_LE_NUM(16)
+SWAP_LE_NUM(32)
+SWAP_LE_NUM(64)
+
+#define INSWAP_LE_NUM(BITS, TYPE) \
+void inswap_le##BITS##_num(TYPE *s, int n) \
+{\
+ int i;\
+ for(i = 0; i < n; i++)\
+ s[i] = inswap_le##BITS(s[i]);\
+}
+
+INSWAP_LE_NUM(16, unsigned short)
+INSWAP_LE_NUM(32, unsigned int)
+INSWAP_LE_NUM(64, long long)
+#endif
diff --git a/squashfs-tools/tar.c b/squashfs-tools/tar.c
new file mode 100644
index 0000000..b02b797
--- /dev/null
+++ b/squashfs-tools/tar.c
@@ -0,0 +1,1682 @@
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * tar.c
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <dirent.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <pwd.h>
+#include <grp.h>
+#include <time.h>
+#include <regex.h>
+
+#include "squashfs_fs.h"
+#include "mksquashfs.h"
+#include "caches-queues-lists.h"
+#include "mksquashfs_error.h"
+#include "xattr.h"
+#include "tar.h"
+#include "progressbar.h"
+#include "info.h"
+
+#define TRUE 1
+#define FALSE 0
+
+extern int silent;
+int ignore_zeros = FALSE;
+int default_uid_opt = FALSE;
+unsigned int default_uid;
+int default_gid_opt = FALSE;
+unsigned int default_gid;
+int default_mode_opt = FALSE;
+mode_t default_mode;
+
+static long long read_octal(char *s, int size)
+{
+ long long res = 0;
+
+ for(; size && *s == ' '; s++, size--);
+
+ if(size == 0)
+ return -1;
+
+ for(; size && *s >= '0' && *s < '8'; s++, size--)
+ res = (res << 3) + *s - '0';
+
+ if(size && (*s != ' ' && *s != '\0'))
+ return -1;
+
+ return res;
+}
+
+
+static long long read_binary(char *src, int size)
+{
+ unsigned char *s = (unsigned char *) src;
+ long long res = 0;
+
+ for(; size; s++, size --)
+ res = (res << 8) + *s;
+
+ return res;
+}
+
+
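+/* A leading byte of 0x80 (seen here as the signed value -128) selects the
+ * GNU tar base-256 binary encoding handled by read_binary(); otherwise the
+ * field is parsed as octal. */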
+static long long read_number(char *s, int size)
+{
+ if(*((signed char *) s) == -128)
+ return read_binary(s + 1, size - 1);
+ else
+ return read_octal(s, size);
+}
+
+
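+/* Parse a newline-terminated decimal number; *bytes returns the number of
+ * characters consumed including the terminator. Returns -1 on a malformed
+ * value and -2 if maxsize is exhausted before the newline. */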
+static long long read_decimal(char *s, int maxsize, int *bytes)
+{
+ long long res = 0;
+ int size = maxsize;
+
+ for(; size && *s >= '0' && *s <= '9'; s++, size--)
+ res = (res * 10) + *s - '0';
+
+ /* size should be > 0, and we should be at the terminator */
+ if(size > 0 && *s == '\n') {
+ *bytes = maxsize - size + 1;
+ return res;
+ }
+
+ /* Bad value or out of bytes? */
+ if(size)
+ return -1;
+ else
+ return -2;
+}
+
+
+static char *read_long_string(int size, int skip)
+{
+ char buffer[512];
+ char *name = malloc(size + 1);
+ int i, res, length = size;
+
+ if(name == NULL)
+ MEM_ERROR();
+
+ for(i = 0; size > 0; i++) {
+ int expected = size > 512 ? 512 : size;
+
+ res = read_bytes(STDIN_FILENO, buffer, 512);
+ if(res < 512) {
+ if(res != -1)
+ ERROR("Unexpected EOF (end of file), the tarfile appears to be truncated or corrupted\n");
+ free(name);
+ return NULL;
+ }
+ memcpy(name + i * 512, buffer, expected);
+ size -= 512;
+ }
+
+ name[length] = '\0';
+
+ if(skip) {
+ char *filename = name;
+
+ while(1) {
+ if(length >= 3 && strncmp(filename, "../", 3) == 0) {
+ filename += 3;
+ length -= 3;
+ } else if(length >= 2 && strncmp(filename, "./", 2) == 0) {
+ filename += 2;
+ length -= 2;
+ } else if(length >= 1 && *filename == '/') {
+ filename++;
+ length--;
+ } else
+ break;
+ }
+
+ if(filename != name) {
+ if(length == 0) {
+ ERROR("Empty tar filename after skipping leading /, ./, or ../\n");
+ free(name);
+ return NULL;
+ }
+
+ memmove(name, filename, length + 1);
+ name = realloc(name, length + 1);
+ if(name == NULL)
+ MEM_ERROR();
+ }
+ }
+
+ return name;
+}
+
+
+static int all_zero(struct tar_header *header)
+{
+ int i;
+
+ for(i = 0; i < 512; i++)
+ if(header->udata[i])
+ return FALSE;
+
+ return TRUE;
+}
+
+
+static int checksum_matches(struct tar_header *header, int silent)
+{
+ int checksum = read_number(header->checksum, 8);
+ int computed = 0;
+ int i;
+
+ if(checksum == -1) {
+ if(!silent)
+ ERROR("Failed to read checksum in tar header\n");
+ return FALSE;
+ }
+
+ /* The checksum is computed with the checksum field
+ * filled with spaces */
+ memcpy(header->checksum, " ", 8);
+
+ /* Header bytes should be treated as unsigned */
+ for(i = 0; i < 512; i++)
+ computed += header->udata[i];
+
+ if(computed == checksum)
+ return TRUE;
+
+ /* Some historical implementations treated header bytes as signed */
+ for(computed = 0, i = 0; i < 512; i++)
+ computed += header->sdata[i];
+
+ return computed == checksum;
+}
+
+
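+/* Split the leading path component of target into a newly allocated
+ * *targname, and return a pointer past any trailing '/' separators. */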
+static char *get_component(char *target, char **targname)
+{
+ char *start;
+
+ start = target;
+ while(*target != '/' && *target != '\0')
+ target ++;
+
+ *targname = strndup(start, target - start);
+
+ while(*target == '/')
+ target ++;
+
+ return target;
+}
+
+
+static struct inode_info *new_inode(struct tar_file *tar_file)
+{
+ struct inode_info *inode;
+ int bytes = tar_file->link ? strlen(tar_file->link) + 1 : 0;
+
+ inode = malloc(sizeof(struct inode_info) + bytes);
+ if(inode == NULL)
+ MEM_ERROR();
+
+ if(bytes)
+ memcpy(&inode->symlink, tar_file->link, bytes);
+ memcpy(&inode->buf, &tar_file->buf, sizeof(struct stat));
+ inode->read = FALSE;
+ inode->root_entry = FALSE;
+ inode->tar_file = tar_file;
+ inode->inode = SQUASHFS_INVALID_BLK;
+ inode->nlink = 1;
+ inode->inode_number = 0;
+ inode->pseudo = NULL;
+ inode->dummy_root_dir = FALSE;
+ inode->xattr = NULL;
+ inode->tarfile = TRUE;
+
+ /*
+ * Copy filesystem-wide defaults into the inode; these defaults may
+ * be altered on an individual inode basis by user-specified actions.
+ */
+ inode->no_fragments = no_fragments;
+ inode->always_use_fragments = always_use_fragments;
+ inode->noD = noD;
+ inode->noF = noF;
+
+ inode->next = inode_info[0];
+ inode_info[0] = inode;
+
+ return inode;
+}
+
+
+static struct inode_info *copy_inode(struct inode_info *source)
+{
+ struct inode_info *inode;
+ int bytes = S_ISLNK(source->buf.st_mode) ? strlen(source->symlink) + 1 : 0;
+
+ inode = malloc(sizeof(struct inode_info) + bytes);
+ if(inode == NULL)
+ MEM_ERROR();
+
+ memcpy(inode, source, sizeof(struct inode_info) + bytes);
+
+ return inode;
+}
+
+
+static void fixup_tree(struct dir_info *dir)
+{
+ struct dir_ent *entry;
+
+ for(entry = dir->list; entry; entry = entry->next) {
+ if(entry->dir && entry->inode == NULL) {
+ /* Tar file didn't create this directory, and so it lacks
+ * an inode with metadata. Create a default definition ... */
+ struct stat buf;
+
+ memset(&buf, 0, sizeof(buf));
+ if(default_mode_opt)
+ buf.st_mode = default_mode | S_IFDIR;
+ else
+ buf.st_mode = S_IRWXU | S_IRGRP | S_IXGRP |
+ S_IROTH | S_IXOTH | S_IFDIR;
+ if(default_uid_opt)
+ buf.st_uid = default_uid;
+ else
+ buf.st_uid = getuid();
+ if(default_gid_opt)
+ buf.st_gid = default_gid;
+ else
+ buf.st_gid = getgid();
+ buf.st_mtime = time(NULL);
+ buf.st_dev = 0;
+ buf.st_ino = 0;
+ entry->inode = lookup_inode(&buf);
+ entry->inode->tar_file = NULL;
+ entry->inode->tarfile = TRUE;
+ }
+
+ if(entry->dir == NULL && S_ISDIR(entry->inode->buf.st_mode)) {
+ /* Tar file created this directory, but never created
+ * anything in it. This will leave a NULL sub-directory,
+ * where the scanning code expects to find an empty
+ * directory. Create an empty directory in this case ... */
+ char *subpath = subpathname(entry);
+
+ entry->dir = create_dir("", subpath, dir->depth + 1);
+ entry->dir->dir_ent = entry;
+ }
+
+ if(entry->dir)
+ fixup_tree(entry->dir);
+ }
+}
+
+
+/*
+ * Add source to the tardir directory hierarchy.
+ * Tarfile describes the tar file to be added.
+ */
+static struct dir_info *add_tarfile(struct dir_info *sdir, char *source,
+ char *subpath, struct tar_file *tarfile, struct pathnames *paths,
+ int depth, struct dir_ent **dir_ent, struct inode_info *link)
+{
+ struct dir_info *sub;
+ struct dir_ent *entry;
+ struct pathnames *new = NULL;
+ struct dir_info *dir = sdir;
+ char *name;
+
+ if(dir == NULL)
+ dir = create_dir("", subpath, depth);
+
+ source = get_component(source, &name);
+
+ if((strcmp(name, ".") == 0) || strcmp(name, "..") == 0)
+ BAD_ERROR("Error: Tar pathname can't have '.' or '..' in it\n");
+
+ entry = lookup_name(dir, name);
+
+ if(entry) {
+ /* existing matching entry */
+ if(entry->dir == NULL) {
+ /* No sub-directory which means this is the leaf
+ * component of a pre-existing tarfile */
+ if(source[0] != '\0') {
+ /* existing entry must be a directory */
+ subpath = subpathname(entry);
+ if(S_ISDIR(entry->inode->buf.st_mode)) {
+ /* recurse adding child components */
+ excluded(name, paths, &new);
+ entry->dir = add_tarfile(NULL, source, subpath, tarfile, new, depth + 1, dir_ent, link);
+ if(entry->dir == NULL)
+ goto failed_early;
+ entry->dir->dir_ent = entry;
+ } else
+ BAD_ERROR("%s exists in the tar file as"
+ " a non-directory, cannot add"
+ " tar pathname %s!\n",
+ subpath, tarfile->pathname);
+ } else {
+ ERROR("%s already exists in the tar file, ignoring\n", tarfile->pathname);
+ goto failed_early;
+ }
+ } else {
+ if(source[0] == '\0') {
+ /* sub-directory exists, we must be adding a
+ * directory, and we must not already have a
+ * definition for this directory */
+ if(S_ISDIR(tarfile->buf.st_mode)) {
+ if(entry->inode == NULL)
+ entry->inode = new_inode(tarfile);
+ else {
+ ERROR("%s already exists in the tar file, ignoring!\n", tarfile->pathname);
+ goto failed_early;
+ }
+ } else
+ BAD_ERROR("%s exists in the tar file as"
+ " both a directory and"
+ " non-directory!\n",
+ tarfile->pathname);
+ } else {
+ /* recurse adding child components */
+ excluded(name, paths, &new);
+ subpath = subpathname(entry);
+ sub = add_tarfile(entry->dir, source, subpath, tarfile, new, depth + 1, dir_ent, link);
+ if(sub == NULL)
+ goto failed_early;
+ }
+ }
+
+ free(name);
+ } else {
+ /*
+ * No matching name found.
+ *
+ * - If we're at the leaf of the source, then add it.
+ *
+ * - If we're not at the leaf of the source, we will add it,
+ * and recurse walking the source
+ */
+ if(old_exclude == FALSE && excluded(name, paths, &new))
+ goto failed_early;
+
+ entry = create_dir_entry(name, NULL, NULL, dir);
+
+ if(source[0] == '\0') {
+ if(S_ISDIR(tarfile->buf.st_mode)) {
+ add_dir_entry(entry, NULL, new_inode(tarfile));
+ dir->directory_count ++;
+ } else if (link == FALSE) {
+ add_dir_entry(entry, NULL, new_inode(tarfile));
+ if(S_ISREG(tarfile->buf.st_mode))
+ *dir_ent = entry;
+ } else if(no_hardlinks)
+ add_dir_entry(entry, NULL, copy_inode(link));
+ else
+ add_dir_entry(entry, NULL, link);
+ } else {
+ subpath = subpathname(entry);
+ sub = add_tarfile(NULL, source, subpath, tarfile, new, depth + 1, dir_ent, link);
+ if(sub == NULL)
+ goto failed_entry;
+ add_dir_entry(entry, sub, NULL);
+ dir->directory_count ++;
+ }
+ }
+
+ free(new);
+ return dir;
+
+failed_early:
+ free(new);
+ free(name);
+ if(sdir == NULL)
+ free_dir(dir);
+ return NULL;
+
+failed_entry:
+ free(new);
+ free_dir_entry(entry);
+ if(sdir == NULL)
+ free_dir(dir);
+ return NULL;
+}
+
+
+struct dir_ent *lookup_pathname(struct dir_info *dir, char *pathname)
+{
+ char *name;
+ struct dir_ent *entry;
+
+ pathname = get_component(pathname, &name);
+
+ if((strcmp(name, ".") == 0) || strcmp(name, "..") == 0) {
+ ERROR("Error: Tar hardlink pathname can't have '.' or '..' in it\n");
+ return NULL;
+ }
+
+ entry = lookup_name(dir, name);
+ free(name);
+
+ if(entry == NULL)
+ return NULL;
+
+ if(pathname[0] == '\0')
+ return entry;
+
+ if(entry->dir == NULL)
+ return NULL;
+
+ return lookup_pathname(entry->dir, pathname);
+}
+
+
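+/* A non-empty file ends in a fragment if fragments are enabled and it is
+ * smaller than block_size, or always_use_fragments is set and its size is
+ * not a multiple of block_size. */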
+static inline int is_fragment(long long file_size)
+{
+ return !no_fragments && file_size && (file_size < block_size ||
+ (always_use_fragments && file_size & (block_size - 1)));
+}
+
+
+static void put_file_buffer(struct file_buffer *file_buffer)
+{
+ /*
+ * Decide where to send the file buffer:
+ * - compressible non-fragment blocks go to the deflate threads,
+ * - fragments go to the process fragment threads,
+ */
+ if(file_buffer->fragment)
+ queue_put(to_process_frag, file_buffer);
+ else
+ queue_put(to_deflate, file_buffer);
+}
+
+
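+/* file->map[] holds (offset, number-of-bytes) pairs describing the data
+ * regions of a sparse file (see read_sparse_value() below); the gaps
+ * between those regions read back as zeros in read_sparse_block(). */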
+int sparse_reader(struct tar_file *file, long long cur_offset, char *dest, int bytes, long long *off)
+{
+ static int cur;
+ static long long offset;
+ static long long number;
+ int avail, res;
+
+ if(bytes == 0) {
+ cur = 0;
+ offset = file->map[0].offset;
+ number = file->map[0].number;
+ *off = offset;
+ return 0;
+ }
+
+ if(cur_offset != offset)
+ return -1;
+
+ avail = bytes > number ? number : bytes;
+ res = read_bytes(STDIN_FILENO, dest, avail);
+ if(res != avail)
+ BAD_ERROR("Failed to read tar file %s, the tarfile appears to be truncated or corrupted\n", file->pathname);
+
+ offset += avail;
+ number -= avail;
+
+ if(number == 0) {
+ cur ++;
+ offset = file->map[cur].offset;
+ number = (file->map[cur].number + 511) & ~511;
+ }
+
+ *off = offset;
+ return avail;
+}
+
+
+static int read_sparse_block(struct tar_file *file, int fd, char *dest, int bytes, int block)
+{
+ static long long offset;
+ long long cur_offset = (long long) block * block_size;
+ int avail, copied = bytes;
+
+ if(block == 0)
+ sparse_reader(file, cur_offset, dest, 0, &offset);
+
+ if(offset - cur_offset >= block_size && bytes == block_size) {
+ memset(dest, 0, block_size);
+ return block_size;
+ }
+
+ while(bytes) {
+ if(offset - cur_offset > 0) {
+ avail = offset - cur_offset < bytes ? offset - cur_offset : bytes;
+
+ memset(dest, 0, avail);
+ dest += avail;
+ cur_offset += avail;
+ bytes -= avail;
+ } else if(cur_offset == offset) {
+ avail = sparse_reader(file, cur_offset, dest, bytes, &offset);
+
+ dest += avail;
+ cur_offset += avail;
+ bytes -= avail;
+ } else
+ return -1;
+ }
+
+ return copied;
+}
+
+
+static int read_block(struct tar_file *file, int fd, char *data, int bytes, int block)
+{
+ if(file->map)
+ return read_sparse_block(file, fd, data, bytes, block);
+ else
+ return read_bytes(fd, data, bytes);
+}
+
+
+static void skip_file(struct tar_file *tar_file)
+{
+ int blocks = (tar_file->buf.st_size + block_size - 1) >> block_log, i;
+
+ for(i = 0; i < blocks; i++)
+ cache_block_put(seq_queue_get(to_main));
+
+ progress_bar_size(-blocks);
+}
+
+static void read_tar_data(struct tar_file *tar_file)
+{
+ struct stat *buf = &tar_file->buf;
+ struct file_buffer *file_buffer;
+ int blocks, block = 0;
+ long long bytes, read_size;
+
+ bytes = 0;
+ read_size = buf->st_size;
+ blocks = (read_size + block_size - 1) >> block_log;
+
+ do {
+ file_buffer = cache_get_nohash(reader_buffer);
+ file_buffer->file_size = read_size;
+ file_buffer->tar_file = tar_file;
+ file_buffer->sequence = sequence_count ++;
+ file_buffer->noD = noD;
+ file_buffer->error = FALSE;
+
+ if((block + 1) < blocks) {
+ /* non-tail block should be exactly block_size */
+ file_buffer->size = read_block(tar_file, STDIN_FILENO, file_buffer->data, block_size, block);
+ if(file_buffer->size != block_size)
+ BAD_ERROR("Failed to read tar file %s, the tarfile appears to be truncated or corrupted\n", tar_file->pathname);
+
+ bytes += file_buffer->size;
+
+ file_buffer->fragment = FALSE;
+ put_file_buffer(file_buffer);
+ } else {
+ /* The remaining bytes will be rounded up to 512 bytes */
+ int expected = (read_size + 511 - bytes) & ~511;
+ int size = read_block(tar_file, STDIN_FILENO, file_buffer->data, expected, block);
+
+ if(size != expected)
+ BAD_ERROR("Failed to read tar file %s, the tarfile appears to be truncated or corrupted\n", tar_file->pathname);
+
+ file_buffer->size = read_size - bytes;
+ }
+ } while(++ block < blocks);
+
+ file_buffer->fragment = is_fragment(read_size);
+ put_file_buffer(file_buffer);
+
+ return;
+}
+
+
+static char *skip_components(char *filename, int size, int *sizep)
+{
+ while(1) {
+ if(size >= 3 && strncmp(filename, "../", 3) == 0) {
+ filename += 3;
+ size -= 3;
+ } else if(size >= 2 && strncmp(filename, "./", 2) == 0) {
+ filename += 2;
+ size -= 2;
+ } else if(size >= 1 && *filename == '/') {
+ filename++;
+ size--;
+ } else
+ break;
+ }
+
+ if(sizep)
+ *sizep = size;
+
+ return filename;
+}
+
+
+static int read_sparse_value(struct tar_file *file, char *value, int map_entries)
+{
+ int bytes, res, i = 0;
+ long long number;
+
+ while(1) {
+ res = sscanf(value, "%lld %n", &number, &bytes);
+ if(res < 1 || value[bytes] != ',')
+ goto failed;
+
+ file->map[i].offset = number;
+
+ value += bytes + 1;
+
+ res = sscanf(value, "%lld %n", &number, &bytes);
+ if(res < 1 || (value[bytes] != ',' && value[bytes] != '\0'))
+ goto failed;
+
+ file->map[i++].number = number;
+
+ if(value[bytes] == '\0' || i >= map_entries)
+ break;
+
+ value += bytes + 1;
+ }
+
+ return TRUE;
+
+failed:
+ return FALSE;
+}
+
+
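+/*
+ * Parse a pax extended header (type 'x', 'g' or Solaris 'X').  The header
+ * data is a sequence of "<length> <keyword>=<value>\n" records, stored in
+ * the 512-byte padded blocks following the tar header.
+ */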
+static int read_pax_header(struct tar_file *file, long long st_size)
+{
+ long long size = (st_size + 511) & ~511;
+ char *data, *ptr, *end, *keyword, *value;
+ int res, length, bytes, vsize;
+ long long number;
+ long long major = -1, minor = -1, realsize = -1;
+ int old_gnu_pax = FALSE, old_gnu_ver = -1;
+ int map_entries = 0, cur_entry = 0;
+ char *name = NULL;
+
+ data = malloc(size);
+ if(data == NULL)
+ MEM_ERROR();
+
+ res = read_bytes(STDIN_FILENO, data, size);
+ if(res < size) {
+ if(res != -1)
+ ERROR("Unexpected EOF (end of file), the tarfile appears to be truncated or corrupted\n");
+ free(data);
+ return FALSE;
+ }
+
+ for(ptr = data, end = data + st_size; ptr < end;) {
+ /*
+ * What follows should be <length> <keyword>=<value>,
+ * where <length> is the full length, including the
+ * <length> field and newline
+ */
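+ /* For example, a record may look like
+ * "30 mtime=1350244992.023960108\n" */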
+ res = sscanf(ptr, "%d%n", &length, &bytes);
+ if(res < 1 || length <= bytes || length > st_size)
+ goto failed;
+
+ length -= bytes;
+ ptr += bytes;
+
+ /* Skip whitespace */
+ for(; length && *ptr == ' '; length--, ptr++);
+
+ /* Store and parse keyword */
+ for(keyword = ptr; length && *ptr != '='; length--, ptr++);
+
+ /* length should be 2 or more, given it includes the = and newline */
+ if(length < 2)
+ goto failed;
+
+ /* Terminate the keyword string */
+ *ptr++ = '\0';
+ length --;
+
+ /* Store value */
+ value = ptr;
+
+ /* Check the string is terminated by '\n' */
+ if(value[length - 1] != '\n')
+ goto failed;
+
+ /* Replace the '\n' with a nul terminator.
+ * In some tars the value may be binary and contain nul
+ * characters, in which case it cannot be treated as a
+ * nul terminated string, and so the length of the
+ * value is stored as well */
+ value[length - 1] = '\0';
+ vsize = length - 1;
+
+ /* Evaluate keyword */
+ if(strcmp(keyword, "size") == 0) {
+ res = sscanf(value, "%lld %n", &number, &bytes);
+ if(res < 1 || value[bytes] != '\0')
+ goto failed;
+ file->buf.st_size = number;
+ file->have_size = TRUE;
+ } else if(strcmp(keyword, "uid") == 0) {
+ res = sscanf(value, "%lld %n", &number, &bytes);
+ if(res < 1 || value[bytes] != '\0')
+ goto failed;
+ file->buf.st_uid = number;
+ file->have_uid = TRUE;
+ } else if(strcmp(keyword, "gid") == 0) {
+ res = sscanf(value, "%lld %n", &number, &bytes);
+ if(res < 1 || value[bytes] != '\0')
+ goto failed;
+ file->buf.st_gid = number;
+ file->have_gid = TRUE;
+ } else if(strcmp(keyword, "mtime") == 0) {
+ res = sscanf(value, "%lld %n", &number, &bytes);
+ if(res < 1 || value[bytes] != '.')
+ goto failed;
+ file->buf.st_mtime = number;
+ file->have_mtime = TRUE;
+ } else if(strcmp(keyword, "uname") == 0)
+ file->uname = strdup(value);
+ else if(strcmp(keyword, "gname") == 0)
+ file->gname = strdup(value);
+ else if(strcmp(keyword, "path") == 0)
+ file->pathname = strdup(skip_components(value, vsize, NULL));
+ else if(strcmp(keyword, "linkpath") == 0)
+ file->link = strdup(value);
+ else if(strcmp(keyword, "GNU.sparse.major") == 0) {
+ res = sscanf(value, "%lld %n", &number, &bytes);
+ if(res < 1 || value[bytes] != '\0')
+ goto failed;
+ major = number;
+ } else if(strcmp(keyword, "GNU.sparse.minor") == 0) {
+ res = sscanf(value, "%lld %n", &number, &bytes);
+ if(res < 1 || value[bytes] != '\0')
+ goto failed;
+ minor = number;
+ } else if(strcmp(keyword, "GNU.sparse.realsize") == 0) {
+ res = sscanf(value, "%lld %n", &number, &bytes);
+ if(res < 1 || value[bytes] != '\0')
+ goto failed;
+ realsize = number;
+ } else if(strcmp(keyword, "GNU.sparse.name") == 0)
+ name = strdup(value);
+ else if(strcmp(keyword, "GNU.sparse.size") == 0) {
+ res = sscanf(value, "%lld %n", &number, &bytes);
+ if(res < 1 || value[bytes] != '\0')
+ goto failed;
+ realsize = number;
+ old_gnu_pax = 1;
+ } else if(strcmp(keyword, "GNU.sparse.numblocks") == 0 && old_gnu_pax == 1) {
+ res = sscanf(value, "%lld %n", &number, &bytes);
+ if(res < 1 || value[bytes] != '\0')
+ goto failed;
+ file->map = malloc(number * sizeof(struct file_map));
+ if(file->map == NULL)
+ MEM_ERROR();
+ map_entries = number;
+ cur_entry = 0;
+ old_gnu_pax = 2;
+ } else if(strcmp(keyword, "GNU.sparse.offset") == 0 && old_gnu_pax == 2 && old_gnu_ver != 1) {
+ res = sscanf(value, "%lld %n", &number, &bytes);
+ if(res < 1 || value[bytes] != '\0')
+ goto failed;
+ if(cur_entry < map_entries)
+ file->map[cur_entry].offset = number;
+ old_gnu_ver = 0;
+ } else if(strcmp(keyword, "GNU.sparse.numbytes") == 0 && old_gnu_pax == 2 && old_gnu_ver != 1) {
+ res = sscanf(value, "%lld %n", &number, &bytes);
+ if(res < 1 || value[bytes] != '\0')
+ goto failed;
+ if(cur_entry < map_entries)
+ file->map[cur_entry++].number = number;
+ old_gnu_ver = 0;
+ } else if(strcmp(keyword, "GNU.sparse.map") == 0 && old_gnu_pax == 2 && old_gnu_ver != 0) {
+ res = read_sparse_value(file, value, map_entries);
+ if(res == FALSE)
+ goto failed;
+ old_gnu_ver = 1;
+ } else if(strncmp(keyword, "LIBARCHIVE.xattr.", strlen("LIBARCHIVE.xattr.")) == 0)
+ read_tar_xattr(keyword + strlen("LIBARCHIVE.xattr."), value, strlen(value), ENCODING_BASE64, file);
+ else if(strncmp(keyword, "SCHILY.xattr.", strlen("SCHILY.xattr.")) == 0)
+ read_tar_xattr(keyword + strlen("SCHILY.xattr."), value, vsize, ENCODING_BINARY, file);
+ else if(strcmp(keyword, "GNU.sparse.numblocks") != 0 &&
+ strcmp(keyword, "GNU.sparse.offset") != 0 &&
+ strcmp(keyword, "GNU.sparse.numbytes") != 0 &&
+ strcmp(keyword, "GNU.sparse.map") != 0 &&
+ strcmp(keyword, "atime") != 0 &&
+ strcmp(keyword, "ctime") != 0 &&
+ strcmp(keyword, "comment") != 0)
+ ERROR("Unrecognised keyword \"%s\" in pax header, ignoring\n", keyword);
+
+ ptr += length;
+ }
+
+ /* Is this a sparse file, and version 1.0?
+ * If it is, flag it; the sparse map will be read
+ * later */
+ if(!old_gnu_pax && major != -1 && minor != -1 && realsize != -1 && name) {
+ if(major == 1 && minor == 0) {
+ file->realsize = realsize;
+ file->sparse_pax = 2;
+ file->pathname = name;
+ } else {
+ ERROR("Pax sparse file not Major 1, Minor 0!\n");
+ free(name);
+ }
+ }
+
+ /* Is this an older sparse format? */
+ if(old_gnu_pax == 2 && (old_gnu_ver == 0 || (old_gnu_ver == 1 && name))) {
+ file->realsize = realsize;
+ file->map_entries = map_entries;
+ file->sparse_pax = 1;
+ if(old_gnu_ver == 1)
+ file->pathname = name;
+ }
+
+ free(data);
+ return TRUE;
+
+failed:
+ ERROR("Failed to parse pax header\n");
+ free(data);
+ return FALSE;
+}
+
+
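+/*
+ * Sanity check the sparse map - the data regions should add up to the
+ * amount of data stored in the tar file, and the data plus the holes
+ * should add up to the real (apparent) file size.
+ */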
+static int check_sparse_map(struct file_map *map, int map_entries, long long size, long long realsize)
+{
+ long long total_data = 0;
+ long long total_sparse = map[0].offset;
+ int i;
+
+ for(i = 0; i < map_entries; i++) {
+ if(i > 0)
+ total_sparse += (map[i].offset - (map[i - 1].offset + map[i - 1].number));
+ total_data += map[i].number;
+ }
+
+ return total_data == size && total_data + total_sparse == realsize;
+}
+
+
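+/*
+ * Read the sparse map from old-style GNU sparse headers (type 'S').  The
+ * short header holds up to four map entries, and if its isextended flag
+ * is set, it is followed by one or more long headers holding up to 21
+ * entries each.
+ */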
+static struct file_map *read_sparse_headers(struct tar_file *file, struct short_sparse_header *short_header, int *entries)
+{
+ struct long_sparse_header long_header;
+ int res, i, map_entries, isextended;
+ struct file_map *map = NULL;
+ long long realsize;
+
+ realsize = read_number(short_header->realsize, 12);
+ if(realsize == -1) {
+ ERROR("Failed to read offset from sparse header\n");
+ goto failed;
+ }
+
+ map = malloc(4 * sizeof(struct file_map));
+ if(map == NULL)
+ MEM_ERROR();
+
+ /* There should always be at least one sparse entry */
+ map[0].offset = read_number(short_header->sparse[0].offset, 12);
+ if(map[0].offset == -1) {
+ ERROR("Failed to read offset from sparse header\n");
+ goto failed;
+ }
+
+ map[0].number = read_number(short_header->sparse[0].number, 12);
+ if(map[0].number == -1) {
+ ERROR("Failed to read number from sparse header\n");
+ goto failed;
+ }
+
+ /* There may be three more sparse entries in this short header.
+ * An offset of 0 means unused */
+ for(i = 1; i < 4; i++) {
+ map[i].offset = read_number(short_header->sparse[i].offset, 12);
+ if(map[i].offset == -1) {
+ ERROR("Failed to read offset from sparse header\n");
+ goto failed;
+ }
+
+ if(map[i].offset == 0)
+ break;
+
+ map[i].number = read_number(short_header->sparse[i].number, 12);
+ if(map[i].number == -1) {
+ ERROR("Failed to read number from sparse header\n");
+ goto failed;
+ }
+ }
+
+ /* If we've read two or fewer entries, then we expect the isextended
+ * entry to be FALSE */
+ isextended = read_number(&short_header->isextended, 1);
+ if(i < 3 && isextended) {
+ ERROR("Invalid sparse header\n");
+ goto failed;
+ }
+
+ map_entries = i;
+
+ while(isextended) {
+ res = read_bytes(STDIN_FILENO, &long_header, 512);
+ if(res < 512) {
+ if(res != -1)
+ ERROR("Unexpected EOF (end of file), the tarfile appears to be truncated or corrupted\n");
+ goto failed;
+ }
+
+ map = realloc(map, (map_entries + 21) * sizeof(struct file_map));
+ if(map == NULL)
+ MEM_ERROR();
+
+ /* There may be up to 21 sparse entries in this long header.
+ * An offset of 0 means unused */
+ for(i = map_entries; i < (map_entries + 21); i++) {
+ map[i].offset = read_number(long_header.sparse[i - map_entries].offset, 12);
+ if(map[i].offset == -1) {
+ ERROR("Failed to read offset from sparse header\n");
+ goto failed;
+ }
+
+ if(map[i].offset == 0)
+ break;
+
+ map[i].number = read_number(long_header.sparse[i - map_entries].number, 12);
+ if(map[i].number == -1) {
+ ERROR("Failed to read number from sparse header\n");
+ goto failed;
+ }
+ }
+
+ /* If we've read fewer than 21 entries, then we expect the isextended
+ * entry to be FALSE */
+ isextended = read_number(&long_header.isextended, 1);
+ if(i < (map_entries + 21) && isextended) {
+ ERROR("Invalid sparse header\n");
+ goto failed;
+ }
+
+ map_entries = i;
+ }
+
+ res = check_sparse_map(map, map_entries, file->buf.st_size, realsize);
+ if(res == FALSE) {
+ ERROR("Sparse file map inconsistent\n");
+ goto failed;
+ }
+
+ *entries = map_entries;
+ file->buf.st_size = realsize;
+
+ return map;
+
+failed:
+ free(map);
+ return NULL;
+}
+
+
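+/*
+ * Read the sparse map used by PAX format 1.0 sparse files.  The map is
+ * stored in 512-byte blocks in front of the file data, as a series of
+ * newline terminated decimal numbers: the number of entries, followed by
+ * offset/number pairs.
+ */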
+static struct file_map *read_sparse_map(struct tar_file *file, int *entries)
+{
+ int map_entries, bytes, size;
+ struct file_map *map = NULL;
+ char buffer[529], *src = buffer;
+ long long offset = 0, number, res;
+ int atoffset = TRUE, i = 0;
+
+ res = read_bytes(STDIN_FILENO, buffer, 512);
+ if(res < 512) {
+ if(res != -1)
+ ERROR("Unexpected EOF (end of file), the tarfile appears to be truncated or corrupted\n");
+ goto failed;
+ }
+
+ /* First number is the number of map entries */
+ map_entries = read_decimal(src, 512, &bytes);
+ if(map_entries < 0) {
+ ERROR("Could not parse Pax sparse map data\n");
+ goto failed;
+ }
+
+ src += bytes;
+ size = 512 - bytes;
+ file->buf.st_size -= 512;
+
+ while(i < map_entries) {
+ res = read_decimal(src, size, &bytes);
+ if(res == -1) {
+ ERROR("Could not parse Pax sparse map data\n");
+ goto failed;
+ }
+
+ if(res == -2) {
+ /* Out of data */
+ if(size >= 18) {
+ /* Too large block of '0' .. '9' without a '\n' */
+ ERROR("Could not parse Pax sparse map data\n");
+ goto failed;
+ }
+
+ memmove(buffer, src, size);
+ res = read_bytes(STDIN_FILENO, buffer + size, 512);
+ if(res < 512) {
+ if(res != -1)
+ ERROR("Unexpected EOF (end of file), the tarfile appears to be truncated or corrupted\n");
+ goto failed;
+ }
+
+ src = buffer;
+ size += 512;
+ file->buf.st_size -= 512;
+ continue;
+ }
+
+ src += bytes;
+ size -= bytes;
+
+ if(atoffset)
+ offset = res;
+ else {
+ number = res;
+
+ if(i % 50 == 0) {
+ map = realloc(map, (i + 50) * sizeof(struct file_map));
+ if(map == NULL)
+ MEM_ERROR();
+ }
+
+ map[i].offset = offset;
+ map[i++].number = number;
+ }
+
+ atoffset = !atoffset;
+ }
+
+ *entries = map_entries;
+ return map;
+
+failed:
+ free(map);
+ return NULL;
+}
+
+
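+/*
+ * Copy a tar_file structure, duplicating the string fields so that the
+ * copy can be modified and freed independently of the source.
+ */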
+static void copy_tar_header(struct tar_file *dest, struct tar_file *source)
+{
+ memcpy(dest, source, sizeof(struct tar_file));
+ if(source->pathname)
+ dest->pathname = strdup(source->pathname);
+ if(source->link)
+ dest->link = strdup(source->link);
+ if(source->uname)
+ dest->uname = strdup(source->uname);
+ if(source->gname)
+ dest->gname = strdup(source->gname);
+}
+
+
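+/*
+ * Used when ignore_zeros is set - skip blocks until a header with a
+ * valid checksum is found.  Returns 1 on success, 0 on end of file,
+ * and -1 on read error.
+ */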
+static int skip_to_valid_header(struct tar_header *header)
+{
+ int res, first = TRUE;
+
+ while(1) {
+ res = read_bytes(STDIN_FILENO, header, 512);
+
+ if(res < 512) {
+ if(res == 0)
+ return 0;
+ if(res != -1)
+ ERROR("Unexpected EOF (end of file), the tarfile appears to be truncated or corrupted\n");
+ return -1;
+ }
+
+ if(all_zero(header))
+ continue;
+
+ if(checksum_matches(header, TRUE))
+ return 1;
+
+ if(first) {
+ ERROR("sqfstar: Skipping to next header\n");
+ first = FALSE;
+ }
+ }
+}
+
+
+static struct tar_file *read_tar_header(int *status)
+{
+ struct tar_header header;
+ struct tar_file *file;
+ long long res;
+ int size, type;
+ char *filename, *user, *group;
+ static struct tar_file *global = NULL;
+
+ file = malloc(sizeof(struct tar_file));
+ if(file == NULL)
+ MEM_ERROR();
+
+ if(global)
+ copy_tar_header(file, global);
+ else
+ memset(file, 0, sizeof(struct tar_file));
+
+again:
+ res = read_bytes(STDIN_FILENO, &header, 512);
+ if(res < 512) {
+ if(res == 0)
+ goto eof;
+ if(res != -1)
+ ERROR("Unexpected EOF (end of file), the tarfile appears to be truncated or corrupted\n");
+ goto failed;
+ }
+
+ if(all_zero(&header)) {
+ if(ignore_zeros) {
+ res = skip_to_valid_header(&header);
+ if(res == 0)
+ goto eof;
+ if(res == -1)
+ goto failed;
+ } else
+ goto eof;
+ } else if(checksum_matches(&header, FALSE) == FALSE) {
+ ERROR("Tar header checksum does not match!\n");
+ goto failed;
+ }
+
+ /* Read filesize */
+ if(file->have_size == FALSE) {
+ res = read_number(header.size, 12);
+ if(res == -1) {
+ ERROR("Failed to read file size from tar header\n");
+ goto failed;
+ }
+ file->buf.st_size = res;
+ }
+
+ switch(header.type) {
+ case GNUTAR_SPARSE:
+ file->map = read_sparse_headers(file, (struct short_sparse_header *) &header, &file->map_entries);
+ if(file->map == NULL)
+ goto failed;
+ /* fall through */
+ case TAR_NORMAL1:
+ case TAR_NORMAL2:
+ case TAR_NORMAL3:
+ type = S_IFREG;
+ break;
+ case TAR_DIR:
+ type = S_IFDIR;
+ break;
+ case TAR_SYM:
+ type = S_IFLNK;
+ break;
+ case TAR_HARD:
+ type = S_IFHRD;
+ break;
+ case TAR_CHAR:
+ type = S_IFCHR;
+ break;
+ case TAR_BLOCK:
+ type = S_IFBLK;
+ break;
+ case TAR_FIFO:
+ type = S_IFIFO;
+ break;
+ case TAR_XHDR:
+ case SOLARIS_XHDR:
+ res = read_pax_header(file, file->buf.st_size);
+ if(res == FALSE) {
+ ERROR("Failed to read pax header\n");
+ goto failed;
+ }
+ goto again;
+ case TAR_GXHDR:
+ if(global == NULL) {
+ global = malloc(sizeof(struct tar_file));
+ if(global == NULL)
+ MEM_ERROR();
+ memset(global, 0, sizeof(struct tar_file));
+ }
+ res = read_pax_header(global, file->buf.st_size);
+ if(res == FALSE) {
+ ERROR("Failed to read pax header\n");
+ goto failed;
+ }
+ /* file is now out of date, and needs to be
+ * (re-)synced with the global header */
+ free(file->pathname);
+ free(file->link);
+ free(file->uname);
+ free(file->gname);
+ copy_tar_header(file, global);
+ goto again;
+ case GNUTAR_LONG_NAME:
+ file->pathname = read_long_string(file->buf.st_size, TRUE);
+ if(file->pathname == NULL) {
+ ERROR("Failed to read GNU Long Name\n");
+ goto failed;
+ }
+ goto again;
+ case GNUTAR_LONG_LINK:
+ file->link = read_long_string(file->buf.st_size, FALSE);
+ if(file->link == NULL) {
+ ERROR("Failed to read GNU Long Link\n");
+ goto failed;
+ }
+ goto again;
+ default:
+ ERROR("Unhandled tar type in header 0x%x - ignoring\n", header.type);
+ goto ignored;
+ }
+
+ /* Process filename - skip any leading slashes or ./ or ../ */
+ if(file->pathname == NULL && header.prefix[0] != '\0') {
+ int length1, length2;
+
+ filename = skip_components(header.prefix, 155, &size);
+ length1 = strnlen(filename, size);
+ length2 = strnlen(header.name, 100);
+ file->pathname = malloc(length1 + length2 + 2);
+ if(file->pathname == NULL)
+ MEM_ERROR();
+
+ memcpy(file->pathname, filename, length1);
+ file->pathname[length1] = '/';
+ memcpy(file->pathname + length1 + 1, header.name, length2);
+ file->pathname[length1 + length2 + 1] = '\0';
+ } else if (file->pathname == NULL) {
+ filename = skip_components(header.name, 100, &size);
+ file->pathname = strndup(filename, size);
+ }
+
+ /* Ignore empty filenames */
+ if(strlen(file->pathname) == 0) {
+ ERROR("Empty tar filename after skipping leading /, ./, or ../, ignoring\n");
+ goto ignored;
+ }
+
+ /* Read mtime */
+ if(file->have_mtime == FALSE) {
+ res = read_number(header.mtime, 12);
+ if(res == -1) {
+ ERROR("Failed to read file mtime from tar header\n");
+ goto failed;
+ }
+ file->buf.st_mtime = res;
+ }
+
+ /* Read mode and file type */
+ res = read_number(header.mode, 8);
+ if(res == -1) {
+ ERROR("Failed to read file mode from tar header\n");
+ goto failed;
+ }
+ file->buf.st_mode = res;
+
+ /* V7 and others used to append a trailing '/' to indicate a
+ * directory */
+ if(file->pathname[strlen(file->pathname) - 1] == '/') {
+ file->pathname[strlen(file->pathname) - 1] = '\0';
+ type = S_IFDIR;
+ }
+
+ file->buf.st_mode |= type;
+
+ /* Get user information - if file->uname is non NULL (from the PAX
+ * header), use that if it is recognised by the system; otherwise if
+ * header.user is filled in and recognised by the system, use that;
+ * otherwise fall back to the uid, either from the PAX header (if
+ * have_uid is TRUE) or from header.uid */
+ res = -1;
+ if(file->uname)
+ user = file->uname;
+ else
+ user = strndup(header.user, 32);
+
+ if(strlen(user)) {
+ struct passwd *pwuid = getpwnam(user);
+ if(pwuid)
+ res = pwuid->pw_uid;
+ }
+
+ if(res == -1) {
+ if(file->have_uid == FALSE) {
+ res = read_number(header.uid, 8);
+ if(res == -1) {
+ ERROR("Failed to read file uid from tar header\n");
+ goto failed;
+ }
+ file->buf.st_uid = res;
+ }
+ } else
+ file->buf.st_uid = res;
+
+ free(user);
+
+ /* Get group information - if file->gname is non NULL (from the PAX
+ * header), use that if it is recognised by the system; otherwise if
+ * header.group is filled in and recognised by the system, use that;
+ * otherwise fall back to the gid, either from the PAX header (if
+ * have_gid is TRUE) or from header.gid */
+ res = -1;
+ if(file->gname)
+ group = file->gname;
+ else
+ group = strndup(header.group, 32);
+
+ if(strlen(group)) {
+ struct group *grgid = getgrnam(group);
+ if(grgid)
+ res = grgid->gr_gid;
+ }
+
+ if(res == -1) {
+ if(file->have_gid == FALSE) {
+ res = read_number(header.gid, 8);
+ if(res == -1) {
+ ERROR("Failed to read file gid from tar header\n");
+ goto failed;
+ }
+ file->buf.st_gid = res;
+ }
+ } else
+ file->buf.st_gid = res;
+
+ free(group);
+
+ /* Read major and minor for device files */
+ if(type == S_IFCHR || type == S_IFBLK) {
+ int major, minor;
+
+ major = read_number(header.major, 8);
+ if(major == -1) {
+ ERROR("Failed to read device major from tar header\n");
+ goto failed;
+ }
+
+ minor = read_number(header.minor, 8);
+ if(minor == -1) {
+ ERROR("Failed to read device minor from tar header\n");
+ goto failed;
+ }
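+ /* Combine major and minor into st_rdev, matching the
+ * Linux/glibc dev_t encoding (low 8 bits of the minor,
+ * then the major, then the remaining minor bits) */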
+ file->buf.st_rdev = (major << 8) | (minor & 0xff) | ((minor & ~0xff) << 12);
+ }
+
+ /* Handle symbolic links */
+ if(type == S_IFLNK) {
+ /* Permissions on symbolic links are always rwxrwxrwx */
+ file->buf.st_mode = 0777 | S_IFLNK;
+
+ if(file->link == NULL)
+ file->link = strndup(header.link, 100);
+ }
+
+ /* Handle hard links */
+ if(type == S_IFHRD) {
+ if(file->link) {
+ char *link = skip_components(file->link, strlen(file->link), NULL);
+
+ if(link != file->link) {
+ char *old = file->link;
+
+ file->link = strdup(link);
+ free(old);
+ }
+ } else {
+ filename = skip_components(header.link, 100, &size);
+ file->link = strndup(filename, size);
+ }
+ }
+
+ *status = TAR_OK;
+ return file;
+
+failed:
+ free_tar_xattrs(file);
+ free(file->pathname);
+ free(file->link);
+ free(file);
+ *status = TAR_ERROR;
+ return NULL;
+
+ignored:
+ if(file->buf.st_size) {
+ /* Skip any data blocks */
+ long long size = file->buf.st_size;
+
+ while(size > 0) {
+ res = read_bytes(STDIN_FILENO, &header, 512);
+ if(res < 512) {
+ if(res != -1)
+ ERROR("Unexpected EOF (end of file), the tarfile appears to be truncated or corrupted\n");
+ goto failed;
+ }
+ size -= 512;
+ }
+ }
+
+ free(file->pathname);
+ free(file->link);
+ free(file);
+ *status = TAR_IGNORED;
+ return NULL;
+
+eof:
+ *status = TAR_EOF;
+ free(file);
+ return NULL;
+}
+
+
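+/*
+ * Reader side of tar processing - parse tar headers from stdin, send a
+ * file_buffer describing each entry to the main thread, and queue the
+ * data blocks of regular files for fragment processing or compression.
+ */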
+void read_tar_file()
+{
+ struct tar_file *tar_file;
+ int status, res;
+
+ while(1) {
+ struct file_buffer *file_buffer;
+
+ file_buffer = malloc(sizeof(struct file_buffer));
+ if(file_buffer == NULL)
+ MEM_ERROR();
+
+ while(1) {
+ tar_file = read_tar_header(&status);
+ if(status != TAR_IGNORED)
+ break;
+ }
+
+ if(status == TAR_ERROR)
+ BAD_ERROR("Error occurred reading tar file. Aborting\n");
+
+ /* If Pax 1.0 sparse file, read the map data now */
+ if(tar_file && tar_file->sparse_pax == 2) {
+ tar_file->map = read_sparse_map(tar_file, &tar_file->map_entries);
+ if(tar_file->map == NULL)
+ BAD_ERROR("Error occurred reading tar file. Aborting\n");
+ }
+
+ /* Check Pax sparse map for consistency */
+ if(tar_file && tar_file->sparse_pax) {
+ res = check_sparse_map(tar_file->map, tar_file->map_entries, tar_file->buf.st_size, tar_file->realsize);
+ if(res == FALSE)
+ BAD_ERROR("Sparse file map inconsistent. Aborting\n");
+ tar_file->buf.st_size = tar_file->realsize;
+ }
+
+ if(tar_file && (tar_file->buf.st_mode & S_IFMT) == S_IFREG)
+ progress_bar_size((tar_file->buf.st_size + block_size - 1)
+ >> block_log);
+
+ file_buffer->cache = NULL;
+ file_buffer->fragment = FALSE;
+ file_buffer->tar_file = tar_file;
+ file_buffer->sequence = sequence_count ++;
+ seq_queue_put(to_main, file_buffer);
+
+ if(status == TAR_EOF)
+ break;
+
+ if(S_ISREG(tar_file->buf.st_mode))
+ read_tar_data(tar_file);
+ }
+}
+
+
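+/*
+ * Main thread side of tar processing - receive the parsed tar entries,
+ * resolve hardlinks, build the in-memory directory tree and write the
+ * file data, then create the root directory inode and scan the tree.
+ */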
+squashfs_inode process_tar_file(int progress)
+{
+ struct stat buf;
+ struct dir_info *new;
+ struct dir_ent *dir_ent;
+ struct tar_file *tar_file;
+ struct file_buffer *file_buffer;
+
+ queue_put(to_reader, NULL);
+ set_progressbar_state(progress);
+
+ while(1) {
+ struct inode_info *link = NULL;
+
+ file_buffer = seq_queue_get(to_main);
+ if(file_buffer->tar_file == NULL)
+ break;
+
+ tar_file = file_buffer->tar_file;
+
+ if(S_ISHRD(tar_file->buf.st_mode)) {
+ /* Hard link, need to resolve where it points to, and
+ * replace with a reference to that inode */
+ struct dir_ent *entry = lookup_pathname(root_dir, tar_file->link);
+ if(entry == NULL) {
+ ERROR("Could not resolve hardlink %s, file %s doesn't exist\n", tar_file->pathname, tar_file->link);
+ free(file_buffer);
+ free(tar_file->pathname);
+ free(tar_file->link);
+ free(tar_file);
+ continue;
+ }
+
+ if(entry->inode == NULL || S_ISDIR(entry->inode->buf.st_mode)) {
+ ERROR("Could not resolve hardlink %s, because %s is a directory\n", tar_file->pathname, tar_file->link);
+ free(file_buffer);
+ free(tar_file->pathname);
+ free(tar_file->link);
+ free(tar_file);
+ continue;
+ }
+
+ link = entry->inode;
+ free(tar_file->link);
+ tar_file->link = NULL;
+ }
+
+ new = add_tarfile(root_dir, tar_file->pathname, "",
+ tar_file, paths, 1, &dir_ent, link);
+
+ if(new) {
+ int duplicate_file;
+ root_dir = new;
+
+ if(S_ISREG(tar_file->buf.st_mode) && dir_ent->inode->read == FALSE) {
+ update_info(dir_ent);
+ tar_file->file = write_file(dir_ent, &duplicate_file);
+ dir_ent->inode->read = TRUE;
+ INFO("file %s, uncompressed size %lld bytes %s\n", tar_file->pathname,
+ (long long) tar_file->buf.st_size, duplicate_file ? "DUPLICATE" : "");
+ }
+
+ if(link) {
+ if(no_hardlinks)
+ INFO("file %s, uncompressed size %lld bytes DUPLICATE\n", tar_file->pathname,
+ (long long) link->buf.st_size);
+ else
+ link->nlink ++;
+ free(tar_file->pathname);
+ free(tar_file);
+ }
+ } else if(S_ISREG(tar_file->buf.st_mode))
+ skip_file(file_buffer->tar_file);
+
+ free(file_buffer);
+ }
+
+ free(file_buffer);
+
+ if(root_dir)
+ fixup_tree(root_dir);
+ else
+ root_dir = scan1_opendir("", "", 0);
+
+ /* Create root directory dir_ent and associated inode, and connect
+ * it to the root directory dir_info structure */
+ dir_ent = create_dir_entry("", NULL, "", scan1_opendir("", "", 0));
+
+ memset(&buf, 0, sizeof(buf));
+ if(root_mode_opt)
+ buf.st_mode = root_mode | S_IFDIR;
+ else
+ buf.st_mode = S_IRWXU | S_IRWXG | S_IRWXO | S_IFDIR;
+ if(root_uid_opt)
+ buf.st_uid = root_uid;
+ else
+ buf.st_uid = getuid();
+ if(root_gid_opt)
+ buf.st_gid = root_gid;
+ else
+ buf.st_gid = getgid();
+ if(root_time_opt)
+ buf.st_mtime = root_time;
+ else
+ buf.st_mtime = time(NULL);
+ if(pseudo_override && global_uid_opt)
+ buf.st_uid = global_uid;
+
+ if(pseudo_override && global_gid_opt)
+ buf.st_gid = global_gid;
+ buf.st_dev = 0;
+ buf.st_ino = 0;
+ dir_ent->inode = lookup_inode(&buf);
+ dir_ent->inode->dummy_root_dir = TRUE;
+ dir_ent->dir = root_dir;
+ root_dir->dir_ent = dir_ent;
+
+ return do_directory_scans(dir_ent, progress);
+}
diff --git a/squashfs-tools/tar.h b/squashfs-tools/tar.h
new file mode 100644
index 0000000..1de762d
--- /dev/null
+++ b/squashfs-tools/tar.h
@@ -0,0 +1,153 @@
+#ifndef TAR_H
+#define TAR_H
+
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * tar.h
+ */
+
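+/* On-disk 512-byte tar header block (ustar/GNU tar layout) */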
+struct tar_header {
+ union {
+ unsigned char udata[512];
+ signed char sdata[512];
+ struct {
+ char name[100];
+ char mode[8];
+ char uid[8];
+ char gid[8];
+ char size[12];
+ char mtime[12];
+ char checksum[8];
+ char type;
+ char link[100];
+ char magic[8];
+ char user[32];
+ char group[32];
+ char major[8];
+ char minor[8];
+ char prefix[155];
+ };
+ };
+};
+
+
+struct sparse_entry {
+ char offset[12];
+ char number[12];
+};
+
+
+struct short_sparse_header {
+ char pad[386];
+ struct sparse_entry sparse[4];
+ char isextended;
+ char realsize[12];
+};
+
+
+struct long_sparse_header {
+ struct sparse_entry sparse[21];
+ char isextended;
+ char pad[7];
+};
+
+
+struct file_map {
+ long long offset;
+ long long number;
+};
+
+
+struct tar_file {
+ long long realsize;
+ struct stat buf;
+ struct file_info *file;
+ struct xattr_list *xattr_list;
+ struct file_map *map;
+ char *pathname;
+ char *link;
+ char *uname;
+ char *gname;
+ int xattrs;
+ int map_entries;
+ char have_size;
+ char have_uid;
+ char have_gid;
+ char have_mtime;
+ char sparse_pax;
+};
+
+#define IS_TARFILE(a) (a->tarfile)
+#define TAR_NORMAL1 '0'
+#define TAR_NORMAL2 '\0'
+#define TAR_HARD '1'
+#define TAR_SYM '2'
+#define TAR_CHAR '3'
+#define TAR_BLOCK '4'
+#define TAR_DIR '5'
+#define TAR_FIFO '6'
+#define TAR_NORMAL3 '7'
+#define TAR_GXHDR 'g'
+#define TAR_XHDR 'x'
+
+#define GNUTAR_LONG_NAME 'L'
+#define GNUTAR_LONG_LINK 'K'
+#define GNUTAR_SPARSE 'S'
+
+#define SOLARIS_XHDR 'X'
+
+#define V7_MAGIC "\0\0\0\0\0\0\0"
+#define GNU_MAGIC "ustar "
+#define USTAR_MAGIC "ustar\00000"
+
+#define S_IFHRD S_IFMT
+
+#define S_ISHRD(a) ((a & S_IFMT) == S_IFHRD)
+
+#define TAR_OK 0
+#define TAR_EOF 1
+#define TAR_ERROR 2
+#define TAR_IGNORED 3
+
+#define ENCODING_BASE64 0
+#define ENCODING_BINARY 1
+
+extern void read_tar_file();
+extern squashfs_inode process_tar_file(int progress);
+extern int ignore_zeros;
+extern int default_uid_opt;
+extern unsigned int default_uid;
+extern int default_gid_opt;
+extern unsigned int default_gid;
+extern int default_mode_opt;
+extern mode_t default_mode;
+
+#ifdef XATTR_SUPPORT
+extern int xattr_get_prefix(struct xattr_list *, char *);
+extern void read_tar_xattr(char *, char *, int, int, struct tar_file *);
+extern void free_tar_xattrs(struct tar_file *);
+extern int read_xattrs_from_tarfile(struct inode_info *, struct xattr_list **);
+#else
+#define read_tar_xattr(a, b, c, d, e)
+#define free_tar_xattrs(a)
+#define read_xattrs_from_tarfile(a, b) 0
+#endif
+#endif
diff --git a/squashfs-tools/tar_xattr.c b/squashfs-tools/tar_xattr.c
new file mode 100644
index 0000000..fc9eef5
--- /dev/null
+++ b/squashfs-tools/tar_xattr.c
@@ -0,0 +1,122 @@
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * tar_xattr.c
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <dirent.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <regex.h>
+
+#include "squashfs_fs.h"
+#include "mksquashfs.h"
+#include "mksquashfs_error.h"
+#include "tar.h"
+#include "xattr.h"
+
+#define TRUE 1
+#define FALSE 0
+
+extern regex_t *xattr_exclude_preg;
+extern regex_t *xattr_include_preg;
+
+
+void read_tar_xattr(char *name, char *value, int size, int encoding, struct tar_file *file)
+{
+ char *data;
+ struct xattr_list *xattr;
+ int i;
+
+ /* Some tars output both LIBARCHIVE and SCHILY xattrs, which
+ * will lead to multiple definitions of the same xattr.
+ * So check that this xattr hasn't already been defined */
+ for(i = 0; i < file->xattrs; i++)
+ if(strcmp(name, file->xattr_list[i].full_name) == 0)
+ return;
+
+ if(xattr_exclude_preg) {
+ int res = regexec(xattr_exclude_preg, name, (size_t) 0, NULL, 0);
+
+ if(res == 0)
+ return;
+ }
+
+ if(xattr_include_preg) {
+ int res = regexec(xattr_include_preg, name, (size_t) 0, NULL, 0);
+
+ if(res)
+ return;
+ }
+
+ if(encoding == ENCODING_BASE64) {
+ data = base64_decode(value, size, &size);
+ if(data == NULL) {
+ ERROR("Invalid LIBARCHIVE xattr base64 value, ignoring\n");
+ return;
+ }
+ } else {
+ data = malloc(size);
+ if(data == NULL)
+ MEM_ERROR();
+ memcpy(data, value, size);
+ }
+
+ file->xattr_list = realloc(file->xattr_list, (file->xattrs + 1) *
+ sizeof(struct xattr_list));
+ if(file->xattr_list == NULL)
+ MEM_ERROR();
+
+ xattr = &file->xattr_list[file->xattrs];
+
+ xattr->type = xattr_get_prefix(xattr, name);
+ if(xattr->type == -1) {
+ ERROR("Unrecognised tar xattr prefix %s, ignoring\n", name);
+ free(data);
+ return;
+ }
+
+ xattr->value = data;
+ xattr->vsize = size;
+ file->xattrs ++;
+}
+
+
+int read_xattrs_from_tarfile(struct inode_info *inode, struct xattr_list **xattr_list)
+{
+ if(inode->tar_file) {
+ *xattr_list = inode->tar_file->xattr_list;
+ return inode->tar_file->xattrs;
+ } else
+ return 0;
+}
+
+
+void free_tar_xattrs(struct tar_file *file)
+{
+ int i;
+
+ for(i = 0; i < file->xattrs; i++)
+ free(file->xattr_list[i].full_name);
+
+ free(file->xattr_list);
+}
diff --git a/squashfs-tools/unsquash-1.c b/squashfs-tools/unsquash-1.c
new file mode 100644
index 0000000..27b2766
--- /dev/null
+++ b/squashfs-tools/unsquash-1.c
@@ -0,0 +1,582 @@
+/*
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2009, 2010, 2011, 2012, 2019, 2021, 2022, 2023
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * unsquash-1.c
+ */
+
+#include "unsquashfs.h"
+#include "squashfs_compat.h"
+#include "compressor.h"
+
+static unsigned int *uid_table, *guid_table;
+static squashfs_operations ops;
+
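+/*
+ * Read the block list for a file inode.  In 1.x filesystems block sizes
+ * are stored as unsigned shorts, so convert them to the unsigned int
+ * format used by later filesystems, setting the uncompressed bit where
+ * necessary.
+ */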
+static void read_block_list(unsigned int *block_list, long long start,
+ unsigned int offset, int blocks)
+{
+ unsigned short *source;
+ int i, res;
+
+ TRACE("read_block_list: blocks %d\n", blocks);
+
+ source = malloc(blocks * sizeof(unsigned short));
+ if(source == NULL)
+ MEM_ERROR();
+
+ if(swap) {
+ char *swap_buff;
+
+ swap_buff = malloc(blocks * sizeof(unsigned short));
+ if(swap_buff == NULL)
+ MEM_ERROR();
+
+ res = read_inode_data(swap_buff, &start, &offset, blocks * sizeof(unsigned short));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_block_list: failed to read "
+ "inode index %lld:%d\n", start, offset);
+ SQUASHFS_SWAP_SHORTS_3(source, swap_buff, blocks);
+ free(swap_buff);
+ } else {
+ res = read_inode_data(source, &start, &offset, blocks * sizeof(unsigned short));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_block_list: failed to read "
+ "inode index %lld:%d\n", start, offset);
+ }
+
+ for(i = 0; i < blocks; i++)
+ block_list[i] = SQUASHFS_COMPRESSED_SIZE(source[i]) |
+ (SQUASHFS_COMPRESSED(source[i]) ? 0 :
+ SQUASHFS_COMPRESSED_BIT_BLOCK);
+ free(source);
+}
+
+
+static struct inode *read_inode(unsigned int start_block, unsigned int offset)
+{
+ static union squashfs_inode_header_1 header;
+ long long start = sBlk.s.inode_table_start + start_block;
+ long long st = start;
+ unsigned int off = offset, uid;
+ static struct inode i;
+ int res;
+
+ TRACE("read_inode: reading inode [%d:%d]\n", start_block, offset);
+
+ if(swap) {
+ squashfs_base_inode_header_1 sinode;
+ res = read_inode_data(&sinode, &st, &off, sizeof(sinode));
+ if(res)
+ SQUASHFS_SWAP_BASE_INODE_HEADER_1(&header.base, &sinode,
+ sizeof(squashfs_base_inode_header_1));
+ } else
+ res = read_inode_data(&header.base, &st, &off, sizeof(header.base));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read inode %lld:%d\n", st, off);
+
+ uid = (header.base.inode_type - 1) / SQUASHFS_TYPES * 16 + header.base.uid;
+
+ if(uid >= sBlk.no_uids)
+ EXIT_UNSQUASH("File system corrupted - uid index in inode too large (uid: %u)\n", uid);
+
+ i.uid = (uid_t) uid_table[uid];
+
+ if(header.base.inode_type == SQUASHFS_IPC_TYPE) {
+ squashfs_ipc_inode_header_1 *inodep = &header.ipc;
+
+ if(swap) {
+ squashfs_ipc_inode_header_1 sinodep;
+ res = read_inode_data(&sinodep, &start, &offset, sizeof(sinodep));
+ if(res)
+ SQUASHFS_SWAP_IPC_INODE_HEADER_1(inodep, &sinodep);
+ } else
+ res = read_inode_data(inodep, &start, &offset, sizeof(*inodep));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ if(inodep->type == SQUASHFS_SOCKET_TYPE) {
+ i.mode = S_IFSOCK | header.base.mode;
+ i.type = SQUASHFS_SOCKET_TYPE;
+ } else {
+ i.mode = S_IFIFO | header.base.mode;
+ i.type = SQUASHFS_FIFO_TYPE;
+ }
+
+ uid = inodep->offset * 16 + inodep->uid;
+ if(uid >= sBlk.no_uids)
+ EXIT_UNSQUASH("File system corrupted - uid index in inode too large (uid: %u)\n", uid);
+
+ i.uid = (uid_t) uid_table[uid];
+ } else {
+ i.mode = lookup_type[(header.base.inode_type - 1) %
+ SQUASHFS_TYPES + 1] | header.base.mode;
+ i.type = (header.base.inode_type - 1) % SQUASHFS_TYPES + 1;
+ }
+
+ i.xattr = SQUASHFS_INVALID_XATTR;
+
+ if(header.base.guid == 15)
+ i.gid = i.uid;
+ else if(header.base.guid >= sBlk.no_guids)
+ EXIT_UNSQUASH("File system corrupted - gid index in inode too large (gid: %u)\n", header.base.guid);
+ else
+ i.gid = (uid_t) guid_table[header.base.guid];
+
+ i.inode_number = inode_number ++;
+
+ switch(i.type) {
+ case SQUASHFS_DIR_TYPE: {
+ squashfs_dir_inode_header_1 *inode = &header.dir;
+
+ if(swap) {
+ squashfs_dir_inode_header_1 sinode;
+ res = read_inode_data(&sinode, &start, &offset, sizeof(sinode));
+ if(res)
+ SQUASHFS_SWAP_DIR_INODE_HEADER_1(inode,
+ &sinode);
+ } else
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ i.data = inode->file_size;
+ i.offset = inode->offset;
+ i.start = inode->start_block;
+ if(time_opt)
+ i.time = timeval;
+ else
+ i.time = inode->mtime;
+ break;
+ }
+ case SQUASHFS_FILE_TYPE: {
+ squashfs_reg_inode_header_1 *inode = &header.reg;
+
+ if(swap) {
+ squashfs_reg_inode_header_1 sinode;
+ res = read_inode_data(&sinode, &start, &offset, sizeof(sinode));
+ if(res)
+ SQUASHFS_SWAP_REG_INODE_HEADER_1(inode,
+ &sinode);
+ } else
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ i.data = inode->file_size;
+ if(time_opt)
+ i.time = timeval;
+ else
+ i.time = inode->mtime;
+ i.blocks = (i.data + sBlk.s.block_size - 1) >>
+ sBlk.s.block_log;
+ i.start = inode->start_block;
+ i.block_start = start;
+ i.block_offset = offset;
+ i.fragment = 0;
+ i.frag_bytes = 0;
+ i.offset = 0;
+ i.sparse = 0;
+ break;
+ }
+ case SQUASHFS_SYMLINK_TYPE: {
+ squashfs_symlink_inode_header_1 *inodep =
+ &header.symlink;
+
+ if(swap) {
+ squashfs_symlink_inode_header_1 sinodep;
+ res = read_inode_data(&sinodep, &start, &offset, sizeof(sinodep));
+ if(res)
+ SQUASHFS_SWAP_SYMLINK_INODE_HEADER_1(inodep,
+ &sinodep);
+ } else
+ res = read_inode_data(inodep, &start, &offset, sizeof(*inodep));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ i.symlink = malloc(inodep->symlink_size + 1);
+ if(i.symlink == NULL)
+ MEM_ERROR();
+
+ res = read_inode_data(i.symlink, &start, &offset, inodep->symlink_size);
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode symbolic link %lld:%d\n", start, offset);
+ i.symlink[inodep->symlink_size] = '\0';
+ i.data = inodep->symlink_size;
+ if(time_opt)
+ i.time = timeval;
+ else
+ i.time = sBlk.s.mkfs_time;
+ break;
+ }
+ case SQUASHFS_BLKDEV_TYPE:
+ case SQUASHFS_CHRDEV_TYPE: {
+ squashfs_dev_inode_header_1 *inodep = &header.dev;
+
+ if(swap) {
+ squashfs_dev_inode_header_1 sinodep;
+ res = read_inode_data(&sinodep, &start, &offset, sizeof(sinodep));
+ if(res)
+ SQUASHFS_SWAP_DEV_INODE_HEADER_1(inodep,
+ &sinodep);
+ } else
+ res = read_inode_data(inodep, &start, &offset, sizeof(*inodep));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ i.data = inodep->rdev;
+ if(time_opt)
+ i.time = timeval;
+ else
+ i.time = sBlk.s.mkfs_time;
+ break;
+ }
+ case SQUASHFS_FIFO_TYPE:
+ case SQUASHFS_SOCKET_TYPE: {
+ i.data = 0;
+ if(time_opt)
+ i.time = timeval;
+ else
+ i.time = sBlk.s.mkfs_time;
+ break;
+ }
+ default:
+ EXIT_UNSQUASH("Unknown inode type %d in "
+ "read_inode_header_1!\n",
+ header.base.inode_type);
+ }
+ return &i;
+}
+
+
+static struct dir *squashfs_opendir(unsigned int block_start, unsigned int offset,
+ struct inode **i)
+{
+ squashfs_dir_header_2 dirh;
+ char buffer[sizeof(squashfs_dir_entry_2) + SQUASHFS_NAME_LEN + 1]
+ __attribute__((aligned));
+ squashfs_dir_entry_2 *dire = (squashfs_dir_entry_2 *) buffer;
+ long long start;
+ int bytes = 0;
+ int dir_count, size, res;
+ struct dir_ent *ent, *cur_ent = NULL;
+ struct dir *dir;
+
+ TRACE("squashfs_opendir: inode start block %d, offset %d\n",
+ block_start, offset);
+
+ *i = read_inode(block_start, offset);
+
+ dir = malloc(sizeof(struct dir));
+ if(dir == NULL)
+ MEM_ERROR();
+
+ dir->dir_count = 0;
+ dir->cur_entry = NULL;
+ dir->mode = (*i)->mode;
+ dir->uid = (*i)->uid;
+ dir->guid = (*i)->gid;
+ dir->mtime = (*i)->time;
+ dir->xattr = (*i)->xattr;
+ dir->dirs = NULL;
+
+ if ((*i)->data == 0)
+ /*
+ * if the directory is empty, skip the unnecessary
+ * lookup_entry; this fixes the corner case with
+ * completely empty filesystems where lookup_entry correctly
+ * returning -1 is incorrectly treated as an error
+ */
+ return dir;
+
+ start = sBlk.s.directory_table_start + (*i)->start;
+ offset = (*i)->offset;
+ size = (*i)->data + bytes;
+
+ while(bytes < size) {
+ if(swap) {
+ squashfs_dir_header_2 sdirh;
+ res = read_directory_data(&sdirh, &start, &offset, sizeof(sdirh));
+ if(res)
+ SQUASHFS_SWAP_DIR_HEADER_2(&dirh, &sdirh);
+ } else
+ res = read_directory_data(&dirh, &start, &offset, sizeof(dirh));
+
+ if(res == FALSE)
+ goto corrupted;
+
+ dir_count = dirh.count + 1;
+ TRACE("squashfs_opendir: Read directory header @ byte position "
+ "%d, %d directory entries\n", bytes, dir_count);
+ bytes += sizeof(dirh);
+
+ /* dir_count should never be larger than SQUASHFS_DIR_COUNT */
+ if(dir_count > SQUASHFS_DIR_COUNT) {
+ ERROR("File system corrupted: too many entries in directory\n");
+ goto corrupted;
+ }
+
+ while(dir_count--) {
+ if(swap) {
+ squashfs_dir_entry_2 sdire;
+ res = read_directory_data(&sdire, &start,
+ &offset, sizeof(sdire));
+ if(res)
+ SQUASHFS_SWAP_DIR_ENTRY_2(dire, &sdire);
+ } else
+ res = read_directory_data(dire, &start,
+ &offset, sizeof(*dire));
+
+ if(res == FALSE)
+ goto corrupted;
+
+ bytes += sizeof(*dire);
+
+ /* size should never be SQUASHFS_NAME_LEN or larger */
+ if(dire->size >= SQUASHFS_NAME_LEN) {
+ ERROR("File system corrupted: filename too long\n");
+ goto corrupted;
+ }
+
+ res = read_directory_data(dire->name, &start, &offset,
+ dire->size + 1);
+
+ if(res == FALSE)
+ goto corrupted;
+
+ dire->name[dire->size + 1] = '\0';
+
+ /* check name for invalid characters (i.e /, ., ..) */
+ if(check_name(dire->name, dire->size + 1) == FALSE) {
+ ERROR("File system corrupted: invalid characters in name\n");
+ goto corrupted;
+ }
+
+ TRACE("squashfs_opendir: directory entry %s, inode "
+ "%d:%d, type %d\n", dire->name,
+ dirh.start_block, dire->offset, dire->type);
+
+ ent = malloc(sizeof(struct dir_ent));
+ if(ent == NULL)
+ MEM_ERROR();
+
+ ent->name = strdup(dire->name);
+ ent->start_block = dirh.start_block;
+ ent->offset = dire->offset;
+ ent->type = dire->type;
+ ent->next = NULL;
+ if(cur_ent == NULL)
+ dir->dirs = ent;
+ else
+ cur_ent->next = ent;
+ cur_ent = ent;
+ dir->dir_count ++;
+ bytes += dire->size + 1;
+ }
+ }
+
+ /* check directory for duplicate names. Need to sort directory first */
+ sort_directory(&(dir->dirs), dir->dir_count);
+ if(check_directory(dir) == FALSE) {
+ ERROR("File system corrupted: directory has duplicate names\n");
+ goto corrupted;
+ }
+ return dir;
+
+corrupted:
+ squashfs_closedir(dir);
+ return NULL;
+}
+
+
+static int read_filesystem_tables()
+{
+ long long table_start;
+
+ /* Read uid and gid lookup tables */
+
+ /* Sanity check super block contents */
+ if(sBlk.no_guids) {
+ if(sBlk.guid_start >= sBlk.s.bytes_used) {
+ ERROR("read_filesystem_tables: gid start too large in super block\n");
+ goto corrupted;
+ }
+
+ /* In 1.x filesystems, there should never be more than 15 gids */
+ if(sBlk.no_guids > 15) {
+ ERROR("read_filesystem_tables: gids too large in super block\n");
+ goto corrupted;
+ }
+
+ if(read_ids(sBlk.no_guids, sBlk.guid_start, sBlk.s.bytes_used, &guid_table) == FALSE)
+ goto corrupted;
+
+ table_start = sBlk.guid_start;
+ } else {
+ /* no guids, guid_start should be 0 */
+ if(sBlk.guid_start != 0) {
+ ERROR("read_filesystem_tables: gid start too large in super block\n");
+ goto corrupted;
+ }
+
+ table_start = sBlk.s.bytes_used;
+ }
+
+ if(sBlk.uid_start >= table_start) {
+ ERROR("read_filesystem_tables: uid start too large in super block\n");
+ goto corrupted;
+ }
+
+ /* There should be at least one uid */
+ if(sBlk.no_uids == 0) {
+ ERROR("read_filesystem_tables: uid count bad in super block\n");
+ goto corrupted;
+ }
+
+ /* In 1.x filesystems, there should never be more than 48 uids */
+ if(sBlk.no_uids > 48) {
+ ERROR("read_filesystem_tables: uids too large in super block\n");
+ goto corrupted;
+ }
+
+ if(read_ids(sBlk.no_uids, sBlk.uid_start, table_start, &uid_table) == FALSE)
+ goto corrupted;
+
+ table_start = sBlk.uid_start;
+
+ /* Sanity check super block directory table values */
+ if(sBlk.s.directory_table_start > table_start) {
+ ERROR("read_filesystem_tables: directory table start too large in super block\n");
+ goto corrupted;
+ }
+
+ /* Sanity check super block inode table values */
+ if(sBlk.s.inode_table_start >= sBlk.s.directory_table_start) {
+ ERROR("read_filesystem_tables: inode table start too large in super block\n");
+ goto corrupted;
+ }
+
+ return TRUE;
+
+corrupted:
+ return FALSE;
+}
+
+
+int read_super_1(squashfs_operations **s_ops, void *s)
+{
+ squashfs_super_block_3 *sBlk_3 = s;
+
+ if(sBlk_3->s_magic != SQUASHFS_MAGIC || sBlk_3->s_major != 1 ||
+ sBlk_3->s_minor != 0)
+ return -1;
+
+ sBlk.s.s_magic = sBlk_3->s_magic;
+ sBlk.s.inodes = sBlk_3->inodes;
+ sBlk.s.mkfs_time = sBlk_3->mkfs_time;
+ sBlk.s.block_size = sBlk_3->block_size_1;
+ sBlk.s.fragments = 0;
+ sBlk.s.block_log = sBlk_3->block_log;
+ sBlk.s.flags = sBlk_3->flags;
+ sBlk.s.s_major = sBlk_3->s_major;
+ sBlk.s.s_minor = sBlk_3->s_minor;
+ sBlk.s.root_inode = sBlk_3->root_inode;
+ sBlk.s.bytes_used = sBlk_3->bytes_used_2;
+ sBlk.s.inode_table_start = sBlk_3->inode_table_start_2;
+ sBlk.s.directory_table_start = sBlk_3->directory_table_start_2;
+ sBlk.s.fragment_table_start = SQUASHFS_INVALID_BLK;
+ sBlk.s.lookup_table_start = sBlk_3->lookup_table_start;
+ sBlk.no_uids = sBlk_3->no_uids;
+ sBlk.no_guids = sBlk_3->no_guids;
+ sBlk.uid_start = sBlk_3->uid_start_2;
+ sBlk.guid_start = sBlk_3->guid_start_2;
+ sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK;
+
+ *s_ops = &ops;
+
+ /*
+ * 1.x filesystems use gzip compression.
+ */
+ comp = lookup_compressor("gzip");
+ return TRUE;
+}
+
+
+static void squashfs_stat(char *source)
+{
+ time_t mkfs_time = (time_t) sBlk.s.mkfs_time;
+ struct tm *t = use_localtime ? localtime(&mkfs_time) :
+ gmtime(&mkfs_time);
+ char *mkfs_str = asctime(t);
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+ printf("Found a valid %sSQUASHFS %d:%d superblock on %s.\n",
+ swap ? "little endian " : "big endian ", sBlk.s.s_major,
+ sBlk.s.s_minor, source);
+#else
+ printf("Found a valid %sSQUASHFS %d:%d superblock on %s.\n",
+ swap ? "big endian " : "little endian ", sBlk.s.s_major,
+ sBlk.s.s_minor, source);
+#endif
+
+ printf("Creation or last append time %s", mkfs_str ? mkfs_str :
+ "failed to get time\n");
+ printf("Filesystem size %llu bytes (%.2f Kbytes / %.2f Mbytes)\n",
+ sBlk.s.bytes_used, sBlk.s.bytes_used / 1024.0,
+ sBlk.s.bytes_used / (1024.0 * 1024.0));
+ printf("Block size %d\n", sBlk.s.block_size);
+ printf("Filesystem is %sexportable via NFS\n",
+ SQUASHFS_EXPORTABLE(sBlk.s.flags) ? "" : "not ");
+ printf("Inodes are %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_INODES(sBlk.s.flags) ? "un" : "");
+ printf("Data is %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_DATA(sBlk.s.flags) ? "un" : "");
+ printf("Check data is %spresent in the filesystem\n",
+ SQUASHFS_CHECK_DATA(sBlk.s.flags) ? "" : "not ");
+ printf("Duplicates are removed\n");
+ printf("Number of inodes %d\n", sBlk.s.inodes);
+ printf("Number of uids %d\n", sBlk.no_uids);
+ printf("Number of gids %d\n", sBlk.no_guids);
+
+ TRACE("sBlk.s.inode_table_start 0x%llx\n", sBlk.s.inode_table_start);
+ TRACE("sBlk.s.directory_table_start 0x%llx\n", sBlk.s.directory_table_start);
+ TRACE("sBlk.uid_start 0x%llx\n", sBlk.uid_start);
+ TRACE("sBlk.guid_start 0x%llx\n", sBlk.guid_start);
+}
+
+
+static squashfs_operations ops = {
+ .opendir = squashfs_opendir,
+ .read_block_list = read_block_list,
+ .read_inode = read_inode,
+ .read_filesystem_tables = read_filesystem_tables,
+ .stat = squashfs_stat
+};
diff --git a/squashfs-tools/unsquash-12.c b/squashfs-tools/unsquash-12.c
new file mode 100644
index 0000000..3a6f0ae
--- /dev/null
+++ b/squashfs-tools/unsquash-12.c
@@ -0,0 +1,30 @@
+/*
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * unsquash-12.c
+ *
+ * Helper functions used by unsquash-1 and unsquash-2.
+ */
+
+#include "unsquashfs.h"
+#include "merge_sort.h"
+
+SORT(sort_directory, dir_ent, name, next);
diff --git a/squashfs-tools/unsquash-123.c b/squashfs-tools/unsquash-123.c
new file mode 100644
index 0000000..8c0a51c
--- /dev/null
+++ b/squashfs-tools/unsquash-123.c
@@ -0,0 +1,79 @@
+/*
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2019
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * unsquash-123.c
+ *
+ * Helper functions used by unsquash-1, unsquash-2 and unsquash-3.
+ */
+
+#include "unsquashfs.h"
+#include "squashfs_compat.h"
+
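+/*
+ * Read the uid or gid lookup table used by 1.x, 2.x and 3.x filesystems.
+ * The ids are stored as an array of unsigned ints, byte swapped if the
+ * filesystem endianness differs from the host.
+ */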
+int read_ids(int ids, long long start, long long end, unsigned int **id_table)
+{
+ /* Note on overflow limits:
+ * Size of ids is 2^8
+ * Max length is 2^8*4 or 1024
+ */
+ int res;
+ int length = ids * sizeof(unsigned int);
+
+ /*
+ * The size of the index table (length bytes) should match the
+ * table start and end points
+ */
+ if(length != (end - start)) {
+ ERROR("read_ids: Bad uid/gid count in super block\n");
+ return FALSE;
+ }
+
+ TRACE("read_ids: no_ids %d\n", ids);
+
+ *id_table = malloc(length);
+ if(*id_table == NULL)
+ MEM_ERROR();
+
+ if(swap) {
+ unsigned int *sid_table = malloc(length);
+
+ if(sid_table == NULL)
+ MEM_ERROR();
+
+ res = read_fs_bytes(fd, start, length, sid_table);
+ if(res == FALSE) {
+ ERROR("read_ids: failed to read uid/gid table"
+ "\n");
+ free(sid_table);
+ return FALSE;
+ }
+ SQUASHFS_SWAP_INTS_3((*id_table), sid_table, ids);
+ free(sid_table);
+ } else {
+ res = read_fs_bytes(fd, start, length, *id_table);
+ if(res == FALSE) {
+ ERROR("read_ids: failed to read uid/gid table"
+ "\n");
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
diff --git a/squashfs-tools/unsquash-1234.c b/squashfs-tools/unsquash-1234.c
new file mode 100644
index 0000000..98a81ed
--- /dev/null
+++ b/squashfs-tools/unsquash-1234.c
@@ -0,0 +1,95 @@
+/*
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * unsquash-1234.c
+ *
+ * Helper functions used by unsquash-1, unsquash-2, unsquash-3 and
+ * unsquash-4.
+ */
+
+#include "unsquashfs.h"
+
+/*
+ * Check name for validity, name should not
+ * - be ".", "./", or
+ * - be "..", "../" or
+ * - have a "/" anywhere in the name, or
+ * - be shorter than the expected size
+ */
+int check_name(char *name, int size)
+{
+ char *start = name;
+
+ if(name[0] == '.') {
+ if(name[1] == '.')
+ name++;
+ if(name[1] == '/' || name[1] == '\0')
+ return FALSE;
+ }
+
+ while(name[0] != '/' && name[0] != '\0')
+ name ++;
+
+ if(name[0] == '/')
+ return FALSE;
+
+ if((name - start) != size)
+ return FALSE;
+
+ return TRUE;
+}
+
+
+void squashfs_closedir(struct dir *dir)
+{
+ struct dir_ent *ent = dir->dirs;
+
+ while(ent) {
+ struct dir_ent *tmp = ent;
+
+ ent = ent->next;
+ free(tmp->name);
+ free(tmp);
+ }
+
+ free(dir);
+}
+
+
+/*
+ * Check directory for duplicate names. As the directory should be sorted,
+ * duplicates will be consecutive. Obviously we also need to check if the
+ * directory has been deliberately unsorted, to evade this check.
+ */
+int check_directory(struct dir *dir)
+{
+ int i;
+ struct dir_ent *ent;
+
+ if(dir->dir_count < 2)
+ return TRUE;
+
+ for(ent = dir->dirs, i = 0; i < dir->dir_count - 1; ent = ent->next, i++)
+ if(strcmp(ent->name, ent->next->name) >= 0)
+ return FALSE;
+
+ return TRUE;
+}
diff --git a/squashfs-tools/unsquash-2.c b/squashfs-tools/unsquash-2.c
new file mode 100644
index 0000000..b2546d5
--- /dev/null
+++ b/squashfs-tools/unsquash-2.c
@@ -0,0 +1,715 @@
+/*
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2009, 2010, 2013, 2019, 2021, 2022, 2023
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * unsquash-2.c
+ */
+
+#include "unsquashfs.h"
+#include "squashfs_compat.h"
+#include "compressor.h"
+
+static squashfs_fragment_entry_2 *fragment_table;
+static unsigned int *uid_table, *guid_table;
+static squashfs_operations ops;
+static int needs_sorting = FALSE;
+
+
+static void read_block_list(unsigned int *block_list, long long start,
+ unsigned int offset, int blocks)
+{
+ int res;
+
+ TRACE("read_block_list: blocks %d\n", blocks);
+
+ if(swap) {
+ char *block_ptr = malloc(blocks * sizeof(unsigned int));
+ if(block_ptr == NULL)
+ MEM_ERROR();
+ res = read_inode_data(block_ptr, &start, &offset, blocks * sizeof(unsigned int));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_block_list: failed to read "
+ "inode index %lld:%d\n", start, offset);
+ SQUASHFS_SWAP_INTS_3(block_list, block_ptr, blocks);
+ free(block_ptr);
+ } else {
+ res = read_inode_data(block_list, &start, &offset, blocks * sizeof(unsigned int));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_block_list: failed to read "
+ "inode index %lld:%d\n", start, offset);
+ }
+}
+
+
+static int read_fragment_table(long long *table_start)
+{
+ /*
+ * Note on overflow limits:
+ * Size of SBlk.s.fragments is 2^32 (unsigned int)
+ * Max size of bytes is 2^32*8 or 2^35
+ * Max indexes is (2^32*8)/8K or 2^22
+ * Max length is ((2^32*8)/8K)*4 or 2^24 or 16M
+ */
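+	/*
+	 * Worked example (sketch, using the entry and index sizes implied
+	 * above): 2^20 fragments occupy 2^20 * 8 bytes = 8 Mbytes of
+	 * fragment entries, spanning 1024 8K metadata blocks, so the index
+	 * read below is 1024 * 4 = 4 Kbytes.
+	 */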
+ int res, i;
+ long long bytes = SQUASHFS_FRAGMENT_BYTES_2((long long) sBlk.s.fragments);
+ int indexes = SQUASHFS_FRAGMENT_INDEXES_2((long long) sBlk.s.fragments);
+ int length = SQUASHFS_FRAGMENT_INDEX_BYTES_2((long long) sBlk.s.fragments);
+ unsigned int *fragment_table_index;
+
+ /*
+ * The size of the index table (length bytes) should match the
+ * table start and end points
+ */
+	if(length != (*table_start - sBlk.s.fragment_table_start)) {
+		ERROR("read_fragment_table: Bad fragment count in super block\n");
+ return FALSE;
+ }
+
+ TRACE("read_fragment_table: %d fragments, reading %d fragment indexes "
+ "from 0x%llx\n", sBlk.s.fragments, indexes,
+ sBlk.s.fragment_table_start);
+
+ fragment_table_index = malloc(length);
+ if(fragment_table_index == NULL)
+ MEM_ERROR();
+
+ fragment_table = malloc(bytes);
+ if(fragment_table == NULL)
+ MEM_ERROR();
+
+ if(swap) {
+ unsigned int *sfragment_table_index = malloc(length);
+
+ if(sfragment_table_index == NULL)
+ MEM_ERROR();
+
+ res = read_fs_bytes(fd, sBlk.s.fragment_table_start,
+ length, sfragment_table_index);
+ if(res == FALSE) {
+ ERROR("read_fragment_table: failed to read fragment "
+ "table index\n");
+ free(sfragment_table_index);
+ goto failed;
+ }
+ SQUASHFS_SWAP_FRAGMENT_INDEXES_2(fragment_table_index,
+ sfragment_table_index, indexes);
+ free(sfragment_table_index);
+ } else {
+ res = read_fs_bytes(fd, sBlk.s.fragment_table_start,
+ length, fragment_table_index);
+ if(res == FALSE) {
+ ERROR("read_fragment_table: failed to read fragment "
+ "table index\n");
+ goto failed;
+ }
+ }
+
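+	/*
+	 * Read each metadata block of the fragment table; every block is
+	 * SQUASHFS_METADATA_SIZE bytes except (possibly) the last, which
+	 * holds whatever remains.
+	 */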
+ for(i = 0; i < indexes; i++) {
+ int expected = (i + 1) != indexes ? SQUASHFS_METADATA_SIZE :
+ bytes & (SQUASHFS_METADATA_SIZE - 1);
+ int length = read_block(fd, fragment_table_index[i], NULL,
+ expected, ((char *) fragment_table) + ((long long) i *
+ SQUASHFS_METADATA_SIZE));
+ TRACE("Read fragment table block %d, from 0x%x, length %d\n", i,
+ fragment_table_index[i], length);
+ if(length == FALSE) {
+ ERROR("read_fragment_table: failed to read fragment "
+ "table block\n");
+ goto failed;
+ }
+ }
+
+ if(swap) {
+ squashfs_fragment_entry_2 sfragment;
+ for(i = 0; i < sBlk.s.fragments; i++) {
+ SQUASHFS_SWAP_FRAGMENT_ENTRY_2((&sfragment),
+ (&fragment_table[i]));
+ memcpy((char *) &fragment_table[i], (char *) &sfragment,
+ sizeof(squashfs_fragment_entry_2));
+ }
+ }
+
+ *table_start = fragment_table_index[0];
+ free(fragment_table_index);
+
+ return TRUE;
+
+failed:
+ free(fragment_table_index);
+ return FALSE;
+}
+
+
+static void read_fragment(unsigned int fragment, long long *start_block, int *size)
+{
+ TRACE("read_fragment: reading fragment %d\n", fragment);
+
+ squashfs_fragment_entry_2 *fragment_entry = &fragment_table[fragment];
+ *start_block = fragment_entry->start_block;
+ *size = fragment_entry->size;
+}
+
+
+static struct inode *read_inode(unsigned int start_block, unsigned int offset)
+{
+ static union squashfs_inode_header_2 header;
+ long long start = sBlk.s.inode_table_start + start_block;
+ long long st = start;
+ unsigned int off = offset;
+ static struct inode i;
+ int res;
+
+ TRACE("read_inode: reading inode [%d:%d]\n", start_block, offset);
+
+ if(swap) {
+ squashfs_base_inode_header_2 sinode;
+ res = read_inode_data(&sinode, &st, &off, sizeof(sinode));
+ if(res)
+ SQUASHFS_SWAP_BASE_INODE_HEADER_2(&header.base, &sinode,
+ sizeof(squashfs_base_inode_header_2));
+ } else
+ res = read_inode_data(&header.base, &st, &off, sizeof(header.base));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read inode %lld:%d\n", st, off);
+
+ i.xattr = SQUASHFS_INVALID_XATTR;
+
+ if(header.base.uid >= sBlk.no_uids)
+ EXIT_UNSQUASH("File system corrupted - uid index in inode too large (uid: %u)\n", header.base.uid);
+
+ i.uid = (uid_t) uid_table[header.base.uid];
+
+ if(header.base.guid == SQUASHFS_GUIDS)
+ i.gid = i.uid;
+ else if(header.base.guid >= sBlk.no_guids)
+ EXIT_UNSQUASH("File system corrupted - gid index in inode too large (gid: %d)\n", header.base.guid);
+ else
+ i.gid = (uid_t) guid_table[header.base.guid];
+
+ if(header.base.inode_type < 1 || header.base.inode_type > 8)
+ EXIT_UNSQUASH("File system corrupted - invalid type in inode (type: %u)\n", header.base.inode_type);
+
+ i.mode = lookup_type[header.base.inode_type] | header.base.mode;
+ i.type = header.base.inode_type;
+ i.inode_number = inode_number++;
+
+ switch(header.base.inode_type) {
+ case SQUASHFS_DIR_TYPE: {
+ squashfs_dir_inode_header_2 *inode = &header.dir;
+
+ if(swap) {
+ squashfs_dir_inode_header_2 sinode;
+ res = read_inode_data(&sinode, &start, &offset, sizeof(sinode));
+ if(res)
+ SQUASHFS_SWAP_DIR_INODE_HEADER_2(&header.dir,
+ &sinode);
+ } else
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ i.data = inode->file_size;
+ i.offset = inode->offset;
+ i.start = inode->start_block;
+ if(time_opt)
+ i.time = timeval;
+ else
+ i.time = inode->mtime;
+ break;
+ }
+ case SQUASHFS_LDIR_TYPE: {
+ squashfs_ldir_inode_header_2 *inode = &header.ldir;
+
+ if(swap) {
+ squashfs_ldir_inode_header_2 sinode;
+ res = read_inode_data(&sinode, &start, &offset, sizeof(sinode));
+ if(res)
+ SQUASHFS_SWAP_LDIR_INODE_HEADER_2(&header.ldir,
+ &sinode);
+ } else
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ i.data = inode->file_size;
+ i.offset = inode->offset;
+ i.start = inode->start_block;
+ if(time_opt)
+ i.time = timeval;
+ else
+ i.time = inode->mtime;
+ break;
+ }
+ case SQUASHFS_FILE_TYPE: {
+ squashfs_reg_inode_header_2 *inode = &header.reg;
+
+ if(swap) {
+ squashfs_reg_inode_header_2 sinode;
+ res = read_inode_data(&sinode, &start, &offset, sizeof(sinode));
+ if(res)
+ SQUASHFS_SWAP_REG_INODE_HEADER_2(inode,
+ &sinode);
+ } else
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ i.data = inode->file_size;
+ if(time_opt)
+ i.time = timeval;
+ else
+ i.time = inode->mtime;
+ i.frag_bytes = inode->fragment == SQUASHFS_INVALID_FRAG
+ ? 0 : inode->file_size % sBlk.s.block_size;
+ i.fragment = inode->fragment;
+ i.offset = inode->offset;
+ i.blocks = inode->fragment == SQUASHFS_INVALID_FRAG ?
+ (i.data + sBlk.s.block_size - 1) >>
+ sBlk.s.block_log : i.data >>
+ sBlk.s.block_log;
+ i.start = inode->start_block;
+ i.block_start = start;
+ i.block_offset = offset;
+ i.sparse = 0;
+ break;
+ }
+ case SQUASHFS_SYMLINK_TYPE: {
+ squashfs_symlink_inode_header_2 *inodep =
+ &header.symlink;
+
+ if(swap) {
+ squashfs_symlink_inode_header_2 sinodep;
+ res = read_inode_data(&sinodep, &start, &offset, sizeof(sinodep));
+ if(res)
+ SQUASHFS_SWAP_SYMLINK_INODE_HEADER_2(inodep,
+ &sinodep);
+ } else
+ res = read_inode_data(inodep, &start, &offset, sizeof(*inodep));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ i.symlink = malloc(inodep->symlink_size + 1);
+ if(i.symlink == NULL)
+ MEM_ERROR();
+
+ res = read_inode_data(i.symlink, &start, &offset, inodep->symlink_size);
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode symbolic link %lld:%d\n", start, offset);
+ i.symlink[inodep->symlink_size] = '\0';
+ i.data = inodep->symlink_size;
+ if(time_opt)
+ i.time = timeval;
+ else
+ i.time = sBlk.s.mkfs_time;
+ break;
+ }
+ case SQUASHFS_BLKDEV_TYPE:
+ case SQUASHFS_CHRDEV_TYPE: {
+ squashfs_dev_inode_header_2 *inodep = &header.dev;
+
+ if(swap) {
+ squashfs_dev_inode_header_2 sinodep;
+ res = read_inode_data(&sinodep, &start, &offset, sizeof(sinodep));
+ if(res)
+ SQUASHFS_SWAP_DEV_INODE_HEADER_2(inodep,
+ &sinodep);
+ } else
+ res = read_inode_data(inodep, &start, &offset, sizeof(*inodep));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ i.data = inodep->rdev;
+ if(time_opt)
+ i.time = timeval;
+ else
+ i.time = sBlk.s.mkfs_time;
+ break;
+ }
+ case SQUASHFS_FIFO_TYPE:
+ case SQUASHFS_SOCKET_TYPE:
+ i.data = 0;
+ if(time_opt)
+ i.time = timeval;
+ else
+ i.time = sBlk.s.mkfs_time;
+ break;
+ default:
+ EXIT_UNSQUASH("Unknown inode type %d in "
+ "read_inode_header_2!\n",
+ header.base.inode_type);
+ }
+ return &i;
+}
+
+
+static struct dir *squashfs_opendir(unsigned int block_start, unsigned int offset,
+ struct inode **i)
+{
+ squashfs_dir_header_2 dirh;
+ char buffer[sizeof(squashfs_dir_entry_2) + SQUASHFS_NAME_LEN + 1]
+ __attribute__((aligned));
+ squashfs_dir_entry_2 *dire = (squashfs_dir_entry_2 *) buffer;
+ long long start;
+ int bytes = 0;
+ int dir_count, size, res;
+ struct dir_ent *ent, *cur_ent = NULL;
+ struct dir *dir;
+
+ TRACE("squashfs_opendir: inode start block %d, offset %d\n",
+ block_start, offset);
+
+ *i = read_inode(block_start, offset);
+
+ dir = malloc(sizeof(struct dir));
+ if(dir == NULL)
+ MEM_ERROR();
+
+ dir->dir_count = 0;
+ dir->cur_entry = NULL;
+ dir->mode = (*i)->mode;
+ dir->uid = (*i)->uid;
+ dir->guid = (*i)->gid;
+ dir->mtime = (*i)->time;
+ dir->xattr = (*i)->xattr;
+ dir->dirs = NULL;
+
+ if ((*i)->data == 0)
+ /*
+		 * If the directory is empty, skip the unnecessary
+		 * lookup_entry; this fixes the corner case with
+		 * completely empty filesystems, where lookup_entry correctly
+		 * returning -1 is incorrectly treated as an error.
+ */
+ return dir;
+
+ start = sBlk.s.directory_table_start + (*i)->start;
+ offset = (*i)->offset;
+ size = (*i)->data + bytes;
+
+ while(bytes < size) {
+ if(swap) {
+ squashfs_dir_header_2 sdirh;
+ res = read_directory_data(&sdirh, &start, &offset, sizeof(sdirh));
+ if(res)
+ SQUASHFS_SWAP_DIR_HEADER_2(&dirh, &sdirh);
+ } else
+ res = read_directory_data(&dirh, &start, &offset, sizeof(dirh));
+
+ if(res == FALSE)
+ goto corrupted;
+
+ dir_count = dirh.count + 1;
+ TRACE("squashfs_opendir: Read directory header @ byte position "
+ "%d, %d directory entries\n", bytes, dir_count);
+ bytes += sizeof(dirh);
+
+ /* dir_count should never be larger than SQUASHFS_DIR_COUNT */
+ if(dir_count > SQUASHFS_DIR_COUNT) {
+ ERROR("File system corrupted: too many entries in directory\n");
+ goto corrupted;
+ }
+
+ while(dir_count--) {
+ if(swap) {
+ squashfs_dir_entry_2 sdire;
+ res = read_directory_data(&sdire, &start,
+ &offset, sizeof(sdire));
+ if(res)
+ SQUASHFS_SWAP_DIR_ENTRY_2(dire, &sdire);
+ } else
+ res = read_directory_data(dire, &start,
+ &offset, sizeof(*dire));
+
+ if(res == FALSE)
+ goto corrupted;
+
+ bytes += sizeof(*dire);
+
+ /* size should never be SQUASHFS_NAME_LEN or larger */
+ if(dire->size >= SQUASHFS_NAME_LEN) {
+ ERROR("File system corrupted: filename too long\n");
+ goto corrupted;
+ }
+
+ res = read_directory_data(dire->name, &start, &offset,
+ dire->size + 1);
+
+ if(res == FALSE)
+ goto corrupted;
+
+ dire->name[dire->size + 1] = '\0';
+
+ /* check name for invalid characters (i.e /, ., ..) */
+ if(check_name(dire->name, dire->size + 1) == FALSE) {
+ ERROR("File system corrupted: invalid characters in name\n");
+ goto corrupted;
+ }
+
+ TRACE("squashfs_opendir: directory entry %s, inode "
+ "%d:%d, type %d\n", dire->name,
+ dirh.start_block, dire->offset, dire->type);
+
+ ent = malloc(sizeof(struct dir_ent));
+ if(ent == NULL)
+ MEM_ERROR();
+
+ ent->name = strdup(dire->name);
+ ent->start_block = dirh.start_block;
+ ent->offset = dire->offset;
+ ent->type = dire->type;
+ ent->next = NULL;
+ if(cur_ent == NULL)
+ dir->dirs = ent;
+ else
+ cur_ent->next = ent;
+ cur_ent = ent;
+ dir->dir_count ++;
+ bytes += dire->size + 1;
+ }
+ }
+
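+	/*
+	 * Directories in 2.0 filesystems are not guaranteed to be stored
+	 * sorted (needs_sorting is set by read_super_2 for minor version 0),
+	 * so sort them here before the duplicate-name check below, which
+	 * relies on sorted order.
+	 */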
+ if(needs_sorting)
+ sort_directory(&(dir->dirs), dir->dir_count);
+
+ /* check directory for duplicate names and sorting */
+ if(check_directory(dir) == FALSE) {
+ if(needs_sorting)
+ ERROR("File system corrupted: directory has duplicate names\n");
+ else
+ ERROR("File system corrupted: directory has duplicate names or is unsorted\n");
+ goto corrupted;
+ }
+ return dir;
+
+corrupted:
+ squashfs_closedir(dir);
+ return NULL;
+}
+
+
+static int read_filesystem_tables()
+{
+ long long table_start;
+
+ /* Read uid and gid lookup tables */
+
+ /* Sanity check super block contents */
+ if(sBlk.no_guids) {
+ if(sBlk.guid_start >= sBlk.s.bytes_used) {
+ ERROR("read_filesystem_tables: gid start too large in super block\n");
+ goto corrupted;
+ }
+
+ if(read_ids(sBlk.no_guids, sBlk.guid_start, sBlk.s.bytes_used, &guid_table) == FALSE)
+ goto corrupted;
+
+ table_start = sBlk.guid_start;
+ } else {
+ /* no guids, guid_start should be 0 */
+ if(sBlk.guid_start != 0) {
+			ERROR("read_filesystem_tables: gid start invalid in super block\n");
+ goto corrupted;
+ }
+
+ table_start = sBlk.s.bytes_used;
+ }
+
+ if(sBlk.uid_start >= table_start) {
+ ERROR("read_filesystem_tables: uid start too large in super block\n");
+ goto corrupted;
+ }
+
+ /* There should be at least one uid */
+ if(sBlk.no_uids == 0) {
+ ERROR("read_filesystem_tables: uid count bad in super block\n");
+ goto corrupted;
+ }
+
+ if(read_ids(sBlk.no_uids, sBlk.uid_start, table_start, &uid_table) == FALSE)
+ goto corrupted;
+
+ table_start = sBlk.uid_start;
+
+ /* Read fragment table */
+ if(sBlk.s.fragments != 0) {
+
+ /* Sanity check super block contents */
+ if(sBlk.s.fragment_table_start >= table_start) {
+ ERROR("read_filesystem_tables: fragment table start too large in super block\n");
+ goto corrupted;
+ }
+
+ /* The number of fragments should not exceed the number of inodes */
+ if(sBlk.s.fragments > sBlk.s.inodes) {
+ ERROR("read_filesystem_tables: Bad fragment count in super block\n");
+ goto corrupted;
+ }
+
+ if(read_fragment_table(&table_start) == FALSE)
+ goto corrupted;
+ } else {
+ /*
+ * Sanity check super block contents - with 0 fragments,
+ * the fragment table should be empty
+ */
+ if(sBlk.s.fragment_table_start != table_start) {
+ ERROR("read_filesystem_tables: fragment table start invalid in super block\n");
+ goto corrupted;
+ }
+ }
+
+ /* Sanity check super block directory table values */
+ if(sBlk.s.directory_table_start > table_start) {
+ ERROR("read_filesystem_tables: directory table start too large in super block\n");
+ goto corrupted;
+ }
+
+ /* Sanity check super block inode table values */
+ if(sBlk.s.inode_table_start >= sBlk.s.directory_table_start) {
+ ERROR("read_filesystem_tables: inode table start too large in super block\n");
+ goto corrupted;
+ }
+
+ return TRUE;
+
+corrupted:
+ return FALSE;
+}
+
+
+int read_super_2(squashfs_operations **s_ops, void *s)
+{
+ squashfs_super_block_3 *sBlk_3 = s;
+
+ if(sBlk_3->s_magic != SQUASHFS_MAGIC || sBlk_3->s_major != 2 ||
+ sBlk_3->s_minor > 1)
+ return -1;
+
+ sBlk.s.s_magic = sBlk_3->s_magic;
+ sBlk.s.inodes = sBlk_3->inodes;
+ sBlk.s.mkfs_time = sBlk_3->mkfs_time;
+ sBlk.s.block_size = sBlk_3->block_size;
+ sBlk.s.fragments = sBlk_3->fragments;
+ sBlk.s.block_log = sBlk_3->block_log;
+ sBlk.s.flags = sBlk_3->flags;
+ sBlk.s.s_major = sBlk_3->s_major;
+ sBlk.s.s_minor = sBlk_3->s_minor;
+ sBlk.s.root_inode = sBlk_3->root_inode;
+ sBlk.s.bytes_used = sBlk_3->bytes_used_2;
+ sBlk.s.inode_table_start = sBlk_3->inode_table_start;
+ sBlk.s.directory_table_start = sBlk_3->directory_table_start_2;
+ sBlk.s.fragment_table_start = sBlk_3->fragment_table_start_2;
+ sBlk.s.inode_table_start = sBlk_3->inode_table_start_2;
+ sBlk.no_uids = sBlk_3->no_uids;
+ sBlk.no_guids = sBlk_3->no_guids;
+ sBlk.uid_start = sBlk_3->uid_start_2;
+ sBlk.guid_start = sBlk_3->guid_start_2;
+ sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK;
+
+ *s_ops = &ops;
+
+ /*
+ * 2.x filesystems use gzip compression.
+ */
+ comp = lookup_compressor("gzip");
+
+ if(sBlk_3->s_minor == 0)
+ needs_sorting = TRUE;
+
+ return TRUE;
+}
+
+
+static void squashfs_stat(char *source)
+{
+ time_t mkfs_time = (time_t) sBlk.s.mkfs_time;
+ struct tm *t = use_localtime ? localtime(&mkfs_time) :
+ gmtime(&mkfs_time);
+ char *mkfs_str = asctime(t);
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+ printf("Found a valid %sSQUASHFS %d:%d superblock on %s.\n",
+ swap ? "little endian " : "big endian ", sBlk.s.s_major,
+ sBlk.s.s_minor, source);
+#else
+ printf("Found a valid %sSQUASHFS %d:%d superblock on %s.\n",
+ swap ? "big endian " : "little endian ", sBlk.s.s_major,
+ sBlk.s.s_minor, source);
+#endif
+
+ printf("Creation or last append time %s", mkfs_str ? mkfs_str :
+ "failed to get time\n");
+ printf("Filesystem size %llu bytes (%.2f Kbytes / %.2f Mbytes)\n",
+ sBlk.s.bytes_used, sBlk.s.bytes_used / 1024.0,
+ sBlk.s.bytes_used / (1024.0 * 1024.0));
+
+ printf("Block size %d\n", sBlk.s.block_size);
+ printf("Filesystem is %sexportable via NFS\n",
+ SQUASHFS_EXPORTABLE(sBlk.s.flags) ? "" : "not ");
+ printf("Inodes are %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_INODES(sBlk.s.flags) ? "un" : "");
+ printf("Data is %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_DATA(sBlk.s.flags) ? "un" : "");
+
+ if(SQUASHFS_NO_FRAGMENTS(sBlk.s.flags))
+ printf("Fragments are not stored\n");
+ else {
+ printf("Fragments are %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_FRAGMENTS(sBlk.s.flags) ? "un" : "");
+ printf("Always-use-fragments option is %sspecified\n",
+ SQUASHFS_ALWAYS_FRAGMENTS(sBlk.s.flags) ? "" : "not ");
+ }
+
+ printf("Check data is %spresent in the filesystem\n",
+ SQUASHFS_CHECK_DATA(sBlk.s.flags) ? "" : "not ");
+ printf("Duplicates are %sremoved\n", SQUASHFS_DUPLICATES(sBlk.s.flags) ? "" : "not ");
+ printf("Number of fragments %d\n", sBlk.s.fragments);
+ printf("Number of inodes %d\n", sBlk.s.inodes);
+ printf("Number of uids %d\n", sBlk.no_uids);
+ printf("Number of gids %d\n", sBlk.no_guids);
+
+ TRACE("sBlk.s.inode_table_start 0x%llx\n", sBlk.s.inode_table_start);
+ TRACE("sBlk.s.directory_table_start 0x%llx\n", sBlk.s.directory_table_start);
+ TRACE("sBlk.s.fragment_table_start 0x%llx\n\n", sBlk.s.fragment_table_start);
+ TRACE("sBlk.uid_start 0x%llx\n", sBlk.uid_start);
+ TRACE("sBlk.guid_start 0x%llx\n", sBlk.guid_start);
+}
+
+
+static squashfs_operations ops = {
+ .opendir = squashfs_opendir,
+ .read_fragment = read_fragment,
+ .read_block_list = read_block_list,
+ .read_inode = read_inode,
+ .read_filesystem_tables = read_filesystem_tables,
+ .stat = squashfs_stat
+};
diff --git a/squashfs-tools/unsquash-3.c b/squashfs-tools/unsquash-3.c
new file mode 100644
index 0000000..8eab216
--- /dev/null
+++ b/squashfs-tools/unsquash-3.c
@@ -0,0 +1,824 @@
+/*
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2019, 2021, 2022, 2023
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * unsquash-3.c
+ */
+
+#include "unsquashfs.h"
+#include "squashfs_compat.h"
+#include "compressor.h"
+
+static squashfs_fragment_entry_3 *fragment_table;
+static unsigned int *uid_table, *guid_table;
+static squashfs_operations ops;
+
+static long long *salloc_index_table(int indexes)
+{
+ static long long *alloc_table = NULL;
+ static int alloc_size = 0;
+ int length = indexes * sizeof(long long);
+
+ if(alloc_size < length || length == 0) {
+ long long *table = realloc(alloc_table, length);
+
+		if(table == NULL && length != 0)
+ MEM_ERROR();
+
+ alloc_table = table;
+ alloc_size = length;
+ }
+
+ return alloc_table;
+}
+
+
+static void read_block_list(unsigned int *block_list, long long start,
+ unsigned int offset, int blocks)
+{
+ int res;
+
+ TRACE("read_block_list: blocks %d\n", blocks);
+
+ if(swap) {
+ unsigned int *block_ptr = malloc(blocks * sizeof(unsigned int));
+ if(block_ptr == NULL)
+ MEM_ERROR();
+ res = read_inode_data(block_ptr, &start, &offset, blocks * sizeof(unsigned int));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_block_list: failed to read "
+ "inode index %lld:%d\n", start, offset);
+ SQUASHFS_SWAP_INTS_3(block_list, block_ptr, blocks);
+ free(block_ptr);
+ } else {
+ res = read_inode_data(block_list, &start, &offset, blocks * sizeof(unsigned int));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_block_list: failed to read "
+ "inode index %lld:%d\n", start, offset);
+ }
+}
+
+
+static int read_fragment_table(long long *table_start)
+{
+ /*
+ * Note on overflow limits:
+ * Size of SBlk.s.fragments is 2^32 (unsigned int)
+ * Max size of bytes is 2^32*16 or 2^36
+ * Max indexes is (2^32*16)/8K or 2^23
+ * Max length is ((2^32*16)/8K)*8 or 2^26 or 64M
+ */
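+	/*
+	 * Worked example (sketch, using the entry and index sizes implied
+	 * above): 2^20 fragments occupy 2^20 * 16 bytes = 16 Mbytes of
+	 * fragment entries, spanning 2048 8K metadata blocks, so the index
+	 * read below is 2048 * 8 = 16 Kbytes.
+	 */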
+ int res, i;
+ long long bytes = SQUASHFS_FRAGMENT_BYTES_3((long long) sBlk.s.fragments);
+ int indexes = SQUASHFS_FRAGMENT_INDEXES_3((long long) sBlk.s.fragments);
+ int length = SQUASHFS_FRAGMENT_INDEX_BYTES_3((long long) sBlk.s.fragments);
+ long long *fragment_table_index;
+
+ /*
+ * The size of the index table (length bytes) should match the
+ * table start and end points
+ */
+ if(length != (*table_start - sBlk.s.fragment_table_start)) {
+ ERROR("read_fragment_table: Bad fragment count in super block\n");
+ return FALSE;
+ }
+
+ TRACE("read_fragment_table: %d fragments, reading %d fragment indexes "
+ "from 0x%llx\n", sBlk.s.fragments, indexes,
+ sBlk.s.fragment_table_start);
+
+ fragment_table_index = alloc_index_table(indexes);
+ fragment_table = malloc(bytes);
+ if(fragment_table == NULL)
+ EXIT_UNSQUASH("read_fragment_table: failed to allocate "
+ "fragment table\n");
+
+ if(swap) {
+ long long *sfragment_table_index = salloc_index_table(indexes);
+
+ res = read_fs_bytes(fd, sBlk.s.fragment_table_start,
+ length, sfragment_table_index);
+ if(res == FALSE) {
+ ERROR("read_fragment_table: failed to read fragment "
+ "table index\n");
+ return FALSE;
+ }
+ SQUASHFS_SWAP_FRAGMENT_INDEXES_3(fragment_table_index,
+ sfragment_table_index, indexes);
+ } else {
+ res = read_fs_bytes(fd, sBlk.s.fragment_table_start,
+ length, fragment_table_index);
+ if(res == FALSE) {
+ ERROR("read_fragment_table: failed to read fragment "
+ "table index\n");
+ return FALSE;
+ }
+ }
+
+ for(i = 0; i < indexes; i++) {
+ int expected = (i + 1) != indexes ? SQUASHFS_METADATA_SIZE :
+ bytes & (SQUASHFS_METADATA_SIZE - 1);
+ int length = read_block(fd, fragment_table_index[i], NULL,
+ expected, ((char *) fragment_table) + ((long long) i *
+ SQUASHFS_METADATA_SIZE));
+ TRACE("Read fragment table block %d, from 0x%llx, length %d\n",
+ i, fragment_table_index[i], length);
+ if(length == FALSE) {
+ ERROR("read_fragment_table: failed to read fragment "
+ "table block\n");
+ return FALSE;
+ }
+ }
+
+ if(swap) {
+ squashfs_fragment_entry_3 sfragment;
+ for(i = 0; i < sBlk.s.fragments; i++) {
+ SQUASHFS_SWAP_FRAGMENT_ENTRY_3((&sfragment),
+ (&fragment_table[i]));
+ memcpy((char *) &fragment_table[i], (char *) &sfragment,
+ sizeof(squashfs_fragment_entry_3));
+ }
+ }
+
+ *table_start = fragment_table_index[0];
+ return TRUE;
+}
+
+
+static void read_fragment(unsigned int fragment, long long *start_block, int *size)
+{
+ TRACE("read_fragment: reading fragment %d\n", fragment);
+
+ squashfs_fragment_entry_3 *fragment_entry = &fragment_table[fragment];
+ *start_block = fragment_entry->start_block;
+ *size = fragment_entry->size;
+}
+
+
+static struct inode *read_inode(unsigned int start_block, unsigned int offset)
+{
+ static union squashfs_inode_header_3 header;
+ long long start = sBlk.s.inode_table_start + start_block;
+ long long st = start;
+ unsigned int off = offset;
+ static struct inode i;
+ int res;
+
+ TRACE("read_inode: reading inode [%d:%d]\n", start_block, offset);
+
+ if(swap) {
+ squashfs_base_inode_header_3 sinode;
+ res = read_inode_data(&sinode, &st, &off, sizeof(sinode));
+ if(res)
+ SQUASHFS_SWAP_BASE_INODE_HEADER_3(&header.base, &sinode,
+ sizeof(squashfs_base_inode_header_3));
+ } else
+ res = read_inode_data(&header.base, &st, &off, sizeof(header.base));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read inode %lld:%d\n", st, off);
+
+ i.xattr = SQUASHFS_INVALID_XATTR;
+
+ if(header.base.uid >= sBlk.no_uids)
+ EXIT_UNSQUASH("File system corrupted - uid index in inode too large (uid: %u)\n", header.base.uid);
+
+ i.uid = (uid_t) uid_table[header.base.uid];
+
+ if(header.base.guid == SQUASHFS_GUIDS)
+ i.gid = i.uid;
+ else if(header.base.guid >= sBlk.no_guids)
+ EXIT_UNSQUASH("File system corrupted - gid index in inode too large (gid: %u)\n", header.base.guid);
+ else
+ i.gid = (uid_t) guid_table[header.base.guid];
+
+ if(header.base.inode_type < 1 || header.base.inode_type > 9)
+ EXIT_UNSQUASH("File system corrupted - invalid type in inode (type: %u)\n", header.base.inode_type);
+
+ if(header.base.inode_number > sBlk.s.inodes)
+ EXIT_UNSQUASH("File system corrupted - inode number in inode too large (inode_number: %u)\n", header.base.inode_number);
+
+ if(header.base.inode_number == 0)
+		EXIT_UNSQUASH("File system corrupted - inode number zero is invalid\n");
+
+ i.mode = lookup_type[header.base.inode_type] | header.base.mode;
+ i.type = header.base.inode_type;
+ if(time_opt)
+ i.time = timeval;
+ else
+ i.time = header.base.mtime;
+ i.inode_number = header.base.inode_number;
+
+ switch(header.base.inode_type) {
+ case SQUASHFS_DIR_TYPE: {
+ squashfs_dir_inode_header_3 *inode = &header.dir;
+
+ if(swap) {
+ squashfs_dir_inode_header_3 sinode;
+ res = read_inode_data(&sinode, &start, &offset, sizeof(sinode));
+ if(res)
+ SQUASHFS_SWAP_DIR_INODE_HEADER_3(&header.dir,
+ &sinode);
+ } else
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ i.data = inode->file_size;
+ i.offset = inode->offset;
+ i.start = inode->start_block;
+ break;
+ }
+ case SQUASHFS_LDIR_TYPE: {
+ squashfs_ldir_inode_header_3 *inode = &header.ldir;
+
+ if(swap) {
+ squashfs_ldir_inode_header_3 sinode;
+ res = read_inode_data(&sinode, &start, &offset, sizeof(sinode));
+ if(res)
+ SQUASHFS_SWAP_LDIR_INODE_HEADER_3(&header.ldir,
+ &sinode);
+ } else
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ i.data = inode->file_size;
+ i.offset = inode->offset;
+ i.start = inode->start_block;
+ break;
+ }
+ case SQUASHFS_FILE_TYPE: {
+ squashfs_reg_inode_header_3 *inode = &header.reg;
+
+ if(swap) {
+ squashfs_reg_inode_header_3 sinode;
+ res = read_inode_data(&sinode, &start, &offset, sizeof(sinode));
+ if(res)
+ SQUASHFS_SWAP_REG_INODE_HEADER_3(inode,
+ &sinode);
+ } else
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ i.data = inode->file_size;
+ i.frag_bytes = inode->fragment == SQUASHFS_INVALID_FRAG
+ ? 0 : inode->file_size % sBlk.s.block_size;
+ i.fragment = inode->fragment;
+ i.offset = inode->offset;
+ i.blocks = inode->fragment == SQUASHFS_INVALID_FRAG ?
+ (i.data + sBlk.s.block_size - 1) >>
+ sBlk.s.block_log :
+ i.data >> sBlk.s.block_log;
+ i.start = inode->start_block;
+ i.block_start = start;
+ i.block_offset = offset;
+ i.sparse = 1;
+ break;
+ }
+ case SQUASHFS_LREG_TYPE: {
+ squashfs_lreg_inode_header_3 *inode = &header.lreg;
+
+ if(swap) {
+ squashfs_lreg_inode_header_3 sinode;
+ res = read_inode_data(&sinode, &start, &offset, sizeof(sinode));
+ if(res)
+ SQUASHFS_SWAP_LREG_INODE_HEADER_3(inode,
+ &sinode);
+ } else
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ i.data = inode->file_size;
+ i.frag_bytes = inode->fragment == SQUASHFS_INVALID_FRAG
+ ? 0 : inode->file_size % sBlk.s.block_size;
+ i.fragment = inode->fragment;
+ i.offset = inode->offset;
+ i.blocks = inode->fragment == SQUASHFS_INVALID_FRAG ?
+ (inode->file_size + sBlk.s.block_size - 1) >>
+ sBlk.s.block_log :
+ inode->file_size >> sBlk.s.block_log;
+ i.start = inode->start_block;
+ i.block_start = start;
+ i.block_offset = offset;
+ i.sparse = 1;
+ break;
+ }
+ case SQUASHFS_SYMLINK_TYPE: {
+ squashfs_symlink_inode_header_3 *inodep =
+ &header.symlink;
+
+ if(swap) {
+ squashfs_symlink_inode_header_3 sinodep;
+ res = read_inode_data(&sinodep, &start, &offset, sizeof(sinodep));
+ if(res)
+ SQUASHFS_SWAP_SYMLINK_INODE_HEADER_3(inodep,
+ &sinodep);
+ } else
+ res = read_inode_data(inodep, &start, &offset, sizeof(*inodep));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ i.symlink = malloc(inodep->symlink_size + 1);
+ if(i.symlink == NULL)
+ MEM_ERROR();
+
+ res = read_inode_data(i.symlink, &start, &offset, inodep->symlink_size);
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode symbolic link %lld:%d\n", start, offset);
+
+ i.symlink[inodep->symlink_size] = '\0';
+ i.data = inodep->symlink_size;
+ break;
+ }
+ case SQUASHFS_BLKDEV_TYPE:
+ case SQUASHFS_CHRDEV_TYPE: {
+ squashfs_dev_inode_header_3 *inodep = &header.dev;
+
+ if(swap) {
+ squashfs_dev_inode_header_3 sinodep;
+ res = read_inode_data(&sinodep, &start, &offset, sizeof(sinodep));
+ if(res)
+ SQUASHFS_SWAP_DEV_INODE_HEADER_3(inodep,
+ &sinodep);
+ } else
+ res = read_inode_data(inodep, &start, &offset, sizeof(*inodep));
+
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ i.data = inodep->rdev;
+ break;
+ }
+ case SQUASHFS_FIFO_TYPE:
+ case SQUASHFS_SOCKET_TYPE:
+ i.data = 0;
+ break;
+ default:
+ EXIT_UNSQUASH("Unknown inode type %d in read_inode!\n",
+ header.base.inode_type);
+ }
+ return &i;
+}
+
+
+static struct dir *squashfs_opendir(unsigned int block_start, unsigned int offset,
+ struct inode **i)
+{
+ squashfs_dir_header_3 dirh;
+ char buffer[sizeof(squashfs_dir_entry_3) + SQUASHFS_NAME_LEN + 1]
+ __attribute__((aligned));
+ squashfs_dir_entry_3 *dire = (squashfs_dir_entry_3 *) buffer;
+ long long start;
+ int bytes = 0;
+ int dir_count, size, res;
+ struct dir_ent *ent, *cur_ent = NULL;
+ struct dir *dir;
+
+ TRACE("squashfs_opendir: inode start block %d, offset %d\n",
+ block_start, offset);
+
+ *i = read_inode(block_start, offset);
+
+ dir = malloc(sizeof(struct dir));
+ if(dir == NULL)
+ MEM_ERROR();
+
+ dir->dir_count = 0;
+ dir->cur_entry = NULL;
+ dir->mode = (*i)->mode;
+ dir->uid = (*i)->uid;
+ dir->guid = (*i)->gid;
+ dir->mtime = (*i)->time;
+ dir->xattr = (*i)->xattr;
+ dir->dirs = NULL;
+
+ if ((*i)->data == 3)
+ /*
+		 * If the directory is empty, skip the unnecessary
+		 * lookup_entry; this fixes the corner case with
+		 * completely empty filesystems, where lookup_entry correctly
+		 * returning -1 is incorrectly treated as an error.
+ */
+ return dir;
+
+ start = sBlk.s.directory_table_start + (*i)->start;
+ offset = (*i)->offset;
+ size = (*i)->data + bytes - 3;
+
+ while(bytes < size) {
+ if(swap) {
+ squashfs_dir_header_3 sdirh;
+ res = read_directory_data(&sdirh, &start, &offset, sizeof(sdirh));
+ if(res)
+ SQUASHFS_SWAP_DIR_HEADER_3(&dirh, &sdirh);
+ } else
+ res = read_directory_data(&dirh, &start, &offset, sizeof(dirh));
+
+ if(res == FALSE)
+ goto corrupted;
+
+ dir_count = dirh.count + 1;
+ TRACE("squashfs_opendir: Read directory header @ byte position "
+ "%d, %d directory entries\n", bytes, dir_count);
+ bytes += sizeof(dirh);
+
+ /* dir_count should never be larger than SQUASHFS_DIR_COUNT */
+ if(dir_count > SQUASHFS_DIR_COUNT) {
+ ERROR("File system corrupted: too many entries in directory\n");
+ goto corrupted;
+ }
+
+ while(dir_count--) {
+ if(swap) {
+ squashfs_dir_entry_3 sdire;
+ res = read_directory_data(&sdire, &start,
+ &offset, sizeof(sdire));
+ if(res)
+ SQUASHFS_SWAP_DIR_ENTRY_3(dire, &sdire);
+ } else
+ res = read_directory_data(dire, &start,
+ &offset, sizeof(*dire));
+
+ if(res == FALSE)
+ goto corrupted;
+
+ bytes += sizeof(*dire);
+
+ /* size should never be SQUASHFS_NAME_LEN or larger */
+ if(dire->size >= SQUASHFS_NAME_LEN) {
+ ERROR("File system corrupted: filename too long\n");
+ goto corrupted;
+ }
+
+ res = read_directory_data(dire->name, &start, &offset,
+ dire->size + 1);
+
+ if(res == FALSE)
+ goto corrupted;
+
+ dire->name[dire->size + 1] = '\0';
+
+ /* check name for invalid characters (i.e /, ., ..) */
+ if(check_name(dire->name, dire->size + 1) == FALSE) {
+ ERROR("File system corrupted: invalid characters in name\n");
+ goto corrupted;
+ }
+
+ TRACE("squashfs_opendir: directory entry %s, inode "
+ "%d:%d, type %d\n", dire->name,
+ dirh.start_block, dire->offset, dire->type);
+
+ ent = malloc(sizeof(struct dir_ent));
+ if(ent == NULL)
+ MEM_ERROR();
+
+ ent->name = strdup(dire->name);
+ ent->start_block = dirh.start_block;
+ ent->offset = dire->offset;
+ ent->type = dire->type;
+ ent->next = NULL;
+ if(cur_ent == NULL)
+ dir->dirs = ent;
+ else
+ cur_ent->next = ent;
+ cur_ent = ent;
+ dir->dir_count ++;
+ bytes += dire->size + 1;
+ }
+ }
+
+ /* check directory for duplicate names and sorting */
+ if(check_directory(dir) == FALSE) {
+ ERROR("File system corrupted: directory has duplicate names or is unsorted\n");
+ goto corrupted;
+ }
+
+ return dir;
+
+corrupted:
+ squashfs_closedir(dir);
+ return NULL;
+}
+
+
+static int parse_exports_table(long long *table_start)
+{
+ /*
+ * Note on overflow limits:
+ * Size of SBlk.s.inodes is 2^32 (unsigned int)
+ * Max indexes is (2^32*8)/8K or 2^22
+ * Max length is ((2^32*8)/8K)*8 or 2^25
+ */
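+	/*
+	 * Worked example (sketch, using the sizes implied above): 2^20
+	 * inodes give an 8 Mbyte lookup table spanning 1024 8K metadata
+	 * blocks, so the index read below is 1024 * 8 = 8 Kbytes.
+	 */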
+ int res;
+ int indexes = SQUASHFS_LOOKUP_BLOCKS_3((long long) sBlk.s.inodes);
+ int length = SQUASHFS_LOOKUP_BLOCK_BYTES_3((long long) sBlk.s.inodes);
+ long long *export_index_table;
+
+ /*
+ * The size of the index table (length bytes) should match the
+ * table start and end points
+ */
+ if(length != (*table_start - sBlk.s.lookup_table_start)) {
+ ERROR("parse_exports_table: Bad inode count in super block\n");
+ return FALSE;
+ }
+
+ export_index_table = alloc_index_table(indexes);
+
+ if(swap) {
+ long long *sexport_index_table = salloc_index_table(indexes);
+
+ res = read_fs_bytes(fd, sBlk.s.lookup_table_start,
+ length, sexport_index_table);
+ if(res == FALSE) {
+			ERROR("parse_exports_table: failed to read export "
+ "index table\n");
+ return FALSE;
+ }
+ SQUASHFS_SWAP_LOOKUP_BLOCKS_3(export_index_table,
+ sexport_index_table, indexes);
+ } else {
+ res = read_fs_bytes(fd, sBlk.s.lookup_table_start, length,
+ export_index_table);
+ if(res == FALSE) {
+			ERROR("parse_exports_table: failed to read export "
+ "index table\n");
+ return FALSE;
+ }
+ }
+
+ /*
+ * export_index_table[0] stores the start of the compressed export blocks.
+ * This by definition is also the end of the previous filesystem
+ * table - the fragment table.
+ */
+ *table_start = export_index_table[0];
+
+ return TRUE;
+}
+
+
+static int read_filesystem_tables()
+{
+ long long table_start;
+
+ /* Read uid and gid lookup tables */
+
+ /* Sanity check super block contents */
+ if(sBlk.no_guids) {
+ if(sBlk.guid_start >= sBlk.s.bytes_used) {
+ ERROR("read_filesystem_tables: gid start too large in super block\n");
+ goto corrupted;
+ }
+
+ if(read_ids(sBlk.no_guids, sBlk.guid_start, sBlk.s.bytes_used, &guid_table) == FALSE)
+ goto corrupted;
+
+ table_start = sBlk.guid_start;
+ } else {
+ /* no guids, guid_start should be 0 */
+ if(sBlk.guid_start != 0) {
+			ERROR("read_filesystem_tables: gid start invalid in super block\n");
+ goto corrupted;
+ }
+
+ table_start = sBlk.s.bytes_used;
+ }
+
+ if(sBlk.uid_start >= table_start) {
+ ERROR("read_filesystem_tables: uid start too large in super block\n");
+ goto corrupted;
+ }
+
+ /* There should be at least one uid */
+ if(sBlk.no_uids == 0) {
+ ERROR("read_filesystem_tables: uid count bad in super block\n");
+ goto corrupted;
+ }
+
+ if(read_ids(sBlk.no_uids, sBlk.uid_start, table_start, &uid_table) == FALSE)
+ goto corrupted;
+
+ table_start = sBlk.uid_start;
+
+ /* Read exports table */
+ if(sBlk.s.lookup_table_start != SQUASHFS_INVALID_BLK) {
+
+ /* sanity check super block contents */
+ if(sBlk.s.lookup_table_start >= table_start) {
+ ERROR("read_filesystem_tables: lookup table start too large in super block\n");
+ goto corrupted;
+ }
+
+ if(parse_exports_table(&table_start) == FALSE)
+ goto corrupted;
+ }
+
+ /* Read fragment table */
+ if(sBlk.s.fragments != 0) {
+
+ /* Sanity check super block contents */
+ if(sBlk.s.fragment_table_start >= table_start) {
+ ERROR("read_filesystem_tables: fragment table start too large in super block\n");
+ goto corrupted;
+ }
+
+ /* The number of fragments should not exceed the number of inodes */
+ if(sBlk.s.fragments > sBlk.s.inodes) {
+ ERROR("read_filesystem_tables: Bad fragment count in super block\n");
+ goto corrupted;
+ }
+
+ if(read_fragment_table(&table_start) == FALSE)
+ goto corrupted;
+ } else {
+ /*
+ * Sanity check super block contents - with 0 fragments,
+ * the fragment table should be empty
+ */
+ if(sBlk.s.fragment_table_start != table_start) {
+ ERROR("read_filesystem_tables: fragment table start invalid in super block\n");
+ goto corrupted;
+ }
+ }
+
+ /* Sanity check super block directory table values */
+ if(sBlk.s.directory_table_start > table_start) {
+ ERROR("read_filesystem_tables: directory table start too large in super block\n");
+ goto corrupted;
+ }
+
+ /* Sanity check super block inode table values */
+ if(sBlk.s.inode_table_start >= sBlk.s.directory_table_start) {
+ ERROR("read_filesystem_tables: inode table start too large in super block\n");
+ goto corrupted;
+ }
+
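+	/*
+	 * A zero-length call discards the static index tables used above,
+	 * which are no longer needed once the filesystem tables have been
+	 * read.
+	 */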
+ alloc_index_table(0);
+ salloc_index_table(0);
+
+ return TRUE;
+
+corrupted:
+ alloc_index_table(0);
+ salloc_index_table(0);
+
+ return FALSE;
+}
+
+
+int read_super_3(char *source, squashfs_operations **s_ops, void *s)
+{
+ squashfs_super_block_3 *sBlk_3 = s;
+
+ /*
+ * Try to read a squashfs 3 superblock (compatible with 1 and 2 filesystems)
+ */
+ int res = read_fs_bytes(fd, SQUASHFS_START, sizeof(*sBlk_3), sBlk_3);
+
+ if(res == FALSE)
+ return res;
+ /*
+ * Check it is a SQUASHFS superblock
+ */
+ swap = 0;
+ if(sBlk_3->s_magic == SQUASHFS_MAGIC_SWAP) {
+ squashfs_super_block_3 sblk;
+ ERROR("Reading a different endian SQUASHFS filesystem on %s\n", source);
+ SQUASHFS_SWAP_SUPER_BLOCK_3(&sblk, sBlk_3);
+ memcpy(sBlk_3, &sblk, sizeof(squashfs_super_block_3));
+ swap = 1;
+ }
+
+ if(sBlk_3->s_magic != SQUASHFS_MAGIC || sBlk_3->s_major != 3 ||
+ sBlk_3->s_minor > 1)
+ return -1;
+
+ sBlk.s.s_magic = sBlk_3->s_magic;
+ sBlk.s.inodes = sBlk_3->inodes;
+ sBlk.s.mkfs_time = sBlk_3->mkfs_time;
+ sBlk.s.block_size = sBlk_3->block_size;
+ sBlk.s.fragments = sBlk_3->fragments;
+ sBlk.s.block_log = sBlk_3->block_log;
+ sBlk.s.flags = sBlk_3->flags;
+ sBlk.s.s_major = sBlk_3->s_major;
+ sBlk.s.s_minor = sBlk_3->s_minor;
+ sBlk.s.root_inode = sBlk_3->root_inode;
+ sBlk.s.bytes_used = sBlk_3->bytes_used;
+ sBlk.s.inode_table_start = sBlk_3->inode_table_start;
+ sBlk.s.directory_table_start = sBlk_3->directory_table_start;
+ sBlk.s.fragment_table_start = sBlk_3->fragment_table_start;
+ sBlk.s.lookup_table_start = sBlk_3->lookup_table_start;
+ sBlk.no_uids = sBlk_3->no_uids;
+ sBlk.no_guids = sBlk_3->no_guids;
+ sBlk.uid_start = sBlk_3->uid_start;
+ sBlk.guid_start = sBlk_3->guid_start;
+ sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK;
+
+ *s_ops = &ops;
+
+ /*
+ * 3.x filesystems use gzip compression.
+ */
+ comp = lookup_compressor("gzip");
+ return TRUE;
+}
+
+
+static void squashfs_stat(char *source)
+{
+ time_t mkfs_time = (time_t) sBlk.s.mkfs_time;
+ struct tm *t = use_localtime ? localtime(&mkfs_time) :
+ gmtime(&mkfs_time);
+ char *mkfs_str = asctime(t);
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+ printf("Found a valid %sSQUASHFS %d:%d superblock on %s.\n",
+ swap ? "little endian " : "big endian ", sBlk.s.s_major,
+ sBlk.s.s_minor, source);
+#else
+ printf("Found a valid %sSQUASHFS %d:%d superblock on %s.\n",
+ swap ? "big endian " : "little endian ", sBlk.s.s_major,
+ sBlk.s.s_minor, source);
+#endif
+
+ printf("Creation or last append time %s", mkfs_str ? mkfs_str :
+ "failed to get time\n");
+ printf("Filesystem size %llu bytes (%.2f Kbytes / %.2f Mbytes)\n",
+ sBlk.s.bytes_used, sBlk.s.bytes_used / 1024.0,
+ sBlk.s.bytes_used / (1024.0 * 1024.0));
+ printf("Block size %d\n", sBlk.s.block_size);
+ printf("Filesystem is %sexportable via NFS\n",
+ SQUASHFS_EXPORTABLE(sBlk.s.flags) ? "" : "not ");
+ printf("Inodes are %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_INODES(sBlk.s.flags) ? "un" : "");
+ printf("Data is %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_DATA(sBlk.s.flags) ? "un" : "");
+
+ if(SQUASHFS_NO_FRAGMENTS(sBlk.s.flags))
+ printf("Fragments are not stored\n");
+ else {
+ printf("Fragments are %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_FRAGMENTS(sBlk.s.flags) ? "un" : "");
+ printf("Always-use-fragments option is %sspecified\n",
+ SQUASHFS_ALWAYS_FRAGMENTS(sBlk.s.flags) ? "" : "not ");
+ }
+
+ printf("Check data is %spresent in the filesystem\n",
+ SQUASHFS_CHECK_DATA(sBlk.s.flags) ? "" : "not ");
+ printf("Duplicates are %sremoved\n", SQUASHFS_DUPLICATES(sBlk.s.flags)
+ ? "" : "not ");
+ printf("Number of fragments %d\n", sBlk.s.fragments);
+ printf("Number of inodes %d\n", sBlk.s.inodes);
+ printf("Number of uids %d\n", sBlk.no_uids);
+ printf("Number of gids %d\n", sBlk.no_guids);
+
+ TRACE("sBlk.s.inode_table_start 0x%llx\n", sBlk.s.inode_table_start);
+ TRACE("sBlk.s.directory_table_start 0x%llx\n", sBlk.s.directory_table_start);
+ TRACE("sBlk.s.fragment_table_start 0x%llx\n\n", sBlk.s.fragment_table_start);
+ TRACE("sBlk.s.lookup_table_start 0x%llx\n\n", sBlk.s.lookup_table_start);
+ TRACE("sBlk.uid_start 0x%llx\n", sBlk.uid_start);
+ TRACE("sBlk.guid_start 0x%llx\n", sBlk.guid_start);
+}
+
+
+static squashfs_operations ops = {
+ .opendir = squashfs_opendir,
+ .read_fragment = read_fragment,
+ .read_block_list = read_block_list,
+ .read_inode = read_inode,
+ .read_filesystem_tables = read_filesystem_tables,
+ .stat = squashfs_stat
+};
diff --git a/squashfs-tools/unsquash-34.c b/squashfs-tools/unsquash-34.c
new file mode 100644
index 0000000..59d28f1
--- /dev/null
+++ b/squashfs-tools/unsquash-34.c
@@ -0,0 +1,183 @@
+/*
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2019, 2022, 2023
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * unsquash-34.c
+ *
+ * Helper functions used by unsquash-3 and unsquash-4.
+ */
+
+#include "unsquashfs.h"
+
+static unsigned int **inumber_table = NULL;
+static char ***lookup_table = NULL;
+
+long long *alloc_index_table(int indexes)
+{
+ static long long *alloc_table = NULL;
+ static int alloc_size = 0;
+ int length = indexes * sizeof(long long);
+
+ if(alloc_size < length || length == 0) {
+ long long *table = realloc(alloc_table, length);
+
+		if(table == NULL && length != 0)
+ MEM_ERROR();
+
+ alloc_table = table;
+ alloc_size = length;
+ }
+
+ return alloc_table;
+}
+
+
+/* These functions implement a bit-table to track whether directories have been
+ * already visited. This is to trap corrupted filesystems which have multiple
+ * links to the same directory, which is invalid, and which may also create
+ * a directory loop, where Unsquashfs will endlessly recurse until either
+ * the pathname is too large (extracting), or the stack overflows.
+ *
+ * Each index entry is 8 Kbytes, and tracks 65536 inode numbers. The index is
+ * allocated on demand because Unsquashfs may not walk the complete filesystem.
+ */
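+/*
+ * For example (illustrative): inode number 70000 falls in the second index
+ * entry, which tracks inode numbers 65537 to 131072.  The first call to
+ * inumber_lookup(70000) allocates that 8 Kbyte entry, sets the bit and
+ * returns FALSE; any later call for the same inode number finds the bit
+ * set and returns TRUE.
+ */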
+static void create_inumber_table()
+{
+ int indexes = INUMBER_INDEXES(sBlk.s.inodes);
+
+ inumber_table = malloc(indexes * sizeof(unsigned int *));
+ if(inumber_table == NULL)
+ MEM_ERROR();
+ memset(inumber_table, 0, indexes * sizeof(unsigned int *));
+}
+
+
+int inumber_lookup(unsigned int number)
+{
+ int index = INUMBER_INDEX(number - 1);
+ int offset = INUMBER_OFFSET(number - 1);
+ int bit = INUMBER_BIT(number - 1);
+
+ if(inumber_table == NULL)
+ create_inumber_table();
+
+ /* Lookup number in the bit table */
+ if(inumber_table[index] && (inumber_table[index][offset] & bit))
+ return TRUE;
+
+ if(inumber_table[index] == NULL) {
+ inumber_table[index] = malloc(INUMBER_BYTES);
+ if(inumber_table[index] == NULL)
+ MEM_ERROR();
+ memset(inumber_table[index], 0, INUMBER_BYTES);
+ }
+
+ inumber_table[index][offset] |= bit;
+ return FALSE;
+}
+
+
+void free_inumber_table()
+{
+ int i, indexes = INUMBER_INDEXES(sBlk.s.inodes);
+
+ if(inumber_table) {
+ for(i = 0; i < indexes; i++)
+ if(inumber_table[i])
+ free(inumber_table[i]);
+ free(inumber_table);
+ inumber_table = NULL;
+ }
+}
+
+
+/* These functions implement a lookup table to track creation of (non-directory)
+ * inodes, and to discover if a hard-link to a previously created file should
+ * be made.
+ *
+ * Each index entry is 32 Kbytes, and tracks 4096 inode numbers. The index is
+ * allocated on demand because Unsquashfs may not walk the complete filesystem.
+ */
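+/*
+ * Typical use (sketch): the first time an inode number is seen, the caller
+ * records the extracted pathname with insert_lookup(); when a later
+ * directory entry carries the same inode number, lookup() returns that
+ * pathname and the new entry can be created as a hard link to it.
+ */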
+static void create_lookup_table()
+{
+ int indexes = LOOKUP_INDEXES(sBlk.s.inodes);
+
+ lookup_table = malloc(indexes * sizeof(char *));
+ if(lookup_table == NULL)
+ MEM_ERROR();
+ memset(lookup_table, 0, indexes * sizeof(char *));
+}
+
+
+char *lookup(unsigned int number)
+{
+ int index = LOOKUP_INDEX(number - 1);
+ int offset = LOOKUP_OFFSET(number - 1);
+
+ if(lookup_table == NULL)
+ create_lookup_table();
+
+ /* Lookup number in table */
+ if(lookup_table[index] == NULL)
+ return NULL;
+
+ return lookup_table[index][offset];
+}
+
+
+void insert_lookup(unsigned int number, char *pathname)
+{
+ int index = LOOKUP_INDEX(number - 1);
+ int offset = LOOKUP_OFFSET(number - 1);
+
+ if(lookup_table == NULL)
+ create_lookup_table();
+
+ if(lookup_table[index] == NULL) {
+ lookup_table[index] = malloc(LOOKUP_BYTES);
+ if(lookup_table[index] == NULL)
+ MEM_ERROR();
+ memset(lookup_table[index], 0, LOOKUP_BYTES);
+ }
+
+ lookup_table[index][offset] = pathname;
+}
+
+
+void free_lookup_table(int free_pathname)
+{
+ int i, indexes = LOOKUP_INDEXES(sBlk.s.inodes);
+
+ if(lookup_table) {
+ for(i = 0; i < indexes; i++)
+ if(lookup_table[i]) {
+ if(free_pathname) {
+ int j;
+
+ for(j = 0; j < LOOKUP_OFFSETS; j++)
+ if(lookup_table[i][j])
+ free(lookup_table[i][j]);
+ }
+ free(lookup_table[i]);
+ }
+ free(lookup_table);
+ lookup_table = NULL;
+ }
+}
diff --git a/squashfs-tools/unsquash-4.c b/squashfs-tools/unsquash-4.c
new file mode 100644
index 0000000..d0f6920
--- /dev/null
+++ b/squashfs-tools/unsquash-4.c
@@ -0,0 +1,832 @@
+/*
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2019, 2021, 2022, 2023
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * unsquash-4.c
+ */
+
+#include "unsquashfs.h"
+#include "squashfs_swap.h"
+#include "xattr.h"
+#include "compressor.h"
+
+static struct squashfs_fragment_entry *fragment_table;
+static unsigned int *id_table;
+static squashfs_operations ops;
+
+static void read_block_list(unsigned int *block_list, long long start,
+ unsigned int offset, int blocks)
+{
+ int res;
+
+ TRACE("read_block_list: blocks %d\n", blocks);
+
+ res = read_inode_data(block_list, &start, &offset, blocks * sizeof(unsigned int));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_block_list: failed to read "
+ "inode index %lld:%d\n", start, offset);
+
+ SQUASHFS_INSWAP_INTS(block_list, blocks);
+}
+
+
+static int read_fragment_table(long long *table_start)
+{
+ /*
+ * Note on overflow limits:
+ * Size of SBlk.s.fragments is 2^32 (unsigned int)
+ * Max size of bytes is 2^32*16 or 2^36
+ * Max indexes is (2^32*16)/8K or 2^23
+ * Max length is ((2^32*16)/8K)*8 or 2^26 or 64M
+ */
+ int res;
+ unsigned int i;
+ long long bytes = SQUASHFS_FRAGMENT_BYTES((long long) sBlk.s.fragments);
+ int indexes = SQUASHFS_FRAGMENT_INDEXES((long long) sBlk.s.fragments);
+ int length = SQUASHFS_FRAGMENT_INDEX_BYTES((long long) sBlk.s.fragments);
+ long long *fragment_table_index;
+
+ /*
+ * The size of the index table (length bytes) should match the
+ * table start and end points
+ */
+ if(length != (*table_start - sBlk.s.fragment_table_start)) {
+ ERROR("read_fragment_table: Bad fragment count in super block\n");
+ return FALSE;
+ }
+
+ TRACE("read_fragment_table: %u fragments, reading %d fragment indexes "
+ "from 0x%llx\n", sBlk.s.fragments, indexes,
+ sBlk.s.fragment_table_start);
+
+ fragment_table_index = alloc_index_table(indexes);
+ fragment_table = malloc(bytes);
+ if(fragment_table == NULL)
+ MEM_ERROR();
+
+ res = read_fs_bytes(fd, sBlk.s.fragment_table_start, length,
+ fragment_table_index);
+ if(res == FALSE) {
+ ERROR("read_fragment_table: failed to read fragment table "
+ "index\n");
+ return FALSE;
+ }
+ SQUASHFS_INSWAP_FRAGMENT_INDEXES(fragment_table_index, indexes);
+
+ for(i = 0; i < indexes; i++) {
+ int expected = (i + 1) != indexes ? SQUASHFS_METADATA_SIZE :
+ bytes & (SQUASHFS_METADATA_SIZE - 1);
+ int length = read_block(fd, fragment_table_index[i], NULL,
+ expected, ((char *) fragment_table) + (i *
+ SQUASHFS_METADATA_SIZE));
+ TRACE("Read fragment table block %d, from 0x%llx, length %d\n",
+ i, fragment_table_index[i], length);
+ if(length == FALSE) {
+ ERROR("read_fragment_table: failed to read fragment "
+ "table index\n");
+ return FALSE;
+ }
+ }
+
+ for(i = 0; i < sBlk.s.fragments; i++)
+ SQUASHFS_INSWAP_FRAGMENT_ENTRY(&fragment_table[i]);
+
+ *table_start = fragment_table_index[0];
+ return TRUE;
+}
+
+
+static void read_fragment(unsigned int fragment, long long *start_block, int *size)
+{
+ TRACE("read_fragment: reading fragment %d\n", fragment);
+
+ struct squashfs_fragment_entry *fragment_entry;
+
+ if(fragment >= sBlk.s.fragments)
+ EXIT_UNSQUASH("File system corrupted - fragment index in inode too large (fragment: %u)\n", fragment);
+
+ fragment_entry = &fragment_table[fragment];
+ *start_block = fragment_entry->start_block;
+ *size = fragment_entry->size;
+}
+
+
+static struct inode *read_inode(unsigned int start_block, unsigned int offset)
+{
+ static union squashfs_inode_header header;
+ long long start = sBlk.s.inode_table_start + start_block;
+ long long st = start;
+ unsigned int off = offset;
+ static struct inode i;
+ int res;
+
+ TRACE("read_inode: reading inode [%d:%d]\n", start_block, offset);
+
+ res = read_inode_data(&header.base, &st, &off, sizeof(header.base));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read inode %lld:%d\n", st, off);
+
+ SQUASHFS_INSWAP_BASE_INODE_HEADER(&header.base);
+
+ if(header.base.uid >= sBlk.s.no_ids)
+ EXIT_UNSQUASH("File system corrupted - uid index in inode too large (uid: %u)\n", header.base.uid);
+
+ if(header.base.guid >= sBlk.s.no_ids)
+ EXIT_UNSQUASH("File system corrupted - gid index in inode too large (gid: %u)\n", header.base.guid);
+
+ if(header.base.inode_type < 1 || header.base.inode_type > 14)
+ EXIT_UNSQUASH("File system corrupted - invalid type in inode (type: %u)\n", header.base.inode_type);
+
+ if(header.base.inode_number > sBlk.s.inodes)
+ EXIT_UNSQUASH("File system corrupted - inode number in inode too large (inode_number: %u)\n", header.base.inode_number);
+
+ if(header.base.inode_number == 0)
+		EXIT_UNSQUASH("File system corrupted - inode number zero is invalid\n");
+
+ i.uid = (uid_t) id_table[header.base.uid];
+ i.gid = (uid_t) id_table[header.base.guid];
+ i.mode = lookup_type[header.base.inode_type] | header.base.mode;
+ i.type = header.base.inode_type;
+ if(time_opt)
+ i.time = timeval;
+ else
+ i.time = header.base.mtime;
+ i.inode_number = header.base.inode_number;
+
+ switch(header.base.inode_type) {
+ case SQUASHFS_DIR_TYPE: {
+ struct squashfs_dir_inode_header *inode = &header.dir;
+
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ SQUASHFS_INSWAP_DIR_INODE_HEADER(inode);
+
+ i.data = inode->file_size;
+ i.offset = inode->offset;
+ i.start = inode->start_block;
+ i.xattr = SQUASHFS_INVALID_XATTR;
+ break;
+ }
+ case SQUASHFS_LDIR_TYPE: {
+ struct squashfs_ldir_inode_header *inode = &header.ldir;
+
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ SQUASHFS_INSWAP_LDIR_INODE_HEADER(inode);
+
+ i.data = inode->file_size;
+ i.offset = inode->offset;
+ i.start = inode->start_block;
+ i.xattr = inode->xattr;
+ break;
+ }
+ case SQUASHFS_FILE_TYPE: {
+ struct squashfs_reg_inode_header *inode = &header.reg;
+
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ SQUASHFS_INSWAP_REG_INODE_HEADER(inode);
+
+ i.data = inode->file_size;
+ i.frag_bytes = inode->fragment == SQUASHFS_INVALID_FRAG
+ ? 0 : inode->file_size % sBlk.s.block_size;
+ i.fragment = inode->fragment;
+ i.offset = inode->offset;
+ i.blocks = inode->fragment == SQUASHFS_INVALID_FRAG ?
+ (i.data + sBlk.s.block_size - 1) >>
+ sBlk.s.block_log :
+ i.data >> sBlk.s.block_log;
+ i.start = inode->start_block;
+ i.block_start = start;
+ i.block_offset = offset;
+ i.sparse = 0;
+ i.xattr = SQUASHFS_INVALID_XATTR;
+ break;
+ }
+ case SQUASHFS_LREG_TYPE: {
+ struct squashfs_lreg_inode_header *inode = &header.lreg;
+
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ SQUASHFS_INSWAP_LREG_INODE_HEADER(inode);
+
+ i.data = inode->file_size;
+ i.frag_bytes = inode->fragment == SQUASHFS_INVALID_FRAG
+ ? 0 : inode->file_size % sBlk.s.block_size;
+ i.fragment = inode->fragment;
+ i.offset = inode->offset;
+ i.blocks = inode->fragment == SQUASHFS_INVALID_FRAG ?
+ (inode->file_size + sBlk.s.block_size - 1) >>
+ sBlk.s.block_log :
+ inode->file_size >> sBlk.s.block_log;
+ i.start = inode->start_block;
+ i.block_start = start;
+ i.block_offset = offset;
+ i.sparse = inode->sparse != 0;
+ i.xattr = inode->xattr;
+ break;
+ }
+ case SQUASHFS_SYMLINK_TYPE:
+ case SQUASHFS_LSYMLINK_TYPE: {
+ struct squashfs_symlink_inode_header *inode = &header.symlink;
+
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ SQUASHFS_INSWAP_SYMLINK_INODE_HEADER(inode);
+
+ if(inode->symlink_size > SQUASHFS_SYMLINK_MAX)
+ EXIT_UNSQUASH("File system corrupted - symlink_size in inode too large (symlink_size: %u)\n", inode->symlink_size);
+
+ i.symlink = malloc(inode->symlink_size + 1);
+ if(i.symlink == NULL)
+ MEM_ERROR();
+
+ res = read_inode_data(i.symlink, &start, &offset, inode->symlink_size);
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode symbolic link %lld:%d\n", start, offset);
+
+ i.symlink[inode->symlink_size] = '\0';
+ i.data = inode->symlink_size;
+
+ if(header.base.inode_type == SQUASHFS_LSYMLINK_TYPE) {
+ res = read_inode_data(&i.xattr, &start, &offset, sizeof(unsigned int));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read inode xattr %lld:%d\n", start, offset);
+ SQUASHFS_INSWAP_INTS(&i.xattr, 1);
+ } else
+ i.xattr = SQUASHFS_INVALID_XATTR;
+ break;
+ }
+ case SQUASHFS_BLKDEV_TYPE:
+ case SQUASHFS_CHRDEV_TYPE: {
+ struct squashfs_dev_inode_header *inode = &header.dev;
+
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ SQUASHFS_INSWAP_DEV_INODE_HEADER(inode);
+
+ i.data = inode->rdev;
+ i.xattr = SQUASHFS_INVALID_XATTR;
+ break;
+ }
+ case SQUASHFS_LBLKDEV_TYPE:
+ case SQUASHFS_LCHRDEV_TYPE: {
+ struct squashfs_ldev_inode_header *inode = &header.ldev;
+
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ SQUASHFS_INSWAP_LDEV_INODE_HEADER(inode);
+
+ i.data = inode->rdev;
+ i.xattr = inode->xattr;
+ break;
+ }
+ case SQUASHFS_FIFO_TYPE:
+ case SQUASHFS_SOCKET_TYPE:
+ i.data = 0;
+ i.xattr = SQUASHFS_INVALID_XATTR;
+ break;
+ case SQUASHFS_LFIFO_TYPE:
+ case SQUASHFS_LSOCKET_TYPE: {
+ struct squashfs_lipc_inode_header *inode = &header.lipc;
+
+ res = read_inode_data(inode, &start, &offset, sizeof(*inode));
+ if(res == FALSE)
+ EXIT_UNSQUASH("read_inode: failed to read "
+ "inode %lld:%d\n", start, offset);
+
+ SQUASHFS_INSWAP_LIPC_INODE_HEADER(inode);
+
+ i.data = 0;
+ i.xattr = inode->xattr;
+ break;
+ }
+ default:
+ EXIT_UNSQUASH("Unknown inode type %d in read_inode!\n",
+ header.base.inode_type);
+ }
+ return &i;
+}
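+
+/*
+ * Usage sketch (assumes the SQUASHFS_INODE_BLK()/SQUASHFS_INODE_OFFSET()
+ * macros from squashfs_fs.h): an inode reference such as sBlk.s.root_inode
+ * packs the metadata block start and the byte offset within that block,
+ * which are the two values read_inode() expects.
+ */
+static struct inode *example_read_root_inode(void)
+{
+ long long root = sBlk.s.root_inode;
+
+ return read_inode(SQUASHFS_INODE_BLK(root), SQUASHFS_INODE_OFFSET(root));
+}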
+
+
+static struct dir *squashfs_opendir(unsigned int block_start, unsigned int offset,
+ struct inode **i)
+{
+ struct squashfs_dir_header dirh;
+ char buffer[sizeof(struct squashfs_dir_entry) + SQUASHFS_NAME_LEN + 1]
+ __attribute__((aligned));
+ struct squashfs_dir_entry *dire = (struct squashfs_dir_entry *) buffer;
+ long long start;
+ int bytes = 0, dir_count, size, res;
+ struct dir_ent *ent, *cur_ent = NULL;
+ struct dir *dir;
+
+ TRACE("squashfs_opendir: inode start block %d, offset %d\n",
+ block_start, offset);
+
+ *i = read_inode(block_start, offset);
+
+ dir = malloc(sizeof(struct dir));
+ if(dir == NULL)
+ MEM_ERROR();
+
+ dir->dir_count = 0;
+ dir->cur_entry = NULL;
+ dir->mode = (*i)->mode;
+ dir->uid = (*i)->uid;
+ dir->guid = (*i)->gid;
+ dir->mtime = (*i)->time;
+ dir->xattr = (*i)->xattr;
+ dir->dirs = NULL;
+
+ if ((*i)->data == 3)
+ /*
+ * if the directory is empty, return early and skip the unnecessary
+ * lookup_entry; this fixes the corner case with completely empty
+ * filesystems, where lookup_entry correctly returning -1 was
+ * incorrectly treated as an error
+ */
+ return dir;
+
+ start = sBlk.s.directory_table_start + (*i)->start;
+ offset = (*i)->offset;
+ size = (*i)->data + bytes - 3;
+
+ while(bytes < size) {
+ res = read_directory_data(&dirh, &start, &offset, sizeof(dirh));
+ if(res == FALSE)
+ goto corrupted;
+
+ SQUASHFS_INSWAP_DIR_HEADER(&dirh);
+
+ dir_count = dirh.count + 1;
+ TRACE("squashfs_opendir: Read directory header @ byte position "
+ "%d, %d directory entries\n", bytes, dir_count);
+ bytes += sizeof(dirh);
+
+ /* dir_count should never be larger than SQUASHFS_DIR_COUNT */
+ if(dir_count > SQUASHFS_DIR_COUNT) {
+ ERROR("File system corrupted: too many entries in directory\n");
+ goto corrupted;
+ }
+
+ while(dir_count--) {
+ res = read_directory_data(dire, &start, &offset, sizeof(*dire));
+ if(res == FALSE)
+ goto corrupted;
+
+ SQUASHFS_INSWAP_DIR_ENTRY(dire);
+
+ bytes += sizeof(*dire);
+
+ /* size should never be SQUASHFS_NAME_LEN or larger */
+ if(dire->size >= SQUASHFS_NAME_LEN) {
+ ERROR("File system corrupted: filename too long\n");
+ goto corrupted;
+ }
+
+ res = read_directory_data(dire->name, &start, &offset,
+ dire->size + 1);
+ if(res == FALSE)
+ goto corrupted;
+
+ dire->name[dire->size + 1] = '\0';
+
+ /* check name for invalid characters (i.e /, ., ..) */
+ if(check_name(dire->name, dire->size + 1) == FALSE) {
+ ERROR("File system corrupted: invalid characters in name\n");
+ goto corrupted;
+ }
+
+ TRACE("squashfs_opendir: directory entry %s, inode "
+ "%d:%d, type %d\n", dire->name,
+ dirh.start_block, dire->offset, dire->type);
+
+ ent = malloc(sizeof(struct dir_ent));
+ if(ent == NULL)
+ MEM_ERROR();
+
+ ent->name = strdup(dire->name);
+ ent->start_block = dirh.start_block;
+ ent->offset = dire->offset;
+ ent->type = dire->type;
+ ent->next = NULL;
+ if(cur_ent == NULL)
+ dir->dirs = ent;
+ else
+ cur_ent->next = ent;
+ cur_ent = ent;
+ dir->dir_count ++;
+ bytes += dire->size + 1;
+ }
+ }
+
+ /* check directory for duplicate names and sorting */
+ if(check_directory(dir) == FALSE) {
+ ERROR("File system corrupted: directory has duplicate names or is unsorted\n");
+ goto corrupted;
+ }
+
+ return dir;
+
+corrupted:
+ squashfs_closedir(dir);
+ return NULL;
+}
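+
+/*
+ * Usage sketch: squashfs_opendir() pairs with squashfs_readdir() and
+ * squashfs_closedir() (assumed visible here via unsquashfs.h, both are
+ * defined in unsquashfs.c) to walk a directory by its metadata position.
+ * example_list_directory is illustrative only.
+ */
+static void example_list_directory(unsigned int block_start, unsigned int offset)
+{
+ struct inode *i;
+ struct dir *dir = squashfs_opendir(block_start, offset, &i);
+ char *name;
+ unsigned int entry_start, entry_offset, type;
+
+ if(dir == NULL)
+ return;
+
+ while(squashfs_readdir(dir, &name, &entry_start, &entry_offset, &type))
+ TRACE("entry %s at %d:%d, type %d\n", name, entry_start, entry_offset, type);
+
+ squashfs_closedir(dir);
+}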
+
+
+static int read_id_table(long long *table_start)
+{
+ /*
+ * Note on overflow limits:
+ * Size of sBlk.s.no_ids is 2^16 (unsigned short)
+ * Max size of bytes is 2^16*4 or 256K
+ * Max indexes is (2^16*4)/8K or 32
+ * Max length is ((2^16*4)/8K)*8 or 256
+ */
+ int res, i;
+ int bytes = SQUASHFS_ID_BYTES(sBlk.s.no_ids);
+ int indexes = SQUASHFS_ID_BLOCKS(sBlk.s.no_ids);
+ int length = SQUASHFS_ID_BLOCK_BYTES(sBlk.s.no_ids);
+ long long *id_index_table;
+
+ /*
+ * The size of the index table (length bytes) should match the
+ * table start and end points
+ */
+ if(length != (*table_start - sBlk.s.id_table_start)) {
+ ERROR("read_id_table: Bad id count in super block\n");
+ return FALSE;
+ }
+
+ TRACE("read_id_table: no_ids %d\n", sBlk.s.no_ids);
+
+ id_index_table = alloc_index_table(indexes);
+
+ id_table = malloc(bytes);
+ if(id_table == NULL) {
+ ERROR("read_id_table: failed to allocate id table\n");
+ return FALSE;
+ }
+
+ res = read_fs_bytes(fd, sBlk.s.id_table_start, length, id_index_table);
+ if(res == FALSE) {
+ ERROR("read_id_table: failed to read id index table\n");
+ return FALSE;
+ }
+ SQUASHFS_INSWAP_ID_BLOCKS(id_index_table, indexes);
+
+ /*
+ * id_index_table[0] stores the start of the compressed id blocks.
+ * This by definition is also the end of the previous filesystem
+ * table - this may be the exports table if it is present, or the
+ * fragments table if it isn't.
+ */
+ *table_start = id_index_table[0];
+
+ for(i = 0; i < indexes; i++) {
+ int expected = (i + 1) != indexes ? SQUASHFS_METADATA_SIZE :
+ bytes & (SQUASHFS_METADATA_SIZE - 1);
+ res = read_block(fd, id_index_table[i], NULL, expected,
+ ((char *) id_table) + i * SQUASHFS_METADATA_SIZE);
+ if(res == FALSE) {
+ ERROR("read_id_table: failed to read id table block"
+ "\n");
+ return FALSE;
+ }
+ }
+
+ SQUASHFS_INSWAP_INTS(id_table, sBlk.s.no_ids);
+
+ return TRUE;
+}
+
+
+static int parse_exports_table(long long *table_start)
+{
+ /*
+ * Note on overflow limits:
+ * Size of sBlk.s.inodes is 2^32 (unsigned int)
+ * Max indexes is (2^32*8)/8K or 2^22
+ * Max length is ((2^32*8)/8K)*8 or 2^25
+ */
+ int res;
+ int indexes = SQUASHFS_LOOKUP_BLOCKS((long long) sBlk.s.inodes);
+ int length = SQUASHFS_LOOKUP_BLOCK_BYTES((long long) sBlk.s.inodes);
+ long long *export_index_table;
+
+ /*
+ * The size of the index table (length bytes) should match the
+ * table start and end points
+ */
+ if(length != (*table_start - sBlk.s.lookup_table_start)) {
+ ERROR("parse_exports_table: Bad inode count in super block\n");
+ return FALSE;
+ }
+
+ export_index_table = alloc_index_table(indexes);
+
+ res = read_fs_bytes(fd, sBlk.s.lookup_table_start, length,
+ export_index_table);
+ if(res == FALSE) {
+ ERROR("parse_exports_table: failed to read export index table\n");
+ return FALSE;
+ }
+ SQUASHFS_INSWAP_LOOKUP_BLOCKS(export_index_table, indexes);
+
+ /*
+ * export_index_table[0] stores the start of the compressed export blocks.
+ * This by definition is also the end of the previous filesystem
+ * table - the fragment table.
+ */
+ *table_start = export_index_table[0];
+
+ return TRUE;
+}
+
+
+static int read_filesystem_tables()
+{
+ long long table_start;
+
+ /* Read xattrs */
+ if(sBlk.s.xattr_id_table_start != SQUASHFS_INVALID_BLK) {
+ /* sanity check super block contents */
+ if(sBlk.s.xattr_id_table_start >= sBlk.s.bytes_used) {
+ ERROR("read_filesystem_tables: xattr id table start too large in super block\n");
+ goto corrupted;
+ }
+
+ sBlk.xattr_ids = read_xattrs_from_disk(fd, &sBlk.s, no_xattrs, &table_start);
+ if(sBlk.xattr_ids == 0)
+ exit(1);
+ } else
+ table_start = sBlk.s.bytes_used;
+
+ /* Read id lookup table */
+
+ /* Sanity check super block contents */
+ if(sBlk.s.id_table_start >= table_start) {
+ ERROR("read_filesystem_tables: id table start too large in super block\n");
+ goto corrupted;
+ }
+
+ /* there should always be at least one id */
+ if(sBlk.s.no_ids == 0) {
+ ERROR("read_filesystem_tables: Bad id count in super block\n");
+ goto corrupted;
+ }
+
+ /*
+ * the number of ids can never be more than double the number of inodes
+ * (the maximum is a unique uid and gid for each inode).
+ */
+ if(sBlk.s.no_ids > (sBlk.s.inodes * 2LL)) {
+ ERROR("read_filesystem_tables: Bad id count in super block\n");
+ goto corrupted;
+ }
+
+ if(read_id_table(&table_start) == FALSE)
+ goto corrupted;
+
+ /* Read exports table */
+ if(sBlk.s.lookup_table_start != SQUASHFS_INVALID_BLK) {
+
+ /* sanity check super block contents */
+ if(sBlk.s.lookup_table_start >= table_start) {
+ ERROR("read_filesystem_tables: lookup table start too large in super block\n");
+ goto corrupted;
+ }
+
+ if(parse_exports_table(&table_start) == FALSE)
+ goto corrupted;
+ }
+
+ /* Read fragment table */
+ if(sBlk.s.fragments != 0) {
+
+ /* Sanity check super block contents */
+ if(sBlk.s.fragment_table_start >= table_start) {
+ ERROR("read_filesystem_tables: fragment table start too large in super block\n");
+ goto corrupted;
+ }
+
+ /* The number of fragments should not exceed the number of inodes */
+ if(sBlk.s.fragments > sBlk.s.inodes) {
+ ERROR("read_filesystem_tables: Bad fragment count in super block\n");
+ goto corrupted;
+ }
+
+ if(read_fragment_table(&table_start) == FALSE)
+ goto corrupted;
+ }
+
+ /* Sanity check super block directory table values */
+ if(sBlk.s.directory_table_start > table_start) {
+ ERROR("read_filesystem_tables: directory table start too large in super block\n");
+ goto corrupted;
+ }
+
+ /* Sanity check super block inode table values */
+ if(sBlk.s.inode_table_start >= sBlk.s.directory_table_start) {
+ ERROR("read_filesystem_tables: inode table start too large in super block\n");
+ goto corrupted;
+ }
+
+ if(no_xattrs)
+ sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK;
+
+ alloc_index_table(0);
+
+ return TRUE;
+
+corrupted:
+ alloc_index_table(0);
+
+ return FALSE;
+}
+
+
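+/*
+ * Return TRUE if a valid Squashfs 4.0 superblock was found, FALSE if the
+ * superblock could not be read, and -1 if a superblock was read but it is
+ * not a 4.0 filesystem (presumably leaving the caller to try the older
+ * superblock readers).
+ */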
+int read_super_4(squashfs_operations **s_ops)
+{
+ struct squashfs_super_block sBlk_4;
+
+ /*
+ * Try to read a Squashfs 4 superblock
+ */
+ int res = read_fs_bytes(fd, SQUASHFS_START,
+ sizeof(struct squashfs_super_block), &sBlk_4);
+
+ if(res == FALSE)
+ return res;
+
+ swap = sBlk_4.s_magic != SQUASHFS_MAGIC;
+ SQUASHFS_INSWAP_SUPER_BLOCK(&sBlk_4);
+
+ if(sBlk_4.s_magic == SQUASHFS_MAGIC && sBlk_4.s_major == 4 &&
+ sBlk_4.s_minor == 0) {
+ *s_ops = &ops;
+ memcpy(&sBlk, &sBlk_4, sizeof(sBlk_4));
+
+ /*
+ * Check the compression type
+ */
+ comp = lookup_compressor_id(sBlk.s.compression);
+ return TRUE;
+ }
+
+ return -1;
+}
+
+
+static long long read_xattr_ids()
+{
+ int res;
+ struct squashfs_xattr_table id_table;
+
+ if(sBlk.s.xattr_id_table_start == SQUASHFS_INVALID_BLK)
+ return 0;
+
+ /*
+ * Read xattr id table, containing start of xattr metadata and the
+ * number of xattrs in the file system
+ */
+ res = read_fs_bytes(fd, sBlk.s.xattr_id_table_start, sizeof(id_table),
+ &id_table);
+ if(res == FALSE)
+ return -1;
+
+ SQUASHFS_INSWAP_XATTR_TABLE(&id_table);
+
+ return id_table.xattr_ids;
+}
+
+
+static void squashfs_stat(char *source)
+{
+ time_t mkfs_time = (time_t) sBlk.s.mkfs_time;
+ struct tm *t = use_localtime ? localtime(&mkfs_time) :
+ gmtime(&mkfs_time);
+ char *mkfs_str = asctime(t);
+ long long xattr_ids = read_xattr_ids();
+
+ if(xattr_ids == -1)
+ EXIT_UNSQUASH("File system corruption detected\n");
+
+ printf("Found a valid SQUASHFS 4:0 superblock on %s.\n", source);
+ printf("Creation or last append time %s", mkfs_str ? mkfs_str :
+ "failed to get time\n");
+ printf("Filesystem size %llu bytes (%.2f Kbytes / %.2f Mbytes)\n",
+ sBlk.s.bytes_used, sBlk.s.bytes_used / 1024.0,
+ sBlk.s.bytes_used / (1024.0 * 1024.0));
+ printf("Compression %s\n", comp->name);
+
+ if(SQUASHFS_COMP_OPTS(sBlk.s.flags)) {
+ char buffer[SQUASHFS_METADATA_SIZE] __attribute__ ((aligned));
+ int bytes;
+
+ if(!comp->supported)
+ printf("\tCould not display compressor options, because"
+ " %s compression is not supported\n",
+ comp->name);
+ else {
+ bytes = read_block(fd, sizeof(sBlk.s), NULL, 0, buffer);
+ if(bytes == 0) {
+ ERROR("Failed to read compressor options\n");
+ return;
+ }
+
+ compressor_display_options(comp, buffer, bytes);
+ }
+ }
+
+ printf("Block size %d\n", sBlk.s.block_size);
+ printf("Filesystem is %sexportable via NFS\n",
+ SQUASHFS_EXPORTABLE(sBlk.s.flags) ? "" : "not ");
+ printf("Inodes are %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_INODES(sBlk.s.flags) ? "un" : "");
+ printf("Data is %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_DATA(sBlk.s.flags) ? "un" : "");
+ printf("Uids/Gids (Id table) are %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_INODES(sBlk.s.flags) ||
+ SQUASHFS_UNCOMPRESSED_IDS(sBlk.s.flags) ? "un" : "");
+
+ if(SQUASHFS_NO_FRAGMENTS(sBlk.s.flags))
+ printf("Fragments are not stored\n");
+ else {
+ printf("Fragments are %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_FRAGMENTS(sBlk.s.flags) ?
+ "un" : "");
+ printf("Always-use-fragments option is %sspecified\n",
+ SQUASHFS_ALWAYS_FRAGMENTS(sBlk.s.flags) ? "" : "not ");
+ }
+
+ if(SQUASHFS_NO_XATTRS(sBlk.s.flags))
+ printf("Xattrs are not stored\n");
+ else
+ printf("Xattrs are %scompressed\n",
+ SQUASHFS_UNCOMPRESSED_XATTRS(sBlk.s.flags) ? "un" : "");
+
+ printf("Duplicates are %sremoved\n", SQUASHFS_DUPLICATES(sBlk.s.flags)
+ ? "" : "not ");
+ printf("Number of fragments %u\n", sBlk.s.fragments);
+ printf("Number of inodes %u\n", sBlk.s.inodes);
+ printf("Number of ids %d\n", sBlk.s.no_ids);
+
+ if(!SQUASHFS_NO_XATTRS(sBlk.s.flags))
+ printf("Number of xattr ids %lld\n", xattr_ids);
+
+ TRACE("sBlk.s.inode_table_start 0x%llx\n", sBlk.s.inode_table_start);
+ TRACE("sBlk.s.directory_table_start 0x%llx\n", sBlk.s.directory_table_start);
+ TRACE("sBlk.s.fragment_table_start 0x%llx\n", sBlk.s.fragment_table_start);
+ TRACE("sBlk.s.lookup_table_start 0x%llx\n", sBlk.s.lookup_table_start);
+ TRACE("sBlk.s.id_table_start 0x%llx\n", sBlk.s.id_table_start);
+ TRACE("sBlk.s.xattr_id_table_start 0x%llx\n", sBlk.s.xattr_id_table_start);
+}
+
+
+static squashfs_operations ops = {
+ .opendir = squashfs_opendir,
+ .read_fragment = read_fragment,
+ .read_block_list = read_block_list,
+ .read_inode = read_inode,
+ .read_filesystem_tables = read_filesystem_tables,
+ .stat = squashfs_stat
+};
diff --git a/squashfs-tools/unsquashfs.c b/squashfs-tools/unsquashfs.c
new file mode 100644
index 0000000..0ac6356
--- /dev/null
+++ b/squashfs-tools/unsquashfs.c
@@ -0,0 +1,4655 @@
+/*
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
+ * 2012, 2013, 2014, 2017, 2019, 2020, 2021, 2022, 2023
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * unsquashfs.c
+ */
+
+#include "unsquashfs.h"
+#include "squashfs_compat.h"
+#include "squashfs_swap.h"
+#include "compressor.h"
+#include "xattr.h"
+#include "unsquashfs_info.h"
+#include "stdarg.h"
+#include "fnmatch_compat.h"
+
+#ifdef __linux__
+#include <sched.h>
+#include <sys/sysinfo.h>
+#include <sys/sysmacros.h>
+#else
+#include <sys/sysctl.h>
+#endif
+
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <limits.h>
+#include <ctype.h>
+
+struct cache *fragment_cache, *data_cache;
+struct queue *to_reader, *to_inflate, *to_writer, *from_writer;
+pthread_t *thread, *inflator_thread;
+pthread_mutex_t fragment_mutex;
+static long long start_offset = 0;
+
+/* user options that control parallelisation */
+int processors = -1;
+
+struct super_block sBlk;
+squashfs_operations *s_ops;
+struct compressor *comp;
+
+int bytes = 0, swap, file_count = 0, dir_count = 0, sym_count = 0,
+ dev_count = 0, fifo_count = 0, socket_count = 0, hardlnk_count = 0;
+struct hash_table_entry *inode_table_hash[65536], *directory_table_hash[65536];
+int fd;
+unsigned int cached_frag = SQUASHFS_INVALID_FRAG;
+unsigned int block_size;
+unsigned int block_log;
+int lsonly = FALSE, info = FALSE, force = FALSE, short_ls = TRUE;
+int concise = FALSE, quiet = FALSE, numeric = FALSE;
+int use_regex = FALSE;
+int root_process;
+int columns;
+int rotate = 0;
+pthread_mutex_t screen_mutex;
+pthread_mutex_t pos_mutex = PTHREAD_MUTEX_INITIALIZER;
+int progress = TRUE, progress_enabled = FALSE, percent = FALSE;
+unsigned int total_files = 0, total_inodes = 0;
+long long total_blocks = 0;
+long long cur_blocks = 0;
+int inode_number = 1;
+int ignore_errors = FALSE;
+int strict_errors = FALSE;
+int use_localtime = TRUE;
+int max_depth = -1; /* unlimited */
+int follow_symlinks = FALSE;
+int missing_symlinks = FALSE;
+int no_wildcards = FALSE;
+int set_exit_code = TRUE;
+int treat_as_excludes = FALSE;
+int stat_sys = FALSE;
+int version = FALSE;
+int mkfs_time_opt = FALSE;
+int cat_files = FALSE;
+int fragment_buffer_size = FRAGMENT_BUFFER_DEFAULT;
+int data_buffer_size = DATA_BUFFER_DEFAULT;
+char *dest = "squashfs-root";
+struct pathnames *extracts = NULL, *excludes = NULL;
+struct pathname *extract = NULL, *exclude = NULL, *stickypath = NULL;
+int writer_fd = 1;
+int pseudo_file = FALSE;
+int pseudo_stdout = FALSE;
+char *pseudo_name;
+unsigned int timeval;
+int time_opt = FALSE;
+int full_precision = FALSE;
+
+/* extended attribute flags */
+int no_xattrs = XATTR_DEF;
+regex_t *xattr_exclude_preg = NULL;
+regex_t *xattr_include_preg = NULL;
+
+int lookup_type[] = {
+ 0,
+ S_IFDIR,
+ S_IFREG,
+ S_IFLNK,
+ S_IFBLK,
+ S_IFCHR,
+ S_IFIFO,
+ S_IFSOCK,
+ S_IFDIR,
+ S_IFREG,
+ S_IFLNK,
+ S_IFBLK,
+ S_IFCHR,
+ S_IFIFO,
+ S_IFSOCK
+};
+
+struct test table[] = {
+ { S_IFMT, S_IFSOCK, 0, 's' },
+ { S_IFMT, S_IFLNK, 0, 'l' },
+ { S_IFMT, S_IFBLK, 0, 'b' },
+ { S_IFMT, S_IFDIR, 0, 'd' },
+ { S_IFMT, S_IFCHR, 0, 'c' },
+ { S_IFMT, S_IFIFO, 0, 'p' },
+ { S_IRUSR, S_IRUSR, 1, 'r' },
+ { S_IWUSR, S_IWUSR, 2, 'w' },
+ { S_IRGRP, S_IRGRP, 4, 'r' },
+ { S_IWGRP, S_IWGRP, 5, 'w' },
+ { S_IROTH, S_IROTH, 7, 'r' },
+ { S_IWOTH, S_IWOTH, 8, 'w' },
+ { S_IXUSR | S_ISUID, S_IXUSR | S_ISUID, 3, 's' },
+ { S_IXUSR | S_ISUID, S_ISUID, 3, 'S' },
+ { S_IXUSR | S_ISUID, S_IXUSR, 3, 'x' },
+ { S_IXGRP | S_ISGID, S_IXGRP | S_ISGID, 6, 's' },
+ { S_IXGRP | S_ISGID, S_ISGID, 6, 'S' },
+ { S_IXGRP | S_ISGID, S_IXGRP, 6, 'x' },
+ { S_IXOTH | S_ISVTX, S_IXOTH | S_ISVTX, 9, 't' },
+ { S_IXOTH | S_ISVTX, S_ISVTX, 9, 'T' },
+ { S_IXOTH | S_ISVTX, S_IXOTH, 9, 'x' },
+ { 0, 0, 0, 0}
+};
+
+void progress_bar(long long current, long long max, int columns);
+
+#define MAX_LINE 16384
+
+void sigwinch_handler(int arg)
+{
+ struct winsize winsize;
+
+ if(ioctl(1, TIOCGWINSZ, &winsize) == -1) {
+ if(isatty(STDOUT_FILENO))
+ ERROR("TIOCGWINSZ ioctl failed, defaulting to 80 "
+ "columns\n");
+ columns = 80;
+ } else
+ columns = winsize.ws_col;
+}
+
+
+void sigalrm_handler(int arg)
+{
+ rotate = (rotate + 1) % 4;
+}
+
+
+int add_overflow(int a, int b)
+{
+ return (INT_MAX - a) < b;
+}
+
+
+int shift_overflow(int a, int shift)
+{
+ return (INT_MAX >> shift) < a;
+}
+
+
+int multiply_overflow(int a, int multiplier)
+{
+ return (INT_MAX / multiplier) < a;
+}
+
+
+struct queue *queue_init(int size)
+{
+ struct queue *queue = malloc(sizeof(struct queue));
+ if(queue == NULL)
+ MEM_ERROR();
+
+ if(add_overflow(size, 1) ||
+ multiply_overflow(size + 1, sizeof(void *)))
+ EXIT_UNSQUASH("Size too large in queue_init\n");
+
+ queue->data = malloc(sizeof(void *) * (size + 1));
+ if(queue->data == NULL)
+ MEM_ERROR();
+
+ queue->size = size + 1;
+ queue->readp = queue->writep = 0;
+ pthread_mutex_init(&queue->mutex, NULL);
+ pthread_cond_init(&queue->empty, NULL);
+ pthread_cond_init(&queue->full, NULL);
+
+ return queue;
+}
+
+
+void queue_put(struct queue *queue, void *data)
+{
+ int nextp;
+
+ pthread_mutex_lock(&queue->mutex);
+
+ while((nextp = (queue->writep + 1) % queue->size) == queue->readp)
+ pthread_cond_wait(&queue->full, &queue->mutex);
+
+ queue->data[queue->writep] = data;
+ queue->writep = nextp;
+ pthread_cond_signal(&queue->empty);
+ pthread_mutex_unlock(&queue->mutex);
+}
+
+
+void *queue_get(struct queue *queue)
+{
+ void *data;
+ pthread_mutex_lock(&queue->mutex);
+
+ while(queue->readp == queue->writep)
+ pthread_cond_wait(&queue->empty, &queue->mutex);
+
+ data = queue->data[queue->readp];
+ queue->readp = (queue->readp + 1) % queue->size;
+ pthread_cond_signal(&queue->full);
+ pthread_mutex_unlock(&queue->mutex);
+
+ return data;
+}
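+
+/*
+ * Usage sketch: the queues are simple blocking FIFOs, so a worker thread
+ * just loops on queue_get().  example_consumer and its NULL end-of-work
+ * marker are illustrative, not one of the real thread functions.
+ */
+static void *example_consumer(void *arg)
+{
+ struct queue *queue = arg;
+
+ while(1) {
+ void *work = queue_get(queue); /* blocks until a producer calls queue_put() */
+
+ if(work == NULL)
+ break; /* hypothetical end-of-work marker */
+
+ /* ... process work here ... */
+ }
+
+ return NULL;
+}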
+
+
+void dump_queue(struct queue *queue)
+{
+ pthread_mutex_lock(&queue->mutex);
+
+ printf("Max size %d, size %d%s\n", queue->size - 1,
+ queue->readp <= queue->writep ? queue->writep - queue->readp :
+ queue->size - queue->readp + queue->writep,
+ queue->readp == queue->writep ? " (EMPTY)" :
+ ((queue->writep + 1) % queue->size) == queue->readp ?
+ " (FULL)" : "");
+
+ pthread_mutex_unlock(&queue->mutex);
+}
+
+
+/* Called with the cache mutex held */
+void insert_hash_table(struct cache *cache, struct cache_entry *entry)
+{
+ int hash = TABLE_HASH(entry->block);
+
+ entry->hash_next = cache->hash_table[hash];
+ cache->hash_table[hash] = entry;
+ entry->hash_prev = NULL;
+ if(entry->hash_next)
+ entry->hash_next->hash_prev = entry;
+}
+
+
+/* Called with the cache mutex held */
+void remove_hash_table(struct cache *cache, struct cache_entry *entry)
+{
+ if(entry->hash_prev)
+ entry->hash_prev->hash_next = entry->hash_next;
+ else
+ cache->hash_table[TABLE_HASH(entry->block)] =
+ entry->hash_next;
+ if(entry->hash_next)
+ entry->hash_next->hash_prev = entry->hash_prev;
+
+ entry->hash_prev = entry->hash_next = NULL;
+}
+
+
+/* Called with the cache mutex held */
+void insert_free_list(struct cache *cache, struct cache_entry *entry)
+{
+ if(cache->free_list) {
+ entry->free_next = cache->free_list;
+ entry->free_prev = cache->free_list->free_prev;
+ cache->free_list->free_prev->free_next = entry;
+ cache->free_list->free_prev = entry;
+ } else {
+ cache->free_list = entry;
+ entry->free_prev = entry->free_next = entry;
+ }
+}
+
+
+/* Called with the cache mutex held */
+void remove_free_list(struct cache *cache, struct cache_entry *entry)
+{
+ if(entry->free_prev == NULL || entry->free_next == NULL)
+ /* not in free list */
+ return;
+ else if(entry->free_prev == entry && entry->free_next == entry) {
+ /* only this entry in the free list */
+ cache->free_list = NULL;
+ } else {
+ /* more than one entry in the free list */
+ entry->free_next->free_prev = entry->free_prev;
+ entry->free_prev->free_next = entry->free_next;
+ if(cache->free_list == entry)
+ cache->free_list = entry->free_next;
+ }
+
+ entry->free_prev = entry->free_next = NULL;
+}
+
+
+struct cache *cache_init(int buffer_size, int max_buffers)
+{
+ struct cache *cache = malloc(sizeof(struct cache));
+ if(cache == NULL)
+ MEM_ERROR();
+
+ cache->max_buffers = max_buffers;
+ cache->buffer_size = buffer_size;
+ cache->count = 0;
+ cache->used = 0;
+ cache->free_list = NULL;
+ memset(cache->hash_table, 0, sizeof(struct cache_entry *) * 65536);
+ cache->wait_free = FALSE;
+ cache->wait_pending = FALSE;
+ pthread_mutex_init(&cache->mutex, NULL);
+ pthread_cond_init(&cache->wait_for_free, NULL);
+ pthread_cond_init(&cache->wait_for_pending, NULL);
+
+ return cache;
+}
+
+
+struct cache_entry *cache_get(struct cache *cache, long long block, int size)
+{
+ /*
+ * Get a block out of the cache. If the block isn't in the cache
+ * it is added and queued to the reader() and inflate() threads for
+ * reading off disk and decompression. The cache grows until max_buffers
+ * is reached; once this occurs, existing discarded blocks on the free
+ * list are reused
+ */
+ int hash = TABLE_HASH(block);
+ struct cache_entry *entry;
+
+ pthread_mutex_lock(&cache->mutex);
+
+ for(entry = cache->hash_table[hash]; entry; entry = entry->hash_next)
+ if(entry->block == block)
+ break;
+
+ if(entry) {
+ /*
+ * found the block in the cache. If the block is currently
+ * unused remove it from the free list and increment cache
+ * used count.
+ */
+ if(entry->used == 0) {
+ cache->used ++;
+ remove_free_list(cache, entry);
+ }
+ entry->used ++;
+ pthread_mutex_unlock(&cache->mutex);
+ } else {
+ /*
+ * not in the cache
+ *
+ * first try to allocate new block
+ */
+ if(cache->count < cache->max_buffers) {
+ entry = malloc(sizeof(struct cache_entry));
+ if(entry == NULL)
+ MEM_ERROR();
+
+ entry->data = malloc(cache->buffer_size);
+ if(entry->data == NULL)
+ MEM_ERROR();
+
+ entry->cache = cache;
+ entry->free_prev = entry->free_next = NULL;
+ cache->count ++;
+ } else {
+ /*
+ * try to get from free list
+ */
+ while(cache->free_list == NULL) {
+ cache->wait_free = TRUE;
+ pthread_cond_wait(&cache->wait_for_free,
+ &cache->mutex);
+ }
+ entry = cache->free_list;
+ remove_free_list(cache, entry);
+ remove_hash_table(cache, entry);
+ }
+
+ /*
+ * Initialise block and insert into the hash table.
+ * Increment used which tracks how many buffers in the
+ * cache are actively in use (the other blocks, count - used,
+ * are in the cache and available for lookup, but can also be
+ * re-used).
+ */
+ entry->block = block;
+ entry->size = size;
+ entry->used = 1;
+ entry->error = FALSE;
+ entry->pending = TRUE;
+ insert_hash_table(cache, entry);
+ cache->used ++;
+
+ /*
+ * queue to read thread to read and ultimately (via the
+ * decompress threads) decompress the buffer
+ */
+ pthread_mutex_unlock(&cache->mutex);
+ queue_put(to_reader, entry);
+ }
+
+ return entry;
+}
+
+
+void cache_block_ready(struct cache_entry *entry, int error)
+{
+ /*
+ * mark cache entry as being complete, reading and (if necessary)
+ * decompression has taken place, and the buffer is valid for use.
+ * If an error occurs reading or decompressing, the buffer also
+ * becomes ready but with an error...
+ */
+ pthread_mutex_lock(&entry->cache->mutex);
+ entry->pending = FALSE;
+ entry->error = error;
+
+ /*
+ * if the wait_pending flag is set, one or more threads may be waiting
+ * on this buffer
+ */
+ if(entry->cache->wait_pending) {
+ entry->cache->wait_pending = FALSE;
+ pthread_cond_broadcast(&entry->cache->wait_for_pending);
+ }
+
+ pthread_mutex_unlock(&entry->cache->mutex);
+}
+
+
+void cache_block_wait(struct cache_entry *entry)
+{
+ /*
+ * wait for this cache entry to become ready, when reading and (if
+ * necessary) decompression has taken place
+ */
+ pthread_mutex_lock(&entry->cache->mutex);
+
+ while(entry->pending) {
+ entry->cache->wait_pending = TRUE;
+ pthread_cond_wait(&entry->cache->wait_for_pending,
+ &entry->cache->mutex);
+ }
+
+ pthread_mutex_unlock(&entry->cache->mutex);
+}
+
+
+void cache_block_put(struct cache_entry *entry)
+{
+ /*
+ * finished with this cache entry; once the usage count reaches zero it
+ * can be reused and is put onto the free list. As it remains
+ * accessible via the hash table, it can still be looked up and given a
+ * new lease of life before it is reused.
+ */
+ pthread_mutex_lock(&entry->cache->mutex);
+
+ entry->used --;
+ if(entry->used == 0) {
+ insert_free_list(entry->cache, entry);
+ entry->cache->used --;
+
+ /*
+ * if the wait_free flag is set, one or more threads may be
+ * waiting on this buffer
+ */
+ if(entry->cache->wait_free) {
+ entry->cache->wait_free = FALSE;
+ pthread_cond_broadcast(&entry->cache->wait_for_free);
+ }
+ }
+
+ pthread_mutex_unlock(&entry->cache->mutex);
+}
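+
+/*
+ * Lifecycle sketch: a consumer asks the cache for a block, waits for the
+ * reader/inflate threads to fill it, uses entry->data and then drops its
+ * reference.  example_use_block is illustrative only.
+ */
+static void example_use_block(struct cache *cache, long long block, int size)
+{
+ struct cache_entry *entry = cache_get(cache, block, size);
+
+ cache_block_wait(entry); /* returns once entry->pending is cleared */
+
+ if(entry->error == FALSE) {
+ /* entry->data now holds the (decompressed) block contents */
+ }
+
+ cache_block_put(entry); /* may move the buffer onto the free list */
+}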
+
+
+void dump_cache(struct cache *cache)
+{
+ pthread_mutex_lock(&cache->mutex);
+
+ printf("Max buffers %d, Current size %d, Used %d, %s\n",
+ cache->max_buffers, cache->count, cache->used,
+ cache->free_list ? "Free buffers" : "No free buffers");
+
+ pthread_mutex_unlock(&cache->mutex);
+}
+
+
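+/*
+ * For example, a directory with mode 0755 produces "drwxr-xr-x" and a
+ * regular file with mode 0644 produces "-rw-r--r--".
+ */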
+char *modestr(char *str, int mode)
+{
+ int i;
+
+ strcpy(str, "----------");
+
+ for(i = 0; table[i].mask != 0; i++) {
+ if((mode & table[i].mask) == table[i].value)
+ str[table[i].position] = table[i].mode;
+ }
+
+ return str;
+}
+
+
+#define TOTALCHARS 25
+void print_filename(char *pathname, struct inode *inode)
+{
+ char str[11], dummy[12], dummy2[12]; /* overflow safe */
+ char *userstr, *groupstr;
+ int padchars;
+ struct passwd *user;
+ struct group *group;
+ struct tm *t;
+
+ if(short_ls) {
+ printf("%s\n", pathname);
+ return;
+ }
+
+ user = numeric ? NULL : getpwuid(inode->uid);
+ if(user == NULL) {
+ int res = snprintf(dummy, 12, "%u", inode->uid);
+ if(res < 0)
+ EXIT_UNSQUASH("snprintf failed in print_filename()\n");
+ else if(res >= 12)
+ /* unsigned int shouldn't ever need more than 11 bytes
+ * (including terminating '\0') to print in base 10 */
+ userstr = "*";
+ else
+ userstr = dummy;
+ } else
+ userstr = user->pw_name;
+
+ group = numeric ? NULL : getgrgid(inode->gid);
+ if(group == NULL) {
+ int res = snprintf(dummy2, 12, "%u", inode->gid);
+ if(res < 0)
+ EXIT_UNSQUASH("snprintf failed in print_filename()\n");
+ else if(res >= 12)
+ /* unsigned int shouldn't ever need more than 11 bytes
+ * (including terminating '\0') to print in base 10 */
+ groupstr = "*";
+ else
+ groupstr = dummy2;
+ } else
+ groupstr = group->gr_name;
+
+ printf("%s %s/%s ", modestr(str, inode->mode), userstr, groupstr);
+
+ switch(inode->mode & S_IFMT) {
+ case S_IFREG:
+ case S_IFDIR:
+ case S_IFSOCK:
+ case S_IFIFO:
+ case S_IFLNK:
+ padchars = TOTALCHARS - strlen(userstr) -
+ strlen(groupstr);
+
+ printf("%*lld ", padchars > 0 ? padchars : 0,
+ inode->data);
+ break;
+ case S_IFCHR:
+ case S_IFBLK:
+ padchars = TOTALCHARS - strlen(userstr) -
+ strlen(groupstr) - 7;
+
+ printf("%*s%3d,%3d ", padchars > 0 ? padchars : 0, " ",
+ (int) inode->data >> 8, (int) inode->data &
+ 0xff);
+ break;
+ }
+
+ t = use_localtime ? localtime(&inode->time) : gmtime(&inode->time);
+
+ if(full_precision)
+ printf("%d-%02d-%02d %02d:%02d:%02d %s", t->tm_year + 1900,
+ t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min,
+ t->tm_sec, pathname);
+ else
+ printf("%d-%02d-%02d %02d:%02d %s", t->tm_year + 1900,
+ t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, pathname);
+ if((inode->mode & S_IFMT) == S_IFLNK)
+ printf(" -> %s", inode->symlink);
+ printf("\n");
+}
+
+
+long long read_bytes(int fd, void *buff, long long bytes)
+{
+ long long res, count;
+
+ for(count = 0; count < bytes; count += res) {
+ int len = (bytes - count) > MAXIMUM_READ_SIZE ?
+ MAXIMUM_READ_SIZE : bytes - count;
+
+ res = read(fd, buff + count, len);
+ if(res < 1) {
+ if(res == 0)
+ break;
+ else if(errno != EINTR) {
+ ERROR("Read failed because %s\n",
+ strerror(errno));
+ return -1;
+ } else
+ res = 0;
+ }
+ }
+
+ return count;
+}
+
+
+int read_fs_bytes(int fd, long long byte, long long bytes, void *buff)
+{
+ off_t off = byte;
+ long long res;
+
+ TRACE("read_bytes: reading from position 0x%llx, bytes %lld\n", byte,
+ bytes);
+
+ pthread_cleanup_push((void *) pthread_mutex_unlock, &pos_mutex);
+ pthread_mutex_lock(&pos_mutex);
+ if(lseek(fd, start_offset + off, SEEK_SET) == -1) {
+ ERROR("Lseek failed because %s\n", strerror(errno));
+ res = FALSE;
+ goto done;
+ }
+
+ res = read_bytes(fd, buff, bytes);
+
+ if(res != -1 && res < bytes)
+ ERROR("Read on filesystem failed because EOF\n");
+
+ res = res == bytes;
+
+done:
+ pthread_cleanup_pop(1);
+ return res;
+}
+
+
+int read_block(int fd, long long start, long long *next, int expected,
+ void *block)
+{
+ unsigned short c_byte;
+ int offset = 2, res, compressed;
+ int outlen = expected ? expected : SQUASHFS_METADATA_SIZE;
+ static char *buffer = NULL;
+
+ if(outlen > SQUASHFS_METADATA_SIZE)
+ return FALSE;
+
+ if(swap) {
+ if(read_fs_bytes(fd, start, 2, &c_byte) == FALSE)
+ goto failed;
+ c_byte = (c_byte >> 8) | ((c_byte & 0xff) << 8);
+ } else
+ if(read_fs_bytes(fd, start, 2, &c_byte) == FALSE)
+ goto failed;
+
+ TRACE("read_block: block @0x%llx, %d %s bytes\n", start,
+ SQUASHFS_COMPRESSED_SIZE(c_byte), SQUASHFS_COMPRESSED(c_byte) ?
+ "compressed" : "uncompressed");
+
+ if(SQUASHFS_CHECK_DATA(sBlk.s.flags))
+ offset = 3;
+
+ compressed = SQUASHFS_COMPRESSED(c_byte);
+ c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte);
+
+ /*
+ * The block size should not be larger than
+ * the uncompressed size (or max uncompressed size if
+ * expected is 0)
+ */
+ if(c_byte > outlen)
+ return FALSE;
+
+ if(compressed) {
+ int error;
+
+ if(buffer == NULL) {
+ buffer = malloc(SQUASHFS_METADATA_SIZE);
+ if(buffer == NULL)
+ MEM_ERROR();
+ }
+
+ res = read_fs_bytes(fd, start + offset, c_byte, buffer);
+ if(res == FALSE)
+ goto failed;
+
+ res = compressor_uncompress(comp, block, buffer, c_byte,
+ outlen, &error);
+
+ if(res == -1) {
+ ERROR("%s uncompress failed with error code %d\n",
+ comp->name, error);
+ goto failed;
+ }
+ } else {
+ res = read_fs_bytes(fd, start + offset, c_byte, block);
+ if(res == FALSE)
+ goto failed;
+ res = c_byte;
+ }
+
+ if(next)
+ *next = start + offset + c_byte;
+
+ /*
+ * if expected, then check the (uncompressed) return data
+ * is of the expected size
+ */
+ if(expected && expected != res)
+ return FALSE;
+ else
+ return res;
+
+failed:
+ ERROR("read_block: failed to read block @0x%llx\n", start);
+ return FALSE;
+}
+
+
+static struct hash_table_entry *get_metadata(struct hash_table_entry *hash_table[],
+ long long start)
+{
+ int res, hash = TABLE_HASH(start);
+ struct hash_table_entry *entry;
+ void *buffer;
+ long long next;
+
+ for(entry = hash_table[hash]; entry; entry = entry->next)
+ if(entry->start == start)
+ return entry;
+
+ buffer = malloc(SQUASHFS_METADATA_SIZE);
+ if(buffer == NULL)
+ MEM_ERROR();
+
+ res = read_block(fd, start, &next, 0, buffer);
+ if(res == 0) {
+ ERROR("get_metadata: failed to read block\n");
+ free(buffer);
+ return NULL;
+ }
+
+ entry = malloc(sizeof(struct hash_table_entry));
+ if(entry == NULL)
+ MEM_ERROR();
+
+ entry->start = start;
+ entry->length = res;
+ entry->buffer = buffer;
+ entry->next_index = next;
+ entry->next = hash_table[hash];
+ hash_table[hash] = entry;
+
+ return entry;
+}
+
+/*
+ * Read length bytes from metadata position <block, offset> (block is the
+ * start of the compressed block on disk, and offset is the offset into
+ * the block once decompressed). Data is packed into consecutive blocks,
+ * and length bytes may require reading more than one block.
+ */
+static int read_metadata(struct hash_table_entry *hash_table[], void *buffer,
+ long long *blk, unsigned int *off, int length)
+{
+ int res = length;
+ struct hash_table_entry *entry;
+ long long block = *blk;
+ unsigned int offset = *off;
+
+ while (1) {
+ entry = get_metadata(hash_table, block);
+ if (entry == NULL || offset >= entry->length)
+ return FALSE;
+
+ if((entry->length - offset) < length) {
+ int copy = entry->length - offset;
+ memcpy(buffer, entry->buffer + offset, copy);
+ buffer += copy;
+ length -= copy;
+ block = entry->next_index;
+ offset = 0;
+ } else if((entry->length - offset) == length) {
+ memcpy(buffer, entry->buffer + offset, length);
+ *blk = entry->next_index;
+ *off = 0;
+ break;
+ } else {
+ memcpy(buffer, entry->buffer + offset, length);
+ *blk = block;
+ *off = offset + length;
+ break;
+ }
+ }
+
+ return res;
+}
+
+
+int read_inode_data(void *buffer, long long *blk, unsigned int *off, int length)
+{
+ return read_metadata(inode_table_hash, buffer, blk, off, length);
+}
+
+
+int read_directory_data(void *buffer, long long *blk, unsigned int *off, int length)
+{
+ return read_metadata(directory_table_hash, buffer, blk, off, length);
+}
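+
+/*
+ * Usage sketch: the <block, offset> cursor advances across calls, so two
+ * consecutive reads return items that are adjacent in the metadata stream
+ * (this is how read_inode() reads an inode header and then its payload).
+ * example_read_pair is illustrative only.
+ */
+static int example_read_pair(long long block, unsigned int offset,
+ void *first, int first_size, void *second, int second_size)
+{
+ if(read_inode_data(first, &block, &offset, first_size) == FALSE)
+ return FALSE;
+
+ /* block/offset now point just past the first item */
+ return read_inode_data(second, &block, &offset, second_size);
+}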
+
+
+int set_attributes(char *pathname, int mode, uid_t uid, gid_t guid, time_t time,
+ unsigned int xattr, unsigned int set_mode)
+{
+ struct utimbuf times = { time, time };
+ int failed = FALSE;
+
+ if(utime(pathname, &times) == -1) {
+ EXIT_UNSQUASH_STRICT("set_attributes: failed to set time on "
+ "%s, because %s\n", pathname, strerror(errno));
+ failed = TRUE;
+ }
+
+ if(root_process) {
+ if(chown(pathname, uid, guid) == -1) {
+ EXIT_UNSQUASH_STRICT("set_attributes: failed to change"
+ " uid and gids on %s, because %s\n", pathname,
+ strerror(errno));
+ failed = TRUE;
+ }
+ } else
+ mode &= ~06000;
+
+ if(write_xattr(pathname, xattr) == FALSE)
+ failed = TRUE;
+
+ if((set_mode || (mode & 07000)) &&
+ chmod(pathname, (mode_t) mode) == -1) {
+ /*
+ * Some filesystems require root privileges to use the sticky
+ * bit. If we're not root and chmod() failed with EPERM when the
+ * sticky bit was included in the mode, try again without the
+ * sticky bit. Otherwise, fail with an error message.
+ */
+ if (root_process || errno != EPERM || !(mode & 01000) ||
+ chmod(pathname, (mode_t) (mode & ~01000)) == -1) {
+ EXIT_UNSQUASH_STRICT("set_attributes: failed to change"
+ " mode %s, because %s\n", pathname,
+ strerror(errno));
+ failed = TRUE;
+ }
+ }
+
+ return !failed;
+}
+
+
+int write_bytes(int fd, char *buff, int bytes)
+{
+ int res, count;
+
+ for(count = 0; count < bytes; count += res) {
+ res = write(fd, buff + count, bytes - count);
+ if(res == -1) {
+ if(errno != EINTR) {
+ ERROR("Write on output file failed because "
+ "%s\n", strerror(errno));
+ return -1;
+ }
+ res = 0;
+ }
+ }
+
+ return 0;
+}
+
+
+int lseek_broken = FALSE;
+char *zero_data = NULL;
+
+int write_block(int file_fd, char *buffer, int size, long long hole, int sparse)
+{
+ off_t off = hole;
+
+ if(hole) {
+ if(sparse && lseek_broken == FALSE) {
+ int error = lseek(file_fd, off, SEEK_CUR);
+ if(error == -1)
+ /* failed to seek beyond end of file */
+ lseek_broken = TRUE;
+ }
+
+ if((sparse == FALSE || lseek_broken) && zero_data == NULL) {
+ zero_data = malloc(block_size);
+ if(zero_data == NULL)
+ MEM_ERROR();
+ memset(zero_data, 0, block_size);
+ }
+
+ if(sparse == FALSE || lseek_broken) {
+ int blocks = (hole + block_size -1) / block_size;
+ int avail_bytes, i;
+ for(i = 0; i < blocks; i++, hole -= avail_bytes) {
+ avail_bytes = hole > block_size ? block_size :
+ hole;
+ if(write_bytes(file_fd, zero_data, avail_bytes)
+ == -1)
+ goto failure;
+ }
+ }
+ }
+
+ if(write_bytes(file_fd, buffer, size) == -1)
+ goto failure;
+
+ return TRUE;
+
+failure:
+ return FALSE;
+}
+
+
+pthread_mutex_t open_mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t open_empty = PTHREAD_COND_INITIALIZER;
+int open_unlimited, open_count;
+#define OPEN_FILE_MARGIN 10
+
+
+void open_init(int count)
+{
+ open_count = count;
+ open_unlimited = count == -1;
+}
+
+
+int open_wait(char *pathname, int flags, mode_t mode)
+{
+ if (!open_unlimited) {
+ pthread_mutex_lock(&open_mutex);
+ while (open_count == 0)
+ pthread_cond_wait(&open_empty, &open_mutex);
+ open_count --;
+ pthread_mutex_unlock(&open_mutex);
+ }
+
+ return open(pathname, flags, mode);
+}
+
+
+void close_wake(int fd)
+{
+ close(fd);
+
+ if (!open_unlimited) {
+ pthread_mutex_lock(&open_mutex);
+ open_count ++;
+ pthread_cond_signal(&open_empty);
+ pthread_mutex_unlock(&open_mutex);
+ }
+}
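+
+/*
+ * Usage sketch: file creation is bracketed by open_wait()/close_wake() so
+ * that no more than the configured number of files are open at once.
+ * example_touch is illustrative only.
+ */
+static int example_touch(char *pathname)
+{
+ int file_fd = open_wait(pathname, O_CREAT | O_WRONLY, 0644);
+
+ if(file_fd == -1)
+ return FALSE;
+
+ close_wake(file_fd);
+ return TRUE;
+}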
+
+
+void queue_file(char *pathname, int file_fd, struct inode *inode)
+{
+ struct squashfs_file *file = malloc(sizeof(struct squashfs_file));
+ if(file == NULL)
+ MEM_ERROR();
+
+ file->fd = file_fd;
+ file->file_size = inode->data;
+ file->mode = inode->mode;
+ file->gid = inode->gid;
+ file->uid = inode->uid;
+ file->time = inode->time;
+ file->pathname = strdup(pathname);
+ file->blocks = inode->blocks + (inode->frag_bytes > 0);
+ file->sparse = inode->sparse;
+ file->xattr = inode->xattr;
+ queue_put(to_writer, file);
+}
+
+
+void queue_dir(char *pathname, struct dir *dir)
+{
+ struct squashfs_file *file = malloc(sizeof(struct squashfs_file));
+ if(file == NULL)
+ MEM_ERROR();
+
+ file->fd = -1;
+ file->mode = dir->mode;
+ file->gid = dir->guid;
+ file->uid = dir->uid;
+ file->time = dir->mtime;
+ file->pathname = strdup(pathname);
+ file->xattr = dir->xattr;
+ queue_put(to_writer, file);
+}
+
+
+int write_file(struct inode *inode, char *pathname)
+{
+ int file_fd;
+ unsigned int i;
+ unsigned int *block_list = NULL;
+ int file_end = inode->data / block_size, res;
+ long long start = inode->start;
+ mode_t mode = inode->mode;
+ struct stat buf;
+
+ TRACE("write_file: regular file, blocks %d\n", inode->blocks);
+
+ if(!root_process && !(mode & S_IWUSR) && has_xattrs(inode->xattr))
+ mode |= S_IWUSR;
+
+ res = lstat(pathname, &buf);
+ if(res != -1 && force) {
+ res = unlink(pathname);
+ if(res == -1)
+ EXIT_UNSQUASH("write_file: failed to unlink file %s,"
+ " because %s\n", pathname, strerror(errno));
+ } else if(res != -1)
+ EXIT_UNSQUASH("write_file: file %s already exists\n", pathname);
+ else if(errno != ENOENT)
+ EXIT_UNSQUASH("write_file: failed to lstat file %s,"
+ " because %s\n", pathname, strerror(errno));
+
+ file_fd = open_wait(pathname, O_CREAT | O_WRONLY, mode & 0777);
+ if(file_fd == -1) {
+ EXIT_UNSQUASH_IGNORE("write_file: failed to create file %s,"
+ " because %s\n", pathname, strerror(errno));
+ return FALSE;
+ }
+
+ if(inode->blocks) {
+ block_list = malloc(inode->blocks * sizeof(unsigned int));
+ if(block_list == NULL)
+ MEM_ERROR();
+
+ s_ops->read_block_list(block_list, inode->block_start,
+ inode->block_offset, inode->blocks);
+ }
+
+ /*
+ * a squashfs_file structure describing the file is queued to the
+ * writer thread. If the file has one or more blocks or a fragment,
+ * they are queued separately (as references to blocks in the cache).
+ */
+ queue_file(pathname, file_fd, inode);
+
+ for(i = 0; i < inode->blocks; i++) {
+ int c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(block_list[i]);
+ struct file_entry *block = malloc(sizeof(struct file_entry));
+
+ if(block == NULL)
+ MEM_ERROR();
+
+ block->offset = 0;
+ block->size = i == file_end ? inode->data & (block_size - 1) :
+ block_size;
+ if(block_list[i] == 0) /* sparse block */
+ block->buffer = NULL;
+ else {
+ block->buffer = cache_get(data_cache, start,
+ block_list[i]);
+ start += c_byte;
+ }
+ queue_put(to_writer, block);
+ }
+
+ if(inode->frag_bytes) {
+ int size;
+ long long start;
+ struct file_entry *block = malloc(sizeof(struct file_entry));
+
+ if(block == NULL)
+ MEM_ERROR();
+
+ s_ops->read_fragment(inode->fragment, &start, &size);
+ block->buffer = cache_get(fragment_cache, start, size);
+ block->offset = inode->offset;
+ block->size = inode->frag_bytes;
+ queue_put(to_writer, block);
+ }
+
+ free(block_list);
+ return TRUE;
+}
+
+
+int cat_file(struct inode *inode, char *pathname)
+{
+ unsigned int i;
+ unsigned int *block_list = NULL;
+ int file_end = inode->data / block_size;
+ long long start = inode->start;
+
+ TRACE("cat_file: regular file, blocks %d\n", inode->blocks);
+
+ if(inode->blocks) {
+ block_list = malloc(inode->blocks * sizeof(unsigned int));
+ if(block_list == NULL)
+ MEM_ERROR();
+
+ s_ops->read_block_list(block_list, inode->block_start,
+ inode->block_offset, inode->blocks);
+ }
+
+ /*
+ * a squashfs_file structure describing the file is queued to the
+ * writer thread. If the file has one or more blocks or a fragment,
+ * they are queued separately (as references to blocks in the cache).
+ */
+ queue_file(pathname, 0, inode);
+
+ for(i = 0; i < inode->blocks; i++) {
+ int c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(block_list[i]);
+ struct file_entry *block = malloc(sizeof(struct file_entry));
+
+ if(block == NULL)
+ MEM_ERROR();
+
+ block->offset = 0;
+ block->size = i == file_end ? inode->data & (block_size - 1) :
+ block_size;
+ if(block_list[i] == 0) /* sparse block */
+ block->buffer = NULL;
+ else {
+ block->buffer = cache_get(data_cache, start,
+ block_list[i]);
+ start += c_byte;
+ }
+ queue_put(to_writer, block);
+ }
+
+ if(inode->frag_bytes) {
+ int size;
+ long long start;
+ struct file_entry *block = malloc(sizeof(struct file_entry));
+
+ if(block == NULL)
+ MEM_ERROR();
+
+ s_ops->read_fragment(inode->fragment, &start, &size);
+ block->buffer = cache_get(fragment_cache, start, size);
+ block->offset = inode->offset;
+ block->size = inode->frag_bytes;
+ queue_put(to_writer, block);
+ }
+
+ free(block_list);
+ return TRUE;
+}
+
+
+int create_inode(char *pathname, struct inode *i)
+{
+ int res;
+ int failed = FALSE;
+ char *link_path = lookup(i->inode_number);
+
+ TRACE("create_inode: pathname %s\n", pathname);
+
+ if(link_path) {
+ TRACE("create_inode: hard link\n");
+ if(force)
+ unlink(pathname);
+
+ if(link(link_path, pathname) == -1) {
+ EXIT_UNSQUASH_IGNORE("create_inode: failed to create"
+ " hardlink, because %s\n", strerror(errno));
+ return FALSE;
+ }
+
+ hardlnk_count++;
+ return TRUE;
+ }
+
+ switch(i->type) {
+ case SQUASHFS_FILE_TYPE:
+ case SQUASHFS_LREG_TYPE:
+ TRACE("create_inode: regular file, file_size %lld, "
+ "blocks %d\n", i->data, i->blocks);
+
+ res = write_file(i, pathname);
+ if(res == FALSE)
+ goto failed;
+
+ file_count ++;
+ break;
+ case SQUASHFS_SYMLINK_TYPE:
+ case SQUASHFS_LSYMLINK_TYPE: {
+ struct timeval times[2] = {
+ { i->time, 0 },
+ { i->time, 0 }
+ };
+
+ TRACE("create_inode: symlink, symlink_size %lld\n",
+ i->data);
+
+ if(force)
+ unlink(pathname);
+
+ res = symlink(i->symlink, pathname);
+ if(res == -1) {
+ EXIT_UNSQUASH_STRICT("create_inode: failed to"
+ " create symlink %s, because %s\n",
+ pathname, strerror(errno));
+ goto failed;
+ }
+
+ res = lutimes(pathname, times);
+ if(res == -1) {
+ EXIT_UNSQUASH_STRICT("create_inode: failed to"
+ " set time on %s, because %s\n",
+ pathname, strerror(errno));
+ }
+
+ if(root_process) {
+ res = lchown(pathname, i->uid, i->gid);
+ if(res == -1) {
+ EXIT_UNSQUASH_STRICT("create_inode: "
+ "failed to change uid and "
+ "gids on %s, because %s\n",
+ pathname, strerror(errno));
+ failed = TRUE;
+ }
+ }
+
+ res = write_xattr(pathname, i->xattr);
+ if(res == FALSE)
+ failed = TRUE;
+
+ if(failed)
+ goto failed;
+
+ sym_count ++;
+ break;
+ }
+ case SQUASHFS_BLKDEV_TYPE:
+ case SQUASHFS_CHRDEV_TYPE:
+ case SQUASHFS_LBLKDEV_TYPE:
+ case SQUASHFS_LCHRDEV_TYPE: {
+ int chrdev = 0;
+ unsigned major, minor;
+ if ( i->type == SQUASHFS_CHRDEV_TYPE ||
+ i->type == SQUASHFS_LCHRDEV_TYPE)
+ chrdev = 1;
+
+ TRACE("create_inode: dev, rdev 0x%llx\n", i->data);
+ if(root_process) {
+ if(force)
+ unlink(pathname);
+
+ /* Based on new_decode_dev() in kernel source */
+ major = (i->data & 0xfff00) >> 8;
+ minor = (i->data & 0xff) | ((i->data >> 12)
+ & 0xfff00);
+
+ res = mknod(pathname, chrdev ? S_IFCHR :
+ S_IFBLK, makedev(major, minor));
+ if(res == -1) {
+ EXIT_UNSQUASH_STRICT("create_inode: "
+ "failed to create %s device "
+ "%s, because %s\n", chrdev ?
+ "character" : "block", pathname,
+ strerror(errno));
+ goto failed;
+ }
+ res = set_attributes(pathname, i->mode, i->uid,
+ i->gid, i->time, i->xattr, TRUE);
+ if(res == FALSE)
+ goto failed;
+
+ dev_count ++;
+ } else {
+ EXIT_UNSQUASH_STRICT("create_inode: could not"
+ " create %s device %s, because you're"
+ " not superuser!\n", chrdev ?
+ "character" : "block", pathname);
+ goto failed;
+ }
+ break;
+ }
+ case SQUASHFS_FIFO_TYPE:
+ case SQUASHFS_LFIFO_TYPE:
+ TRACE("create_inode: fifo\n");
+
+ if(force)
+ unlink(pathname);
+
+ res = mknod(pathname, S_IFIFO, 0);
+ if(res == -1) {
+ ERROR("create_inode: failed to create fifo %s, "
+ "because %s\n", pathname,
+ strerror(errno));
+ goto failed;
+ }
+ res = set_attributes(pathname, i->mode, i->uid, i->gid,
+ i->time, i->xattr, TRUE);
+ if(res == FALSE)
+ goto failed;
+
+ fifo_count ++;
+ break;
+ case SQUASHFS_SOCKET_TYPE:
+ case SQUASHFS_LSOCKET_TYPE:
+ TRACE("create_inode: socket\n");
+
+ res = mknod(pathname, S_IFSOCK, 0);
+ if (res == -1) {
+ ERROR("create_inode: failed to create socket "
+ "%s, because %s\n", pathname,
+ strerror(errno));
+ goto failed;
+ }
+ res = set_attributes(pathname, i->mode, i->uid, i->gid,
+ i->time, i->xattr, TRUE);
+ if(res == FALSE)
+ goto failed;
+
+ socket_count++;
+ break;
+ default:
+ EXIT_UNSQUASH_STRICT("Unknown inode type %d in "
+ "create_inode_table!\n", i->type);
+ return FALSE;
+ }
+
+ insert_lookup(i->inode_number, strdup(pathname));
+
+ return TRUE;
+
+failed:
+ /*
+ * Mark the file as created (even though it may not have been), so
+ * any future hard links to it fail with a file not found, which
+ * is correct as the file *is* missing.
+ *
+ * If we don't mark it here as created, then any future hard links
+ * will try to create the file as a separate unlinked file.
+ * If we've had some transitory errors, this may produce files
+ * in various states, which should be hard-linked, but are not.
+ */
+ insert_lookup(i->inode_number, strdup(pathname));
+
+ return FALSE;
+}
+
+
+int squashfs_readdir(struct dir *dir, char **name, unsigned int *start_block,
+unsigned int *offset, unsigned int *type)
+{
+ if(dir->cur_entry == NULL)
+ dir->cur_entry = dir->dirs;
+ else
+ dir->cur_entry = dir->cur_entry->next;
+
+ if(dir->cur_entry == NULL)
+ return FALSE;
+
+ *name = dir->cur_entry->name;
+ *start_block = dir->cur_entry->start_block;
+ *offset = dir->cur_entry->offset;
+ *type = dir->cur_entry->type;
+
+ return TRUE;
+}
+
+
+char *get_component(char *target, char **targname)
+{
+ char *start;
+
+ while(*target == '/')
+ target ++;
+
+ if(*target == '\0')
+ return NULL;
+
+ start = target;
+ while(*target != '/' && *target != '\0')
+ target ++;
+
+ *targname = strndup(start, target - start);
+
+ while(*target == '/')
+ target ++;
+
+ return target;
+}
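+
+/*
+ * For example, get_component("/a/b/c", &name) sets name to "a" and returns
+ * "b/c"; repeating on the result yields "b" and then "c".  Once the final
+ * component has been consumed the returned string is empty, and NULL is
+ * only returned for an empty (or all '/') path.
+ */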
+
+
+void free_path(struct pathname *paths)
+{
+ int i;
+
+ for(i = 0; i < paths->names; i++) {
+ if(paths->name[i].paths)
+ free_path(paths->name[i].paths);
+ free(paths->name[i].name);
+ if(paths->name[i].preg) {
+ regfree(paths->name[i].preg);
+ free(paths->name[i].preg);
+ }
+ }
+
+ free(paths);
+}
+
+
+struct pathname *add_path(struct pathname *paths, int type, char *target,
+ char *alltarget)
+{
+ char *targname;
+ int i, error;
+
+ if(type == PATH_TYPE_EXTRACT)
+ TRACE("add_path: adding \"%s\" extract file\n", target);
+ else
+ TRACE("add_path: adding \"%s\" exclude file\n", target);
+
+ target = get_component(target, &targname);
+
+ if(target == NULL) {
+ if(type == PATH_TYPE_EXTRACT)
+ EXIT_UNSQUASH("Invalid extract file %s\n", alltarget);
+ else
+ EXIT_UNSQUASH("Invalid exclude file %s\n", alltarget);
+ }
+
+ if(paths == NULL) {
+ paths = malloc(sizeof(struct pathname));
+ if(paths == NULL)
+ MEM_ERROR();
+
+ paths->names = 0;
+ paths->name = NULL;
+ }
+
+ for(i = 0; i < paths->names; i++)
+ if(strcmp(paths->name[i].name, targname) == 0)
+ break;
+
+ if(i == paths->names) {
+ /*
+ * allocate new name entry
+ */
+ paths->names ++;
+ paths->name = realloc(paths->name, (i + 1) *
+ sizeof(struct path_entry));
+ if(paths->name == NULL)
+ MEM_ERROR();
+
+ paths->name[i].name = targname;
+ paths->name[i].paths = NULL;
+ if(use_regex) {
+ paths->name[i].preg = malloc(sizeof(regex_t));
+ if(paths->name[i].preg == NULL)
+ MEM_ERROR();
+ error = regcomp(paths->name[i].preg, targname,
+ REG_EXTENDED|REG_NOSUB);
+ if(error) {
+ char str[1024]; /* overflow safe */
+
+ regerror(error, paths->name[i].preg, str, 1024);
+ if(type == PATH_TYPE_EXTRACT)
+ EXIT_UNSQUASH("invalid regex %s in extract %s, "
+ "because %s\n", targname, alltarget,
+ str);
+ else
+ EXIT_UNSQUASH("invalid regex %s in exclude %s, "
+ "because %s\n", targname, alltarget,
+ str);
+ }
+ } else
+ paths->name[i].preg = NULL;
+
+ if(target[0] == '\0') {
+ /*
+ * at leaf pathname component
+ */
+ paths->name[i].paths = NULL;
+ paths->name[i].type = type;
+ } else {
+ /*
+ * recurse adding child components
+ */
+ paths->name[i].type = PATH_TYPE_LINK;
+ paths->name[i].paths = add_path(NULL, type, target,
+ alltarget);
+ }
+ } else {
+ /*
+ * existing matching entry
+ */
+ free(targname);
+
+ if(paths->name[i].type != PATH_TYPE_LINK) {
+ /*
+ * This is the leaf component of a pre-existing
+ * extract/exclude which is either the same as the one
+ * we're adding, or encompasses it (if the one we're
+ * adding still has some path to walk). In either case
+ * we don't need to add this extract/exclude file
+ */
+ } else if(target[0] == '\0') {
+ /*
+ * at leaf pathname component of the extract/exclude
+ * being added, but, child components exist from more
+ * specific extracts/excludes. Delete as they're
+ * encompassed by this
+ */
+ free_path(paths->name[i].paths);
+ paths->name[i].paths = NULL;
+ paths->name[i].type = type;
+ } else
+ /*
+ * recurse adding child components
+ */
+ add_path(paths->name[i].paths, type, target, alltarget);
+ }
+
+ return paths;
+}
+
+
+void add_extract(char *target)
+{
+ extract = add_path(extract, PATH_TYPE_EXTRACT, target, target);
+}
+
+
+void add_exclude(char *str)
+{
+ if(strncmp(str, "... ", 4) == 0)
+ stickypath = add_path(stickypath, PATH_TYPE_EXCLUDE, str + 4, str + 4);
+ else
+ exclude = add_path(exclude, PATH_TYPE_EXCLUDE, str, str);
+}
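+
+/*
+ * For example, an exclude of "... *.bak" goes onto stickypath and so is
+ * matched against entries at every directory depth, whereas an exclude of
+ * "usr/tmp" only matches that path relative to the filesystem root.
+ */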
+
+
+struct pathnames *init_subdir()
+{
+ struct pathnames *new = malloc(sizeof(struct pathnames));
+ if(new == NULL)
+ MEM_ERROR();
+
+ new->count = 0;
+ return new;
+}
+
+
+struct pathnames *add_subdir(struct pathnames *paths, struct pathname *path)
+{
+ if(paths->count % PATHS_ALLOC_SIZE == 0) {
+ paths = realloc(paths, sizeof(struct pathnames *) +
+ (paths->count + PATHS_ALLOC_SIZE) *
+ sizeof(struct pathname *));
+ if(paths == NULL)
+ MEM_ERROR();
+ }
+
+ paths->path[paths->count++] = path;
+ return paths;
+}
+
+
+void free_subdir(struct pathnames *paths)
+{
+ free(paths);
+}
+
+
+int extract_matches(struct pathnames *paths, char *name, struct pathnames **new)
+{
+ int i, n;
+
+ /* nothing to match, extract */
+ if(paths == NULL) {
+ *new = NULL;
+ return TRUE;
+ }
+
+ *new = init_subdir();
+
+ for(n = 0; n < paths->count; n++) {
+ struct pathname *path = paths->path[n];
+ for(i = 0; i < path->names; i++) {
+ int match;
+
+ if(no_wildcards)
+ match = strcmp(path->name[i].name, name) == 0;
+ else if(use_regex)
+ match = regexec(path->name[i].preg, name,
+ (size_t) 0, NULL, 0) == 0;
+ else
+ match = fnmatch(path->name[i].name,
+ name, FNM_PATHNAME|FNM_PERIOD|
+ FNM_EXTMATCH) == 0;
+
+ if(match && path->name[i].type == PATH_TYPE_EXTRACT)
+ /*
+ * match on a leaf component, any subdirectories
+ * will implicitly match, therefore return an
+ * empty new search set
+ */
+ goto empty_set;
+
+ if(match)
+ /*
+ * match on a non-leaf component, add any
+ * subdirectories to the new set of
+ * subdirectories to scan for this name
+ */
+ *new = add_subdir(*new, path->name[i].paths);
+ }
+ }
+
+ if((*new)->count == 0) {
+ /*
+ * no matching names found, delete empty search set, and return
+ * FALSE
+ */
+ free_subdir(*new);
+ *new = NULL;
+ return FALSE;
+ }
+
+ /*
+ * one or more matches with sub-directories found (no leaf matches),
+ * return new search set and return TRUE
+ */
+ return TRUE;
+
+empty_set:
+ /*
+ * found matching leaf extract, return empty search set and return TRUE
+ */
+ free_subdir(*new);
+ *new = NULL;
+ return TRUE;
+}
+
+
+int exclude_match(struct pathname *path, char *name, struct pathnames **new)
+{
+ int i, match;
+
+ for(i = 0; i < path->names; i++) {
+ if(no_wildcards)
+ match = strcmp(path->name[i].name, name) == 0;
+ else if(use_regex)
+ match = regexec(path->name[i].preg, name,
+ (size_t) 0, NULL, 0) == 0;
+ else
+ match = fnmatch(path->name[i].name, name,
+ FNM_PATHNAME|FNM_PERIOD|FNM_EXTMATCH) == 0;
+
+ if(match && path->name[i].type == PATH_TYPE_EXCLUDE) {
+ /*
+ * match on a leaf component, any subdirectories
+ * will implicitly match, therefore return an
+ * empty new search set
+ */
+ free(*new);
+ *new = NULL;
+ return TRUE;
+ }
+
+ if(match)
+ /*
+ * match on a non-leaf component, add any
+ * subdirectories to the new set of
+ * subdirectories to scan for this name
+ */
+ *new = add_subdir(*new, path->name[i].paths);
+ }
+
+ return FALSE;
+}
+
+
+int exclude_matches(struct pathnames *paths, char *name, struct pathnames **new)
+{
+ int n;
+
+ /* nothing to match, don't exclude */
+ if(paths == NULL && stickypath == NULL) {
+ *new = NULL;
+ return FALSE;
+ }
+
+ *new = init_subdir();
+
+ if(stickypath && exclude_match(stickypath, name, new))
+ return TRUE;
+
+ for(n = 0; paths && n < paths->count; n++) {
+ int res = exclude_match(paths->path[n], name, new);
+
+ if(res)
+ return TRUE;
+ }
+
+ if((*new)->count == 0) {
+ /*
+ * no matching names found, don't exclude. Delete empty search
+ * set, and return FALSE
+ */
+ free_subdir(*new);
+ *new = NULL;
+ return FALSE;
+ }
+
+ /*
+ * one or more matches with sub-directories found (no leaf matches),
+ * return new search set and return FALSE
+ */
+ return FALSE;
+}
+
+
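+/*
+ * Directory stack used by follow_path() and cat_scan() to track the
+ * directories walked, allowing ".." components and relative symlinks to
+ * be resolved, and the canonicalised pathname to be rebuilt.
+ */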
+struct directory_stack *create_stack()
+{
+ struct directory_stack *stack = malloc(sizeof(struct directory_stack));
+ if(stack == NULL)
+ MEM_ERROR();
+
+ stack->size = 0;
+ stack->stack = NULL;
+ stack->symlink = NULL;
+ stack->name = NULL;
+
+ return stack;
+}
+
+
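+/*
+ * Record the directory at <depth> on the stack.  The stack may grow by
+ * one level, shrink by one level, or stay the same size; any other
+ * change in depth is an internal error.
+ */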
+void add_stack(struct directory_stack *stack, unsigned int start_block,
+ unsigned int offset, char *name, int depth)
+{
+ if((depth - 1) == stack->size) {
+ /* Stack growing an extra level */
+ stack->stack = realloc(stack->stack, depth *
+ sizeof(struct directory_level));
+
+ if(stack->stack == NULL)
+ MEM_ERROR();
+
+ stack->stack[depth - 1].start_block = start_block;
+ stack->stack[depth - 1].offset = offset;
+ stack->stack[depth - 1].name = strdup(name);
+ } else if((depth + 1) == stack->size)
+ /* Stack shrinking a level */
+ free(stack->stack[depth].name);
+ else if(depth == stack->size)
+ /* Stack staying same size - nothing to do */
+ return;
+ else
+ /* Any other change in size is invalid */
+ EXIT_UNSQUASH("Invalid state in add_stack\n");
+
+ stack->size = depth;
+}
+
+
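+/*
+ * Return a deep copy of the stack levels and their names.  The symlink
+ * list and leaf name are not copied.
+ */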
+struct directory_stack *clone_stack(struct directory_stack *stack)
+{
+ int i;
+ struct directory_stack *new = malloc(sizeof(struct directory_stack));
+ if(new == NULL)
+ MEM_ERROR();
+
+ new->stack = malloc(stack->size * sizeof(struct directory_level));
+ if(new->stack == NULL)
+ MEM_ERROR();
+
+ for(i = 0; i < stack->size; i++) {
+ new->stack[i].start_block = stack->stack[i].start_block;
+ new->stack[i].offset = stack->stack[i].offset;
+ new->stack[i].name = strdup(stack->stack[i].name);
+ }
+
+ new->size = stack->size;
+ new->symlink = NULL;
+ new->name = NULL;
+
+ return new;
+}
+
+
+void pop_stack(struct directory_stack *stack)
+{
+ free(stack->stack[--stack->size].name);
+}
+
+
+void free_stack(struct directory_stack *stack)
+{
+ int i;
+ struct symlink *symlink = stack->symlink;
+
+ for(i = 0; i < stack->size; i++)
+ free(stack->stack[i].name);
+
+ while(symlink) {
+ struct symlink *s = symlink;
+
+ symlink = symlink->next;
+ free(s->pathname);
+ free(s);
+ }
+
+ free(stack->stack);
+ free(stack->name);
+ free(stack);
+}
+
+
+char *stack_pathname(struct directory_stack *stack, char *name)
+{
+ int i, size = 0;
+ char *pathname;
+
+ /* work out how much space is needed for the pathname */
+ for(i = 1; i < stack->size; i++)
+ size += strlen(stack->stack[i].name);
+
+ /* add room for leaf name, slashes and '\0' terminator */
+ size += strlen(name) + stack->size;
+
+ pathname = malloc(size);
+ if (pathname == NULL)
+ MEM_ERROR();
+
+ pathname[0] = '\0';
+
+ /* concatenate */
+ for(i = 1; i < stack->size; i++) {
+ strcat(pathname, stack->stack[i].name);
+ strcat(pathname, "/");
+ }
+
+ strcat(pathname, name);
+
+ return pathname;
+}
+
+
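+/* record a symlink found during the walk, storing its full pathname */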
+void add_symlink(struct directory_stack *stack, char *name)
+{
+ struct symlink *symlink = malloc(sizeof(struct symlink));
+ if(symlink == NULL)
+ MEM_ERROR();
+
+ symlink->pathname = stack_pathname(stack, name);
+ symlink->next = stack->symlink;
+ stack->symlink = symlink;
+}
+
+
+/*
+ * Walk the supplied pathname. If any symlinks are encountered whilst walking
+ * the pathname, then recursively walk those, to obtain the fully
+ * dereferenced canonicalised pathname. Return that and the pathnames
+ * of all symlinks found during the walk.
+ *
+ * follow_path (-follow-symlinks option) implies no wildcard matching,
+ * due to the fact that with wildcards there is no single canonical pathname
+ * to be found. Many pathnames may match or none at all.
+ *
+ * If follow_path fails to walk a pathname, either because a component
+ * doesn't exist, a component is not a directory when a directory
+ * component is expected, a symlink with an absolute path is encountered,
+ * or a symlink is encountered which cannot be recursively walked due to
+ * the above failures, then return FALSE.
+ */
+int follow_path(char *path, char *name, unsigned int start_block,
+ unsigned int offset, int depth, int symlinks,
+ struct directory_stack *stack)
+{
+ struct inode *i;
+ struct dir *dir;
+ char *target, *symlink;
+ unsigned int type;
+ int traversed = FALSE;
+ unsigned int entry_start, entry_offset;
+
+ while((path = get_component(path, &target))) {
+ if(strcmp(target, ".") != 0)
+ break;
+
+ free(target);
+ }
+
+ if(path == NULL)
+ return FALSE;
+
+ add_stack(stack, start_block, offset, name, depth);
+
+ if(strcmp(target, "..") == 0) {
+ if(depth > 1) {
+ start_block = stack->stack[depth - 2].start_block;
+ offset = stack->stack[depth - 2].offset;
+
+ traversed = follow_path(path, "", start_block, offset,
+ depth - 1, symlinks, stack);
+ }
+
+ free(target);
+ return traversed;
+ }
+
+ dir = s_ops->opendir(start_block, offset, &i);
+ if(dir == NULL) {
+ free(target);
+ return FALSE;
+ }
+
+ while(squashfs_readdir(dir, &name, &entry_start, &entry_offset, &type)) {
+ if(strcmp(name, target) == 0) {
+ switch(type) {
+ case SQUASHFS_SYMLINK_TYPE:
+ i = s_ops->read_inode(entry_start, entry_offset);
+ symlink = i->symlink;
+
+ /* Symlink must be relative to current
+ * directory and not be absolute, otherwise
+ * we can't follow it, as it is probably
+ * outside the Squashfs filesystem */
+ if(symlink[0] == '/') {
+ traversed = FALSE;
+ free(symlink);
+ break;
+ }
+
+ /* Detect circular symlinks */
+ if(symlinks >= MAX_FOLLOW_SYMLINKS) {
+ ERROR("Too many levels of symbolic "
+ "links\n");
+ traversed = FALSE;
+ free(symlink);
+ break;
+ }
+
+ /* Add symlink to list of symlinks found
+ * traversing the pathname */
+ add_symlink(stack, name);
+
+ traversed = follow_path(symlink, "",
+ start_block, offset, depth,
+ symlinks + 1, stack);
+
+ free(symlink);
+
+ if(traversed == TRUE) {
+ /* If we still have some path to
+ * walk, then walk it from where
+ * the symlink traversal left us
+ *
+ * Obviously symlink traversal must
+ * have left us at a directory to do
+ * this */
+ if(path[0] != '\0') {
+ if(stack->type !=
+ SQUASHFS_DIR_TYPE) {
+ traversed = FALSE;
+ break;
+ }
+
+ /* "Jump" to the traversed
+ * point */
+ depth = stack->size;
+ start_block = stack->start_block;
+ offset = stack->offset;
+ name = stack->name;
+
+ /* continue following path */
+ traversed = follow_path(path,
+ name, start_block,
+ offset, depth + 1,
+ symlinks, stack);
+ }
+ }
+
+ break;
+ case SQUASHFS_DIR_TYPE:
+ /* if at end of path, traversed OK */
+ if(path[0] == '\0') {
+ traversed = TRUE;
+ stack->name = strdup(name);
+ stack->type = type;
+ stack->start_block = entry_start;
+ stack->offset = entry_offset;
+ } else /* follow the path */
+ traversed = follow_path(path, name,
+ entry_start, entry_offset,
+ depth + 1, symlinks, stack);
+ break;
+ default:
+ /* leaf directory entry, can't go any further,
+ * and so path must not continue */
+ if(path[0] == '\0') {
+ traversed = TRUE;
+ stack->name = strdup(name);
+ stack->type = type;
+ stack->start_block = entry_start;
+ stack->offset = entry_offset;
+ } else
+ traversed = FALSE;
+ }
+ }
+ }
+
+ free(target);
+ squashfs_closedir(dir);
+
+ return traversed;
+}
+
+
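+/*
+ * Scan the selected parts of the filesystem without extracting anything,
+ * totalling the inodes, files and data blocks to be processed.  The
+ * totals are used to size the progress bar.
+ */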
+int pre_scan(char *parent_name, unsigned int start_block, unsigned int offset,
+ struct pathnames *extracts, struct pathnames *excludes, int depth)
+{
+ unsigned int type;
+ int scan_res = TRUE;
+ char *name;
+ struct pathnames *newt, *newc = NULL;
+ struct inode *i;
+ struct dir *dir;
+
+ if(max_depth != -1 && depth > max_depth)
+ return TRUE;
+
+ dir = s_ops->opendir(start_block, offset, &i);
+ if(dir == NULL)
+ return FALSE;
+
+ if(inumber_lookup(i->inode_number))
+ EXIT_UNSQUASH("File System corrupted: directory loop detected\n");
+
+ while(squashfs_readdir(dir, &name, &start_block, &offset, &type)) {
+ struct inode *i;
+ char *pathname;
+ int res;
+
+ TRACE("pre_scan: name %s, start_block %d, offset %d, type %d\n",
+ name, start_block, offset, type);
+
+ if(!extract_matches(extracts, name, &newt))
+ continue;
+
+ if(exclude_matches(excludes, name, &newc)) {
+ free_subdir(newt);
+ continue;
+ }
+
+ res = asprintf(&pathname, "%s/%s", parent_name, name);
+ if(res == -1)
+ MEM_ERROR();
+
+ if(type == SQUASHFS_DIR_TYPE) {
+ res = pre_scan(parent_name, start_block, offset, newt,
+ newc, depth + 1);
+ if(res == FALSE)
+ scan_res = FALSE;
+ } else if(newt == NULL) {
+ if(type == SQUASHFS_FILE_TYPE) {
+ i = s_ops->read_inode(start_block, offset);
+ if(lookup(i->inode_number) == NULL) {
+ insert_lookup(i->inode_number, (char *) i);
+ total_blocks += (i->data +
+ (block_size - 1)) >> block_log;
+ }
+ total_files ++;
+ }
+ total_inodes ++;
+ }
+
+ free_subdir(newt);
+ free_subdir(newc);
+ free(pathname);
+ }
+
+ squashfs_closedir(dir);
+
+ return scan_res;
+}
+
+
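+/*
+ * Scan the directory tree rooted at parent_name, listing and/or
+ * extracting entries subject to the extract/exclude sets and any
+ * -max-depth limit.
+ */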
+int dir_scan(char *parent_name, unsigned int start_block, unsigned int offset,
+ struct pathnames *extracts, struct pathnames *excludes, int depth)
+{
+ unsigned int type;
+ int scan_res = TRUE;
+ char *name;
+ struct pathnames *newt, *newc = NULL;
+ struct inode *i;
+ struct dir *dir = s_ops->opendir(start_block, offset, &i);
+
+ if(dir == NULL) {
+ EXIT_UNSQUASH_IGNORE("dir_scan: failed to read directory %s\n",
+ parent_name);
+ return FALSE;
+ }
+
+ if(inumber_lookup(i->inode_number))
+ EXIT_UNSQUASH("File System corrupted: directory loop detected\n");
+
+ if((lsonly || info) && (!concise || dir->dir_count == 0))
+ print_filename(parent_name, i);
+
+ if(!lsonly) {
+ /*
+ * Make directory with default User rwx permissions rather than
+ * the permissions from the filesystem, as these may not have
+ * write/execute permission. These are fixed up later in
+ * set_attributes().
+ */
+ int res = mkdir(parent_name, S_IRUSR|S_IWUSR|S_IXUSR);
+ if(res == -1) {
+ /*
+ * Skip the directory if mkdir fails, unless the error is
+ * EEXIST and we're either forcing or at the top level
+ */
+ if((depth != 1 && !force) || errno != EEXIST) {
+ EXIT_UNSQUASH_IGNORE("dir_scan: failed to make"
+ " directory %s, because %s\n",
+ parent_name, strerror(errno));
+ squashfs_closedir(dir);
+ return FALSE;
+ }
+
+ /*
+ * Try to change permissions of existing directory so
+ * that we can write to it
+ */
+ res = chmod(parent_name, S_IRUSR|S_IWUSR|S_IXUSR);
+ if (res == -1) {
+ EXIT_UNSQUASH_IGNORE("dir_scan: failed to "
+ "change permissions for directory %s,"
+ " because %s\n", parent_name,
+ strerror(errno));
+ squashfs_closedir(dir);
+ return FALSE;
+ }
+ }
+ }
+
+ if(max_depth == -1 || depth <= max_depth) {
+ while(squashfs_readdir(dir, &name, &start_block, &offset,
+ &type)) {
+ char *pathname;
+ int res;
+
+ TRACE("dir_scan: name %s, start_block %d, offset %d,"
+ " type %d\n", name, start_block, offset, type);
+
+
+ if(!extract_matches(extracts, name, &newt))
+ continue;
+
+ if(exclude_matches(excludes, name, &newc)) {
+ free_subdir(newt);
+ continue;
+ }
+
+ res = asprintf(&pathname, "%s/%s", parent_name, name);
+ if(res == -1)
+ MEM_ERROR();
+
+ if(type == SQUASHFS_DIR_TYPE) {
+ res = dir_scan(pathname, start_block, offset,
+ newt, newc, depth + 1);
+ if(res == FALSE)
+ scan_res = FALSE;
+ free(pathname);
+ } else if(newt == NULL) {
+ update_info(pathname);
+
+ i = s_ops->read_inode(start_block, offset);
+
+ if(lsonly || info)
+ print_filename(pathname, i);
+
+ if(!lsonly) {
+ res = create_inode(pathname, i);
+ if(res == FALSE)
+ scan_res = FALSE;
+ }
+
+ if(i->type == SQUASHFS_SYMLINK_TYPE ||
+ i->type == SQUASHFS_LSYMLINK_TYPE)
+ free(i->symlink);
+ } else
+ free(pathname);
+
+ free_subdir(newt);
+ free_subdir(newc);
+ }
+ }
+
+ if(!lsonly)
+ queue_dir(parent_name, dir);
+
+ squashfs_closedir(dir);
+ dir_count ++;
+
+ return scan_res;
+}
+
+
+int check_compression(struct compressor *comp)
+{
+ int res, bytes = 0;
+ char buffer[SQUASHFS_METADATA_SIZE] __attribute__ ((aligned));
+
+ if(!comp->supported) {
+ ERROR("Filesystem uses %s compression, this is "
+ "unsupported by this version\n", comp->name);
+ ERROR("Decompressors available:\n");
+ display_compressors(stderr, "", "");
+ return FALSE;
+ }
+
+ /*
+ * Read compression options from disk if present, and pass to
+ * the compressor to ensure we know how to decompress a filesystem
+ * compressed with these compression options.
+ *
+ * Note, even if there is no compression options we still call the
+ * compressor because some compression options may be mandatory
+ * for some compressors.
+ */
+ if(SQUASHFS_COMP_OPTS(sBlk.s.flags)) {
+ bytes = read_block(fd, sizeof(sBlk.s), NULL, 0, buffer);
+ if(bytes == 0) {
+ ERROR("Failed to read compressor options\n");
+ return FALSE;
+ }
+ }
+
+ res = compressor_check_options(comp, sBlk.s.block_size, buffer, bytes);
+
+ return res != -1;
+}
+
+
+int read_super(char *source)
+{
+ squashfs_super_block_3 sBlk_3;
+
+ /*
+ * Try to read a Squashfs 4 superblock
+ */
+ int res = read_super_4(&s_ops);
+
+ if(res != -1)
+ return res;
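+ /* Not a Squashfs 4 filesystem, fall back to trying 3.x, 2.x and 1.x */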
+ res = read_super_3(source, &s_ops, &sBlk_3);
+ if(res != -1)
+ return res;
+ res = read_super_2(&s_ops, &sBlk_3);
+ if(res != -1)
+ return res;
+ res = read_super_1(&s_ops, &sBlk_3);
+ if(res != -1)
+ return res;
+
+ return FALSE;
+}
+
+
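+/* read extract pathnames from file, one per line, and add them to the
+ * extract set */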
+void process_extract_files(char *filename)
+{
+ FILE *fd;
+ char buffer[MAX_LINE + 1]; /* overflow safe */
+ char *name;
+
+ fd = fopen(filename, "r");
+ if(fd == NULL)
+ EXIT_UNSQUASH("Failed to open extract file \"%s\" because %s\n",
+ filename, strerror(errno));
+
+ while(fgets(name = buffer, MAX_LINE + 1, fd) != NULL) {
+ int len = strlen(name);
+
+ if(len == MAX_LINE && name[len - 1] != '\n')
+ /* line too large */
+ EXIT_UNSQUASH("Line too long when reading "
+ "extract file \"%s\", larger than %d "
+ "bytes\n", filename, MAX_LINE);
+
+ /*
+ * Remove '\n' terminator if it exists (the last line
+ * in the file may not be '\n' terminated)
+ */
+ if(len && name[len - 1] == '\n')
+ name[len - 1] = '\0';
+
+ /* Skip any leading whitespace */
+ while(isspace(*name))
+ name ++;
+
+ /* if comment line, skip */
+ if(*name == '#')
+ continue;
+
+ /* check for initial backslash, to accommodate
+ * filenames with leading space or leading # character
+ */
+ if(*name == '\\')
+ name ++;
+
+ /* if line is now empty after skipping characters, skip it */
+ if(*name == '\0')
+ continue;
+
+ add_extract(name);
+ }
+
+ if(ferror(fd))
+ EXIT_UNSQUASH("Reading extract file \"%s\" failed because %s\n",
+ filename, strerror(errno));
+
+ fclose(fd);
+}
+
+
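+/* read exclude pathnames from file, one per line, and add them to the
+ * exclude set */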
+void process_exclude_files(char *filename)
+{
+ FILE *fd;
+ char buffer[MAX_LINE + 1]; /* overflow safe */
+ char *name;
+
+ fd = fopen(filename, "r");
+ if(fd == NULL)
+ EXIT_UNSQUASH("Failed to open exclude file \"%s\" because %s\n",
+ filename, strerror(errno));
+
+ while(fgets(name = buffer, MAX_LINE + 1, fd) != NULL) {
+ int len = strlen(name);
+
+ if(len == MAX_LINE && name[len - 1] != '\n')
+ /* line too large */
+ EXIT_UNSQUASH("Line too long when reading "
+ "exclude file \"%s\", larger than %d "
+ "bytes\n", filename, MAX_LINE);
+
+ /*
+ * Remove '\n' terminator if it exists (the last line
+ * in the file may not be '\n' terminated)
+ */
+ if(len && name[len - 1] == '\n')
+ name[len - 1] = '\0';
+
+ /* Skip any leading whitespace */
+ while(isspace(*name))
+ name ++;
+
+ /* if comment line, skip */
+ if(*name == '#')
+ continue;
+
+ /* check for initial backslash, to accommodate
+ * filenames with leading space or leading # character
+ */
+ if(*name == '\\')
+ name ++;
+
+ /* if line is now empty after skipping characters, skip it */
+ if(*name == '\0')
+ continue;
+
+ add_exclude(name);
+ }
+
+ if(ferror(fd))
+ EXIT_UNSQUASH("Reading exclude file \"%s\" failed because %s\n",
+ filename, strerror(errno));
+
+ fclose(fd);
+}
+
+
+/*
+ * reader thread. This thread processes read requests queued by the
+ * cache_get() routine.
+ */
+void *reader(void *arg)
+{
+ while(1) {
+ struct cache_entry *entry = queue_get(to_reader);
+ int res = read_fs_bytes(fd, entry->block,
+ SQUASHFS_COMPRESSED_SIZE_BLOCK(entry->size),
+ entry->data);
+
+ if(res && SQUASHFS_COMPRESSED_BLOCK(entry->size))
+ /*
+ * queue successfully read block to the inflate
+ * thread(s) for further processing
+ */
+ queue_put(to_inflate, entry);
+ else
+ /*
+ * block has either been successfully read and is
+ * uncompressed, or an error has occurred, clear pending
+ * flag, set error appropriately, and wake up any
+ * threads waiting on this buffer
+ */
+ cache_block_ready(entry, !res);
+ }
+}
+
+
+/*
+ * writer thread. This processes file write requests queued by the
+ * write_file() routine.
+ */
+void *writer(void *arg)
+{
+ int i;
+ long exit_code = FALSE;
+
+ while(1) {
+ struct squashfs_file *file = queue_get(to_writer);
+ int file_fd;
+ long long hole = 0;
+ int local_fail = FALSE;
+ int res;
+
+ if(file == NULL) {
+ queue_put(from_writer, (void *) exit_code);
+ continue;
+ } else if(file->fd == -1) {
+ /* write attributes for directory file->pathname */
+ res = set_attributes(file->pathname, file->mode,
+ file->uid, file->gid, file->time, file->xattr,
+ TRUE);
+ if(res == FALSE)
+ exit_code = TRUE;
+ free(file->pathname);
+ free(file);
+ continue;
+ }
+
+ TRACE("writer: regular file, blocks %d\n", file->blocks);
+
+ file_fd = file->fd;
+
+ for(i = 0; i < file->blocks; i++, cur_blocks ++) {
+ struct file_entry *block = queue_get(to_writer);
+
+ if(block->buffer == 0) { /* sparse file */
+ hole += block->size;
+ free(block);
+ continue;
+ }
+
+ cache_block_wait(block->buffer);
+
+ if(block->buffer->error) {
+ EXIT_UNSQUASH_IGNORE("writer: failed to "
+ "read/uncompress file %s\n",
+ file->pathname);
+ exit_code = local_fail = TRUE;
+ }
+
+ if(local_fail == FALSE) {
+ res = write_block(file_fd,
+ block->buffer->data + block->offset,
+ block->size, hole, file->sparse);
+
+ if(res == FALSE) {
+ EXIT_UNSQUASH_IGNORE("writer: failed "
+ "to write file %s\n",
+ file->pathname);
+ exit_code = local_fail = TRUE;
+ }
+ }
+
+ hole = 0;
+ cache_block_put(block->buffer);
+ free(block);
+ }
+
+ if(hole && local_fail == FALSE) {
+ /*
+ * corner case for hole extending to end of file
+ */
+ if(file->sparse == FALSE ||
+ lseek(file_fd, hole, SEEK_CUR) == -1) {
+ /*
+ * for files which we don't want to write
+ * sparsely, or for broken lseeks which cannot
+ * seek beyond end of file, write_block will do
+ * the right thing
+ */
+ hole --;
+ if(write_block(file_fd, "\0", 1, hole,
+ file->sparse) == FALSE) {
+ EXIT_UNSQUASH_IGNORE("writer: failed "
+ "to write sparse data block "
+ "for file %s\n",
+ file->pathname);
+ exit_code = local_fail = TRUE;
+ }
+ } else if(ftruncate(file_fd, file->file_size) == -1) {
+ EXIT_UNSQUASH_IGNORE("writer: failed to write "
+ "sparse data block for file %s\n",
+ file->pathname);
+ exit_code = local_fail = TRUE;
+ }
+ }
+
+ close_wake(file_fd);
+ if(local_fail == FALSE) {
+ int set = !root_process && !(file->mode & S_IWUSR) && has_xattrs(file->xattr);
+
+ res = set_attributes(file->pathname, file->mode,
+ file->uid, file->gid, file->time, file->xattr,
+ force || set);
+ if(res == FALSE)
+ exit_code = TRUE;
+ } else
+ unlink(file->pathname);
+ free(file->pathname);
+ free(file);
+
+ }
+}
+
+
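+/*
+ * cat writer thread.  Like writer(), but writes file data to the single
+ * output descriptor (writer_fd) rather than creating files on disk.
+ */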
+void *cat_writer(void *arg)
+{
+ int i;
+ long exit_code = FALSE;
+
+ while(1) {
+ struct squashfs_file *file = queue_get(to_writer);
+ long long hole = 0;
+ int local_fail = FALSE;
+ int res;
+
+ if(file == NULL) {
+ queue_put(from_writer, (void *) exit_code);
+ continue;
+ }
+
+ TRACE("cat_writer: regular file, blocks %d\n", file->blocks);
+
+ for(i = 0; i < file->blocks; i++, cur_blocks ++) {
+ struct file_entry *block = queue_get(to_writer);
+
+ if(block->buffer == 0) { /* sparse file */
+ hole += block->size;
+ free(block);
+ continue;
+ }
+
+ cache_block_wait(block->buffer);
+
+ if(block->buffer->error) {
+ EXIT_UNSQUASH_IGNORE("cat: failed to "
+ "read/uncompress file %s\n",
+ file->pathname);
+ exit_code = local_fail = TRUE;
+ }
+
+ if(local_fail == FALSE) {
+ res = write_block(writer_fd,
+ block->buffer->data + block->offset,
+ block->size, hole, FALSE);
+
+ if(res == FALSE) {
+ EXIT_UNSQUASH_IGNORE("cat: failed "
+ "to write file %s\n",
+ file->pathname);
+ exit_code = local_fail = TRUE;
+ }
+ }
+
+ hole = 0;
+ cache_block_put(block->buffer);
+ free(block);
+ }
+
+ if(hole && local_fail == FALSE) {
+ /*
+ * corner case for hole extending to end of file
+ */
+ hole --;
+ if(write_block(writer_fd, "\0", 1, hole,
+ file->sparse) == FALSE) {
+ EXIT_UNSQUASH_IGNORE("cat: failed "
+ "to write sparse data block "
+ "for file %s\n",
+ file->pathname);
+ exit_code = local_fail = TRUE;
+ }
+ }
+
+ free(file->pathname);
+ free(file);
+ }
+}
+
+
+/*
+ * decompress thread. This decompresses buffers queued by the read thread
+ */
+void *inflator(void *arg)
+{
+ char *tmp = malloc(block_size);
+ if(tmp == NULL)
+ MEM_ERROR();
+
+ while(1) {
+ struct cache_entry *entry = queue_get(to_inflate);
+ int error, res;
+
+ res = compressor_uncompress(comp, tmp, entry->data,
+ SQUASHFS_COMPRESSED_SIZE_BLOCK(entry->size), block_size,
+ &error);
+
+ if(res == -1)
+ ERROR("%s uncompress failed with error code %d\n",
+ comp->name, error);
+ else
+ memcpy(entry->data, tmp, res);
+
+ /*
+ * block has been either successfully decompressed, or an error
+ * occurred, clear pending flag, set error appropriately and
+ * wake up any threads waiting on this block
+ */
+ cache_block_ready(entry, res == -1);
+ }
+}
+
+
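+/*
+ * progress thread.  Updates the progress bar roughly four times a second,
+ * and installs a SIGWINCH handler to track terminal width changes.
+ */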
+void *progress_thread(void *arg)
+{
+ struct timespec requested_time, remaining;
+ struct itimerval itimerval;
+ struct winsize winsize;
+
+ if(ioctl(1, TIOCGWINSZ, &winsize) == -1) {
+ if(isatty(STDOUT_FILENO))
+ ERROR("TIOCGWINSZ ioctl failed, defaulting to 80 "
+ "columns\n");
+ columns = 80;
+ } else
+ columns = winsize.ws_col;
+ signal(SIGWINCH, sigwinch_handler);
+ signal(SIGALRM, sigalrm_handler);
+
+ itimerval.it_value.tv_sec = 0;
+ itimerval.it_value.tv_usec = 250000;
+ itimerval.it_interval.tv_sec = 0;
+ itimerval.it_interval.tv_usec = 250000;
+ setitimer(ITIMER_REAL, &itimerval, NULL);
+
+ requested_time.tv_sec = 0;
+ requested_time.tv_nsec = 250000000;
+
+ while(1) {
+ int res = nanosleep(&requested_time, &remaining);
+
+ if(res == -1 && errno != EINTR)
+ EXIT_UNSQUASH("nanosleep failed in progress thread\n");
+
+ if(progress_enabled) {
+ pthread_mutex_lock(&screen_mutex);
+ progress_bar(sym_count + dev_count + fifo_count +
+ socket_count + file_count + hardlnk_count +
+ cur_blocks, total_inodes + total_blocks,
+ columns);
+ pthread_mutex_unlock(&screen_mutex);
+ }
+ }
+}
+
+
+void initialise_threads(int fragment_buffer_size, int data_buffer_size, int cat_file)
+{
+ struct rlimit rlim;
+ int i, max_files, res;
+ sigset_t sigmask, old_mask;
+
+ if(cat_file == FALSE) {
+ /* block SIGQUIT and SIGHUP, these are handled by the info thread */
+ sigemptyset(&sigmask);
+ sigaddset(&sigmask, SIGQUIT);
+ sigaddset(&sigmask, SIGHUP);
+ if(pthread_sigmask(SIG_BLOCK, &sigmask, NULL) != 0)
+ EXIT_UNSQUASH("Failed to set signal mask in initialise_threads\n");
+
+ /*
+ * temporarily block these signals so the created sub-threads will
+ * ignore them, ensuring the main thread handles them
+ */
+ sigemptyset(&sigmask);
+ sigaddset(&sigmask, SIGINT);
+ sigaddset(&sigmask, SIGTERM);
+ if(pthread_sigmask(SIG_BLOCK, &sigmask, &old_mask) != 0)
+ EXIT_UNSQUASH("Failed to set signal mask in initialise_threads\n");
+ } else {
+ /*
+ * temporarily block these signals so the created sub-threads will
+ * ignore them, ensuring the main thread handles them
+ */
+ sigemptyset(&sigmask);
+ sigaddset(&sigmask, SIGQUIT);
+ sigaddset(&sigmask, SIGHUP);
+ sigaddset(&sigmask, SIGINT);
+ sigaddset(&sigmask, SIGTERM);
+ if(pthread_sigmask(SIG_BLOCK, &sigmask, &old_mask) != 0)
+ EXIT_UNSQUASH("Failed to set signal mask in initialise_threads\n");
+ }
+
+ if(processors == -1) {
+#ifdef __linux__
+ cpu_set_t cpu_set;
+ CPU_ZERO(&cpu_set);
+
+ if(sched_getaffinity(0, sizeof cpu_set, &cpu_set) == -1)
+ processors = sysconf(_SC_NPROCESSORS_ONLN);
+ else
+ processors = CPU_COUNT(&cpu_set);
+#else
+ int mib[2];
+ size_t len = sizeof(processors);
+
+ mib[0] = CTL_HW;
+#ifdef HW_AVAILCPU
+ mib[1] = HW_AVAILCPU;
+#else
+ mib[1] = HW_NCPU;
+#endif
+
+ if(sysctl(mib, 2, &processors, &len, NULL, 0) == -1) {
+ ERROR("Failed to get number of available processors. "
+ "Defaulting to 1\n");
+ processors = 1;
+ }
+#endif
+ }
+
+ if(add_overflow(processors, 3) ||
+ multiply_overflow(processors + 3, sizeof(pthread_t)))
+ EXIT_UNSQUASH("Processors too large\n");
+
+ thread = malloc((3 + processors) * sizeof(pthread_t));
+ if(thread == NULL)
+ MEM_ERROR();
+
+ inflator_thread = &thread[3];
+
+ /*
+ * dimensioning the to_reader and to_inflate queues. The size of
+ * these queues is directly related to the amount of block
+ * read-ahead possible. To_reader queues block read requests to
+ * the reader thread and to_inflate queues block decompression
+ * requests to the inflate thread(s) (once the block has been read by
+ * the reader thread). The amount of read-ahead is determined by
+ * the combined size of the data_block and fragment caches which
+ * determine the total number of blocks which can be "in flight"
+ * at any one time (either being read or being decompressed)
+ *
+ * The maximum file open limit, however, affects the read-ahead
+ * possible, in that for normal sizes of the fragment and data block
+ * caches, where the incoming files have few data blocks or one fragment
+ * only, the file open limit is likely to be reached before the
+ * caches are full. This means the worst case sizing of the combined
+ * sizes of the caches is unlikely to ever be necessary. However, it is
+ * obvious that read-ahead up to the data block cache size is always
+ * possible irrespective of the file open limit, because a single file
+ * could contain that number of blocks.
+ *
+ * Choosing the size as "file open limit + data block cache size" seems
+ * to be a reasonable estimate. We can reasonably assume the maximum
+ * likely read-ahead possible is data block cache size + one fragment
+ * per open file.
+ *
+ * dimensioning the to_writer queue. The size of this queue is
+ * directly related to the amount of block read-ahead possible.
+ * However, unlike the to_reader and to_inflate queues, this is
+ * complicated by the fact the to_writer queue not only contains
+ * entries for fragments and data_blocks but it also contains
+ * file entries, one per open file in the read-ahead.
+ *
+ * Choosing the size as "2 * (file open limit) +
+ * data block cache size" seems to be a reasonable estimate.
+ * We can reasonably assume the maximum likely read-ahead possible
+ * is data block cache size + one fragment per open file, and then
+ * we will have a file_entry for each open file.
+ */
+ res = getrlimit(RLIMIT_NOFILE, &rlim);
+ if (res == -1) {
+ ERROR("failed to get open file limit! Defaulting to 1\n");
+ rlim.rlim_cur = 1;
+ }
+
+ if (rlim.rlim_cur != RLIM_INFINITY) {
+ /*
+ * leave OPEN_FILE_MARGIN free (rlim_cur includes the fds used by
+ * stdin, stdout, stderr and the filesystem fd)
+ */
+ if (rlim.rlim_cur <= OPEN_FILE_MARGIN)
+ /* no margin, use minimum possible */
+ max_files = 1;
+ else
+ max_files = rlim.rlim_cur - OPEN_FILE_MARGIN;
+ } else
+ max_files = -1;
+
+ /* set amount of available files for use by open_wait and close_wake */
+ open_init(max_files);
+
+ /*
+ * allocate to_reader, to_inflate and to_writer queues. Set based on
+ * cache limits, unless there is an open file limit which would produce
+ * smaller queues
+ *
+ * In doing so, check that the user supplied values do not overflow
+ * a signed int
+ */
+ if (max_files != -1 && max_files < fragment_buffer_size) {
+ if(add_overflow(data_buffer_size, max_files) ||
+ add_overflow(data_buffer_size, max_files * 2))
+ EXIT_UNSQUASH("Data queue size is too large\n");
+
+ to_reader = queue_init(max_files + data_buffer_size);
+ to_inflate = queue_init(max_files + data_buffer_size);
+ to_writer = queue_init(max_files * 2 + data_buffer_size);
+ } else {
+ int all_buffers_size;
+
+ if(add_overflow(fragment_buffer_size, data_buffer_size))
+ EXIT_UNSQUASH("Data and fragment queues combined are"
+ " too large\n");
+
+ all_buffers_size = fragment_buffer_size + data_buffer_size;
+
+ if(add_overflow(all_buffers_size, all_buffers_size))
+ EXIT_UNSQUASH("Data and fragment queues combined are"
+ " too large\n");
+
+ to_reader = queue_init(all_buffers_size);
+ to_inflate = queue_init(all_buffers_size);
+ to_writer = queue_init(all_buffers_size * 2);
+ }
+
+ from_writer = queue_init(1);
+
+ fragment_cache = cache_init(block_size, fragment_buffer_size);
+ data_cache = cache_init(block_size, data_buffer_size);
+
+ pthread_create(&thread[0], NULL, reader, NULL);
+ pthread_create(&thread[2], NULL, progress_thread, NULL);
+
+ if(pseudo_file) {
+ pthread_create(&thread[1], NULL, cat_writer, NULL);
+ init_info();
+ } else if(cat_files)
+ pthread_create(&thread[1], NULL, cat_writer, NULL);
+ else {
+ pthread_create(&thread[1], NULL, writer, NULL);
+ init_info();
+ }
+
+ pthread_mutex_init(&fragment_mutex, NULL);
+
+ for(i = 0; i < processors; i++) {
+ if(pthread_create(&inflator_thread[i], NULL, inflator, NULL) !=
+ 0)
+ EXIT_UNSQUASH("Failed to create thread\n");
+ }
+
+ if(pthread_sigmask(SIG_SETMASK, &old_mask, NULL) != 0)
+ EXIT_UNSQUASH("Failed to set signal mask in initialise_threads"
+ "\n");
+}
+
+
+void enable_progress_bar()
+{
+ pthread_mutex_lock(&screen_mutex);
+ progress_enabled = progress;
+ pthread_mutex_unlock(&screen_mutex);
+}
+
+
+void disable_progress_bar()
+{
+ pthread_mutex_lock(&screen_mutex);
+ if(progress_enabled) {
+ progress_bar(sym_count + dev_count + fifo_count + socket_count
+ + file_count + hardlnk_count + cur_blocks, total_inodes
+ + total_blocks, columns);
+ printf("\n");
+ }
+ progress_enabled = FALSE;
+ pthread_mutex_unlock(&screen_mutex);
+}
+
+
+void progressbar_error(char *fmt, ...)
+{
+ va_list ap;
+
+ pthread_mutex_lock(&screen_mutex);
+
+ if(progress_enabled)
+ fprintf(stderr, "\n");
+
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+
+ pthread_mutex_unlock(&screen_mutex);
+}
+
+
+void progressbar_info(char *fmt, ...)
+{
+ va_list ap;
+
+ pthread_mutex_lock(&screen_mutex);
+
+ if(progress_enabled)
+ printf("\n");
+
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+
+ pthread_mutex_unlock(&screen_mutex);
+}
+
+
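+/*
+ * draw the progress bar.  When output is not a terminal, only print an
+ * update when the percentage changes, to avoid flooding log files.
+ */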
+void progressbar(long long current, long long max, int columns)
+{
+ char rotate_list[] = { '|', '/', '-', '\\' };
+ int max_digits, used, hashes, spaces;
+ static int tty = -1;
+
+ if(max == 0)
+ return;
+
+ max_digits = floor(log10(max)) + 1;
+ used = max_digits * 2 + 11;
+ hashes = (current * (columns - used)) / max;
+ spaces = columns - used - hashes;
+
+ if((current > max) || (columns - used < 0))
+ return;
+
+ if(tty == -1)
+ tty = isatty(STDOUT_FILENO);
+ if(!tty) {
+ static long long previous = -1;
+
+ /* Updating too frequently results in huge log files */
+ if(current * 100 / max == previous && current != max)
+ return;
+ previous = current * 100 / max;
+ }
+
+ printf("\r[");
+
+ while (hashes --)
+ putchar('=');
+
+ putchar(rotate_list[rotate]);
+
+ while(spaces --)
+ putchar(' ');
+
+ printf("] %*lld/%*lld", max_digits, current, max_digits, max);
+ printf(" %3lld%%", current * 100 / max);
+ fflush(stdout);
+}
+
+
+void display_percentage(long long current, long long max)
+{
+ int percentage = max == 0 ? 100 : current * 100 / max;
+ static int previous = -1;
+
+ if(percentage != previous) {
+ printf("%d\n", percentage);
+ fflush(stdout);
+ previous = percentage;
+ }
+}
+
+
+void progress_bar(long long current, long long max, int columns)
+{
+ if(percent)
+ display_percentage(current, max);
+ else
+ progressbar(current, max, columns);
+}
+
+
+int multiply_overflowll(long long a, int multiplier)
+{
+ return (LLONG_MAX / multiplier) < a;
+}
+
+
+int parse_numberll(char *start, long long *res, int size)
+{
+ char *end;
+ long long number;
+
+ errno = 0; /* To distinguish success/failure after call */
+
+ number = strtoll(start, &end, 10);
+
+ /*
+ * check for strtoll underflow or overflow in conversion, and other
+ * errors.
+ */
+ if((errno == ERANGE && (number == LLONG_MIN || number == LLONG_MAX)) ||
+ (errno != 0 && number == 0))
+ return 0;
+
+ /* reject negative numbers as invalid */
+ if(number < 0)
+ return 0;
+
+ if(size) {
+ /*
+ * Check for multiplier and trailing junk.
+ * But first check that a number exists before the
+ * multiplier
+ */
+ if(end == start)
+ return 0;
+
+ switch(end[0]) {
+ case 'g':
+ case 'G':
+ if(multiply_overflowll(number, 1073741824))
+ return 0;
+ number *= 1073741824;
+
+ if(end[1] != '\0')
+ /* trailing junk after multiplier, but
+ * allow it to be "bytes" */
+ if(strcmp(end + 1, "bytes"))
+ return 0;
+
+ break;
+ case 'm':
+ case 'M':
+ if(multiply_overflowll(number, 1048576))
+ return 0;
+ number *= 1048576;
+
+ if(end[1] != '\0')
+ /* trailing junk after multiplier, but
+ * allow it to be "bytes" */
+ if(strcmp(end + 1, "bytes"))
+ return 0;
+
+ break;
+ case 'k':
+ case 'K':
+ if(multiply_overflowll(number, 1024))
+ return 0;
+ number *= 1024;
+
+ if(end[1] != '\0')
+ /* trailing junk after multiplier, but
+ * allow it to be "bytes" */
+ if(strcmp(end + 1, "bytes"))
+ return 0;
+
+ break;
+ case '\0':
+ break;
+ default:
+ /* trailing junk after number */
+ return 0;
+ }
+ } else if(end[0] != '\0')
+ /* trailing junk after number */
+ return 0;
+
+ *res = number;
+ return 1;
+}
+
+
+int parse_number(char *start, int *res)
+{
+ long long number;
+
+ if(!parse_numberll(start, &number, 0))
+ return 0;
+
+ /* check if long result will overflow signed int */
+ if(number > INT_MAX)
+ return 0;
+
+ *res = (int) number;
+ return 1;
+}
+
+
+int parse_number_unsigned(char *start, unsigned int *res)
+{
+ long long number;
+
+ if(!parse_numberll(start, &number, 0))
+ return 0;
+
+ /* check if long result will overflow unsigned int */
+ if(number > UINT_MAX)
+ return 0;
+
+ *res = (unsigned int) number;
+ return 1;
+}
+
+
+void resolve_symlinks(int argc, char *argv[])
+{
+ int n, found;
+ struct directory_stack *stack;
+ struct symlink *symlink;
+ char *pathname;
+
+ for(n = 0; n < argc; n++) {
+ /*
+ * Try to follow the extract file pathname, and
+ * return the canonicalised pathname, and all
+ * symlinks necessary to resolve it.
+ */
+ stack = create_stack();
+
+ found = follow_path(argv[n], "",
+ SQUASHFS_INODE_BLK(sBlk.s.root_inode),
+ SQUASHFS_INODE_OFFSET(sBlk.s.root_inode),
+ 1, 0, stack);
+
+ if(!found) {
+ if(missing_symlinks)
+ EXIT_UNSQUASH("Extract filename %s can't be "
+ "resolved\n", argv[n]);
+ else
+ ERROR("Extract filename %s can't be resolved\n",
+ argv[n]);
+
+ add_extract(argv[n]);
+ free_stack(stack);
+ continue;
+ }
+
+ pathname = stack_pathname(stack, stack->name);
+ add_extract(pathname);
+ free(pathname);
+
+ for(symlink = stack->symlink; symlink; symlink = symlink->next)
+ add_extract(symlink->pathname);
+
+ free_stack(stack);
+ }
+}
+
+
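+/* return a newly allocated pathname consisting of path and name joined
+ * by a "/" separator */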
+char *new_pathname(char *path, char *name)
+{
+ char *newpath;
+
+ if(strcmp(path, "/") == 0) {
+ newpath = malloc(strlen(name) + 2);
+ if(newpath == NULL)
+ MEM_ERROR();
+
+ strcpy(newpath, "/");
+ strcat(newpath, name);
+ } else {
+ newpath = malloc(strlen(path) + strlen(name) + 2);
+ if(newpath == NULL)
+ MEM_ERROR();
+
+ strcpy(newpath, path);
+ strcat(newpath, "/");
+ strcat(newpath, name);
+ }
+
+ return newpath;
+}
+
+
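+/* as new_pathname(), but extends (reallocates) the existing path in place */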
+char *add_pathname(char *path, char *name)
+{
+ if(strcmp(path, "/") == 0) {
+ path = realloc(path, strlen(name) + 2);
+ if(path == NULL)
+ MEM_ERROR();
+
+ strcat(path, name);
+ } else {
+ path = realloc(path, strlen(path) + strlen(name) + 2);
+ if(path == NULL)
+ MEM_ERROR();
+
+ strcat(path, "/");
+ strcat(path, name);
+ }
+
+ return path;
+}
+
+
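+/*
+ * Walk the pathname for -cat, matching each component against the
+ * directory entries (by wildcard, regex or literal name), following any
+ * relative symlinks, and cat-ing regular files matched at the leaf.
+ * Returns FALSE if any component failed to match or to be output.
+ */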
+int cat_scan(char *path, char *curpath, char *name, unsigned int start_block,
+ unsigned int offset, int depth, struct directory_stack *stack)
+{
+ struct inode *i;
+ struct dir *dir;
+ char *target, *newpath, *addpath, *symlink;
+ unsigned int type;
+ int matched = FALSE, traversed = TRUE;
+ int match, res;
+ unsigned int entry_start, entry_offset;
+ regex_t preg;
+ struct directory_stack *new;
+
+ newpath = new_pathname(curpath, name);
+
+ while((path = get_component(path, &target))) {
+ if(strcmp(target, ".") != 0)
+ break;
+
+ newpath = add_pathname(newpath, ".");
+ free(target);
+ }
+
+ if(path == NULL) {
+ ERROR("cat: %s is a directory\n", newpath);
+ free(newpath);
+ return FALSE;
+ }
+
+ add_stack(stack, start_block, offset, name, depth);
+
+ if(strcmp(target, "..") == 0) {
+ if(depth > 1) {
+ free(target);
+ start_block = stack->stack[depth - 2].start_block;
+ offset = stack->stack[depth - 2].offset;
+
+ new = clone_stack(stack);
+ res = cat_scan(path, newpath, "..", start_block, offset,
+ depth - 1, new);
+
+ free_stack(new);
+ return res;
+ } else {
+ newpath = add_pathname(newpath, "..");
+ ERROR("cat: %s, cannot ascend beyond root directory\n", newpath);
+ free(newpath);
+ free(target);
+ return FALSE;
+ }
+ }
+
+ dir = s_ops->opendir(start_block, offset, &i);
+ if(dir == NULL) {
+ free(newpath);
+ free(target);
+ return FALSE;
+ }
+
+ if(use_regex) {
+ res = regcomp(&preg, target, REG_EXTENDED|REG_NOSUB);
+ if(res) {
+ char str[1024]; /* overflow safe */
+
+ regerror(res, &preg, str, 1024);
+ ERROR("cat: invalid regex %s because %s\n", target, str);
+ free(newpath);
+ free(target);
+ squashfs_closedir(dir);
+ return FALSE;
+ }
+ }
+
+ while(squashfs_readdir(dir, &name, &entry_start, &entry_offset, &type)) {
+ if(no_wildcards)
+ match = strcmp(name, target) == 0;
+ else if(use_regex)
+ match = regexec(&preg, name, (size_t) 0, NULL, 0) == 0;
+ else
+ match = fnmatch(target, name, FNM_PATHNAME|FNM_PERIOD|FNM_EXTMATCH) == 0;
+
+ if(match) {
+ matched = TRUE;
+
+ switch(type) {
+ case SQUASHFS_DIR_TYPE:
+ /* if we're at leaf component then fail */
+ if(path[0] == '\0') {
+ addpath = new_pathname(newpath, name);
+ ERROR("cat: %s is a directory\n", addpath);
+ free(addpath);
+ traversed = FALSE;
+ continue;
+ }
+
+ /* follow the path */
+ res = cat_scan(path, newpath, name, entry_start, entry_offset,
+ depth + 1, stack);
+ if(res == FALSE)
+ traversed = FALSE;
+ pop_stack(stack);
+ break;
+ case SQUASHFS_FILE_TYPE:
+ /* if there's path still to walk, fail */
+ addpath = new_pathname(newpath, name);
+ if(path[0] != '\0') {
+ ERROR("cat: %s is not a directory\n", addpath);
+ free(addpath);
+ traversed = FALSE;
+ continue;
+ }
+
+ i = s_ops->read_inode(entry_start, entry_offset);
+ res = cat_file(i, addpath);
+ if(res == FALSE)
+ traversed = FALSE;
+ free(addpath);
+ break;
+ case SQUASHFS_SYMLINK_TYPE:
+ i = s_ops->read_inode(entry_start, entry_offset);
+ symlink = i->symlink;
+
+ /* Symlink must be relative to current
+ * directory and not be absolute, otherwise
+ * we can't follow it, as it is probably
+ * outside the Squashfs filesystem */
+ if(symlink[0] == '/') {
+ addpath = new_pathname(newpath, name);
+ ERROR("cat: %s failed to resolve symbolic link\n", addpath);
+ free(addpath);
+ traversed = FALSE;
+ free(symlink);
+ continue;
+ }
+
+ new = clone_stack(stack);
+
+ /* follow the symlink */
+ res = follow_path(symlink, name,
+ start_block, offset, depth, 1, new);
+
+ free(symlink);
+
+ if(res == FALSE) {
+ addpath = new_pathname(newpath, name);
+ ERROR("cat: %s failed to resolve symbolic link\n", addpath);
+ free(addpath);
+ free_stack(new);
+ traversed = FALSE;
+ continue;
+ }
+
+ /* If we still have some path to
+ * walk, then walk it from where
+ * the symlink traversal left us
+ *
+ * Obviously symlink traversal must
+ * have left us at a directory to do
+ * this */
+ if(path[0] != '\0') {
+ if(new->type != SQUASHFS_DIR_TYPE) {
+ addpath = new_pathname(newpath, name);
+ ERROR("cat: %s symbolic link does not resolve to a directory\n", addpath);
+ free(addpath);
+ traversed = FALSE;
+ free_stack(new);
+ continue;
+ }
+
+ /* continue following path */
+ res = cat_scan(path, newpath, name,
+ new->start_block, new->offset,
+ new->size + 1, new);
+ if(res == FALSE)
+ traversed = FALSE;
+ free_stack(new);
+ continue;
+ }
+
+ /* At leaf component, symlink must have
+ * resolved to a regular file */
+ if(new->type != SQUASHFS_FILE_TYPE) {
+ addpath = new_pathname(newpath, name);
+ ERROR("cat: %s symbolic link does not resolve to a regular file\n", addpath);
+ free(addpath);
+ free_stack(new);
+ traversed = FALSE;
+ continue;
+ }
+
+ i = s_ops->read_inode(new->start_block, new->offset);
+ addpath = new_pathname(newpath, name);
+ res = cat_file(i, addpath);
+ if(res == FALSE)
+ traversed = FALSE;
+ free_stack(new);
+ free(addpath);
+ break;
+ default:
+ /* not a directory, or a regular file, fail */
+ addpath = new_pathname(newpath, name);
+ if(path[0] == '\0')
+ ERROR("cat: %s is not a regular file\n", addpath);
+ else
+ ERROR("cat: %s is not a directory\n", addpath);
+ free(addpath);
+ traversed = FALSE;
+ continue;
+ }
+ }
+ }
+
+ if(matched == FALSE) {
+ newpath = add_pathname(newpath, target);
+ ERROR("cat: no matches for %s\n", newpath);
+ traversed = FALSE;
+ }
+
+ free(newpath);
+ free(target);
+ squashfs_closedir(dir);
+
+ return traversed;
+}
+
+
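+/* cat each pathname given on the command line, returning the exit code */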
+int cat_path(int argc, char *argv[])
+{
+ int n, res, failed = FALSE;
+ struct directory_stack *stack;
+
+ for(n = 0; n < argc; n++) {
+ stack = create_stack();
+
+ res = cat_scan(argv[n], "/", "",
+ SQUASHFS_INODE_BLK(sBlk.s.root_inode),
+ SQUASHFS_INODE_OFFSET(sBlk.s.root_inode),
+ 1, stack);
+
+ if(res == FALSE)
+ failed = TRUE;
+
+ free_stack(stack);
+ }
+
+ queue_put(to_writer, NULL);
+ res = (long) queue_get(from_writer);
+
+ return (failed == TRUE || res == TRUE) && set_exit_code ? 2 : 0;
+}
+
+
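+/*
+ * Prepare a filename for pseudo file output: strip any leading "/", and
+ * backslash-escape quotes, backslashes and whitespace.
+ */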
+char *process_filename(char *filename)
+{
+ static char *saved = NULL;
+ char *ptr;
+ int count = 0;
+
+ for(ptr = filename; *ptr == '/'; ptr ++);
+
+ if(*ptr == '\0')
+ return "/";
+
+ filename = ptr;
+
+ while(*ptr != '\0') {
+ if(*ptr == '\"' || *ptr == '\\' || isspace(*ptr))
+ count ++;
+ ptr ++;
+ }
+
+ if(count == 0)
+ return filename;
+
+ saved = realloc(saved, strlen(filename) + count + 1);
+ if(saved == NULL)
+ MEM_ERROR();
+
+ for(ptr = saved; *filename != '\0'; ptr ++, filename ++) {
+ if(*filename == '\"' || *filename == '\\' || isspace(*filename))
+ *ptr ++ = '\\';
+
+ *ptr = *filename;
+ }
+
+ *ptr = '\0';
+
+ return saved;
+}
+
+
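+/*
+ * Print a pseudo file definition for this inode to writer_fd.  Hard
+ * links are printed as "L" definitions referring to the previously seen
+ * pathname, and regular files record the byte offset of their data.
+ */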
+void pseudo_print(char *pathname, struct inode *inode, char *link, long long offset)
+{
+ char userstr[12], groupstr[12]; /* overflow safe */
+ char *type_string = "DRSBCIIDRSBCII";
+ char *filename = process_filename(pathname);
+ char type = type_string[inode->type - 1];
+ int res;
+
+ if(link) {
+ char *name = strdup(filename);
+ char *linkname = process_filename(link);
+ res = dprintf(writer_fd, "%s L %s\n", name, linkname);
+ if(res == -1)
+ EXIT_UNSQUASH("Failed to write to pseudo output file\n");
+ free(name);
+ return;
+ }
+
+ res = snprintf(userstr, 12, "%d", inode->uid);
+ if(res < 0)
+ EXIT_UNSQUASH("snprintf failed in pseudo_print()\n");
+ else if(res >= 12)
+ EXIT_UNSQUASH("snprintf returned more than 11 digits in pseudo_print()\n");
+
+ res = snprintf(groupstr, 12, "%d", inode->gid);
+ if(res < 0)
+ EXIT_UNSQUASH("snprintf failed in pseudo_print()\n");
+ else if(res >= 12)
+ EXIT_UNSQUASH("snprintf returned more than 11 digits in pseudo_print()\n");
+
+ res = dprintf(writer_fd, "%s %c %ld %o %s %s", filename, type, inode->time, inode->mode & ~S_IFMT, userstr, groupstr);
+ if(res == -1)
+ EXIT_UNSQUASH("Failed to write to pseudo output file\n");
+
+ switch(inode->mode & S_IFMT) {
+ case S_IFDIR:
+ res = dprintf(writer_fd, "\n");
+ break;
+ case S_IFLNK:
+ res = dprintf(writer_fd, " %s\n", inode->symlink);
+ break;
+ case S_IFSOCK:
+ case S_IFIFO:
+ if(inode->type == SQUASHFS_SOCKET_TYPE || inode->type == SQUASHFS_LSOCKET_TYPE)
+ res = dprintf(writer_fd, " s\n");
+ else
+ res = dprintf(writer_fd, " f\n");
+ break;
+ case S_IFCHR:
+ case S_IFBLK:
+ res = dprintf(writer_fd, " %d %d\n", (int) inode->data >> 8, (int) inode->data & 0xff);
+ break;
+ case S_IFREG:
+ res = dprintf(writer_fd, " %lld %lld %d\n", inode->data,
+ offset, inode->sparse);
+ }
+
+ if(res == -1)
+ EXIT_UNSQUASH("Failed to write to pseudo output file\n");
+
+ print_xattr(filename, inode->xattr, writer_fd);
+}
+
+
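+/*
+ * First pseudo file pass: print the pseudo definitions (metadata) for
+ * each selected inode, printing the byte offset where each regular
+ * file's data will be placed, and totalling the data blocks for the
+ * progress bar.
+ */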
+int pseudo_scan1(char *parent_name, unsigned int start_block, unsigned int offset,
+ struct pathnames *extracts, struct pathnames *excludes, int depth)
+{
+ unsigned int type;
+ char *name;
+ struct pathnames *newt, *newc = NULL;
+ struct inode *i;
+ struct dir *dir;
+ static long long byte_offset = 0;
+
+ if(max_depth != -1 && depth > max_depth)
+ return TRUE;
+
+ dir = s_ops->opendir(start_block, offset, &i);
+ if(dir == NULL) {
+ ERROR("pseudo_scan1: failed to read directory %s\n", parent_name);
+ return FALSE;
+ }
+
+ if(inumber_lookup(i->inode_number))
+ EXIT_UNSQUASH("File System corrupted: directory loop detected\n");
+
+ pseudo_print(parent_name, i, NULL, 0);
+
+ while(squashfs_readdir(dir, &name, &start_block, &offset, &type)) {
+ struct inode *i;
+ char *pathname;
+ int res;
+
+ TRACE("pseudo_scan1: name %s, start_block %d, offset %d, type %d\n",
+ name, start_block, offset, type);
+
+ if(!extract_matches(extracts, name, &newt))
+ continue;
+
+ if(exclude_matches(excludes, name, &newc)) {
+ free_subdir(newt);
+ continue;
+ }
+
+ res = asprintf(&pathname, "%s/%s", parent_name, name);
+ if(res == -1)
+ MEM_ERROR();
+
+ if(type == SQUASHFS_DIR_TYPE) {
+ res = pseudo_scan1(pathname, start_block, offset, newt,
+ newc, depth + 1);
+ if(res == FALSE) {
+ free_subdir(newt);
+ free_subdir(newc);
+ free(pathname);
+ return FALSE;
+ }
+ } else if(newt == NULL) {
+ char *link;
+
+ i = s_ops->read_inode(start_block, offset);
+ link = lookup(i->inode_number);
+
+ if(link == NULL) {
+ pseudo_print(pathname, i, NULL, byte_offset);
+ if(type == SQUASHFS_FILE_TYPE) {
+ byte_offset += i->data;
+ total_blocks += (i->data + (block_size - 1)) >> block_log;
+ }
+ insert_lookup(i->inode_number, strdup(pathname));
+ } else
+ pseudo_print(pathname, i, link, 0);
+
+ if(i->type == SQUASHFS_SYMLINK_TYPE || i->type == SQUASHFS_LSYMLINK_TYPE)
+ free(i->symlink);
+
+ }
+
+ free_subdir(newt);
+ free_subdir(newc);
+ free(pathname);
+ }
+
+ squashfs_closedir(dir);
+
+ return TRUE;
+}
+
+
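+/*
+ * Second pseudo file pass: append the file data referenced by the
+ * definitions printed in the first pass.
+ */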
+int pseudo_scan2(char *parent_name, unsigned int start_block, unsigned int offset,
+ struct pathnames *extracts, struct pathnames *excludes, int depth)
+{
+ unsigned int type;
+ char *name;
+ struct pathnames *newt, *newc = NULL;
+ struct inode *i;
+ struct dir *dir = s_ops->opendir(start_block, offset, &i);
+
+ if(dir == NULL) {
+ ERROR("pseudo_scan2: failed to read directory %s\n", parent_name);
+ return FALSE;
+ }
+
+ if(inumber_lookup(i->inode_number))
+ EXIT_UNSQUASH("File System corrupted: directory loop detected\n");
+
+ if(max_depth == -1 || depth <= max_depth) {
+ while(squashfs_readdir(dir, &name, &start_block, &offset, &type)) {
+ char *pathname;
+ int res;
+
+ TRACE("pseudo_scan2: name %s, start_block %d, offset %d,"
+ " type %d\n", name, start_block, offset, type);
+
+
+ if(!extract_matches(extracts, name, &newt))
+ continue;
+
+ if(exclude_matches(excludes, name, &newc)) {
+ free_subdir(newt);
+ continue;
+ }
+
+ res = asprintf(&pathname, "%s/%s", parent_name, name);
+ if(res == -1)
+ MEM_ERROR();
+
+ if(type == SQUASHFS_DIR_TYPE) {
+ res = pseudo_scan2(pathname, start_block, offset,
+ newt, newc, depth + 1);
+ free(pathname);
+ if(res == FALSE) {
+ free_subdir(newt);
+ free_subdir(newc);
+ return FALSE;
+ }
+ } else if(newt == NULL && type == SQUASHFS_FILE_TYPE) {
+ i = s_ops->read_inode(start_block, offset);
+
+ if(lookup(i->inode_number) == NULL) {
+ update_info(pathname);
+
+ res = cat_file(i, pathname);
+ if(res == FALSE) {
+ free_subdir(newt);
+ free_subdir(newc);
+ return FALSE;
+ }
+
+ insert_lookup(i->inode_number, strdup(pathname));
+ } else
+ free(pathname);
+ } else
+ free(pathname);
+
+ free_subdir(newt);
+ free_subdir(newc);
+ }
+ }
+
+ squashfs_closedir(dir);
+
+ return TRUE;
+}
+
+
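+/*
+ * Generate a pseudo file describing the filesystem, written to
+ * pseudo_file, or to stdout if "-pf -" was specified.
+ */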
+int generate_pseudo(char *pseudo_file)
+{
+ int res;
+
+ if(pseudo_stdout)
+ writer_fd = STDOUT_FILENO;
+ else {
+ writer_fd = open_wait(pseudo_file, O_CREAT | O_TRUNC | O_WRONLY,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ if(writer_fd == -1)
+ EXIT_UNSQUASH("generate_pseudo: failed to create "
+ "pseudo file %s, because %s\n", pseudo_file,
+ strerror(errno));
+ }
+
+ res = pseudo_scan1("/", SQUASHFS_INODE_BLK(sBlk.s.root_inode),
+ SQUASHFS_INODE_OFFSET(sBlk.s.root_inode), extracts, excludes, 1);
+ if(res == FALSE)
+ goto failed;
+
+ free_inumber_table();
+ inode_number = 1;
+ free_lookup_table(TRUE);
+
+ res = dprintf(writer_fd, "#\n# START OF DATA - DO NOT MODIFY\n#\n");
+ if(res == -1)
+ EXIT_UNSQUASH("Failed to write to pseudo output file\n");
+
+ enable_progress_bar();
+
+ res = pseudo_scan2("/", SQUASHFS_INODE_BLK(sBlk.s.root_inode),
+ SQUASHFS_INODE_OFFSET(sBlk.s.root_inode), extracts, excludes, 1);
+ if(res == FALSE)
+ goto failed;
+
+ queue_put(to_writer, NULL);
+ res = (long) queue_get(from_writer);
+ if(res == TRUE)
+ goto failed;
+
+ disable_progress_bar();
+
+ if(pseudo_stdout == FALSE)
+ close(writer_fd);
+
+ return 0;
+
+failed:
+ disable_progress_bar();
+ queue_put(to_writer, NULL);
+ queue_get(from_writer);
+ unlink(pseudo_file);
+ return 1;
+}
+
+
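+/* add the exclude files given on the command line, up to the ";" terminator */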
+int parse_excludes(int argc, char *argv[])
+{
+ int i;
+
+ for(i = 0; i < argc; i ++) {
+ if(strcmp(argv[i], ";") == 0)
+ break;
+ add_exclude(argv[i]);
+ }
+
+ return (i == argc) ? 0 : i;
+}
+
+
+static void print_cat_options(FILE *stream, char *name)
+{
+ fprintf(stream, "SYNTAX: %s [OPTIONS] FILESYSTEM [list of files to cat to stdout]\n", name);
+ fprintf(stream, "\t-v[ersion]\t\tprint version, licence and copyright ");
+ fprintf(stream, "information\n");
+ fprintf(stream, "\t-p[rocessors] <number>\tuse <number> processors. ");
+ fprintf(stream, "By default will use\n");
+ fprintf(stream, "\t\t\t\tthe number of processors available\n");
+ fprintf(stream, "\t-o[ffset] <bytes>\tskip <bytes> at start of FILESYSTEM.\n");
+ fprintf(stream, "\t\t\t\tOptionally a suffix of K, M or G can be given to\n");
+ fprintf(stream, "\t\t\t\tspecify Kbytes, Mbytes or Gbytes respectively\n");
+ fprintf(stream, "\t\t\t\t(default 0 bytes).\n");
+ fprintf(stream, "\t-ig[nore-errors]\ttreat errors writing files to stdout ");
+ fprintf(stream, "as\n\t\t\t\tnon-fatal\n");
+ fprintf(stream, "\t-st[rict-errors]\ttreat all errors as fatal\n");
+ fprintf(stream, "\t-no-exit[-code]\t\tdon't set exit code (to nonzero) on ");
+ fprintf(stream, "non-fatal\n\t\t\t\terrors\n");
+ fprintf(stream, "\t-da[ta-queue] <size>\tset data queue to <size> Mbytes. ");
+ fprintf(stream, "Default %d\n\t\t\t\tMbytes\n", DATA_BUFFER_DEFAULT);
+ fprintf(stream, "\t-fr[ag-queue] <size>\tset fragment queue to <size> Mbytes. ");
+ fprintf(stream, "Default\n\t\t\t\t%d Mbytes\n", FRAGMENT_BUFFER_DEFAULT);
+ fprintf(stream, "\t-no-wild[cards]\t\tdo not use wildcard matching in filenames\n");
+ fprintf(stream, "\t-r[egex]\t\ttreat filenames as POSIX regular ");
+ fprintf(stream, "expressions\n");
+ fprintf(stream, "\t\t\t\trather than use the default shell ");
+ fprintf(stream, "wildcard\n\t\t\t\texpansion (globbing)\n");
+ fprintf(stream, "\t-h[elp]\t\t\toutput options text to stdout\n");
+ fprintf(stream, "\nDecompressors available:\n");
+ display_compressors(stream, "", "");
+
+ fprintf(stream, "\nExit status:\n");
+ fprintf(stream, " 0\tThe file or files were output to stdout OK.\n");
+ fprintf(stream, " 1\tFATAL errors occurred, e.g. filesystem ");
+ fprintf(stream, "corruption, I/O errors.\n");
+ fprintf(stream, "\tSqfscat did not continue and aborted.\n");
+ fprintf(stream, " 2\tNon-fatal errors occurred, e.g. not a regular ");
+ fprintf(stream, "file, or failed to resolve\n\tpathname. Sqfscat ");
+ fprintf(stream, "continued and did not abort.\n");
+ fprintf(stream, "\nSee -ignore-errors, -strict-errors and ");
+ fprintf(stream, "-no-exit-code options for how they affect\nthe exit ");
+ fprintf(stream, "status.\n");
+ fprintf(stream, "\nSee also:\n");
+ fprintf(stream, "The README for the Squashfs-tools 4.6.1 release, ");
+ fprintf(stream, "describing the new features can be\n");
+ fprintf(stream, "read here https://github.com/plougher/squashfs-tools/blob/master/README-4.6.1\n");
+
+ fprintf(stream, "\nThe Squashfs-tools USAGE guide can be read here\n");
+ fprintf(stream, "https://github.com/plougher/squashfs-tools/blob/master/USAGE-4.6\n");
+}
+
+
+static void print_options(FILE *stream, char *name)
+{
+ fprintf(stream, "SYNTAX: %s [OPTIONS] FILESYSTEM [files ", name);
+ fprintf(stream, "to extract or exclude (with -excludes) or cat (with -cat )]\n");
+ fprintf(stream, "\nFilesystem extraction (filtering) options:\n");
+ fprintf(stream, "\t-d[est] <pathname>\textract to <pathname>, ");
+ fprintf(stream, "default \"squashfs-root\".\n\t\t\t\tThis option ");
+ fprintf(stream, "also sets the prefix used when\n\t\t\t\tlisting the ");
+ fprintf(stream, "filesystem\n");
+ fprintf(stream, "\t-max[-depth] <levels>\tdescend at most <levels> of ");
+ fprintf(stream, "directories when\n\t\t\t\textracting\n");
+ fprintf(stream, "\t-excludes\t\ttreat files on command line as exclude files\n");
+ fprintf(stream, "\t-ex[clude-list]\t\tlist of files to be excluded, terminated\n");
+ fprintf(stream, "\t\t\t\twith ; e.g. file1 file2 ;\n");
+ fprintf(stream, "\t-extract-file <file>\tlist of directories or files to ");
+ fprintf(stream, "extract.\n\t\t\t\tOne per line\n");
+ fprintf(stream, "\t-exclude-file <file>\tlist of directories or files to ");
+ fprintf(stream, "exclude.\n\t\t\t\tOne per line\n");
+ fprintf(stream, "\t-match\t\t\tabort if any extract file does not ");
+ fprintf(stream, "match on\n\t\t\t\tanything, and can not be ");
+ fprintf(stream, "resolved. Implies\n\t\t\t\t-missing-symlinks and ");
+ fprintf(stream, "-no-wildcards\n");
+ fprintf(stream, "\t-follow[-symlinks]\tfollow symlinks in extract files, and ");
+ fprintf(stream, "add all\n\t\t\t\tfiles/symlinks needed to resolve extract ");
+ fprintf(stream, "file.\n\t\t\t\tImplies -no-wildcards\n");
+ fprintf(stream, "\t-missing[-symlinks]\tUnsquashfs will abort if any symlink ");
+ fprintf(stream, "can't be\n\t\t\t\tresolved in -follow-symlinks\n");
+ fprintf(stream, "\t-no-wild[cards]\t\tdo not use wildcard matching in extract ");
+ fprintf(stream, "and\n\t\t\t\texclude names\n");
+ fprintf(stream, "\t-r[egex]\t\ttreat extract names as POSIX regular ");
+ fprintf(stream, "expressions\n");
+ fprintf(stream, "\t\t\t\trather than use the default shell ");
+ fprintf(stream, "wildcard\n\t\t\t\texpansion (globbing)\n");
+ fprintf(stream, "\t-all[-time] <time>\tset all file timestamps to ");
+ fprintf(stream, "<time>, rather than\n\t\t\t\tthe time stored in the ");
+ fprintf(stream, "filesystem inode. <time>\n\t\t\t\tcan be an ");
+ fprintf(stream, "unsigned 32-bit int indicating\n\t\t\t\tseconds ");
+ fprintf(stream, "since the epoch (1970-01-01) or a string\n\t\t\t\t");
+ fprintf(stream, "value which is passed to the \"date\" command to\n");
+ fprintf(stream, "\t\t\t\tparse. Any string value which the date ");
+ fprintf(stream, "command\n\t\t\t\trecognises can be used such as ");
+ fprintf(stream, "\"now\", \"last\n\t\t\t\tweek\", or \"Wed Feb 15 ");
+ fprintf(stream, "21:02:39 GMT 2023\"\n");
+ fprintf(stream, "\t-cat\t\t\tcat the files on the command line to stdout\n");
+ fprintf(stream, "\t-f[orce]\t\tif file already exists then overwrite\n");
+ fprintf(stream, "\t-pf <file>\t\toutput a pseudo file equivalent ");
+ fprintf(stream, "of the input\n\t\t\t\tSquashfs filesystem, use - for stdout\n");
+ fprintf(stream, "\nFilesystem information and listing options:\n");
+ fprintf(stream, "\t-s[tat]\t\t\tdisplay filesystem superblock information\n");
+ fprintf(stream, "\t-max[-depth] <levels>\tdescend at most <levels> of ");
+ fprintf(stream, "directories when\n\t\t\t\tlisting\n");
+ fprintf(stream, "\t-i[nfo]\t\t\tprint files as they are extracted\n");
+ fprintf(stream, "\t-li[nfo]\t\tprint files as they are extracted with file\n");
+ fprintf(stream, "\t\t\t\tattributes (like ls -l output)\n");
+ fprintf(stream, "\t-l[s]\t\t\tlist filesystem, but do not extract files\n");
+ fprintf(stream, "\t-ll[s]\t\t\tlist filesystem with file attributes (like\n");
+ fprintf(stream, "\t\t\t\tls -l output), but do not extract files\n");
+ fprintf(stream, "\t-lln[umeric]\t\tsame as -lls but with numeric uids and gids\n");
+ fprintf(stream, "\t-lc\t\t\tlist filesystem concisely, displaying only ");
+ fprintf(stream, "files\n\t\t\t\tand empty directories. Do not extract files\n");
+ fprintf(stream, "\t-llc\t\t\tlist filesystem concisely with file ");
+ fprintf(stream, "attributes,\n\t\t\t\tdisplaying only files and empty ");
+ fprintf(stream, "directories.\n\t\t\t\tDo not extract files\n");
+ fprintf(stream, "\t-full[-precision]\tuse full precision when ");
+ fprintf(stream, "displaying times\n\t\t\t\tincluding seconds. Use ");
+ fprintf(stream, "with -linfo, -lls, -lln\n\t\t\t\tand -llc\n");
+ fprintf(stream, "\t-UTC\t\t\tuse UTC rather than local time zone ");
+ fprintf(stream, "when\n\t\t\t\tdisplaying time\n");
+ fprintf(stream, "\t-mkfs-time\t\tdisplay filesystem superblock time, which is an\n");
+ fprintf(stream, "\t\t\t\tunsigned 32-bit int representing the time in\n");
+ fprintf(stream, "\t\t\t\tseconds since the epoch (1970-01-01)\n");
+ fprintf(stream, "\nFilesystem extended attribute (xattrs) options:\n");
+ fprintf(stream, "\t-no[-xattrs]\t\tdo not extract xattrs in file system");
+ fprintf(stream, NOXOPT_STR"\n");
+ fprintf(stream, "\t-x[attrs]\t\textract xattrs in file system" XOPT_STR "\n");
+ fprintf(stream, "\t-xattrs-exclude <regex>\texclude any xattr names ");
+ fprintf(stream, "matching <regex>.\n\t\t\t\t<regex> is a POSIX ");
+ fprintf(stream, "regular expression, e.g.\n\t\t\t\t-xattrs-exclude ");
+ fprintf(stream, "'^user.' excludes xattrs from\n\t\t\t\tthe user ");
+ fprintf(stream, "namespace\n");
+ fprintf(stream, "\t-xattrs-include <regex>\tinclude any xattr names ");
+ fprintf(stream, "matching <regex>.\n\t\t\t\t<regex> is a POSIX ");
+ fprintf(stream, "regular expression, e.g.\n\t\t\t\t-xattrs-include ");
+ fprintf(stream, "'^user.' includes xattrs from\n\t\t\t\tthe user ");
+ fprintf(stream, "namespace\n");
+ fprintf(stream, "\nUnsquashfs runtime options:\n");
+ fprintf(stream, "\t-v[ersion]\t\tprint version, licence and ");
+ fprintf(stream, "copyright information\n");
+ fprintf(stream, "\t-p[rocessors] <number>\tuse <number> processors. ");
+ fprintf(stream, "By default will use\n");
+ fprintf(stream, "\t\t\t\tthe number of processors available\n");
+ fprintf(stream, "\t-q[uiet]\t\tno verbose output\n");
+ fprintf(stream, "\t-n[o-progress]\t\tdo not display the progress ");
+ fprintf(stream, "bar\n");
+ fprintf(stream, "\t-percentage\t\tdisplay a percentage rather than ");
+ fprintf(stream, "the full\n\t\t\t\tprogress bar. Can be used with ");
+ fprintf(stream, "dialog --gauge\n\t\t\t\tetc.\n");
+ fprintf(stream, "\t-ig[nore-errors]\ttreat errors writing files to ");
+ fprintf(stream, "output as\n\t\t\t\tnon-fatal\n");
+ fprintf(stream, "\t-st[rict-errors]\ttreat all errors as fatal\n");
+ fprintf(stream, "\t-no-exit[-code]\t\tdo not set exit code (to ");
+ fprintf(stream, "nonzero) on non-fatal\n\t\t\t\terrors\n");
+ fprintf(stream, "\t-da[ta-queue] <size>\tset data queue to <size> ");
+ fprintf(stream, "Mbytes. Default ");
+ fprintf(stream, "%d\n\t\t\t\tMbytes\n", DATA_BUFFER_DEFAULT);
+ fprintf(stream, "\t-fr[ag-queue] <size>\tset fragment queue to ");
+ fprintf(stream, "<size> Mbytes. Default\n\t\t\t\t");
+ fprintf(stream, "%d Mbytes\n", FRAGMENT_BUFFER_DEFAULT);
+ fprintf(stream, "\nMiscellaneous options:\n");
+ fprintf(stream, "\t-h[elp]\t\t\toutput this options text to stdout\n");
+ fprintf(stream, "\t-o[ffset] <bytes>\tskip <bytes> at start of FILESYSTEM. ");
+ fprintf(stream, "Optionally\n\t\t\t\ta suffix of K, M or G can be given to ");
+ fprintf(stream, "specify\n\t\t\t\tKbytes, Mbytes or Gbytes respectively ");
+ fprintf(stream, "(default\n\t\t\t\t0 bytes).\n");
+ fprintf(stream, "\t-fstime\t\t\tsynonym for -mkfs-time\n");
+ fprintf(stream, "\t-e[f] <extract file>\tsynonym for -extract-file\n");
+ fprintf(stream, "\t-exc[f] <exclude file>\tsynonym for -exclude-file\n");
+ fprintf(stream, "\t-L\t\t\tsynonym for -follow-symlinks\n");
+ fprintf(stream, "\t-pseudo-file <file>\talternative name for -pf\n");
+ fprintf(stream, "\nDecompressors available:\n");
+ display_compressors(stream, "", "");
+
+ fprintf(stream, "\nExit status:\n");
+ fprintf(stream, " 0\tThe filesystem listed or extracted OK.\n");
+ fprintf(stream, " 1\tFATAL errors occurred, e.g. filesystem corruption, ");
+ fprintf(stream, "I/O errors.\n");
+ fprintf(stream, "\tUnsquashfs did not continue and aborted.\n");
+ fprintf(stream, " 2\tNon-fatal errors occurred, e.g. no support for ");
+ fprintf(stream, "XATTRs, Symbolic links\n\tin output filesystem or ");
+ fprintf(stream, "couldn't write permissions to output filesystem.\n");
+ fprintf(stream, "\tUnsquashfs continued and did not abort.\n");
+ fprintf(stream, "\nSee -ignore-errors, -strict-errors and ");
+ fprintf(stream, "-no-exit-code options for how they affect\nthe exit ");
+ fprintf(stream, "status.\n");
+ fprintf(stream, "\nSee also:\n");
+ fprintf(stream, "The README for the Squashfs-tools 4.6.1 release, ");
+ fprintf(stream, "describing the new features can be\n");
+ fprintf(stream, "read here https://github.com/plougher/squashfs-tools/blob/master/README-4.6.1\n");
+
+ fprintf(stream, "\nThe Squashfs-tools USAGE guide can be read here\n");
+ fprintf(stream, "https://github.com/plougher/squashfs-tools/blob/master/USAGE-4.6\n");
+}
+
+
+void print_version(char *string)
+{
+ printf("%s version " VERSION " (" DATE ")\n", string);
+ printf("copyright (C) " YEAR " Phillip Lougher ");
+ printf("<phillip@squashfs.org.uk>\n\n");
+ printf("This program is free software; you can redistribute it and/or\n");
+ printf("modify it under the terms of the GNU General Public License\n");
+ printf("as published by the Free Software Foundation; either version ");
+ printf("2,\n");
+ printf("or (at your option) any later version.\n\n");
+ printf("This program is distributed in the hope that it will be ");
+ printf("useful,\n");
+ printf("but WITHOUT ANY WARRANTY; without even the implied warranty of\n");
+ printf("MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n");
+ printf("GNU General Public License for more details.\n");
+}
+
+
+int parse_cat_options(int argc, char *argv[])
+{
+ int i;
+
+ cat_files = TRUE;
+
+ for(i = 1; i < argc; i++) {
+ if(*argv[i] != '-')
+ break;
+ if(strcmp(argv[i], "-help") == 0 || strcmp(argv[i], "-h") == 0) {
+ print_cat_options(stdout, argv[0]);
+ exit(0);
+ } else if(strcmp(argv[i], "-no-exit-code") == 0 ||
+ strcmp(argv[i], "-no-exit") == 0)
+ set_exit_code = FALSE;
+ else if(strcmp(argv[i], "-no-wildcards") == 0 ||
+ strcmp(argv[i], "-no-wild") == 0)
+ no_wildcards = TRUE;
+ else if(strcmp(argv[i], "-strict-errors") == 0 ||
+ strcmp(argv[i], "-st") == 0)
+ strict_errors = TRUE;
+ else if(strcmp(argv[i], "-ignore-errors") == 0 ||
+ strcmp(argv[i], "-ig") == 0)
+ ignore_errors = TRUE;
+ else if(strcmp(argv[i], "-version") == 0 ||
+ strcmp(argv[i], "-v") == 0) {
+ print_version("sqfscat");
+ version = TRUE;
+ } else if(strcmp(argv[i], "-processors") == 0 ||
+ strcmp(argv[i], "-p") == 0) {
+ if((++i == argc) ||
+ !parse_number(argv[i],
+ &processors)) {
+ ERROR("%s: -processors missing or invalid "
+ "processor number\n", argv[0]);
+ exit(1);
+ }
+ if(processors < 1) {
+ ERROR("%s: -processors should be 1 or larger\n",
+ argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-data-queue") == 0 ||
+ strcmp(argv[i], "-da") == 0) {
+ if((++i == argc) ||
+ !parse_number(argv[i],
+ &data_buffer_size)) {
+ ERROR("%s: -data-queue missing or invalid "
+ "queue size\n", argv[0]);
+ exit(1);
+ }
+ if(data_buffer_size < 1) {
+ ERROR("%s: -data-queue should be 1 Mbyte or "
+ "larger\n", argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-frag-queue") == 0 ||
+ strcmp(argv[i], "-fr") == 0) {
+ if((++i == argc) ||
+ !parse_number(argv[i],
+ &fragment_buffer_size)) {
+ ERROR("%s: -frag-queue missing or invalid "
+ "queue size\n", argv[0]);
+ exit(1);
+ }
+ if(fragment_buffer_size < 1) {
+ ERROR("%s: -frag-queue should be 1 Mbyte or "
+ "larger\n", argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-regex") == 0 ||
+ strcmp(argv[i], "-r") == 0)
+ use_regex = TRUE;
+ else if(strcmp(argv[i], "-offset") == 0 ||
+ strcmp(argv[i], "-o") == 0) {
+ if((++i == argc) ||
+ !parse_numberll(argv[i], &start_offset,
+ 1)) {
+ ERROR("%s: %s missing or invalid offset size\n",
+ argv[0], argv[i - 1]);
+ exit(1);
+ }
+ } else {
+ print_cat_options(stderr, argv[0]);
+ exit(1);
+ }
+ }
+
+ if(strict_errors && ignore_errors)
+ EXIT_UNSQUASH("Both -strict-errors and -ignore-errors should "
+ "not be set\n");
+ if(strict_errors && set_exit_code == FALSE)
+ EXIT_UNSQUASH("Both -strict-errors and -no-exit-code should "
+ "not be set. All errors are fatal\n");
+
+ if(no_wildcards && use_regex)
+ EXIT_UNSQUASH("Both -no-wildcards and -regex should not be "
+ "set\n");
+ if(i == argc) {
+ if(!version)
+ print_cat_options(stderr, argv[0]);
+ exit(1);
+ }
+
+ return i;
+}
+
+
+int parse_options(int argc, char *argv[])
+{
+ int i, res;
+
+ for(i = 1; i < argc; i++) {
+ if(*argv[i] != '-')
+ break;
+ if(strcmp(argv[i], "-help") == 0 || strcmp(argv[i], "-h") == 0) {
+ print_options(stdout, argv[0]);
+ exit(0);
+ } else if(strcmp(argv[i], "-pseudo-file") == 0 ||
+ strcmp(argv[i], "-pf") == 0) {
+ if(++i == argc) {
+ fprintf(stderr, "%s: -pf missing filename\n",
+ argv[0]);
+ exit(1);
+ }
+ pseudo_name = argv[i];
+ pseudo_file = TRUE;
+ } else if(strcmp(argv[i], "-cat") == 0)
+ cat_files = TRUE;
+ else if(strcmp(argv[i], "-excludes") == 0)
+ treat_as_excludes = TRUE;
+ else if(strcmp(argv[i], "-exclude-list") == 0 ||
+ strcmp(argv[i], "-ex") == 0) {
+ res = parse_excludes(argc - i - 1, argv + i + 1);
+ if(res == 0) {
+ fprintf(stderr, "%s: -exclude-list missing "
+ "filenames or no ';' terminator\n", argv[0]);
+ exit(1);
+ }
+ i += res + 1;
+ } else if(strcmp(argv[i], "-no-exit-code") == 0 ||
+ strcmp(argv[i], "-no-exit") == 0)
+ set_exit_code = FALSE;
+ else if(strcmp(argv[i], "-follow-symlinks") == 0 ||
+ strcmp(argv[i], "-follow") == 0 ||
+ strcmp(argv[i], "-L") == 0) {
+ follow_symlinks = TRUE;
+ no_wildcards = TRUE;
+ } else if(strcmp(argv[i], "missing-symlinks") == 0 ||
+ strcmp(argv[i], "-missing") == 0 ||
+ strcmp(argv[i], "-match") == 0)
+ missing_symlinks = TRUE;
+ else if(strcmp(argv[i], "-no-wildcards") == 0 ||
+ strcmp(argv[i], "-no-wild") == 0)
+ no_wildcards = TRUE;
+ else if(strcmp(argv[i], "-UTC") == 0)
+ use_localtime = FALSE;
+ else if(strcmp(argv[i], "-strict-errors") == 0 ||
+ strcmp(argv[i], "-st") == 0)
+ strict_errors = TRUE;
+ else if(strcmp(argv[i], "-ignore-errors") == 0 ||
+ strcmp(argv[i], "-ig") == 0)
+ ignore_errors = TRUE;
+ else if(strcmp(argv[i], "-quiet") == 0 ||
+ strcmp(argv[i], "-q") == 0)
+ quiet = TRUE;
+ else if(strcmp(argv[i], "-version") == 0 ||
+ strcmp(argv[i], "-v") == 0) {
+ print_version("unsquashfs");
+ version = TRUE;
+ } else if(strcmp(argv[i], "-info") == 0 ||
+ strcmp(argv[i], "-i") == 0)
+ info = TRUE;
+ else if(strcmp(argv[i], "-ls") == 0 ||
+ strcmp(argv[i], "-l") == 0)
+ lsonly = TRUE;
+ else if(strcmp(argv[i], "-lc") == 0) {
+ lsonly = TRUE;
+ concise = TRUE;
+ } else if(strcmp(argv[i], "-no-progress") == 0 ||
+ strcmp(argv[i], "-n") == 0)
+ progress = FALSE;
+ else if(strcmp(argv[i], "-percentage") == 0)
+ percent = progress = TRUE;
+ else if(strcmp(argv[i], "-no-xattrs") == 0 ||
+ strcmp(argv[i], "-no") == 0)
+ no_xattrs = TRUE;
+ else if(strcmp(argv[i], "-xattrs") == 0 ||
+ strcmp(argv[i], "-x") == 0) {
+ if(xattrs_supported())
+ no_xattrs = FALSE;
+ else {
+ ERROR("%s: xattrs are unsupported in "
+ "this build\n", argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-user-xattrs") == 0 ||
+ strcmp(argv[i], "-u") == 0) {
+ if(!xattrs_supported()) {
+ ERROR("%s: xattrs are unsupported in "
+ "this build\n", argv[0]);
+ exit(1);
+ } else {
+ xattr_include_preg = xattr_regex("^user.", "include");
+ no_xattrs = FALSE;
+ }
+ } else if(strcmp(argv[i], "-xattrs-exclude") == 0) {
+ if(!xattrs_supported()) {
+ ERROR("%s: xattrs are unsupported in "
+ "this build\n", argv[0]);
+ exit(1);
+ } else if(++i == argc) {
+ ERROR("%s: -xattrs-exclude missing regex pattern\n", argv[0]);
+ exit(1);
+ } else {
+ xattr_exclude_preg = xattr_regex(argv[i], "exclude");
+ no_xattrs = FALSE;
+ }
+ } else if(strcmp(argv[i], "-xattrs-include") == 0) {
+ if(!xattrs_supported()) {
+ ERROR("%s: xattrs are unsupported in "
+ "this build\n", argv[0]);
+ exit(1);
+ } else if(++i == argc) {
+ ERROR("%s: -xattrs-include missing regex pattern\n", argv[0]);
+ exit(1);
+ } else {
+ xattr_include_preg = xattr_regex(argv[i], "include");
+ no_xattrs = FALSE;
+ }
+ } else if(strcmp(argv[i], "-dest") == 0 ||
+ strcmp(argv[i], "-d") == 0) {
+ if(++i == argc) {
+ fprintf(stderr, "%s: -dest missing filename\n",
+ argv[0]);
+ exit(1);
+ }
+ dest = argv[i];
+ } else if(strcmp(argv[i], "-processors") == 0 ||
+ strcmp(argv[i], "-p") == 0) {
+ if((++i == argc) ||
+ !parse_number(argv[i],
+ &processors)) {
+ ERROR("%s: -processors missing or invalid "
+ "processor number\n", argv[0]);
+ exit(1);
+ }
+ if(processors < 1) {
+ ERROR("%s: -processors should be 1 or larger\n",
+ argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-max-depth") == 0 ||
+ strcmp(argv[i], "-max") == 0) {
+ if((++i == argc) ||
+ !parse_number(argv[i],
+ &max_depth)) {
+ ERROR("%s: -max-depth missing or invalid "
+ "levels\n", argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-data-queue") == 0 ||
+ strcmp(argv[i], "-da") == 0) {
+ if((++i == argc) ||
+ !parse_number(argv[i],
+ &data_buffer_size)) {
+ ERROR("%s: -data-queue missing or invalid "
+ "queue size\n", argv[0]);
+ exit(1);
+ }
+ if(data_buffer_size < 1) {
+ ERROR("%s: -data-queue should be 1 Mbyte or "
+ "larger\n", argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-frag-queue") == 0 ||
+ strcmp(argv[i], "-fr") == 0) {
+ if((++i == argc) ||
+ !parse_number(argv[i],
+ &fragment_buffer_size)) {
+ ERROR("%s: -frag-queue missing or invalid "
+ "queue size\n", argv[0]);
+ exit(1);
+ }
+ if(fragment_buffer_size < 1) {
+ ERROR("%s: -frag-queue should be 1 Mbyte or "
+ "larger\n", argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-force") == 0 ||
+ strcmp(argv[i], "-f") == 0)
+ force = TRUE;
+ else if(strcmp(argv[i], "-stat") == 0 ||
+ strcmp(argv[i], "-s") == 0)
+ stat_sys = TRUE;
+ else if(strcmp(argv[i], "-mkfs-time") == 0 ||
+ strcmp(argv[i], "-fstime") == 0)
+ mkfs_time_opt = TRUE;
+ else if(strcmp(argv[i], "-lls") == 0 ||
+ strcmp(argv[i], "-ll") == 0) {
+ lsonly = TRUE;
+ short_ls = FALSE;
+ } else if(strcmp(argv[i], "-llnumeric") == 0 ||
+ strcmp(argv[i], "-lln") == 0) {
+ lsonly = TRUE;
+ short_ls = FALSE;
+ numeric = TRUE;
+ } else if(strcmp(argv[i], "-llc") == 0) {
+ lsonly = TRUE;
+ short_ls = FALSE;
+ concise = TRUE;
+ } else if(strcmp(argv[i], "-linfo") == 0 ||
+ strcmp(argv[i], "-li") == 0) {
+ info = TRUE;
+ short_ls = FALSE;
+ } else if(strcmp(argv[i], "-extract-file") == 0 ||
+ strcmp(argv[i], "-ef") == 0 ||
+ strcmp(argv[i], "-e") == 0) {
+ if(++i == argc) {
+ fprintf(stderr, "%s: -extract-file missing filename\n",
+ argv[0]);
+ exit(1);
+ }
+ process_extract_files(argv[i]);
+ } else if(strcmp(argv[i], "-exclude-file") == 0 ||
+ strcmp(argv[i], "-excf") == 0 ||
+ strcmp(argv[i], "-exc") == 0) {
+ if(++i == argc) {
+ fprintf(stderr, "%s: -exclude-file missing filename\n",
+ argv[0]);
+ exit(1);
+ }
+ process_exclude_files(argv[i]);
+ } else if(strcmp(argv[i], "-regex") == 0 ||
+ strcmp(argv[i], "-r") == 0)
+ use_regex = TRUE;
+ else if(strcmp(argv[i], "-offset") == 0 ||
+ strcmp(argv[i], "-o") == 0) {
+ if((++i == argc) ||
+ !parse_numberll(argv[i], &start_offset,
+ 1)) {
+ ERROR("%s: %s missing or invalid offset size\n",
+ argv[0], argv[i - 1]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-all-time") == 0 ||
+ strcmp(argv[i], "-all") == 0) {
+ if((++i == argc) ||
+ (!parse_number_unsigned(argv[i], &timeval)
+ && !exec_date(argv[i], &timeval))) {
+ ERROR("%s: %s missing or invalid time value\n",
+ argv[0], argv[i - 1]);
+ exit(1);
+ }
+ time_opt = TRUE;
+ } else if(strcmp(argv[i], "-full-precision") == 0 ||
+ strcmp(argv[i], "-full") == 0)
+ full_precision = TRUE;
+ else {
+ print_options(stderr, argv[0]);
+ exit(1);
+ }
+ }
+
+ if(dest[0] == '\0' && !lsonly)
+ EXIT_UNSQUASH("-dest: <pathname> is empty! Use '.' to "
+ "extract to current directory\n");
+
+ if(lsonly || info)
+ progress = FALSE;
+
+ if(lsonly)
+ quiet = TRUE;
+
+ if(lsonly && pseudo_file)
+ EXIT_UNSQUASH("File listing only (-ls, -lls etc.) and -pf "
+ "should not be set\n");
+
+ if(strict_errors && ignore_errors)
+ EXIT_UNSQUASH("Both -strict-errors and -ignore-errors should "
+ "not be set\n");
+ if(strict_errors && set_exit_code == FALSE)
+ EXIT_UNSQUASH("Both -strict-errors and -no-exit-code should "
+ "not be set. All errors are fatal\n");
+
+ if(missing_symlinks && !follow_symlinks) {
+ follow_symlinks = TRUE;
+ no_wildcards = TRUE;
+ }
+
+ if(no_wildcards && use_regex)
+ EXIT_UNSQUASH("Both -no-wildcards and -regex should not be "
+ "set\n");
+
+ if(pseudo_file && strcmp(pseudo_name, "-") == 0) {
+ info = progress = FALSE;
+ pseudo_stdout = quiet = TRUE;
+ }
+
+#ifdef SQUASHFS_TRACE
+ /*
+ * Disable progress bar if full debug tracing is enabled.
+ * The progress bar in this case just gets in the way of the
+ * debug trace output
+ */
+ progress = FALSE;
+#endif
+
+ if(i == argc) {
+ if(!version)
+ print_options(stderr, argv[0]);
+ exit(1);
+ }
+
+ return i;
+}
+
+
+int main(int argc, char *argv[])
+{
+ int i, n;
+ long res;
+ int exit_code = 0;
+ char *command;
+
+ pthread_mutex_init(&screen_mutex, NULL);
+ root_process = geteuid() == 0;
+ if(root_process)
+ umask(0);
+
+ /* skip leading path components in invocation command */
+ for(command = argv[0] + strlen(argv[0]) - 1; command >= argv[0] && command[0] != '/'; command--);
+
+ if(command < argv[0])
+ command = argv[0];
+ else
+ command++;
+
+ if(strcmp(command, "sqfscat") == 0)
+ i = parse_cat_options(argc, argv);
+ else
+ i = parse_options(argc, argv);
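+
+ /*
+ * Illustrative example (hypothetical invocations, for exposition only):
+ * invoked as "/usr/bin/sqfscat", the loop above leaves command pointing
+ * at "sqfscat" and so the sqfscat option parser is selected; invoked as
+ * "unsquashfs" (no path), command is argv[0] itself and parse_options()
+ * is used.
+ */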
+
+ if((fd = open(argv[i], O_RDONLY)) == -1) {
+ ERROR("Could not open %s, because %s\n", argv[i],
+ strerror(errno));
+ exit(1);
+ }
+
+ if(read_super(argv[i]) == FALSE)
+ EXIT_UNSQUASH("Can't find a valid SQUASHFS superblock on %s\n", argv[i]);
+
+ if(mkfs_time_opt) {
+ printf("%u\n", sBlk.s.mkfs_time);
+ exit(0);
+ }
+
+ if(stat_sys) {
+ s_ops->stat(argv[i]);
+ exit(0);
+ }
+
+ if(!check_compression(comp))
+ exit(1);
+
+ block_size = sBlk.s.block_size;
+ block_log = sBlk.s.block_log;
+
+ /*
+ * Sanity check block size and block log.
+ *
+ * Check they're within correct limits
+ */
+ if(block_size > SQUASHFS_FILE_MAX_SIZE ||
+ block_log > SQUASHFS_FILE_MAX_LOG)
+ EXIT_UNSQUASH("Block size or block_log too large."
+ " File system is corrupt.\n");
+
+ if(block_size < 4096)
+ EXIT_UNSQUASH("Block size too small."
+ " File system is corrupt.\n");
+
+ /*
+ * Check block_size and block_log match
+ */
+ if(block_size != (1 << block_log))
+ EXIT_UNSQUASH("Block size and block_log do not match."
+ " File system is corrupt.\n");
+
+ /*
+ * convert from queue size in Mbytes to queue size in
+ * blocks.
+ *
+ * In doing so, check that the user supplied values do not
+ * overflow a signed int
+ */
+ if(shift_overflow(fragment_buffer_size, 20 - block_log))
+ EXIT_UNSQUASH("Fragment queue size is too large\n");
+ else
+ fragment_buffer_size <<= 20 - block_log;
+
+ if(shift_overflow(data_buffer_size, 20 - block_log))
+ EXIT_UNSQUASH("Data queue size is too large\n");
+ else
+ data_buffer_size <<= 20 - block_log;
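+
+ /*
+ * Worked example (assumed values, for exposition only): with the default
+ * 256 Mbyte data queue and a 128 Kbyte block size (block_log 17), the
+ * shift is 20 - 17 = 3, so the queue becomes 256 << 3 = 2048 blocks.
+ * shift_overflow() rejects values where the result no longer fits in a
+ * signed int.
+ */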
+
+ if(!lsonly)
+ initialise_threads(fragment_buffer_size, data_buffer_size, cat_files);
+
+ res = s_ops->read_filesystem_tables();
+ if(res == FALSE)
+ EXIT_UNSQUASH("File system corruption detected\n");
+
+ if(cat_files)
+ return cat_path(argc - i - 1, argv + i + 1);
+ else if(treat_as_excludes)
+ for(n = i + 1; n < argc; n++)
+ add_exclude(argv[n]);
+ else if(follow_symlinks)
+ resolve_symlinks(argc - i - 1, argv + i + 1);
+ else
+ for(n = i + 1; n < argc; n++)
+ add_extract(argv[n]);
+
+ if(extract) {
+ extracts = init_subdir();
+ extracts = add_subdir(extracts, extract);
+ }
+
+ if(exclude) {
+ excludes = init_subdir();
+ excludes = add_subdir(excludes, exclude);
+ }
+
+ if(pseudo_file)
+ return generate_pseudo(pseudo_name);
+
+ if(!quiet || progress) {
+ res = pre_scan(dest, SQUASHFS_INODE_BLK(sBlk.s.root_inode),
+ SQUASHFS_INODE_OFFSET(sBlk.s.root_inode), extracts,
+ excludes, 1);
+ if(res == FALSE && set_exit_code)
+ exit_code = 2;
+
+ free_inumber_table();
+ inode_number = 1;
+ free_lookup_table(FALSE);
+
+ if(!quiet) {
+ printf("Parallel unsquashfs: Using %d processor%s\n",
+ processors, processors == 1 ? "" : "s");
+
+ printf("%u inodes (%lld blocks) to write\n\n",
+ total_inodes, total_blocks);
+ }
+
+ enable_progress_bar();
+ }
+
+ res = dir_scan(dest, SQUASHFS_INODE_BLK(sBlk.s.root_inode),
+ SQUASHFS_INODE_OFFSET(sBlk.s.root_inode), extracts, excludes, 1);
+ if(res == FALSE && set_exit_code)
+ exit_code = 2;
+
+ if(!lsonly) {
+ queue_put(to_writer, NULL);
+ res = (long) queue_get(from_writer);
+ if(res == TRUE && set_exit_code)
+ exit_code = 2;
+ }
+
+ disable_progress_bar();
+
+ if(!quiet) {
+ printf("\n");
+ printf("created %d %s\n", file_count, file_count == 1 ? "file" : "files");
+ printf("created %d %s\n", dir_count, dir_count == 1 ? "directory" : "directories");
+ printf("created %d %s\n", sym_count, sym_count == 1 ? "symlink" : "symlinks");
+ printf("created %d %s\n", dev_count, dev_count == 1 ? "device" : "devices");
+ printf("created %d %s\n", fifo_count, fifo_count == 1 ? "fifo" : "fifos");
+ printf("created %d %s\n", socket_count, socket_count == 1 ? "socket" : "sockets");
+ printf("created %d %s\n", hardlnk_count, hardlnk_count == 1 ? "hardlink" : "hardlinks");
+ }
+
+ return exit_code;
+}
diff --git a/squashfs-tools/unsquashfs.h b/squashfs-tools/unsquashfs.h
new file mode 100644
index 0000000..8871d6f
--- /dev/null
+++ b/squashfs-tools/unsquashfs.h
@@ -0,0 +1,345 @@
+#ifndef UNSQUASHFS_H
+#define UNSQUASHFS_H
+/*
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2009, 2010, 2013, 2014, 2019, 2021, 2022, 2023
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * unsquashfs.h
+ */
+
+#define TRUE 1
+#define FALSE 0
+#include <stdio.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <utime.h>
+#include <pwd.h>
+#include <grp.h>
+#include <time.h>
+#include <regex.h>
+#include <signal.h>
+#include <pthread.h>
+#include <math.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+
+#include "endian_compat.h"
+#include "squashfs_fs.h"
+#include "unsquashfs_error.h"
+
+#define TABLE_HASH(start) (start & 0xffff)
+
+/*
+ * Unified superblock containing fields for all superblocks
+ */
+struct super_block {
+ struct squashfs_super_block s;
+ /* fields only used by squashfs 3 and earlier layouts */
+ unsigned int no_uids;
+ unsigned int no_guids;
+ long long uid_start;
+ long long guid_start;
+ /* fields only used by squashfs 4 */
+ unsigned int xattr_ids;
+};
+
+
+struct hash_table_entry {
+ long long start;
+ int length;
+ void *buffer;
+ long long next_index;
+ struct hash_table_entry *next;
+};
+
+struct inode {
+ int blocks;
+ long long block_start;
+ unsigned int block_offset;
+ long long data;
+ unsigned int fragment;
+ int frag_bytes;
+ gid_t gid;
+ unsigned int inode_number;
+ int mode;
+ int offset;
+ long long start;
+ char *symlink;
+ time_t time;
+ int type;
+ uid_t uid;
+ char sparse;
+ unsigned int xattr;
+};
+
+typedef struct squashfs_operations {
+ struct dir *(*opendir)(unsigned int block_start,
+ unsigned int offset, struct inode **i);
+ void (*read_fragment)(unsigned int fragment, long long *start_block,
+ int *size);
+ void (*read_block_list)(unsigned int *block_list, long long start,
+ unsigned int offset, int blocks);
+ struct inode *(*read_inode)(unsigned int start_block,
+ unsigned int offset);
+ int (*read_filesystem_tables)();
+ void (*stat)(char *);
+} squashfs_operations;
+
+struct test {
+ int mask;
+ int value;
+ int position;
+ char mode;
+};
+
+
+/* Cache status struct. Caches are used to keep
+ track of memory buffers passed between different threads */
+struct cache {
+ int max_buffers;
+ int count;
+ int used;
+ int buffer_size;
+ int wait_free;
+ int wait_pending;
+ pthread_mutex_t mutex;
+ pthread_cond_t wait_for_free;
+ pthread_cond_t wait_for_pending;
+ struct cache_entry *free_list;
+ struct cache_entry *hash_table[65536];
+};
+
+/* struct describing a cache entry passed between threads */
+struct cache_entry {
+ struct cache *cache;
+ long long block;
+ int size;
+ int used;
+ int error;
+ int pending;
+ struct cache_entry *hash_next;
+ struct cache_entry *hash_prev;
+ struct cache_entry *free_next;
+ struct cache_entry *free_prev;
+ char *data;
+};
+
+/* struct describing queues used to pass data between threads */
+struct queue {
+ int size;
+ int readp;
+ int writep;
+ pthread_mutex_t mutex;
+ pthread_cond_t empty;
+ pthread_cond_t full;
+ void **data;
+};
+
+/* default size of fragment buffer in Mbytes */
+#define FRAGMENT_BUFFER_DEFAULT 256
+/* default size of data buffer in Mbytes */
+#define DATA_BUFFER_DEFAULT 256
+
+#define DIR_ENT_SIZE 16
+
+struct dir_ent {
+ char *name;
+ unsigned int start_block;
+ unsigned int offset;
+ unsigned int type;
+ struct dir_ent *next;
+};
+
+struct dir {
+ int dir_count;
+ unsigned int mode;
+ uid_t uid;
+ gid_t guid;
+ unsigned int mtime;
+ unsigned int xattr;
+ struct dir_ent *dirs;
+ struct dir_ent *cur_entry;
+};
+
+struct file_entry {
+ int offset;
+ int size;
+ struct cache_entry *buffer;
+};
+
+
+struct squashfs_file {
+ int fd;
+ int blocks;
+ long long file_size;
+ int mode;
+ uid_t uid;
+ gid_t gid;
+ time_t time;
+ char *pathname;
+ char sparse;
+ unsigned int xattr;
+};
+
+struct path_entry {
+ char *name;
+ int type;
+ regex_t *preg;
+ struct pathname *paths;
+};
+
+struct pathname {
+ int names;
+ struct path_entry *name;
+};
+
+struct pathnames {
+ int count;
+ struct pathname *path[0];
+};
+
+#define PATHS_ALLOC_SIZE 10
+#define PATH_TYPE_LINK 1
+#define PATH_TYPE_EXTRACT 2
+#define PATH_TYPE_EXCLUDE 4
+
+struct directory_level {
+ unsigned int start_block;
+ unsigned int offset;
+ char *name;
+};
+
+struct symlink {
+ char *pathname;
+ struct symlink *next;
+};
+
+struct directory_stack {
+ int size;
+ unsigned int type;
+ unsigned int start_block;
+ unsigned int offset;
+ char *name;
+ struct directory_level *stack;
+ struct symlink *symlink;
+};
+
+#define MAX_FOLLOW_SYMLINKS 256
+
+/* These macros implement a bit-table to track whether directories have been
+ * already visited. This is to trap corrupted filesystems which have multiple
+ * links to the same directory, which is invalid, and which may also create
+ * a directory loop, where Unsquashfs will endlessly recurse until either
+ * the pathname is too large (extracting), or the stack overflows.
+ *
+ * Each index entry is 8 Kbytes, and tracks 65536 inode numbers. The index is
+ * allocated on demand because Unsquashfs may not walk the complete filesystem.
+ */
+#define INUMBER_INDEXES(INODES) ((((INODES) - 1) >> 16) + 1)
+#define INUMBER_INDEX(NUMBER) ((NUMBER) >> 16)
+#define INUMBER_OFFSET(NUMBER) (((NUMBER) & 0xffff) >> 5)
+#define INUMBER_BIT(NUMBER) (1 << ((NUMBER) & 0x1f))
+#define INUMBER_BYTES 8192
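+
+/* Illustrative sketch of how the bit-table macros are intended to be used.
+ * The real implementation is inumber_lookup() in unsquash-34.c; the table
+ * name below is hypothetical:
+ *
+ *	unsigned int **table;	// INUMBER_INDEXES(inodes) pointers
+ *	unsigned int n = inode_number;
+ *
+ *	if(table[INUMBER_INDEX(n)] == NULL)
+ *		table[INUMBER_INDEX(n)] = calloc(1, INUMBER_BYTES);
+ *	if(table[INUMBER_INDEX(n)][INUMBER_OFFSET(n)] & INUMBER_BIT(n))
+ *		return TRUE;	// already seen: duplicate link or directory loop
+ *	table[INUMBER_INDEX(n)][INUMBER_OFFSET(n)] |= INUMBER_BIT(n);
+ *	return FALSE;
+ */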
+
+/* These macros implement a lookup table to track creation of (non-directory)
+ * inodes, and to discover if a hard-link to a previously created file should
+ * be made.
+ *
+ * Each index entry is 32 Kbytes, and tracks 4096 inode numbers. The index is
+ * allocated on demand because Unsquashfs may not walk the complete filesystem.
+ */
+#define LOOKUP_INDEXES(INODES) ((((INODES) - 1) >> 12) + 1)
+#define LOOKUP_INDEX(NUMBER) ((NUMBER) >> 12)
+#define LOOKUP_OFFSET(NUMBER) ((NUMBER) & 0xfff)
+#define LOOKUP_BYTES 32768
+#define LOOKUP_OFFSETS 4096
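+
+/* Illustrative sketch of the intended hard-link handling built on this table.
+ * The real code is lookup()/insert_lookup() in unsquash-34.c; the surrounding
+ * logic below is hypothetical:
+ *
+ *	char *first = lookup(inode_number);
+ *
+ *	if(first == NULL)
+ *		insert_lookup(inode_number, strdup(pathname));	// first instance created
+ *	else
+ *		link(first, pathname);	// later instances become hard-links to it
+ */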
+
+/* Maximum transfer size for Linux read() call on both 32-bit and 64-bit systems.
+ * See READ(2) */
+#define MAXIMUM_READ_SIZE 0x7ffff000
+
+/* globals */
+extern struct super_block sBlk;
+extern int swap;
+extern struct hash_table_entry *directory_table_hash[65536];
+extern pthread_mutex_t screen_mutex;
+extern int progress_enabled;
+extern int inode_number;
+extern int lookup_type[];
+extern int fd;
+extern int no_xattrs;
+extern struct queue *to_reader, *to_inflate, *to_writer;
+extern struct cache *fragment_cache, *data_cache;
+extern struct compressor *comp;
+extern int use_localtime;
+extern unsigned int timeval;
+extern int time_opt;
+
+/* unsquashfs.c */
+extern int read_inode_data(void *, long long *, unsigned int *, int);
+extern int read_directory_data(void *, long long *, unsigned int *, int);
+extern int read_fs_bytes(int fd, long long, long long, void *);
+extern int read_block(int, long long, long long *, int, void *);
+extern void enable_progress_bar();
+extern void disable_progress_bar();
+extern void dump_queue(struct queue *);
+extern void dump_cache(struct cache *);
+extern int write_bytes(int, char *, int);
+
+/* unsquash-1.c */
+int read_super_1(squashfs_operations **, void *);
+
+/* unsquash-2.c */
+int read_super_2(squashfs_operations **, void *);
+
+/* unsquash-3.c */
+int read_super_3(char *, squashfs_operations **, void *);
+
+/* unsquash-4.c */
+int read_super_4(squashfs_operations **);
+
+/* unsquash-123.c */
+extern int read_ids(int, long long, long long, unsigned int **);
+
+/* unsquash-34.c */
+extern long long *alloc_index_table(int);
+extern int inumber_lookup(unsigned int);
+extern void free_inumber_table();
+extern char *lookup(unsigned int);
+extern void insert_lookup(unsigned int, char *);
+extern void free_lookup_table(int);
+
+/* unsquash-1234.c */
+extern int check_name(char *, int);
+extern void squashfs_closedir(struct dir *);
+extern int check_directory(struct dir *);
+
+/* unsquash-12.c */
+extern void sort_directory(struct dir_ent **, int);
+
+/* date.c */
+extern int exec_date(char *, unsigned int *);
+#endif
diff --git a/squashfs-tools/unsquashfs_error.h b/squashfs-tools/unsquashfs_error.h
new file mode 100644
index 0000000..6b90537
--- /dev/null
+++ b/squashfs-tools/unsquashfs_error.h
@@ -0,0 +1,64 @@
+#ifndef UNSQUASHFS_ERROR_H
+#define UNSQUASHFS_ERROR_H
+/*
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2021
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * unsquashfs_error.h
+ */
+
+#include "error.h"
+
+#define INFO(s, args...) \
+ do {\
+ progressbar_info(s, ## args);\
+ } while(0)
+
+#define BAD_ERROR(s, args...) \
+ do {\
+ progressbar_error("FATAL ERROR: " s, ##args); \
+ exit(1); \
+ } while(0)
+
+#define EXIT_UNSQUASH(s, args...) BAD_ERROR(s, ##args)
+
+#define EXIT_UNSQUASH_IGNORE(s, args...) \
+ do {\
+ if(ignore_errors) \
+ ERROR(s, ##args); \
+ else \
+ BAD_ERROR(s, ##args); \
+ } while(0)
+
+#define EXIT_UNSQUASH_STRICT(s, args...) \
+ do {\
+ if(!strict_errors) \
+ ERROR(s, ##args); \
+ else \
+ BAD_ERROR(s, ##args); \
+ } while(0)
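+
+/*
+ * In short: EXIT_UNSQUASH_IGNORE() is fatal unless -ignore-errors was given,
+ * in which case the error is only printed; EXIT_UNSQUASH_STRICT() only prints
+ * the error unless -strict-errors was given, in which case it is fatal.
+ */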
+
+#define MEM_ERROR() \
+ do {\
+ progressbar_error("FATAL ERROR: Out of memory (%s)\n", \
+ __func__); \
+ exit(1); \
+ } while(0)
+#endif
diff --git a/squashfs-tools/unsquashfs_info.c b/squashfs-tools/unsquashfs_info.c
new file mode 100644
index 0000000..2be9f66
--- /dev/null
+++ b/squashfs-tools/unsquashfs_info.c
@@ -0,0 +1,125 @@
+/*
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2013, 2021, 2023
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * unsquashfs_info.c
+ */
+
+#include <pthread.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <stdio.h>
+#include <math.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <dirent.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <string.h>
+
+#include "squashfs_fs.h"
+#include "unsquashfs.h"
+#include "unsquashfs_error.h"
+#include "signals.h"
+
+char *pathname = NULL;
+
+pthread_t info_thread;
+
+
+void disable_info()
+{
+ if(pathname)
+ free(pathname);
+
+ pathname = NULL;
+}
+
+
+void update_info(char *name)
+{
+ if(pathname)
+ free(pathname);
+
+ pathname = name;
+}
+
+
+void dump_state()
+{
+ disable_progress_bar();
+
+ printf("Queue and cache status dump\n");
+ printf("===========================\n");
+
+ printf("file buffer read queue (main thread -> reader thread)\n");
+ dump_queue(to_reader);
+
+ printf("file buffer decompress queue (reader thread -> inflate"
+ " thread(s))\n");
+ dump_queue(to_inflate);
+
+ printf("file buffer write queue (main thread -> writer thread)\n");
+ dump_queue(to_writer);
+
+ printf("\nbuffer cache (uncompressed blocks and compressed blocks "
+ "'in flight')\n");
+ dump_cache(data_cache);
+
+ printf("fragment buffer cache (uncompressed frags and compressed"
+ " frags 'in flight')\n");
+ dump_cache(fragment_cache);
+
+ enable_progress_bar();
+}
+
+
+void *info_thrd(void *arg)
+{
+ sigset_t sigmask;
+ int sig, waiting = 0;
+
+ sigemptyset(&sigmask);
+ sigaddset(&sigmask, SIGQUIT);
+ sigaddset(&sigmask, SIGHUP);
+
+ while(1) {
+ sig = wait_for_signal(&sigmask, &waiting);
+
+ if(sig == SIGQUIT && !waiting) {
+ if(pathname)
+ INFO("%s\n", pathname);
+
+ /* set a one second interval; if ^\ is received
+ again within it, dump the queue and cache status */
+ waiting = 1;
+ } else
+ dump_state();
+ }
+}
+
+
+void init_info()
+{
+ pthread_create(&info_thread, NULL, info_thrd, NULL);
+}
diff --git a/squashfs-tools/unsquashfs_info.h b/squashfs-tools/unsquashfs_info.h
new file mode 100644
index 0000000..f85efd1
--- /dev/null
+++ b/squashfs-tools/unsquashfs_info.h
@@ -0,0 +1,30 @@
+#ifndef UNSQUASHFS_INFO_H
+#define UNSQUASHFS_INFO_H
+/*
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2013, 2014
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * unsquashfs_info.h
+ */
+
+extern void disable_info();
+extern void update_info(char *);
+extern void init_info();
+#endif
diff --git a/squashfs-tools/unsquashfs_xattr.c b/squashfs-tools/unsquashfs_xattr.c
new file mode 100644
index 0000000..377f9e2
--- /dev/null
+++ b/squashfs-tools/unsquashfs_xattr.c
@@ -0,0 +1,302 @@
+/*
+ * Unsquash a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2010, 2012, 2019, 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * unsquashfs_xattr.c
+ */
+
+#include "unsquashfs.h"
+#include "xattr.h"
+
+#include <sys/xattr.h>
+
+#ifdef XATTR_NOFOLLOW /* Apple's xattrs */
+ #define lsetxattr(path_, name_, val_, sz_, flags_) \
+ setxattr(path_, name_, val_, sz_, 0, flags_ | XATTR_NOFOLLOW)
+#endif
+
+#define NOSPACE_MAX 10
+
+extern int root_process;
+extern int ignore_errors;
+extern int strict_errors;
+extern regex_t *xattr_exclude_preg;
+extern regex_t *xattr_include_preg;
+
+int has_xattrs(unsigned int xattr)
+{
+ if(xattr == SQUASHFS_INVALID_XATTR ||
+ sBlk.s.xattr_id_table_start == SQUASHFS_INVALID_BLK)
+ return FALSE;
+ else
+ return TRUE;
+}
+
+
+static void print_xattr_name_value(struct xattr_list *xattr, int writer_fd)
+{
+ unsigned char *value = xattr->value;
+ int i, count = 0, printable = TRUE, res;
+
+ for(i = 0; i < xattr->vsize; i++) {
+ if(value[i] < 32 || value[i] > 126) {
+ printable = FALSE;
+ count += 4;
+ } else if(value[i] == '\\')
+ count += 4;
+ else
+ count ++;
+ }
+
+ if(!printable) {
+ unsigned char *new = malloc(count + 2), *dest;
+ if(new == NULL)
+ MEM_ERROR();
+
+ memcpy(new, "0t", 2);
+ count += 2;
+
+ for(dest = new + 2, i = 0; i < xattr->vsize; i++) {
+ if(value[i] < 32 || value[i] > 126 || value[i] == '\\') {
+ sprintf((char *) dest, "\\%03o", value[i]);
+ dest += 4;
+ } else
+ *dest ++ = value[i];
+ }
+
+ value = new;
+ } else
+ count = xattr->vsize;
+
+ res = dprintf(writer_fd, "%s=", xattr->full_name);
+ if(res == -1)
+ EXIT_UNSQUASH("Failed to write to pseudo output file\n");
+
+ res = write_bytes(writer_fd, (char *) value, count);
+ if(res == -1)
+ EXIT_UNSQUASH("Failed to write to pseudo output file\n");
+
+ res = dprintf(writer_fd, "\n");
+ if(res == -1)
+ EXIT_UNSQUASH("Failed to write to pseudo output file\n");
+
+ if(!printable)
+ free(value);
+}
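+
+/*
+ * Example of the escaping above (hypothetical value, for exposition only):
+ * an xattr value containing a newline, such as "a\nb", is not fully
+ * printable, so it is written with the "0t" prefix and the newline
+ * octal-escaped, giving "0ta\012b".  A fully printable value is written
+ * verbatim with no prefix.
+ */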
+
+
+void print_xattr(char *pathname, unsigned int xattr, int writer_fd)
+{
+ unsigned int count;
+ struct xattr_list *xattr_list;
+ int i, failed, res;
+
+ if(!has_xattrs(xattr))
+ return;
+
+ if(xattr >= sBlk.xattr_ids)
+ EXIT_UNSQUASH("File system corrupted - xattr index in inode too large (xattr: %u)\n", xattr);
+
+ xattr_list = get_xattr(xattr, &count, &failed);
+ if(xattr_list == NULL && failed == FALSE)
+ exit(1);
+
+ if(failed)
+ EXIT_UNSQUASH_STRICT("write_xattr: Failed to read one or more xattrs for %s\n", pathname);
+
+ for(i = 0; i < count; i++) {
+ if(xattr_exclude_preg) {
+ int res = regexec(xattr_exclude_preg,
+ xattr_list[i].full_name, (size_t) 0, NULL, 0);
+
+ if(res == 0)
+ continue;
+ }
+
+ if(xattr_include_preg) {
+ int res = regexec(xattr_include_preg,
+ xattr_list[i].full_name, (size_t) 0, NULL, 0);
+
+ if(res)
+ continue;
+ }
+
+ res = dprintf(writer_fd, "%s x ", pathname);
+ if(res == -1)
+ EXIT_UNSQUASH("Failed to write to pseudo output file\n");
+
+ print_xattr_name_value(&xattr_list[i], writer_fd);
+ }
+
+ free_xattr(xattr_list, count);
+}
+
+
+int write_xattr(char *pathname, unsigned int xattr)
+{
+ unsigned int count;
+ struct xattr_list *xattr_list;
+ int i;
+ static int nonsuper_error = FALSE;
+ static int ignore_xattrs = FALSE;
+ static int nospace_error = 0;
+ int failed;
+
+ if(ignore_xattrs || !has_xattrs(xattr))
+ return TRUE;
+
+ if(xattr >= sBlk.xattr_ids)
+ EXIT_UNSQUASH("File system corrupted - xattr index in inode too large (xattr: %u)\n", xattr);
+
+ xattr_list = get_xattr(xattr, &count, &failed);
+ if(xattr_list == NULL && failed == FALSE)
+ exit(1);
+
+ if(failed)
+ EXIT_UNSQUASH_STRICT("write_xattr: Failed to read one or more xattrs for %s\n", pathname);
+
+ for(i = 0; i < count; i++) {
+ int prefix = xattr_list[i].type & SQUASHFS_XATTR_PREFIX_MASK;
+
+ if(ignore_xattrs)
+ continue;
+
+ if(xattr_exclude_preg) {
+ int res = regexec(xattr_exclude_preg,
+ xattr_list[i].full_name, (size_t) 0, NULL, 0);
+
+ if(res == 0)
+ continue;
+ }
+
+ if(xattr_include_preg) {
+ int res = regexec(xattr_include_preg,
+ xattr_list[i].full_name, (size_t) 0, NULL, 0);
+
+ if(res)
+ continue;
+ }
+
+ if(root_process || prefix == SQUASHFS_XATTR_USER) {
+ int res = lsetxattr(pathname, xattr_list[i].full_name,
+ xattr_list[i].value, xattr_list[i].vsize, 0);
+
+ if(res == -1) {
+ if(errno == ENOTSUP) {
+ /*
+ * If the destination filesystem cannot
+ * support xattrs, print an error and
+ * disable xattr output, as this error is
+ * unlikely to go away, and printing
+ * screenfuls of the same error message
+ * is rather annoying
+ */
+ ERROR("write_xattr: failed to write "
+ "xattr %s for file %s because "
+ "extended attributes are not "
+ "supported by the destination "
+ "filesystem\n",
+ xattr_list[i].full_name,
+ pathname);
+ ERROR("Ignoring xattrs in "
+ "filesystem\n");
+ EXIT_UNSQUASH_STRICT("To avoid this error message, "
+ "specify -no-xattrs\n");
+ ignore_xattrs = TRUE;
+ } else if((errno == ENOSPC || errno == EDQUOT)
+ && nospace_error < NOSPACE_MAX) {
+ /*
+ * Many filesystems like ext2/3/4 have
+ * limits on the amount of xattr
+ * data that can be stored per file
+ * (typically one block or 4K), so
+ * we shouldn't disable xattr output,
+ * as the error may be restricted to one
+ * file only. If we get a lot of these
+ * then suppress the error message
+ */
+ EXIT_UNSQUASH_IGNORE("write_xattr: failed to write "
+ "xattr %s for file %s because "
+ "no extended attribute space "
+ "remaining (per file or "
+ "filesystem limit)\n",
+ xattr_list[i].full_name,
+ pathname);
+ if(++ nospace_error == NOSPACE_MAX)
+ ERROR("%d of these errors "
+ "printed, further error "
+ "messages of this type "
+ "are suppressed!\n",
+ NOSPACE_MAX);
+ } else
+ EXIT_UNSQUASH_IGNORE("write_xattr: failed to write "
+ "xattr %s for file %s because "
+ "%s\n", xattr_list[i].full_name,
+ pathname, strerror(errno));
+ failed = TRUE;
+ }
+ } else if(nonsuper_error == FALSE) {
+ /*
+ * if only user xattrs are being extracted, the
+ * error message is suppressed; if not,
+ * print the error, and then suppress further error
+ * messages to avoid possible screenfuls of the
+ * same error message!
+ */
+ ERROR("write_xattr: could not write xattr %s "
+ "for file %s because you're not "
+ "superuser!\n",
+ xattr_list[i].full_name, pathname);
+ EXIT_UNSQUASH_STRICT("write_xattr: to avoid this error message, either"
+ " specify -xattrs-include '^user.', -no-xattrs, or run as "
+ "superuser!\n");
+ ERROR("Further error messages of this type are "
+ "suppressed!\n");
+ nonsuper_error = TRUE;
+ failed = TRUE;
+ }
+ }
+
+ free_xattr(xattr_list, count);
+
+ return !failed;
+}
+
+
+regex_t *xattr_regex(char *pattern, char *option)
+{
+ int error;
+ regex_t *preg = malloc(sizeof(regex_t));
+
+ if(preg == NULL)
+ MEM_ERROR();
+
+ error = regcomp(preg, pattern, REG_EXTENDED|REG_NOSUB);
+
+ if(error) {
+ char str[1024]; /* overflow safe */
+
+ regerror(error, preg, str, 1024);
+ BAD_ERROR("invalid regex %s in xattrs-%s option, because %s\n",
+ pattern, option, str);
+ }
+
+ return preg;
+}
diff --git a/squashfs-tools/version.mk b/squashfs-tools/version.mk
new file mode 100644
index 0000000..2f17c6b
--- /dev/null
+++ b/squashfs-tools/version.mk
@@ -0,0 +1,2 @@
+HASH := d8cb82d9
+FULLDATE := 2023-03-25 20:53:37 +0000
diff --git a/squashfs-tools/xattr.c b/squashfs-tools/xattr.c
new file mode 100644
index 0000000..f9f4cc3
--- /dev/null
+++ b/squashfs-tools/xattr.c
@@ -0,0 +1,1322 @@
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2008, 2009, 2010, 2012, 2014, 2019, 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * xattr.c
+ */
+
+#include "endian_compat.h"
+
+#define TRUE 1
+#define FALSE 0
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <dirent.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/xattr.h>
+#include <regex.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_swap.h"
+#include "mksquashfs.h"
+#include "xattr.h"
+#include "mksquashfs_error.h"
+#include "progressbar.h"
+#include "pseudo.h"
+#include "tar.h"
+#include "action.h"
+#include "merge_sort.h"
+
+#ifdef XATTR_NOFOLLOW /* Apple's xattrs */
+ #define llistxattr(path_, buf_, sz_) \
+ listxattr(path_, buf_, sz_, XATTR_NOFOLLOW)
+ #define lgetxattr(path_, name_, val_, sz_) \
+ getxattr(path_, name_, val_, sz_, 0, XATTR_NOFOLLOW)
+#endif
+
+/* compressed xattr table */
+static char *xattr_table = NULL;
+static unsigned int xattr_size = 0;
+
+/* cached uncompressed xattr data */
+static char *data_cache = NULL;
+static int cache_bytes = 0, cache_size = 0;
+
+/* cached uncompressed xattr id table */
+static struct squashfs_xattr_id *xattr_id_table = NULL;
+static int xattr_ids = 0;
+
+/* saved compressed xattr table */
+unsigned int sxattr_bytes = 0, stotal_xattr_bytes = 0;
+
+/* saved cached uncompressed xattr data */
+static char *sdata_cache = NULL;
+static int scache_bytes = 0;
+
+/* saved cached uncompressed xattr id table */
+static int sxattr_ids = 0;
+
+/* xattr hash table for value duplicate detection */
+static struct xattr_list *dupl_value[65536];
+
+/* xattr hash table for id duplicate detection */
+static struct dupl_id *dupl_id[65536];
+
+/* xattr-add option names and values */
+static struct xattr_add *xattr_add_list = NULL;
+static int xattr_add_count = 0;
+
+/* file system globals from mksquashfs.c */
+extern int no_xattrs, noX;
+extern long long bytes;
+extern int fd;
+extern unsigned int xattr_bytes, total_xattr_bytes;
+extern regex_t *xattr_exclude_preg;
+extern regex_t *xattr_include_preg;
+
+/* helper functions from mksquashfs.c */
+extern unsigned short get_checksum(char *, int, unsigned short);
+extern void write_destination(int, long long, long long, void *);
+extern long long generic_write_table(long long, void *, int, void *, int);
+extern int mangle(char *, char *, int, int, int, int);
+extern char *pathname(struct dir_ent *);
+
+/* helper functions and definitions from read_xattrs.c */
+extern unsigned int read_xattrs_from_disk(int, struct squashfs_super_block *, int, long long *);
+extern struct xattr_list *get_xattr(int, unsigned int *, int *);
+extern struct prefix prefix_table[];
+
+
+static int xattr_get_type(char *name)
+{
+ int i;
+
+ for(i = 0; prefix_table[i].type != -1; i++) {
+ struct prefix *p = &prefix_table[i];
+ if(strncmp(name, p->prefix, strlen(p->prefix)) == 0)
+ break;
+ }
+
+ return prefix_table[i].type;
+}
+
+
+static void xattr_copy_prefix(struct xattr_list *xattr, int t, char *name)
+{
+ xattr->full_name = strdup(name);
+ xattr->name = xattr->full_name + strlen(prefix_table[t].prefix);
+ xattr->size = strlen(xattr->name);
+}
+
+
+int xattr_get_prefix(struct xattr_list *xattr, char *name)
+{
+ int type = xattr_get_type(name);
+
+ if(type != -1)
+ xattr_copy_prefix(xattr, type, name);
+
+ return type;
+}
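+
+/*
+ * Example (hypothetical xattr name, for exposition only): for the name
+ * "user.comment", xattr_get_type() matches the "user." entry in
+ * prefix_table[], xattr_get_prefix() returns that type, and xattr->name is
+ * left pointing at "comment" within the duplicated xattr->full_name.
+ */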
+
+
+static int read_xattrs_from_system(struct dir_ent *dir_ent, char *filename,
+ struct xattr_list **xattrs)
+{
+ ssize_t size, vsize;
+ char *xattr_names, *p;
+ int i = 0;
+ struct xattr_list *xattr_list = NULL;
+ struct xattr_data *xattr_exc_list;
+ struct xattr_data *xattr_inc_list;
+
+ while(1) {
+ size = llistxattr(filename, NULL, 0);
+ if(size <= 0) {
+ if(size < 0 && errno != ENOTSUP) {
+ ERROR_START("llistxattr for %s failed in "
+ "read_attrs, because %s", filename,
+ strerror(errno));
+ ERROR_EXIT(". Ignoring\n");
+ }
+ return 0;
+ }
+
+ xattr_names = malloc(size);
+ if(xattr_names == NULL)
+ MEM_ERROR();
+
+ size = llistxattr(filename, xattr_names, size);
+ if(size < 0) {
+ free(xattr_names);
+ if(errno == ERANGE)
+ /* xattr list grew? Try again */
+ continue;
+ else {
+ ERROR_START("llistxattr for %s failed in "
+ "read_attrs, because %s", filename,
+ strerror(errno));
+ ERROR_EXIT(". Ignoring\n");
+ return 0;
+ }
+ }
+
+ break;
+ }
+
+ xattr_exc_list = eval_xattr_exc_actions(root_dir, dir_ent);
+ xattr_inc_list = eval_xattr_inc_actions(root_dir, dir_ent);
+
+ for(p = xattr_names; p < xattr_names + size;) {
+ struct xattr_list *x;
+ int res;
+
+ res = match_xattr_exc_actions(xattr_exc_list, p);
+ if(res) {
+ p += strlen(p) + 1;
+ continue;
+ }
+
+ if(xattr_exclude_preg) {
+ res = regexec(xattr_exclude_preg, p, (size_t) 0, NULL, 0);
+ if(res == 0) {
+ p += strlen(p) + 1;
+ continue;
+ }
+ }
+
+ res = match_xattr_inc_actions(xattr_inc_list, p);
+ if(res) {
+ p += strlen(p) + 1;
+ continue;
+ }
+
+ if(xattr_include_preg) {
+ res = regexec(xattr_include_preg, p, (size_t) 0, NULL, 0);
+ if(res) {
+ p += strlen(p) + 1;
+ continue;
+ }
+ }
+
+ x = realloc(xattr_list, (i + 1) * sizeof(struct xattr_list));
+ if(x == NULL)
+ MEM_ERROR();
+ xattr_list = x;
+
+ xattr_list[i].type = xattr_get_prefix(&xattr_list[i], p);
+
+ if(xattr_list[i].type == -1) {
+ ERROR("Unrecognised xattr prefix %s\n", p);
+ p += strlen(p) + 1;
+ continue;
+ }
+
+ p += strlen(p) + 1;
+
+ while(1) {
+ vsize = lgetxattr(filename, xattr_list[i].full_name,
+ NULL, 0);
+ if(vsize < 0) {
+ ERROR_START("lgetxattr failed for %s in "
+ "read_attrs, because %s", filename,
+ strerror(errno));
+ ERROR_EXIT(". Ignoring\n");
+ free(xattr_list[i].full_name);
+ goto failed;
+ }
+
+ xattr_list[i].value = malloc(vsize);
+ if(xattr_list[i].value == NULL)
+ MEM_ERROR();
+
+ vsize = lgetxattr(filename, xattr_list[i].full_name,
+ xattr_list[i].value, vsize);
+ if(vsize < 0) {
+ free(xattr_list[i].value);
+ if(errno == ERANGE)
+ /* xattr grew? Try again */
+ continue;
+ else {
+ ERROR_START("lgetxattr failed for %s "
+ "in read_attrs, because %s",
+ filename, strerror(errno));
+ ERROR_EXIT(". Ignoring\n");
+ free(xattr_list[i].full_name);
+ goto failed;
+ }
+ }
+
+ break;
+ }
+
+ xattr_list[i].vsize = vsize;
+
+ TRACE("read_xattrs_from_system: filename %s, xattr name %s,"
+ " vsize %d\n", filename, xattr_list[i].full_name,
+ xattr_list[i].vsize);
+ i++;
+ }
+
+ free(xattr_names);
+
+ if(i > 0)
+ *xattrs = xattr_list;
+ else
+ free(xattr_list);
+ return i;
+
+failed:
+ while(--i >= 0) {
+ free(xattr_list[i].full_name);
+ free(xattr_list[i].value);
+ }
+ free(xattr_list);
+ free(xattr_names);
+ return 0;
+}
+
+
+static int get_xattr_size(struct xattr_list *xattr)
+{
+ int size = sizeof(struct squashfs_xattr_entry) +
+ sizeof(struct squashfs_xattr_val) + xattr->size;
+
+ if(xattr->type & XATTR_VALUE_OOL)
+ size += XATTR_VALUE_OOL_SIZE;
+ else
+ size += xattr->vsize;
+
+ return size;
+}
+
+
+static void *get_xattr_space(unsigned int req_size, long long *disk)
+{
+ int data_space;
+ unsigned short c_byte;
+
+ /*
+ * Move and compress cached uncompressed data into xattr table.
+ */
+ while(cache_bytes >= SQUASHFS_METADATA_SIZE) {
+ if((xattr_size - xattr_bytes) <
+ ((SQUASHFS_METADATA_SIZE << 1)) + 2) {
+ xattr_table = realloc(xattr_table, xattr_size +
+ (SQUASHFS_METADATA_SIZE << 1) + 2);
+ if(xattr_table == NULL)
+ MEM_ERROR();
+ xattr_size += (SQUASHFS_METADATA_SIZE << 1) + 2;
+ }
+
+ c_byte = mangle(xattr_table + xattr_bytes + BLOCK_OFFSET,
+ data_cache, SQUASHFS_METADATA_SIZE,
+ SQUASHFS_METADATA_SIZE, noX, 0);
+ TRACE("Xattr block @ 0x%x, size %d\n", xattr_bytes, c_byte);
+ SQUASHFS_SWAP_SHORTS(&c_byte, xattr_table + xattr_bytes, 1);
+ xattr_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) + BLOCK_OFFSET;
+ memmove(data_cache, data_cache + SQUASHFS_METADATA_SIZE,
+ cache_bytes - SQUASHFS_METADATA_SIZE);
+ cache_bytes -= SQUASHFS_METADATA_SIZE;
+ }
+
+ /*
+ * Ensure there's enough space in the uncompressed data cache
+ */
+ data_space = cache_size - cache_bytes;
+ if(data_space < req_size) {
+ int realloc_size = req_size - data_space;
+ data_cache = realloc(data_cache, cache_size +
+ realloc_size);
+ if(data_cache == NULL)
+ MEM_ERROR();
+ cache_size += realloc_size;
+ }
+
+ if(disk)
+ *disk = ((long long) xattr_bytes << 16) | cache_bytes;
+ cache_bytes += req_size;
+ return data_cache + cache_bytes - req_size;
+}
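+
+/*
+ * Worked example of the *disk reference returned above (assumed values, for
+ * exposition only): with xattr_bytes 0x2000 (start of the metadata block
+ * within the compressed xattr table) and cache_bytes 0x30 (offset within the
+ * uncompressed block), the encoded reference is
+ * ((long long) 0x2000 << 16) | 0x30 == 0x20000030.
+ */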
+
+
+static struct dupl_id *check_id_dupl(struct xattr_list *xattr_list, int xattrs)
+{
+ struct dupl_id *entry;
+ int i;
+ unsigned short checksum = 0;
+
+ /* compute checksum over all xattrs */
+ for(i = 0; i < xattrs; i++) {
+ struct xattr_list *xattr = &xattr_list[i];
+
+ checksum = get_checksum(xattr->full_name,
+ strlen(xattr->full_name), checksum);
+ checksum = get_checksum(xattr->value,
+ xattr->vsize, checksum);
+ }
+
+ for(entry = dupl_id[checksum]; entry; entry = entry->next) {
+ if (entry->xattrs != xattrs)
+ continue;
+
+ for(i = 0; i < xattrs; i++) {
+ struct xattr_list *xattr = &xattr_list[i];
+ struct xattr_list *dup_xattr = &entry->xattr_list[i];
+
+ if(strcmp(xattr->full_name, dup_xattr->full_name))
+ break;
+
+ if(xattr->vsize != dup_xattr->vsize)
+ break;
+
+ if(memcmp(xattr->value, dup_xattr->value, xattr->vsize))
+ break;
+ }
+
+ if(i == xattrs)
+ break;
+ }
+
+ if(entry == NULL) {
+ /* no duplicate exists */
+ entry = malloc(sizeof(*entry));
+ if(entry == NULL)
+ MEM_ERROR();
+ entry->xattrs = xattrs;
+ entry->xattr_list = xattr_list;
+ entry->xattr_id = SQUASHFS_INVALID_XATTR;
+ entry->next = dupl_id[checksum];
+ dupl_id[checksum] = entry;
+ }
+
+ return entry;
+}
+
+
+static void check_value_dupl(struct xattr_list *xattr)
+{
+ struct xattr_list *entry;
+
+ if(xattr->vsize < XATTR_VALUE_OOL_SIZE)
+ return;
+
+ /* Check if this is a duplicate of an existing value */
+ xattr->vchecksum = get_checksum(xattr->value, xattr->vsize, 0);
+ for(entry = dupl_value[xattr->vchecksum]; entry; entry = entry->vnext) {
+ if(entry->vsize != xattr->vsize)
+ continue;
+
+ if(memcmp(entry->value, xattr->value, xattr->vsize) == 0)
+ break;
+ }
+
+ if(entry == NULL) {
+ /*
+ * No duplicate exists, add to hash table, and mark as
+ * requiring writing
+ */
+ xattr->vnext = dupl_value[xattr->vchecksum];
+ dupl_value[xattr->vchecksum] = xattr;
+ xattr->ool_value = SQUASHFS_INVALID_BLK;
+ } else {
+ /*
+ * Duplicate exists, make type XATTR_VALUE_OOL, and
+ * remember where the duplicate is
+ */
+ xattr->type |= XATTR_VALUE_OOL;
+ xattr->ool_value = entry->ool_value;
+ /* on appending don't free duplicate values because the
+ * duplicate value already points to the non-duplicate value */
+ if(xattr->value != entry->value) {
+ free(xattr->value);
+ xattr->value = entry->value;
+ }
+ }
+}
+
+
+static int get_xattr_id(int xattrs, struct xattr_list *xattr_list,
+ long long xattr_disk, struct dupl_id *xattr_dupl)
+{
+ int i, size = 0;
+ struct squashfs_xattr_id *xattr_id;
+
+ xattr_id_table = realloc(xattr_id_table, (xattr_ids + 1) *
+ sizeof(struct squashfs_xattr_id));
+ if(xattr_id_table == NULL)
+ MEM_ERROR();
+
+ /* get total uncompressed size of xattr data, needed for stat */
+ for(i = 0; i < xattrs; i++)
+ size += strlen(xattr_list[i].full_name) + 1 +
+ xattr_list[i].vsize;
+
+ xattr_id = &xattr_id_table[xattr_ids];
+ xattr_id->xattr = xattr_disk;
+ xattr_id->count = xattrs;
+ xattr_id->size = size;
+
+ /*
+ * keep track of total uncompressed xattr data, needed for mksquashfs
+ * file system summary
+ */
+ total_xattr_bytes += size;
+
+ xattr_dupl->xattr_id = xattr_ids ++;
+ return xattr_dupl->xattr_id;
+}
+
+
+long long write_xattrs()
+{
+ unsigned short c_byte;
+ int i, avail_bytes;
+ char *datap = data_cache;
+ long long start_bytes = bytes;
+ struct squashfs_xattr_table header = {};
+
+ if(xattr_ids == 0)
+ return SQUASHFS_INVALID_BLK;
+
+ /*
+ * Move and compress cached uncompressed data into xattr table.
+ */
+ while(cache_bytes) {
+ if((xattr_size - xattr_bytes) <
+ ((SQUASHFS_METADATA_SIZE << 1)) + 2) {
+ xattr_table = realloc(xattr_table, xattr_size +
+ (SQUASHFS_METADATA_SIZE << 1) + 2);
+ if(xattr_table == NULL)
+ MEM_ERROR();
+ xattr_size += (SQUASHFS_METADATA_SIZE << 1) + 2;
+ }
+
+ avail_bytes = cache_bytes > SQUASHFS_METADATA_SIZE ?
+ SQUASHFS_METADATA_SIZE : cache_bytes;
+ c_byte = mangle(xattr_table + xattr_bytes + BLOCK_OFFSET, datap,
+ avail_bytes, SQUASHFS_METADATA_SIZE, noX, 0);
+ TRACE("Xattr block @ 0x%x, size %d\n", xattr_bytes, c_byte);
+ SQUASHFS_SWAP_SHORTS(&c_byte, xattr_table + xattr_bytes, 1);
+ xattr_bytes += SQUASHFS_COMPRESSED_SIZE(c_byte) + BLOCK_OFFSET;
+ datap += avail_bytes;
+ cache_bytes -= avail_bytes;
+ }
+
+ /*
+ * Write compressed xattr table to file system
+ */
+ write_destination(fd, bytes, xattr_bytes, xattr_table);
+ bytes += xattr_bytes;
+
+ /*
+ * Swap if necessary the xattr id table
+ */
+ for(i = 0; i < xattr_ids; i++)
+ SQUASHFS_INSWAP_XATTR_ID(&xattr_id_table[i]);
+
+ header.xattr_ids = xattr_ids;
+ header.xattr_table_start = start_bytes;
+ SQUASHFS_INSWAP_XATTR_TABLE(&header);
+
+ return generic_write_table(xattr_ids * sizeof(struct squashfs_xattr_id),
+ xattr_id_table, sizeof(header), &header, noX);
+}
+
+
+void free_xattr_list(int xattrs, struct xattr_list *xattr_list)
+{
+ int i;
+
+ for(i = 0; i < xattrs; i++) {
+ free(xattr_list[i].full_name);
+ free(xattr_list[i].value);
+ }
+
+ free(xattr_list);
+}
+
+
+int generate_xattrs(int xattrs, struct xattr_list *xattr_list)
+{
+ int total_size, i;
+ int xattr_value_max;
+ void *xp;
+ long long xattr_disk;
+ struct dupl_id *xattr_dupl;
+
+ /*
+ * check if the file xattrs are a complete duplicate of a pre-existing
+ * id
+ */
+ xattr_dupl = check_id_dupl(xattr_list, xattrs);
+ if(xattr_dupl->xattr_id != SQUASHFS_INVALID_XATTR) {
+ free_xattr_list(xattrs, xattr_list);
+ return xattr_dupl->xattr_id;
+ }
+
+ /*
+ * Scan the xattr_list deciding which type to assign to each
+ * xattr. The choice is fairly straightforward, and depends on the
+ * size of each xattr name/value and the overall size of the
+ * resultant xattr list stored in the xattr metadata table.
+ *
+ * Choices are whether to store data inline or out of line.
+ *
+	 * The overall goal is to optimise xattr scanning and lookup, and
+	 * to enable the file system layout to scale from a couple of
+	 * small xattr name/values to a large number of large xattr
+	 * names/values without affecting performance, while still storing
+	 * the common case of a couple of small xattr name/values
+	 * efficiently.
+	 *
+	 * The code repeatedly scans the list, moving xattr data out of
+	 * line if it exceeds xattr_value_max, which is initially
+	 * XATTR_INLINE_MAX.  If the resulting uncompressed xattr list is
+	 * still larger than XATTR_TARGET_MAX, data is moved out of line
+	 * more aggressively, by successively halving the inline threshold
+	 * (1/2, then 1/4, 1/8 of XATTR_INLINE_MAX) until the target is
+	 * achieved or there is nothing left to move out of line.  A
+	 * standalone sketch of this scan follows the function.
+	 */
+ xattr_value_max = XATTR_INLINE_MAX;
+ while(1) {
+ for(total_size = 0, i = 0; i < xattrs; i++) {
+ struct xattr_list *xattr = &xattr_list[i];
+ xattr->type &= XATTR_PREFIX_MASK; /* all inline */
+ if (xattr->vsize > xattr_value_max)
+ xattr->type |= XATTR_VALUE_OOL;
+
+ total_size += get_xattr_size(xattr);
+ }
+
+ /*
+ * If the total size of the uncompressed xattr list is <=
+ * XATTR_TARGET_MAX we're done
+ */
+ if(total_size <= XATTR_TARGET_MAX)
+ break;
+
+ if(xattr_value_max == XATTR_VALUE_OOL_SIZE)
+ break;
+
+ /*
+ * Inline target not yet at minimum and so reduce it, and
+ * try again
+ */
+ xattr_value_max /= 2;
+ if(xattr_value_max < XATTR_VALUE_OOL_SIZE)
+ xattr_value_max = XATTR_VALUE_OOL_SIZE;
+ }
+
+ /*
+ * Check xattr values for duplicates
+ */
+ for(i = 0; i < xattrs; i++) {
+ check_value_dupl(&xattr_list[i]);
+ }
+
+ /*
+ * Add each out of line value to the file system xattr table
+ * if it doesn't already exist as a duplicate
+ */
+ for(i = 0; i < xattrs; i++) {
+ struct xattr_list *xattr = &xattr_list[i];
+
+ if((xattr->type & XATTR_VALUE_OOL) &&
+ (xattr->ool_value == SQUASHFS_INVALID_BLK)) {
+ struct squashfs_xattr_val val;
+ int size = sizeof(val) + xattr->vsize;
+ xp = get_xattr_space(size, &xattr->ool_value);
+ val.vsize = xattr->vsize;
+ SQUASHFS_SWAP_XATTR_VAL(&val, xp);
+ memcpy(xp + sizeof(val), xattr->value, xattr->vsize);
+ }
+ }
+
+ /*
+ * Create xattr list and add to file system xattr table
+ */
+ get_xattr_space(0, &xattr_disk);
+ for(i = 0; i < xattrs; i++) {
+ struct xattr_list *xattr = &xattr_list[i];
+ struct squashfs_xattr_entry entry;
+ struct squashfs_xattr_val val;
+
+ xp = get_xattr_space(sizeof(entry) + xattr->size, NULL);
+ entry.type = xattr->type;
+ entry.size = xattr->size;
+ SQUASHFS_SWAP_XATTR_ENTRY(&entry, xp);
+ memcpy(xp + sizeof(entry), xattr->name, xattr->size);
+
+ if(xattr->type & XATTR_VALUE_OOL) {
+ int size = sizeof(val) + XATTR_VALUE_OOL_SIZE;
+ xp = get_xattr_space(size, NULL);
+ val.vsize = XATTR_VALUE_OOL_SIZE;
+ SQUASHFS_SWAP_XATTR_VAL(&val, xp);
+ SQUASHFS_SWAP_LONG_LONGS(&xattr->ool_value, xp +
+ sizeof(val), 1);
+ } else {
+ int size = sizeof(val) + xattr->vsize;
+ xp = get_xattr_space(size, &xattr->ool_value);
+ val.vsize = xattr->vsize;
+ SQUASHFS_SWAP_XATTR_VAL(&val, xp);
+ memcpy(xp + sizeof(val), xattr->value, xattr->vsize);
+ }
+ }
+
+ /*
+ * Add to xattr id lookup table
+ */
+ return get_xattr_id(xattrs, xattr_list, xattr_disk, xattr_dupl);
+}
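+
+
+/*
+ * Standalone sketch of the inline/out-of-line scan in generate_xattrs()
+ * above, with the squashfs-specific bookkeeping stripped out.  The
+ * names vsize_of() and ENTRY_OVERHEAD are illustrative only and are
+ * not part of Mksquashfs:
+ *
+ *	int threshold = XATTR_INLINE_MAX;
+ *
+ *	for(;;) {
+ *		int total = 0, i;
+ *
+ *		for(i = 0; i < xattrs; i++)
+ *			total += ENTRY_OVERHEAD +
+ *				(vsize_of(i) > threshold ?
+ *				XATTR_VALUE_OOL_SIZE : vsize_of(i));
+ *
+ *		if(total <= XATTR_TARGET_MAX ||
+ *				threshold == XATTR_VALUE_OOL_SIZE)
+ *			break;
+ *
+ *		threshold /= 2;
+ *		if(threshold < XATTR_VALUE_OOL_SIZE)
+ *			threshold = XATTR_VALUE_OOL_SIZE;
+ *	}
+ */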
+
+
+/*
+ * Instantiate two implementations of merge sort with different types and names
+ */
+SORT(sort_list, xattr_add, name, next);
+SORT(sort_xattr_list, xattr_list, full_name, vnext);
+
+
+int read_xattrs(void *d, int type)
+{
+ struct dir_ent *dir_ent = d;
+ struct inode_info *inode = dir_ent->inode;
+ char *filename = pathname(dir_ent);
+ struct xattr_list *xattr_list = NULL, *head;
+ int count, i = 0, j;
+ struct xattr_add *l1 = xattr_add_list, *l2 = NULL, *l3 = NULL;
+ struct xattr_add *action_add_list;
+
+ if(no_xattrs || inode->root_entry)
+ return SQUASHFS_INVALID_XATTR;
+
+ if(IS_TARFILE(inode))
+ i = read_xattrs_from_tarfile(inode, &xattr_list);
+ else if(!inode->dummy_root_dir && !IS_PSEUDO(inode))
+ i = read_xattrs_from_system(dir_ent, filename, &xattr_list);
+
+ action_add_list = eval_xattr_add_actions(root_dir, dir_ent, &count);
+
+ /*
+ * At this point we may have up to 3 lists of xattrs:
+ *
+ * 1. a list of xattrs created by the global xattrs-add command line
+ * 2. a list of xattrs created by one or more pseudo xattr definitions
+ * on this file.
+ * 3. a list of xattrs created by one or more xattr add actions on this
+ * file.
+ *
+	 * The global xattrs are sorted, but the pseudo xattr list and action
+	 * xattr list are not.
+	 *
+	 * So sort the pseudo and action lists, and merge the three sorted lists
+	 * together whilst adding them to the xattr_list (a generic sketch of
+	 * this merge follows the function)
+ */
+
+ if(inode->xattr) {
+ sort_list(&(inode->xattr->xattr), inode->xattr->count);
+ l2 = inode->xattr->xattr;
+ }
+
+ if(action_add_list) {
+ sort_list(&action_add_list, count);
+ l3 = action_add_list;
+ }
+
+ while(l1 || l2 || l3) {
+ struct xattr_list *x;
+ struct xattr_add *entry;
+
+ if(l1 && l2 && l3) {
+ if(strcmp(l1->name, l2->name) <= 0) {
+ if(strcmp(l1->name, l3->name) <= 0) {
+					entry = l1;
+ l1 = l1->next;
+ } else {
+ entry = l3;
+ l3 = l3->next;
+ }
+ } else {
+ if(strcmp(l2->name, l3->name) <= 0) {
+ entry = l2;
+ l2 = l2->next;
+ } else {
+ entry = l3;
+ l3 = l3->next;
+ }
+ }
+ } else if(l1 && l2) {
+ if(strcmp(l1->name, l2->name) <= 0) {
+ entry = l1;
+ l1 = l1->next;
+ } else {
+ entry = l2;
+ l2 = l2->next;
+ }
+ } else if(l1 && l3) {
+ if(strcmp(l1->name, l3->name) <= 0) {
+ entry = l1;
+ l1 = l1->next;
+ } else {
+ entry = l3;
+ l3 = l3->next;
+ }
+ } else if(l2 && l3) {
+ if(strcmp(l2->name, l3->name) <= 0) {
+ entry = l2;
+ l2 = l2->next;
+ } else {
+ entry = l3;
+ l3 = l3->next;
+ }
+ } else if(l1) {
+ entry = l1;
+ l1 = l1->next;
+ } else if(l2) {
+ entry = l2;
+ l2 = l2->next;
+ } else {
+ entry = l3;
+ l3 = l3->next;
+ }
+
+ /*
+ * User extended attributes are only allowed for files and
+ * directories. See man 7 xattr for explanation.
+ */
+ if((entry->type == SQUASHFS_XATTR_USER) &&
+ (type != SQUASHFS_FILE_TYPE &&
+ type != SQUASHFS_DIR_TYPE))
+ continue;
+
+ x = realloc(xattr_list, (i + 1) * sizeof(struct xattr_list));
+ if(x == NULL)
+ MEM_ERROR();
+ xattr_list = x;
+
+ xattr_list[i].type = entry->type;
+ xattr_copy_prefix(&xattr_list[i], entry->type, entry->name);
+
+ xattr_list[i].value = malloc(entry->vsize);
+ if(xattr_list[i].value == NULL)
+ MEM_ERROR();
+
+ memcpy(xattr_list[i].value, entry->value, entry->vsize);
+ xattr_list[i].vsize = entry->vsize;
+
+ TRACE("read_xattrs: filename %s, xattr name %s,"
+ " vsize %d\n", filename, xattr_list[i].full_name,
+ xattr_list[i].vsize);
+ i++;
+ }
+
+ if(i == 0)
+ return SQUASHFS_INVALID_XATTR;
+ else if(i == 1)
+ goto skip_dup_check;
+
+ /*
+ * Sort and check xattr list for duplicates
+ */
+ for(j = 1; j < i; j++)
+ xattr_list[j - 1].vnext = &xattr_list[j];
+
+ xattr_list[i - 1].vnext = NULL;
+ head = xattr_list;
+
+ sort_xattr_list(&head, i);
+
+	for(j = 0; j < i - 1; head = head->vnext, j++)
+ if(strcmp(head->full_name, head->vnext->full_name) == 0)
+ BAD_ERROR("Duplicate xattr name %s in file %s\n",
+ head->full_name, filename);
+
+skip_dup_check:
+ return generate_xattrs(i, xattr_list);
+}
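+
+
+/*
+ * Illustrative sketch of the three-way merge in read_xattrs() above,
+ * generalised to an array of N sorted struct xattr_add lists.  It is
+ * not used by Mksquashfs; it simply shows that repeatedly taking the
+ * smallest remaining head keeps the merged output sorted:
+ *
+ *	static struct xattr_add *pick_smallest(struct xattr_add *list[], int n)
+ *	{
+ *		struct xattr_add *entry = NULL;
+ *		int i, smallest = -1;
+ *
+ *		for(i = 0; i < n; i++)
+ *			if(list[i] && (entry == NULL ||
+ *					strcmp(list[i]->name, entry->name) < 0)) {
+ *				entry = list[i];
+ *				smallest = i;
+ *			}
+ *
+ *		if(entry)
+ *			list[smallest] = entry->next;
+ *
+ *		return entry;
+ *	}
+ *
+ * Calling pick_smallest() until it returns NULL visits the entries in
+ * the same order as the hand-unrolled if/else chain above.
+ */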
+
+
+/*
+ * Add the existing xattr ids and xattr metadata in the file system being
+ * appended to, to the in-memory xattr cache. This allows duplicate checking to
+ * take place against the xattrs already in the file system being appended to,
+ * and ensures the pre-existing xattrs are written out along with any new xattrs
+ */
+int get_xattrs(int fd, struct squashfs_super_block *sBlk)
+{
+ int res, i, id;
+ unsigned int count, ids;
+
+ TRACE("get_xattrs\n");
+
+ if(sBlk->xattr_id_table_start == SQUASHFS_INVALID_BLK)
+ return SQUASHFS_INVALID_BLK;
+
+ ids = read_xattrs_from_disk(fd, sBlk, FALSE, NULL);
+ if(ids == 0)
+ EXIT_MKSQUASHFS();
+
+ /*
+ * for each xattr id read and construct its list of xattr
+ * name:value pairs, and add them to the in-memory xattr cache
+ */
+ for(i = 0; i < ids; i++) {
+ struct xattr_list *xattr_list = get_xattr(i, &count, &res);
+ if(xattr_list == NULL && res == FALSE)
+ EXIT_MKSQUASHFS();
+
+ if(res) {
+ free_xattr(xattr_list, count);
+ return FALSE;
+ }
+ id = generate_xattrs(count, xattr_list);
+
+ /*
+ * Sanity check, the new xattr id should be the same as the
+ * xattr id in the original file system
+ */
+ if(id != i) {
+ ERROR("BUG, different xattr_id in get_xattrs\n");
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+
+/*
+ * Save current state of xattrs, needed for restoring state in the event of an
+ * abort in appending
+ */
+void save_xattrs()
+{
+ /* save the current state of the compressed xattr data */
+ sxattr_bytes = xattr_bytes;
+ stotal_xattr_bytes = total_xattr_bytes;
+
+ /*
+ * save the current state of the cached uncompressed xattr data.
+ * Note we have to save the contents of the data cache because future
+ * operations will delete the current contents
+ */
+ sdata_cache = malloc(cache_bytes);
+ if(sdata_cache == NULL)
+ MEM_ERROR();
+
+ memcpy(sdata_cache, data_cache, cache_bytes);
+ scache_bytes = cache_bytes;
+
+ /* save the current state of the xattr id table */
+ sxattr_ids = xattr_ids;
+}
+
+
+/*
+ * Restore xattrs in the event of an abort in appending
+ */
+void restore_xattrs()
+{
+ /* restore the state of the compressed xattr data */
+ xattr_bytes = sxattr_bytes;
+ total_xattr_bytes = stotal_xattr_bytes;
+
+	/* restore the state of the uncompressed xattr data */
+ memcpy(data_cache, sdata_cache, scache_bytes);
+ cache_bytes = scache_bytes;
+
+ /* restore the state of the xattr id table */
+ xattr_ids = sxattr_ids;
+}
+
+
+regex_t *xattr_regex(char *pattern, char *option)
+{
+ int error;
+ regex_t *preg = malloc(sizeof(regex_t));
+
+ if(preg == NULL)
+ MEM_ERROR();
+
+ error = regcomp(preg, pattern, REG_EXTENDED|REG_NOSUB);
+
+ if(error) {
+ char str[1024]; /* overflow safe */
+
+ regerror(error, preg, str, 1024);
+ BAD_ERROR("invalid regex %s in xattrs-%s option, because %s\n",
+ pattern, option, str);
+ }
+
+ return preg;
+}
+
+
+char *base64_decode(char *source, int size, int *bytes)
+{
+ char *dest;
+ unsigned char *dest_ptr, *source_ptr = (unsigned char *) source;
+ int bit_pos = 0;
+ int output = 0;
+ int count;
+
+ if(size % 4 == 0) {
+ /* Check for and ignore any end padding */
+ if(source_ptr[size - 2] == '=' && source_ptr[size - 1] == '=')
+ size -= 2;
+ else if(source_ptr[size - 1] == '=')
+ size --;
+ }
+
+ /* Calculate number of bytes the base64 encoding represents */
+ count = size * 3 / 4;
+
+	dest = malloc(count);
+	if(dest == NULL)
+		MEM_ERROR();
+
+ for(dest_ptr = (unsigned char *) dest; size; size --, source_ptr ++) {
+ int value = *source_ptr;
+
+ if(value >= 'A' && value <= 'Z')
+ value -= 'A';
+ else if(value >= 'a' && value <= 'z')
+ value -= 'a' - 26;
+ else if(value >= '0' && value <= '9')
+ value -= '0' - 52;
+ else if(value == '+')
+ value = 62;
+ else if(value == '/')
+ value = 63;
+ else
+ goto failed;
+
+ if(bit_pos == 24) {
+ dest_ptr[0] = output >> 16;
+ dest_ptr[1] = (output >> 8) & 0xff;
+ dest_ptr[2] = output & 0xff;
+ bit_pos = 0;
+ output = 0;
+ dest_ptr += 3;
+ }
+
+ output = (output << 6) | value;
+ bit_pos += 6;
+ }
+
+ output = output << (24 - bit_pos);
+
+ if(bit_pos == 6)
+ goto failed;
+
+ if(bit_pos >= 12)
+ dest_ptr[0] = output >> 16;
+
+ if(bit_pos >= 18)
+ dest_ptr[1] = (output >> 8) & 0xff;
+
+ if(bit_pos == 24)
+ dest_ptr[2] = output & 0xff;
+
+ *bytes = (dest_ptr - (unsigned char *) dest) + (bit_pos / 8);
+ return dest;
+
+failed:
+ free(dest);
+ return NULL;
+}
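+
+
+/*
+ * Usage sketch (illustrative only): decoding the base64 string
+ * "dXNlcg==" should yield the four bytes "user":
+ *
+ *	int bytes;
+ *	char *buff = base64_decode("dXNlcg==", 8, &bytes);
+ *
+ * On success buff points to "user" (not NUL terminated) and bytes is
+ * set to 4; a NULL return indicates invalid base64 input.
+ */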
+
+
+char *hex_decode(char *source, int size, int *bytes)
+{
+ char *dest;
+ unsigned char *dest_ptr, *source_ptr = (unsigned char *) source;
+ int first = 0;
+
+ if(size % 2 != 0)
+ return NULL;
+
+	dest = malloc(size >> 1);
+ if(dest == NULL)
+ MEM_ERROR();
+
+ for(dest_ptr = (unsigned char *) dest ; size; size --) {
+ int digit = *source_ptr ++;
+
+ if(digit >= 'A' && digit <= 'F')
+ digit -= 'A' - 10;
+ else if(digit >= 'a' && digit <= 'f')
+ digit -= 'a' - 10;
+ else if(digit >= '0' && digit <= '9')
+ digit -= '0';
+ else
+ goto failed;
+
+ if(size % 2 == 0)
+ first = digit;
+ else
+ *dest_ptr ++ = (first << 4) | digit;
+ }
+
+ *bytes = dest_ptr - (unsigned char *) dest;
+
+ return dest;
+
+failed:
+ free(dest);
+ return NULL;
+}
+
+
+int decode_octal(unsigned char *ptr)
+{
+ int i, output = 0;
+
+ for(i = 0; i < 3; i++) {
+ int val = *ptr ++;
+
+ if(val < '0' || val > '7')
+ return -1;
+
+ output = (output << 3) | (val - '0');
+ }
+
+ return output < 256 ? output : -1;
+}
+
+
+char *text_decode(char *source, int *bytes)
+{
+ unsigned char *dest, *dest_ptr, *ptr = (unsigned char *) source;
+ int size = 0;
+
+ for(; *ptr; size ++, ptr ++) {
+ if(*ptr == '\\') {
+ if(ptr[1] != '\0' && ptr[2] != '\0' && ptr[3] != '\0')
+ ptr += 3;
+ else
+ return NULL;
+ }
+ }
+
+ dest = malloc(size);
+ if(dest == NULL)
+ MEM_ERROR();
+
+ *bytes = size;
+
+ for(ptr = (unsigned char *) source, dest_ptr = dest; size; size --) {
+ if(*ptr == '\\') {
+ int res = decode_octal(++ ptr);
+
+ if(res == -1)
+ goto failed;
+
+ *dest_ptr ++ = res;
+ ptr += 3;
+ } else
+ *dest_ptr ++ = *ptr ++;
+ }
+
+ return (char *) dest;
+
+failed:
+ free(dest);
+ return NULL;
+}
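+
+
+/*
+ * Usage sketch (illustrative only): text_decode() expands
+ * backslash-octal escapes, so the six character input a\101b
+ * (written "a\\101b" as a C string literal, so the backslash reaches
+ * text_decode() intact) decodes to the three bytes "aAb":
+ *
+ *	int bytes;
+ *	char *buff = text_decode("a\\101b", &bytes);
+ *
+ * Here buff is "aAb" (not NUL terminated) and bytes is 3; NULL is
+ * returned for a truncated or non-octal escape sequence.
+ */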
+
+
+struct xattr_add *xattr_parse(char *str, char *pre, char *option)
+{
+ struct xattr_add *entry;
+ char *value;
+ int prefix, size;
+
+ /*
+ * Look for the "=" separating the xattr name from the value
+ */
+ for(value = str; *value != '=' && *value != '\0'; value ++);
+ if(*value == '\0') {
+ ERROR("%sinvalid argument \"%s\" in %s option, because no "
+ "`=` found\n", pre, str, option);
+ goto failed;
+ }
+
+ if(value == str) {
+ ERROR("%sinvalid argument \"%s\" in %s option, because xattr "
+ "name is empty\n", pre, str, option);
+ goto failed;
+ }
+
+ if(*(value + 1) == '\0') {
+ ERROR("%sinvalid argument \"%s\" in %s option, because xattr "
+ "value is empty\n", pre, str, option);
+ goto failed;
+ }
+
+ entry = malloc(sizeof(struct xattr_add));
+ if(entry == NULL)
+ MEM_ERROR();
+
+ entry->name = strndup(str, value++ - str);
+ entry->type = xattr_get_type(entry->name);
+
+ if(entry->type == -1) {
+ ERROR("%s%s: unrecognised xattr prefix in %s\n", pre, option,
+ entry->name);
+ goto failed2;
+ }
+
+ /*
+ * Evaluate the format prefix (if any)
+ */
+ if(*(value + 1) == '\0')
+ /*
+		 * By definition an xattr value of 1 byte cannot have a
+		 * format prefix, and is treated as binary
+ */
+ prefix = 0;
+ else
+ prefix = (*value << 8) + *(value + 1);
+
+ switch(prefix) {
+ case PREFIX_BASE64_0S:
+ case PREFIX_BASE64_0s:
+ value += 2;
+ if(*value == 0) {
+ ERROR("%sinvalid argument %s in %s option, because "
+ "xattr value is empty after format prefix 0S "
+ "or 0s\n", pre, str, option);
+ goto failed2;
+ }
+
+ entry->value = base64_decode(value, strlen(value), &size);
+ entry->vsize = size;
+
+ if(entry->value == NULL) {
+ ERROR("%sinvalid argument %s in %s option, because "
+ "invalid base64 value\n", pre, str, option);
+ goto failed2;
+ }
+ break;
+
+ case PREFIX_HEX_0X:
+ case PREFIX_HEX_0x:
+ value += 2;
+ if(*value == 0) {
+ ERROR("%sinvalid argument %s in %s option, because "
+ "xattr value is empty after format prefix 0X "
+ "or 0x\n", pre, str, option);
+ goto failed2;
+ }
+
+ entry->value = hex_decode(value, strlen(value), &size);
+ entry->vsize = size;
+
+ if(entry->value == NULL) {
+ ERROR("%sinvalid argument %s in %s option, because "
+				"invalid hexadecimal value\n", pre, str, option);
+ goto failed2;
+ }
+ break;
+
+ case PREFIX_TEXT_0T:
+ case PREFIX_TEXT_0t:
+ value += 2;
+ if(*value == 0) {
+ ERROR("%sinvalid argument %s in %s option, because "
+ "xattr value is empty after format prefix 0T "
+ "or 0t\n", pre, str, option);
+ goto failed2;
+ }
+
+ entry->value = text_decode(value, &size);
+ entry->vsize = size;
+
+ if(entry->value == NULL) {
+ ERROR("%sinvalid argument %s in %s option, because "
+ "invalid text value\n", pre, str, option);
+ goto failed2;
+ }
+ break;
+
+ case PREFIX_BINARY_0B:
+ case PREFIX_BINARY_0b:
+ value += 2;
+ if(*value == 0) {
+ ERROR("%sinvalid argument %s in %s option, because "
+ "xattr value is empty after format prefix 0B "
+ "or 0b\n", pre, str, option);
+ goto failed2;
+ }
+
+ /* fall through */
+ default:
+ entry->vsize = strlen(value);
+ entry->value = malloc(entry->vsize);
+
+ if(entry->value == NULL)
+ MEM_ERROR();
+
+ memcpy(entry->value, value, entry->vsize);
+ }
+
+ return entry;
+
+failed2:
+ free(entry->name);
+ free(entry);
+failed:
+ return NULL;
+}
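+
+
+/*
+ * Illustrative example (not part of the build): the two-character
+ * format prefix is matched by packing both characters into an int,
+ * e.g. PREFIX_HEX_0x == ('0' << 8) + 'x' == 0x3078, which is exactly
+ * what the (*value << 8) + *(value + 1) computation above produces.
+ * So, assuming xattr_get_type() recognises the usual "user." name
+ * prefix, a call such as
+ *
+ *	struct xattr_add *entry =
+ *		xattr_parse("user.comment=0x68656c6c6f", "", "xattrs-add");
+ *
+ * should return an entry with name "user.comment" and a five byte
+ * value "hello" (vsize == 5) produced by hex_decode().
+ */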
+
+
+void xattrs_add(char *str)
+{
+ struct xattr_add *entry;
+
+ entry = xattr_parse(str, "FATAL ERROR: ", "xattrs-add");
+
+ if(entry) {
+ entry->next = xattr_add_list;
+ xattr_add_list = entry;
+
+ xattr_add_count ++;
+ } else
+ exit(1);
+}
+
+
+int add_xattrs(void)
+{
+ return xattr_add_count;
+}
+
+
+void sort_xattr_add_list(void)
+{
+ sort_list(&xattr_add_list, xattr_add_count);
+}
diff --git a/squashfs-tools/xattr.h b/squashfs-tools/xattr.h
new file mode 100644
index 0000000..0697f83
--- /dev/null
+++ b/squashfs-tools/xattr.h
@@ -0,0 +1,241 @@
+#ifndef XATTR_H
+#define XATTR_H
+/*
+ * Create a squashfs filesystem. This is a highly compressed read only
+ * filesystem.
+ *
+ * Copyright (c) 2010, 2012, 2013, 2014, 2019, 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * xattr.h
+ */
+
+#define XATTR_VALUE_OOL SQUASHFS_XATTR_VALUE_OOL
+#define XATTR_PREFIX_MASK SQUASHFS_XATTR_PREFIX_MASK
+
+#define XATTR_VALUE_OOL_SIZE sizeof(long long)
+
+/* maximum size of xattr value data that will be inlined */
+#define XATTR_INLINE_MAX 128
+
+/* the target size of an inode's xattr name:value list. If it
+ * exceeds this, then xattr value data will be successively moved out
+ * of line until it meets the target */
+#define XATTR_TARGET_MAX 65536
+
+#define IS_XATTR(a) (a != SQUASHFS_INVALID_XATTR)
+
+#define PREFIX_BASE64_0S (0x3000 + 0x53)
+#define PREFIX_BASE64_0s (0x3000 + 0x73)
+#define PREFIX_BINARY_0B (0x3000 + 0x42)
+#define PREFIX_BINARY_0b (0x3000 + 0x62)
+#define PREFIX_HEX_0X (0x3000 + 0x58)
+#define PREFIX_HEX_0x (0x3000 + 0x78)
+#define PREFIX_TEXT_0T (0x3000 + 0x54)
+#define PREFIX_TEXT_0t (0x3000 + 0x74)
+
+struct xattr_list {
+ char *name;
+ char *full_name;
+ int size;
+ int vsize;
+ void *value;
+ int type;
+ long long ool_value;
+ unsigned short vchecksum;
+ struct xattr_list *vnext;
+};
+
+struct dupl_id {
+ struct xattr_list *xattr_list;
+ int xattrs;
+ int xattr_id;
+ struct dupl_id *next;
+};
+
+struct prefix {
+ char *prefix;
+ int type;
+};
+
+struct xattr_add {
+ char *name;
+ char *value;
+ unsigned int vsize;
+ int type;
+ struct xattr_add *next;
+};
+
+extern int generate_xattrs(int, struct xattr_list *);
+
+#ifdef XATTR_SUPPORT
+extern int get_xattrs(int, struct squashfs_super_block *);
+extern int read_xattrs(void *, int type);
+extern long long write_xattrs();
+extern void save_xattrs();
+extern void restore_xattrs();
+extern unsigned int xattr_bytes, total_xattr_bytes;
+extern int write_xattr(char *, unsigned int);
+extern unsigned int read_xattrs_from_disk(int, struct squashfs_super_block *, int, long long *);
+extern struct xattr_list *get_xattr(int, unsigned int *, int *);
+extern void free_xattr(struct xattr_list *, int);
+extern regex_t *xattr_regex(char *pattern, char *option);
+extern void xattrs_add(char *str);
+extern void sort_xattr_add_list(void);
+extern char *base64_decode(char *source, int size, int *bytes);
+extern int add_xattrs(void);
+extern struct xattr_add *xattr_parse(char *, char *, char *);
+extern int read_pseudo_xattr(char *orig_def, char *filename, char *name, char *def);
+extern void print_xattr(char *, unsigned int, int);
+extern int has_xattrs(unsigned int);
+#else
+#include "squashfs_swap.h"
+
+static inline int get_xattrs(int fd, struct squashfs_super_block *sBlk)
+{
+ if(sBlk->xattr_id_table_start != SQUASHFS_INVALID_BLK) {
+ fprintf(stderr, "Xattrs in filesystem! These are not "
+ "supported on this build of Mksquashfs\n");
+ return 0;
+ } else
+ return SQUASHFS_INVALID_BLK;
+}
+
+
+static inline int read_xattrs(void *dir_ent, int type)
+{
+ return SQUASHFS_INVALID_XATTR;
+}
+
+
+static inline long long write_xattrs()
+{
+ return SQUASHFS_INVALID_BLK;
+}
+
+
+static inline void save_xattrs()
+{
+}
+
+
+static inline void restore_xattrs()
+{
+}
+
+
+static inline int write_xattr(char *pathname, unsigned int xattr)
+{
+ return 1;
+}
+
+
+static inline unsigned int read_xattrs_from_disk(int fd, struct squashfs_super_block *sBlk, int sanity_only, long long *table_start)
+{
+ int res;
+ struct squashfs_xattr_table id_table;
+
+ /*
+ * Read sufficient xattr metadata to obtain the start of the xattr
+ * metadata on disk (table_start). This value is needed to do
+ * sanity checking of the filesystem.
+ */
+ res = read_fs_bytes(fd, sBlk->xattr_id_table_start, sizeof(id_table), &id_table);
+ if(res == 0)
+ return 0;
+
+ SQUASHFS_INSWAP_XATTR_TABLE(&id_table);
+
+ /*
+ * id_table.xattr_table_start stores the start of the compressed xattr
+ * metadata blocks. This by definition is also the end of the previous
+ * filesystem table - the id lookup table.
+ */
+ if(table_start != NULL)
+ *table_start = id_table.xattr_table_start;
+
+ return id_table.xattr_ids;
+}
+
+
+static inline struct xattr_list *get_xattr(int i, unsigned int *count, int *res)
+{
+ return NULL;
+}
+
+static inline regex_t *xattr_regex(char *pattern, char *option)
+{
+ return NULL;
+}
+
+static inline void xattrs_add(char *str)
+{
+}
+
+static inline void sort_xattr_add_list(void)
+{
+}
+
+static inline int add_xattrs(void)
+{
+ return 0;
+}
+
+static inline struct xattr_add *xattr_parse(char *a, char *b, char *c)
+{
+ return NULL;
+}
+
+
+static inline int read_pseudo_xattr(char *orig_def, char *filename, char *name, char *def)
+{
+ free(filename);
+ fprintf(stderr, "Xattrs are unsupported in this build\n");
+
+ return 0;
+}
+
+
+static inline void print_xattr(char *pathname, unsigned int xattr, int writer_fd)
+{
+}
+
+
+static inline int has_xattrs(unsigned int xattr)
+{
+ return 0;
+}
+#endif
+
+#ifdef XATTR_SUPPORT
+#define xattrs_supported() TRUE
+#ifdef XATTR_DEFAULT
+#define NOXOPT_STR
+#define XOPT_STR " (default)"
+#define XATTR_DEF 0
+#else
+#define NOXOPT_STR " (default)"
+#define XOPT_STR
+#define XATTR_DEF 1
+#endif
+#else
+#define xattrs_supported() FALSE
+#define NOXOPT_STR " (default)"
+#define XOPT_STR " (unsupported)"
+#define XATTR_DEF 1
+#endif
+#endif
diff --git a/squashfs-tools/xz_wrapper.c b/squashfs-tools/xz_wrapper.c
new file mode 100644
index 0000000..0d650e0
--- /dev/null
+++ b/squashfs-tools/xz_wrapper.c
@@ -0,0 +1,551 @@
+/*
+ * Copyright (c) 2010, 2011, 2012, 2013, 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * xz_wrapper.c
+ *
+ * Support for XZ (LZMA2) compression using XZ Utils liblzma
+ * http://tukaani.org/xz/
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <lzma.h>
+
+#include "squashfs_fs.h"
+#include "xz_wrapper.h"
+#include "compressor.h"
+
+static struct bcj bcj[] = {
+ { "x86", LZMA_FILTER_X86, 0 },
+ { "powerpc", LZMA_FILTER_POWERPC, 0 },
+ { "ia64", LZMA_FILTER_IA64, 0 },
+ { "arm", LZMA_FILTER_ARM, 0 },
+ { "armthumb", LZMA_FILTER_ARMTHUMB, 0 },
+ { "sparc", LZMA_FILTER_SPARC, 0 },
+ { NULL, LZMA_VLI_UNKNOWN, 0 }
+};
+
+static int filter_count = 1;
+static int dictionary_size = 0;
+static float dictionary_percent = 0;
+
+
+/*
+ * This function is called by the options parsing code in mksquashfs.c
+ * to parse any -X compressor option.
+ *
+ * Two specific options are supported:
+ * -Xbcj
+ * -Xdict-size
+ *
+ * This function returns:
+ * >=0 (number of additional args parsed) on success
+ * -1 if the option was unrecognised, or
+ * -2 if the option was recognised, but otherwise bad in
+ * some way (e.g. invalid parameter)
+ *
+ * Note: this function sets internal compressor state, but does not
+ * pass back the results of the parsing other than success/failure.
+ * The xz_dump_options() function is called later to get the options in
+ * a format suitable for writing to the filesystem.
+ */
+static int xz_options(char *argv[], int argc)
+{
+ int i;
+ char *name;
+
+ if(strcmp(argv[0], "-Xbcj") == 0) {
+ if(argc < 2) {
+ fprintf(stderr, "xz: -Xbcj missing filter\n");
+ goto failed;
+ }
+
+ name = argv[1];
+ while(name[0] != '\0') {
+ for(i = 0; bcj[i].name; i++) {
+ int n = strlen(bcj[i].name);
+ if((strncmp(name, bcj[i].name, n) == 0) &&
+ (name[n] == '\0' ||
+ name[n] == ',')) {
+ if(bcj[i].selected == 0) {
+ bcj[i].selected = 1;
+ filter_count++;
+ }
+ name += name[n] == ',' ? n + 1 : n;
+ break;
+ }
+ }
+ if(bcj[i].name == NULL) {
+ fprintf(stderr, "xz: -Xbcj unrecognised "
+ "filter\n");
+ goto failed;
+ }
+ }
+
+ return 1;
+ } else if(strcmp(argv[0], "-Xdict-size") == 0) {
+ char *b;
+ float size;
+
+ if(argc < 2) {
+ fprintf(stderr, "xz: -Xdict-size missing dict-size\n");
+ goto failed;
+ }
+
+ size = strtof(argv[1], &b);
+ if(*b == '%') {
+ if(size <= 0 || size > 100) {
+ fprintf(stderr, "xz: -Xdict-size percentage "
+ "should be 0 < dict-size <= 100\n");
+ goto failed;
+ }
+
+ dictionary_percent = size;
+ dictionary_size = 0;
+ } else {
+ if((float) ((int) size) != size) {
+ fprintf(stderr, "xz: -Xdict-size can't be "
+ "fractional unless a percentage of the"
+ " block size\n");
+ goto failed;
+ }
+
+ dictionary_percent = 0;
+ dictionary_size = (int) size;
+
+ if(*b == 'k' || *b == 'K')
+ dictionary_size *= 1024;
+ else if(*b == 'm' || *b == 'M')
+ dictionary_size *= 1024 * 1024;
+ else if(*b != '\0') {
+ fprintf(stderr, "xz: -Xdict-size invalid "
+ "dict-size\n");
+ goto failed;
+ }
+ }
+
+ return 1;
+ }
+
+ return -1;
+
+failed:
+ return -2;
+}
+
+
+/*
+ * This function is called after all options have been parsed.
+ * It is used to do post-processing on the compressor options using
+ * values that were not expected to be known at option parse time.
+ *
+ * In this case block_size may not be known until after -Xdict-size has
+ * been processed (in the case where -b is specified after -Xdict-size)
+ *
+ * This function returns 0 on successful post processing, or
+ * -1 on error
+ */
+static int xz_options_post(int block_size)
+{
+ /*
+ * if -Xdict-size has been specified use this to compute the datablock
+ * dictionary size
+ */
+ if(dictionary_size || dictionary_percent) {
+ int n;
+
+ if(dictionary_size) {
+ if(dictionary_size > block_size) {
+ fprintf(stderr, "xz: -Xdict-size is larger than"
+ " block_size\n");
+ goto failed;
+ }
+ } else
+ dictionary_size = block_size * dictionary_percent / 100;
+
+ if(dictionary_size < 8192) {
+ fprintf(stderr, "xz: -Xdict-size should be 8192 bytes "
+ "or larger\n");
+ goto failed;
+ }
+
+ /*
+ * dictionary_size must be storable in xz header as either
+ * 2^n or as 2^n+2^(n+1)
+ */
+ n = ffs(dictionary_size) - 1;
+ if(dictionary_size != (1 << n) &&
+ dictionary_size != ((1 << n) + (1 << (n + 1)))) {
+ fprintf(stderr, "xz: -Xdict-size is an unsupported "
+ "value, dict-size must be storable in xz "
+ "header\n");
+ fprintf(stderr, "as either 2^n or as 2^n+2^(n+1). "
+ "Example dict-sizes are 75%%, 50%%, 37.5%%, "
+ "25%%,\n");
+ fprintf(stderr, "or 32K, 16K, 8K etc.\n");
+ goto failed;
+ }
+
+ } else
+ /* No -Xdict-size specified, use defaults */
+ dictionary_size = block_size;
+
+ return 0;
+
+failed:
+ return -1;
+}
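+
+
+/*
+ * Illustrative sketch of the dictionary size check above: a size is
+ * storable in the xz header if it is 2^n or 2^n + 2^(n+1), where n is
+ * the position of the lowest set bit.  For example:
+ *
+ *	static int valid_dict_size(int size)
+ *	{
+ *		int n = ffs(size) - 1;
+ *
+ *		return size == (1 << n) ||
+ *			size == ((1 << n) + (1 << (n + 1)));
+ *	}
+ *
+ * valid_dict_size(32768) and valid_dict_size(49152) return 1 (2^15 and
+ * 2^14 + 2^15 respectively), whereas valid_dict_size(40000) returns 0.
+ */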
+
+
+/*
+ * This function is called by mksquashfs to dump the parsed
+ * compressor options in a format suitable for writing to the
+ * compressor options field in the filesystem (stored immediately
+ * after the superblock).
+ *
+ * This function returns a pointer to the compression options structure
+ * to be stored (and the size), or NULL if there are no compression
+ * options
+ */
+static void *xz_dump_options(int block_size, int *size)
+{
+ static struct comp_opts comp_opts;
+ int flags = 0, i;
+
+ /*
+ * don't store compressor specific options in file system if the
+ * default options are being used - no compressor options in the
+ * file system means the default options are always assumed
+ *
+ * Defaults are:
+ * metadata dictionary size: SQUASHFS_METADATA_SIZE
+ * datablock dictionary size: block_size
+ * 1 filter
+ */
+ if(dictionary_size == block_size && filter_count == 1)
+ return NULL;
+
+ for(i = 0; bcj[i].name; i++)
+ flags |= bcj[i].selected << i;
+
+ comp_opts.dictionary_size = dictionary_size;
+ comp_opts.flags = flags;
+
+ SQUASHFS_INSWAP_COMP_OPTS(&comp_opts);
+
+ *size = sizeof(comp_opts);
+ return &comp_opts;
+}
+
+
+/*
+ * This function is a helper specifically for the append mode of
+ * mksquashfs. Its purpose is to set the internal compressor state
+ * to the stored compressor options in the passed compressor options
+ * structure.
+ *
+ * In effect this function sets up the compressor options
+ * to the same state they were when the filesystem was originally
+ * generated, this is to ensure on appending, the compressor uses
+ * the same compression options that were used to generate the
+ * original filesystem.
+ *
+ * Note, even if there are no compressor options, this function is still
+ * called with an empty compressor structure (size == 0), to explicitly
+ * set the default options, this is to ensure any user supplied
+ * -X options on the appending mksquashfs command line are over-ridden
+ *
+ * This function returns 0 on successful extraction of options, and
+ * -1 on error
+ */
+static int xz_extract_options(int block_size, void *buffer, int size)
+{
+ struct comp_opts *comp_opts = buffer;
+ int flags, i, n;
+
+ if(size == 0) {
+ /* set defaults */
+ dictionary_size = block_size;
+ flags = 0;
+ } else {
+ /* check passed comp opts struct is of the correct length */
+ if(size != sizeof(struct comp_opts))
+ goto failed;
+
+ SQUASHFS_INSWAP_COMP_OPTS(comp_opts);
+
+ dictionary_size = comp_opts->dictionary_size;
+ flags = comp_opts->flags;
+
+ /*
+ * check that the dictionary size seems correct - the dictionary
+		 * size should be 2^n or 2^n+2^(n+1)
+ */
+ n = ffs(dictionary_size) - 1;
+ if(dictionary_size != (1 << n) &&
+ dictionary_size != ((1 << n) + (1 << (n + 1))))
+ goto failed;
+ }
+
+ filter_count = 1;
+ for(i = 0; bcj[i].name; i++) {
+ if((flags >> i) & 1) {
+ bcj[i].selected = 1;
+ filter_count ++;
+ } else
+ bcj[i].selected = 0;
+ }
+
+ return 0;
+
+failed:
+ fprintf(stderr, "xz: error reading stored compressor options from "
+ "filesystem!\n");
+
+ return -1;
+}
+
+
+static void xz_display_options(void *buffer, int size)
+{
+ struct comp_opts *comp_opts = buffer;
+ int dictionary_size, flags, printed;
+ int i, n;
+
+ /* check passed comp opts struct is of the correct length */
+ if(size != sizeof(struct comp_opts))
+ goto failed;
+
+ SQUASHFS_INSWAP_COMP_OPTS(comp_opts);
+
+ dictionary_size = comp_opts->dictionary_size;
+ flags = comp_opts->flags;
+
+ /*
+ * check that the dictionary size seems correct - the dictionary
+	 * size should be 2^n or 2^n+2^(n+1)
+ */
+ n = ffs(dictionary_size) - 1;
+ if(dictionary_size != (1 << n) &&
+ dictionary_size != ((1 << n) + (1 << (n + 1))))
+ goto failed;
+
+ printf("\tDictionary size %d\n", dictionary_size);
+
+ printed = 0;
+ for(i = 0; bcj[i].name; i++) {
+ if((flags >> i) & 1) {
+ if(printed)
+ printf(", ");
+ else
+ printf("\tFilters selected: ");
+ printf("%s", bcj[i].name);
+ printed = 1;
+ }
+ }
+
+ if(!printed)
+ printf("\tNo filters specified\n");
+ else
+ printf("\n");
+
+ return;
+
+failed:
+ fprintf(stderr, "xz: error reading stored compressor options from "
+ "filesystem!\n");
+}
+
+
+/*
+ * This function is called by mksquashfs to initialise the
+ * compressor, before compress() is called.
+ *
+ * This function returns 0 on success, and
+ * -1 on error
+ */
+static int xz_init(void **strm, int block_size, int datablock)
+{
+ int i, j, filters = datablock ? filter_count : 1;
+ struct filter *filter = malloc(filters * sizeof(struct filter));
+ struct xz_stream *stream;
+
+ if(filter == NULL)
+ goto failed;
+
+ stream = *strm = malloc(sizeof(struct xz_stream));
+ if(stream == NULL)
+ goto failed2;
+
+ stream->filter = filter;
+ stream->filters = filters;
+
+ memset(filter, 0, filters * sizeof(struct filter));
+
+ stream->dictionary_size = datablock ? dictionary_size :
+ SQUASHFS_METADATA_SIZE;
+
+ filter[0].filter[0].id = LZMA_FILTER_LZMA2;
+ filter[0].filter[0].options = &stream->opt;
+ filter[0].filter[1].id = LZMA_VLI_UNKNOWN;
+
+ for(i = 0, j = 1; datablock && bcj[i].name; i++) {
+ if(bcj[i].selected) {
+ filter[j].buffer = malloc(block_size);
+ if(filter[j].buffer == NULL)
+ goto failed3;
+ filter[j].filter[0].id = bcj[i].id;
+ filter[j].filter[1].id = LZMA_FILTER_LZMA2;
+ filter[j].filter[1].options = &stream->opt;
+ filter[j].filter[2].id = LZMA_VLI_UNKNOWN;
+ j++;
+ }
+ }
+
+ return 0;
+
+failed3:
+ for(i = 1; i < filters; i++)
+ free(filter[i].buffer);
+ free(stream);
+
+failed2:
+ free(filter);
+
+failed:
+ return -1;
+}
+
+
+static int xz_compress(void *strm, void *dest, void *src, int size,
+ int block_size, int *error)
+{
+ int i;
+ lzma_ret res = 0;
+ struct xz_stream *stream = strm;
+ struct filter *selected = NULL;
+
+ stream->filter[0].buffer = dest;
+
+ for(i = 0; i < stream->filters; i++) {
+ struct filter *filter = &stream->filter[i];
+
+ if(lzma_lzma_preset(&stream->opt, LZMA_PRESET_DEFAULT))
+ goto failed;
+
+ stream->opt.dict_size = stream->dictionary_size;
+
+ filter->length = 0;
+ res = lzma_stream_buffer_encode(filter->filter,
+ LZMA_CHECK_CRC32, NULL, src, size, filter->buffer,
+ &filter->length, block_size);
+
+ if(res == LZMA_OK) {
+ if(!selected || selected->length > filter->length)
+ selected = filter;
+ } else if(res != LZMA_BUF_ERROR)
+ goto failed;
+ }
+
+ if(!selected)
+ /*
+ * Output buffer overflow. Return out of buffer space
+ */
+ return 0;
+
+ if(selected->buffer != dest)
+ memcpy(dest, selected->buffer, selected->length);
+
+ return (int) selected->length;
+
+failed:
+ /*
+ * All other errors return failure, with the compressor
+ * specific error code in *error
+ */
+ *error = res;
+ return -1;
+}
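+
+
+/*
+ * Minimal illustrative sketch of the one-shot liblzma buffer API used
+ * by xz_compress() above, assuming a single LZMA2 filter, no BCJ
+ * filters and the default preset:
+ *
+ *	lzma_options_lzma opt;
+ *	lzma_filter chain[2];
+ *	size_t out_pos = 0;
+ *	lzma_ret res;
+ *
+ *	if(lzma_lzma_preset(&opt, LZMA_PRESET_DEFAULT))
+ *		goto failed;
+ *	opt.dict_size = dictionary_size;
+ *
+ *	chain[0].id = LZMA_FILTER_LZMA2;
+ *	chain[0].options = &opt;
+ *	chain[1].id = LZMA_VLI_UNKNOWN;
+ *
+ *	res = lzma_stream_buffer_encode(chain, LZMA_CHECK_CRC32, NULL,
+ *		src, size, dest, &out_pos, block_size);
+ *
+ * LZMA_OK means out_pos holds the compressed length, LZMA_BUF_ERROR
+ * means the output did not fit within block_size bytes, and anything
+ * else is treated as a hard error.
+ */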
+
+
+static int xz_uncompress(void *dest, void *src, int size, int outsize,
+ int *error)
+{
+ size_t src_pos = 0;
+ size_t dest_pos = 0;
+ uint64_t memlimit = MEMLIMIT;
+
+ lzma_ret res = lzma_stream_buffer_decode(&memlimit, 0, NULL,
+ src, &src_pos, size, dest, &dest_pos, outsize);
+
+ if(res == LZMA_OK && size == (int) src_pos)
+ return (int) dest_pos;
+ else {
+ *error = res;
+ return -1;
+ }
+}
+
+
+static void xz_usage(FILE *stream)
+{
+ fprintf(stream, "\t -Xbcj filter1,filter2,...,filterN\n");
+ fprintf(stream, "\t\tCompress using filter1,filter2,...,filterN in");
+ fprintf(stream, " turn\n\t\t(in addition to no filter), and choose");
+ fprintf(stream, " the best compression.\n");
+ fprintf(stream, "\t\tAvailable filters: x86, arm, armthumb,");
+ fprintf(stream, " powerpc, sparc, ia64\n");
+ fprintf(stream, "\t -Xdict-size <dict-size>\n");
+ fprintf(stream, "\t\tUse <dict-size> as the XZ dictionary size. The");
+ fprintf(stream, " dictionary size\n\t\tcan be specified as a");
+ fprintf(stream, " percentage of the block size, or as an\n\t\t");
+ fprintf(stream, "absolute value. The dictionary size must be less");
+ fprintf(stream, " than or equal\n\t\tto the block size and 8192 bytes");
+ fprintf(stream, " or larger. It must also be\n\t\tstorable in the xz");
+ fprintf(stream, " header as either 2^n or as 2^n+2^(n+1).\n\t\t");
+ fprintf(stream, "Example dict-sizes are 75%%, 50%%, 37.5%%, 25%%, or");
+ fprintf(stream, " 32K, 16K, 8K\n\t\tetc.\n");
+}
+
+
+static int option_args(char *option)
+{
+ if(strcmp(option, "-Xbcj") == 0 ||
+ strcmp(option, "-Xdict-size") == 0)
+ return 1;
+
+ return 0;
+}
+
+
+struct compressor xz_comp_ops = {
+ .init = xz_init,
+ .compress = xz_compress,
+ .uncompress = xz_uncompress,
+ .options = xz_options,
+ .options_post = xz_options_post,
+ .dump_options = xz_dump_options,
+ .extract_options = xz_extract_options,
+ .display_options = xz_display_options,
+ .usage = xz_usage,
+ .option_args = option_args,
+ .id = XZ_COMPRESSION,
+ .name = "xz",
+ .supported = 1
+};
diff --git a/squashfs-tools/xz_wrapper.h b/squashfs-tools/xz_wrapper.h
new file mode 100644
index 0000000..eec4fe2
--- /dev/null
+++ b/squashfs-tools/xz_wrapper.h
@@ -0,0 +1,65 @@
+#ifndef XZ_WRAPPER_H
+#define XZ_WRAPPER_H
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2010
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * xz_wrapper.h
+ *
+ */
+
+#include "endian_compat.h"
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+extern unsigned int inswap_le32(unsigned int);
+
+#define SQUASHFS_INSWAP_COMP_OPTS(s) { \
+ (s)->dictionary_size = inswap_le32((s)->dictionary_size); \
+ (s)->flags = inswap_le32((s)->flags); \
+}
+#else
+#define SQUASHFS_INSWAP_COMP_OPTS(s)
+#endif
+
+#define MEMLIMIT (32 * 1024 * 1024)
+
+struct bcj {
+ char *name;
+ lzma_vli id;
+ int selected;
+};
+
+struct filter {
+ void *buffer;
+ lzma_filter filter[3];
+ size_t length;
+};
+
+struct xz_stream {
+ struct filter *filter;
+ int filters;
+ int dictionary_size;
+ lzma_options_lzma opt;
+};
+
+struct comp_opts {
+ int dictionary_size;
+ int flags;
+};
+#endif
diff --git a/squashfs-tools/zstd_wrapper.c b/squashfs-tools/zstd_wrapper.c
new file mode 100644
index 0000000..3fa8676
--- /dev/null
+++ b/squashfs-tools/zstd_wrapper.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2017, 2021, 2022
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * zstd_wrapper.c
+ *
+ * Support for ZSTD compression http://zstd.net
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <zstd.h>
+#include <zstd_errors.h>
+
+#include "squashfs_fs.h"
+#include "zstd_wrapper.h"
+#include "compressor.h"
+
+static int compression_level = ZSTD_DEFAULT_COMPRESSION_LEVEL;
+
+/*
+ * This function is called by the options parsing code in mksquashfs.c
+ * to parse any -X compressor option.
+ *
+ * This function returns:
+ * >=0 (number of additional args parsed) on success
+ * -1 if the option was unrecognised, or
+ * -2 if the option was recognised, but otherwise bad in
+ * some way (e.g. invalid parameter)
+ *
+ * Note: this function sets internal compressor state, but does not
+ * pass back the results of the parsing other than success/failure.
+ * The zstd_dump_options() function is called later to get the options in
+ * a format suitable for writing to the filesystem.
+ */
+static int zstd_options(char *argv[], int argc)
+{
+ if (strcmp(argv[0], "-Xcompression-level") == 0) {
+ if (argc < 2) {
+ fprintf(stderr, "zstd: -Xcompression-level missing "
+ "compression level\n");
+			fprintf(stderr, "zstd: -Xcompression-level should "
+ "be 1 <= n <= %d\n", ZSTD_maxCLevel());
+ goto failed;
+ }
+
+ compression_level = atoi(argv[1]);
+ if (compression_level < 1 ||
+ compression_level > ZSTD_maxCLevel()) {
+ fprintf(stderr, "zstd: -Xcompression-level invalid, it "
+ "should be 1 <= n <= %d\n", ZSTD_maxCLevel());
+ goto failed;
+ }
+
+ return 1;
+ }
+
+ return -1;
+failed:
+ return -2;
+}
+
+/*
+ * This function is called by mksquashfs to dump the parsed
+ * compressor options in a format suitable for writing to the
+ * compressor options field in the filesystem (stored immediately
+ * after the superblock).
+ *
+ * This function returns a pointer to the compression options structure
+ * to be stored (and the size), or NULL if there are no compression
+ * options.
+ */
+static void *zstd_dump_options(int block_size, int *size)
+{
+ static struct zstd_comp_opts comp_opts;
+
+ /* don't return anything if the options are all default */
+ if (compression_level == ZSTD_DEFAULT_COMPRESSION_LEVEL)
+ return NULL;
+
+ comp_opts.compression_level = compression_level;
+
+ SQUASHFS_INSWAP_COMP_OPTS(&comp_opts);
+
+ *size = sizeof(comp_opts);
+ return &comp_opts;
+}
+
+/*
+ * This function is a helper specifically for the append mode of
+ * mksquashfs. Its purpose is to set the internal compressor state
+ * to the stored compressor options in the passed compressor options
+ * structure.
+ *
+ * In effect this function sets up the compressor options
+ * to the same state they were when the filesystem was originally
+ * generated, this is to ensure on appending, the compressor uses
+ * the same compression options that were used to generate the
+ * original filesystem.
+ *
+ * Note, even if there are no compressor options, this function is still
+ * called with an empty compressor structure (size == 0), to explicitly
+ * set the default options, this is to ensure any user supplied
+ * -X options on the appending mksquashfs command line are over-ridden.
+ *
+ * This function returns 0 on successful extraction of options, and -1 on error.
+ */
+static int zstd_extract_options(int block_size, void *buffer, int size)
+{
+ struct zstd_comp_opts *comp_opts = buffer;
+
+ if (size == 0) {
+ /* Set default values */
+ compression_level = ZSTD_DEFAULT_COMPRESSION_LEVEL;
+ return 0;
+ }
+
+ /* we expect a comp_opts structure of sufficient size to be present */
+ if (size < sizeof(*comp_opts))
+ goto failed;
+
+ SQUASHFS_INSWAP_COMP_OPTS(comp_opts);
+
+ if (comp_opts->compression_level < 1 ||
+ comp_opts->compression_level > ZSTD_maxCLevel()) {
+ fprintf(stderr, "zstd: bad compression level in compression "
+ "options structure\n");
+ goto failed;
+ }
+
+ compression_level = comp_opts->compression_level;
+
+ return 0;
+
+failed:
+ fprintf(stderr, "zstd: error reading stored compressor options from "
+ "filesystem!\n");
+
+ return -1;
+}
+
+static void zstd_display_options(void *buffer, int size)
+{
+ struct zstd_comp_opts *comp_opts = buffer;
+
+ /* we expect a comp_opts structure of sufficient size to be present */
+ if (size < sizeof(*comp_opts))
+ goto failed;
+
+ SQUASHFS_INSWAP_COMP_OPTS(comp_opts);
+
+ if (comp_opts->compression_level < 1 ||
+ comp_opts->compression_level > ZSTD_maxCLevel()) {
+ fprintf(stderr, "zstd: bad compression level in compression "
+ "options structure\n");
+ goto failed;
+ }
+
+ printf("\tcompression-level %d\n", comp_opts->compression_level);
+
+ return;
+
+failed:
+ fprintf(stderr, "zstd: error reading stored compressor options from "
+ "filesystem!\n");
+}
+
+/*
+ * This function is called by mksquashfs to initialise the
+ * compressor, before compress() is called.
+ *
+ * This function returns 0 on success, and -1 on error.
+ */
+static int zstd_init(void **strm, int block_size, int datablock)
+{
+ ZSTD_CCtx *cctx = ZSTD_createCCtx();
+
+ if (!cctx) {
+ fprintf(stderr, "zstd: failed to allocate compression "
+ "context!\n");
+ return -1;
+ }
+
+ *strm = cctx;
+ return 0;
+}
+
+static int zstd_compress(void *strm, void *dest, void *src, int size,
+ int block_size, int *error)
+{
+ const size_t res = ZSTD_compressCCtx((ZSTD_CCtx*)strm, dest, block_size,
+ src, size, compression_level);
+
+ if (ZSTD_isError(res)) {
+ /* FIXME:
+ * zstd does not expose stable error codes. The error enum may
+		 * change between versions. Until upstream zstd stabilizes the
+ * error codes, we have no way of knowing why the error occurs.
+ * zstd shouldn't fail to compress any input unless there isn't
+ * enough output space. We assume that is the cause and return
+ * the special error code for not enough output space.
+ */
+ return 0;
+ }
+
+ return (int)res;
+}
+
+static int zstd_uncompress(void *dest, void *src, int size, int outsize,
+ int *error)
+{
+ const size_t res = ZSTD_decompress(dest, outsize, src, size);
+
+ if (ZSTD_isError(res)) {
+		fprintf(stderr, "zstd: decompress failed, output size %d, "
+			"input size %d\n", outsize, size);
+
+ *error = (int)ZSTD_getErrorCode(res);
+ return -1;
+ }
+
+ return (int)res;
+}
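+
+/*
+ * Illustrative round-trip sketch (not part of the build) of the
+ * one-shot zstd calls used above.  It assumes in/insize hold the
+ * uncompressed input, and cbuff and out are buffers of at least
+ * ZSTD_compressBound(insize) and insize bytes respectively:
+ *
+ *	ZSTD_CCtx *cctx = ZSTD_createCCtx();
+ *	size_t csize = ZSTD_compressCCtx(cctx, cbuff,
+ *		ZSTD_compressBound(insize), in, insize,
+ *		ZSTD_DEFAULT_COMPRESSION_LEVEL);
+ *
+ *	if(!ZSTD_isError(csize)) {
+ *		size_t dsize = ZSTD_decompress(out, insize, cbuff, csize);
+ *
+ *		if(!ZSTD_isError(dsize) && dsize == insize)
+ *			printf("round trip OK\n");
+ *	}
+ *
+ *	ZSTD_freeCCtx(cctx);
+ */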
+
+static void zstd_usage(FILE *stream)
+{
+ fprintf(stream, "\t -Xcompression-level <compression-level>\n");
+ fprintf(stream, "\t\t<compression-level> should be 1 .. %d (default "
+ "%d)\n", ZSTD_maxCLevel(), ZSTD_DEFAULT_COMPRESSION_LEVEL);
+}
+
+
+static int option_args(char *option)
+{
+ if(strcmp(option, "-Xcompression-level") == 0)
+ return 1;
+
+ return 0;
+}
+
+
+struct compressor zstd_comp_ops = {
+ .init = zstd_init,
+ .compress = zstd_compress,
+ .uncompress = zstd_uncompress,
+ .options = zstd_options,
+ .dump_options = zstd_dump_options,
+ .extract_options = zstd_extract_options,
+ .display_options = zstd_display_options,
+ .usage = zstd_usage,
+ .option_args = option_args,
+ .id = ZSTD_COMPRESSION,
+ .name = "zstd",
+ .supported = 1
+};
diff --git a/squashfs-tools/zstd_wrapper.h b/squashfs-tools/zstd_wrapper.h
new file mode 100644
index 0000000..29fb33d
--- /dev/null
+++ b/squashfs-tools/zstd_wrapper.h
@@ -0,0 +1,42 @@
+#ifndef ZSTD_WRAPPER_H
+#define ZSTD_WRAPPER_H
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2017
+ * Phillip Lougher <phillip@squashfs.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * zstd_wrapper.h
+ *
+ */
+
+#include "endian_compat.h"
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+extern unsigned int inswap_le16(unsigned short);
+extern unsigned int inswap_le32(unsigned int);
+
+#define SQUASHFS_INSWAP_COMP_OPTS(s) { \
+ (s)->compression_level = inswap_le32((s)->compression_level); \
+}
+#else
+#define SQUASHFS_INSWAP_COMP_OPTS(s)
+#endif
+
+/* Default compression */
+#define ZSTD_DEFAULT_COMPRESSION_LEVEL 15
+
+struct zstd_comp_opts {
+ int compression_level;
+};
+#endif