author     Daniel Baumann <mail@daniel-baumann.ch>   2015-11-07 07:49:43 +0000
committer  Daniel Baumann <mail@daniel-baumann.ch>   2015-11-07 07:49:43 +0000
commit     083c902a1bf6cd876dd4f683b7936a8246ec0cd9 (patch)
tree       a92dad44f2bad587e83d4080a70414d089367286
parent     Adding debian version 1.10-1. (diff)
Merging upstream version 1.11.
Signed-off-by: Daniel Baumann <mail@daniel-baumann.ch>
-rw-r--r--   AUTHORS                                                  |    7
-rw-r--r--   ChangeLog                                                |   41
-rw-r--r--   Makefile.in                                              |   36
-rw-r--r--   NEWS                                                     |   53
-rw-r--r--   README                                                   |   32
-rwxr-xr-x   configure                                                |   10
-rw-r--r--   decoder.cc                                               |   93
-rw-r--r--   decoder.h                                                |   66
-rw-r--r--   doc/lzip.1                                               |   12
-rw-r--r--   doc/lzip.info                                            |  325
-rw-r--r--   doc/lzip.texinfo                                         |  313
-rw-r--r--   doc/lziprecover.1                                        |   27
-rw-r--r--   encoder.cc                                               |  145
-rw-r--r--   encoder.h                                                |  101
-rw-r--r--   fast_encoder.cc                                          |  378
-rw-r--r--   fast_encoder.h                                           |  176
-rw-r--r--   lzip.h                                                   |  115
-rw-r--r--   lziprecover.cc                                           |  613
-rw-r--r--   main.cc                                                  |  256
-rwxr-xr-x   testsuite/check.sh                                       |   88
-rw-r--r--   testsuite/test.txt (renamed from testsuite/test1)        |    0
-rw-r--r--   testsuite/test_bad1.lz                                   |  bin 0 -> 11548 bytes
-rw-r--r--   testsuite/test_bad2.lz                                   |  bin 0 -> 11548 bytes
-rw-r--r--   testsuite/test_bad3.lz                                   |  bin 0 -> 11548 bytes
-rw-r--r--   testsuite/test_bad4.lz                                   |  bin 0 -> 11548 bytes
-rw-r--r--   testsuite/test_bad5.lz                                   |  bin 0 -> 11548 bytes
-rw-r--r--   testsuite/test_sync.lz                                   |  bin 0 -> 11658 bytes
-rw-r--r--   testsuite/test_v0.lz (renamed from testsuite/test1.lz)   |  bin 11540 -> 11540 bytes
-rw-r--r--   testsuite/test_v1.lz                                     |  bin 0 -> 11548 bytes
-rw-r--r--   testsuite/unzcrash.cc                                    |   13
30 files changed, 2278 insertions, 622 deletions
diff --git a/AUTHORS b/AUTHORS
index 7555d5e..ada24df 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,4 +1,7 @@
Lzip was written by Antonio Diaz Diaz.
-Lzip implements a simplified version of the LZMA algorithm.
-The original LZMA algorithm was designed by Igor Pavlov.
+The ideas embodied in lzip are due to (at least) the following people:
+Abraham Lempel and Jacob Ziv (for the LZ algorithm), Andrey Markov (for
+the definition of Markov chains), G.N.N. Martin (for the definition of
+range encoding), Igor Pavlov (for putting all the above together in
+LZMA), and Julian Seward (for bzip2's CLI and the idea of unzcrash).
diff --git a/ChangeLog b/ChangeLog
index 0ae12fd..7d0eed3 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,38 @@
+2010-09-16 Antonio Diaz Diaz <ant_diaz@teleline.es>
+
+ * Version 1.11 released.
+ * Added new option `-0' which produces a compression speed and
+ ratio comparable to those of `gzip -9'.
+ * fast_encoder.h fast_encoder.cc: New files.
+ * main.cc: Match length limit set by options -1 to -8 has been
+ reduced to extend range of use towards gzip. Lower numbers now
+ compress less but faster. (-1 now takes 43% less time for only
+ 20% larger compressed size).
+ * encoder.cc: Compression of option -9 has been slightly increased.
+ * lziprecover.cc: Added new option "--merge" which tries to
+ produce a correct file merging the good parts of two or more
+ damaged copies.
+ * lziprecover.cc: Added new option "--repair" for repairing a
+ 1-byte error in single-member files.
+ * decoder.cc (decode_member): Detect file errors earlier to
+ improve efficiency of lziprecover's new repair capability.
+ This change also prevents (harmless) access to uninitialized
+ memory when decompressing a corrupt file.
+ * lziprecover.cc: Added new option "--force".
+ * lziprecover.cc: Added new option "--output".
+ * lziprecover.cc: Added new option "--split" to select the until
+ now only operation of splitting multimember files.
+ * lziprecover.cc: If no operation is specified, warn the user
+ and do nothing.
+ * main.cc: Fixed warning about fchown's return value being ignored.
+ * decoder.cc: "-tvvvv" now also shows compression ratio.
+ * main.cc: Set stdin/stdout in binary mode on MSVC and OS2.
+ * New examples have been added to the manual.
+ * testsuite: "test1" renamed to "test.txt"
+ * Matchfinder types HC4 (4 bytes hash-chain) and HT4 (4 bytes
+ hash-table) have been tested and found no better than the
+ current BT4.
+
2010-04-05 Antonio Diaz Diaz <ant_diaz@teleline.es>
* Version 1.10 released.
@@ -13,7 +48,7 @@
2010-01-17 Antonio Diaz Diaz <ant_diaz@teleline.es>
* Version 1.9 released.
- * main.cc (main): return at least 1 if closing stdout fails.
+ * main.cc (main): Return at least 1 if closing stdout fails.
* Makefile.in: Added "--name" option to help2man invocation.
* testsuite/check.sh: Use "test1" instead of "COPYING" for testing.
@@ -140,5 +175,5 @@
Copyright (C) 2008, 2009, 2010 Antonio Diaz Diaz.
This file is a collection of facts, and thus it is not copyrightable,
-but just in case, I give you unlimited permission to copy, distribute
-and modify it.
+but just in case, you have unlimited permission to copy, distribute and
+modify it.
diff --git a/Makefile.in b/Makefile.in
index e88a1b6..c366eb3 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -6,7 +6,8 @@ INSTALL_DATA = $(INSTALL) -p -m 644
INSTALL_DIR = $(INSTALL) -d -m 755
SHELL = /bin/sh
-objs = arg_parser.o decoder.o encoder.o main.o
+objs = arg_parser.o decoder.o encoder.o fast_encoder.o main.o
+recobjs = arg_parser.o decoder.o lziprecover.o
.PHONY : all install install-info install-man install-strip \
@@ -21,8 +22,11 @@ $(progname) : $(objs)
$(progname)_profiled : $(objs)
$(CXX) $(LDFLAGS) -pg -o $(progname)_profiled $(objs)
-lziprecover : arg_parser.o lziprecover.o
- $(CXX) $(LDFLAGS) -o lziprecover arg_parser.o lziprecover.o
+lziprecover : $(recobjs)
+ $(CXX) $(LDFLAGS) -o lziprecover $(recobjs)
+
+unzcrash : testsuite/unzcrash.cc
+ $(CXX) $(CPPFLAGS) $(CXXFLAGS) $(LDFLAGS) -o $@ $<
main.o : main.cc
$(CXX) $(CPPFLAGS) $(CXXFLAGS) -DPROGVERSION=\"$(pkgversion)\" -c -o $@ $<
@@ -33,12 +37,14 @@ lziprecover.o : lziprecover.cc
%.o : %.cc
$(CXX) $(CPPFLAGS) $(CXXFLAGS) -c -o $@ $<
-$(objs) : Makefile
-arg_parser.o : arg_parser.h
-decoder.o : lzip.h decoder.h
-encoder.o : lzip.h encoder.h
-main.o : arg_parser.h lzip.h decoder.h encoder.h
-lziprecover.o : arg_parser.h lzip.h Makefile
+$(objs) : Makefile
+arg_parser.o : arg_parser.h
+decoder.o : lzip.h decoder.h
+encoder.o : lzip.h encoder.h
+fast_encoder.o : lzip.h encoder.h fast_encoder.h
+main.o : arg_parser.h lzip.h decoder.h encoder.h fast_encoder.h
+lziprecover.o : arg_parser.h lzip.h decoder.h Makefile
+
doc : info man
@@ -50,18 +56,18 @@ $(VPATH)/doc/$(pkgname).info : $(VPATH)/doc/$(pkgname).texinfo
man : $(VPATH)/doc/$(progname).1 $(VPATH)/doc/lziprecover.1
$(VPATH)/doc/$(progname).1 : $(progname)
- help2man -n 'data compressor based on the LZMA algorithm' \
+ help2man -n 'reduces the size of files' \
-o $(VPATH)/doc/$(progname).1 ./$(progname)
$(VPATH)/doc/lziprecover.1 : lziprecover
- help2man -n 'recover undamaged members from lzip files' \
+ help2man -n 'recovers data from damaged lzip files' \
-o $(VPATH)/doc/lziprecover.1 --no-info ./lziprecover
Makefile : $(VPATH)/configure $(VPATH)/Makefile.in
./config.status
check : all
- @$(VPATH)/testsuite/check.sh $(VPATH)/testsuite
+ @$(VPATH)/testsuite/check.sh $(VPATH)/testsuite $(pkgversion)
install : all install-info install-man
if [ ! -d "$(DESTDIR)$(bindir)" ] ; then $(INSTALL_DIR) "$(DESTDIR)$(bindir)" ; fi
@@ -109,8 +115,10 @@ dist : doc
$(DISTNAME)/doc/$(pkgname).texinfo \
$(DISTNAME)/doc/lziprecover.1 \
$(DISTNAME)/testsuite/check.sh \
- $(DISTNAME)/testsuite/test1 \
- $(DISTNAME)/testsuite/test1.lz \
+ $(DISTNAME)/testsuite/test.txt \
+ $(DISTNAME)/testsuite/test_bad[1-5].lz \
+ $(DISTNAME)/testsuite/test_sync.lz \
+ $(DISTNAME)/testsuite/test_v[01].lz \
$(DISTNAME)/testsuite/unzcrash.cc \
$(DISTNAME)/*.h \
$(DISTNAME)/*.cc
diff --git a/NEWS b/NEWS
index b70b85b..e2a2c99 100644
--- a/NEWS
+++ b/NEWS
@@ -1,12 +1,49 @@
-Changes in version 1.10:
+Changes in version 1.11:
-File specified with option "-o" is now created with write permission for
-all if umask allows it, and deleted if lzip is interrupted by the user.
+The option "-0", which produces a compression speed and ratio comparable
+to those of "gzip -9", has been added to lzip.
-Regular files are now open in binary mode in non-POSIX platforms
-defining the O_BINARY macro.
+Match length limit set by options -1 to -8 has been reduced to extend
+range of use towards gzip. Lower numbers now compress less but faster.
+(-1 now takes 43% less time for only 20% larger compressed size).
-Dictionary size for options -2, -3, -4 and -8 has been changed to
-improve linearity of compressed sizes.
+(Note that the bidimensional parameter space of LZMA can't be mapped to
+a linear scale optimal for all files. If your files are large, very
+repetitive, etc, you may need to use the --match-length and
+--dictionary-size options directly to achieve optimal performance).
-Compiler warnings produced by over-optimization (-O3) have been fixed.
+Compression of option -9 has been slightly increased.
+
+The option "--merge", which tries to produce a correct file merging the
+good parts of two or more damaged copies, has been added to lziprecover.
+
+(To give you an idea of --merge's possibilities, when merging two copies
+each of them with one damaged area affecting 1 percent of the copy, the
+probability of obtaining a correct file is about 98 percent. With three
+such copies the probability rises to 99.97 percent. For large files with
+small errors, the probability approaches 100 percent even with only two
+copies).
+
+The option "--repair", which repairs any 1-byte error in the lzma stream
+of lzip files, has been added to lziprecover.
+
+Decompressor has been modified to detect file errors earlier, improving
+efficiency of lziprecover's new repair capability.
+
+The option "--force", which forces overwriting of existing output files,
+has been added to lziprecover.
+
+The option "--output", which sets the name of the output file, has been
+added to lziprecover.
+
+The option "--split", which selects the until now only operation of
+splitting multimember files, has been added to lziprecover.
+
+Lziprecover now needs the operation to be specified. Else it warns the
+user and does nothing.
+
+A warning about fchown's return value being ignored has been fixed.
+
+"lzip -tvvvv" now also shows file compression ratio.
+
+Some new examples have been added to the manual.
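
The 98% and 99.97% figures quoted above are consistent with a simple
back-of-envelope model (an assumption made here for illustration, not taken
from the sources): each copy carries a single damaged area covering a
fraction p = 0.01 of the file, placed independently and uniformly at random,
and a merge fails only when all the damaged areas overlap at some byte:

  \[ P_{\mathrm{fail}}(2) \approx 2p - p^{2} \approx 0.02, \qquad
     P_{\mathrm{fail}}(3) \approx 3p^{2} - 2p^{3} \approx 0.0003 \]

which gives success probabilities of about 98% and 99.97% respectively. For
large files with proportionally smaller damaged areas, p shrinks and the
success probability approaches 100% even with only two copies.
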
diff --git a/README b/README
index f0d0470..7c158d9 100644
--- a/README
+++ b/README
@@ -6,6 +6,10 @@ gzip or bzip2. Lzip decompresses almost as fast as gzip and compresses
better than bzip2, which makes it well suited for software distribution
and data archiving.
+Lziprecover is a data recovery tool for lzip compressed files able to
+repair slightly damaged files, recover badly damaged files from two or
+more copies, and extract undamaged members from multi-member files.
+
Lzip replaces every file given in the command line with a compressed
version of itself, with the name "original_name.lz". Each compressed
file has the same modification date, permissions, and, when possible,
@@ -30,9 +34,9 @@ standard input. This allows the direct creation of multivolume
compressed tar archives.
Lzip will automatically use the smallest possible dictionary size
-without exceeding the given limit. It is important to appreciate that
-the decompression memory requirement is affected at compression time by
-the choice of dictionary size limit.
+without exceeding the given limit. Keep in mind that the decompression
+memory requirement is affected at compression time by the choice of
+dictionary size limit.
As a self-check for your protection, lzip stores in the member trailer
the 32-bit CRC of the original data and the size of the original data,
@@ -46,14 +50,18 @@ something is wrong. It can't help you recover the original uncompressed
data.
Lzip implements a simplified version of the LZMA (Lempel-Ziv-Markov
-chain-Algorithm) algorithm. The original LZMA algorithm was designed by
-Igor Pavlov.
+chain-Algorithm) algorithm. The high compression of LZMA comes from
+combining two basic, well-proven compression ideas: sliding dictionaries
+(LZ77/78) and markov models (the thing used by every compression
+algorithm that uses a range encoder or similar order-0 entropy coder as
+its last stage) with segregation of contexts according to what the bits
+are used for.
-The high compression of LZMA comes from combining two basic, well-proven
-compression ideas: sliding dictionaries (LZ77/78) and markov models (the
-thing used by every compression algorithm that uses a range encoder or
-similar order-0 entropy coder as its last stage) with segregation of
-contexts according to what the bits are used for.
+The ideas embodied in lzip are due to (at least) the following people:
+Abraham Lempel and Jacob Ziv (for the LZ algorithm), Andrey Markov (for
+the definition of Markov chains), G.N.N. Martin (for the definition of
+range encoding), Igor Pavlov (for putting all the above together in
+LZMA), and Julian Seward (for bzip2's CLI and the idea of unzcrash).
Copyright (C) 2008, 2009, 2010 Antonio Diaz Diaz.
@@ -62,5 +70,5 @@ This file is free documentation: you have unlimited permission to copy,
distribute and modify it.
The file Makefile.in is a data file used by configure to produce the
-Makefile. It has the same copyright owner and permissions that this
-file.
+Makefile. It has the same copyright owner and permissions that configure
+itself.
diff --git a/configure b/configure
index a3a4609..eee97dc 100755
--- a/configure
+++ b/configure
@@ -1,16 +1,16 @@
#! /bin/sh
-# configure script for Lzip - A data compressor based on the LZMA algorithm
+# configure script for Lzip - Data compressor based on the LZMA algorithm
# Copyright (C) 2008, 2009, 2010 Antonio Diaz Diaz.
#
# This configure script is free software: you have unlimited permission
# to copy, distribute and modify it.
#
-# Date of this version: 2010-04-05
+# Date of this version: 2010-09-16
args=
no_create=
pkgname=lzip
-pkgversion=1.10
+pkgversion=1.11
progname=lzip
srctrigger=lzip.h
@@ -80,7 +80,7 @@ while [ -n "$1" ] ; do
bindir=`echo ${optarg} | sed -e 's,/$,,'` ;;
--datadir* | --da*)
datadir=`echo ${optarg} | sed -e 's,/$,,'` ;;
- --infodir* | --in*)
+ --infodir* | --inf*)
infodir=`echo ${optarg} | sed -e 's,/$,,'` ;;
--mandir* | --ma*)
mandir=`echo ${optarg} | sed -e 's,/$,,'` ;;
@@ -166,7 +166,7 @@ echo "CXXFLAGS = ${CXXFLAGS}"
echo "LDFLAGS = ${LDFLAGS}"
rm -f Makefile
cat > Makefile << EOF
-# Makefile for Lzip - A data compressor based on the LZMA algorithm
+# Makefile for Lzip - Data compressor based on the LZMA algorithm
# Copyright (C) 2008, 2009, 2010 Antonio Diaz Diaz.
# This file was generated automatically by configure. Do not edit.
#
diff --git a/decoder.cc b/decoder.cc
index 5aa1205..03d2ca6 100644
--- a/decoder.cc
+++ b/decoder.cc
@@ -1,4 +1,4 @@
-/* Lzip - A data compressor based on the LZMA algorithm
+/* Lzip - Data compressor based on the LZMA algorithm
Copyright (C) 2008, 2009, 2010 Antonio Diaz Diaz.
This program is free software: you can redistribute it and/or modify
@@ -32,18 +32,17 @@
const CRC32 crc32;
-
bool Range_decoder::read_block()
{
if( !at_stream_end )
{
- stream_pos = readblock( infd_, buffer, buffer_size );
- if( stream_pos != buffer_size && errno ) throw Error( "read error" );
+ stream_pos = readblock( infd, buffer, buffer_size );
+ if( stream_pos != buffer_size && errno ) throw Error( "Read error" );
at_stream_end = ( stream_pos < buffer_size );
partial_member_pos += pos;
pos = 0;
}
- return !finished();
+ return pos < stream_pos;
}
@@ -53,9 +52,9 @@ void LZ_decoder::flush_data()
if( size > 0 )
{
crc32.update( crc_, buffer + stream_pos, size );
- if( outfd_ >= 0 &&
- writeblock( outfd_, buffer + stream_pos, size ) != size )
- throw Error( "write error" );
+ if( outfd >= 0 &&
+ writeblock( outfd, buffer + stream_pos, size ) != size )
+ throw Error( "Write error" );
if( pos >= buffer_size ) { partial_data_pos += pos; pos = 0; }
stream_pos = pos;
}
@@ -66,7 +65,7 @@ bool LZ_decoder::verify_trailer( const Pretty_print & pp ) const
{
File_trailer trailer;
const int trailer_size = File_trailer::size( member_version );
- const long long member_size = member_position() + trailer_size;
+ const long long member_size = range_decoder.member_position() + trailer_size;
bool error = false;
for( int i = 0; i < trailer_size && !error; ++i )
@@ -76,10 +75,10 @@ bool LZ_decoder::verify_trailer( const Pretty_print & pp ) const
else
{
error = true;
- if( verbosity >= 0 )
+ if( pp.verbosity() >= 0 )
{
pp();
- std::fprintf( stderr, "trailer truncated at trailer position %d;"
+ std::fprintf( stderr, "Trailer truncated at trailer position %d;"
" some checks may fail.\n", i );
}
for( ; i < trailer_size; ++i ) trailer.data[i] = 0;
@@ -89,58 +88,73 @@ bool LZ_decoder::verify_trailer( const Pretty_print & pp ) const
if( !range_decoder.code_is_zero() )
{
error = true;
- if( verbosity >= 0 )
- {
- pp();
- std::fprintf( stderr, "range_decoder final code is not zero.\n" );
- }
+ pp( "Range decoder final code is not zero" );
}
if( trailer.data_crc() != crc() )
{
error = true;
- if( verbosity >= 0 )
+ if( pp.verbosity() >= 0 )
{
pp();
- std::fprintf( stderr, "crc mismatch; trailer says %08X, data crc is %08X.\n",
+ std::fprintf( stderr, "CRC mismatch; trailer says %08X, data CRC is %08X.\n",
(unsigned int)trailer.data_crc(), (unsigned int)crc() );
}
}
if( trailer.data_size() != data_position() )
{
error = true;
- if( verbosity >= 0 )
+ if( pp.verbosity() >= 0 )
{
pp();
- std::fprintf( stderr, "data size mismatch; trailer says %lld, data size is %lld (0x%llX).\n",
+ std::fprintf( stderr, "Data size mismatch; trailer says %lld, data size is %lld (0x%llX).\n",
trailer.data_size(), data_position(), data_position() );
}
}
if( trailer.member_size() != member_size )
{
error = true;
- if( verbosity >= 0 )
+ if( pp.verbosity() >= 0 )
{
pp();
- std::fprintf( stderr, "member size mismatch; trailer says %lld, member size is %lld (0x%llX).\n",
+ std::fprintf( stderr, "Member size mismatch; trailer says %lld, member size is %lld (0x%llX).\n",
trailer.member_size(), member_size, member_size );
}
}
- if( !error && verbosity >= 3 )
- std::fprintf( stderr, "data crc %08X, data size %9lld, member size %8lld. ",
+ if( !error && pp.verbosity() >= 4 && data_position() > 0 && member_size > 0 )
+ std::fprintf( stderr, "%6.3f:1, %6.3f bits/byte, %5.2f%% saved. ",
+ (double)data_position() / member_size,
+ ( 8.0 * member_size ) / data_position(),
+ 100.0 * ( 1.0 - ( (double)member_size / data_position() ) ) );
+ if( !error && pp.verbosity() >= 3 )
+ std::fprintf( stderr, "data CRC %08X, data size %9lld, member size %8lld. ",
(unsigned int)trailer.data_crc(), trailer.data_size(),
trailer.member_size() );
return !error;
}
- // Return value: 0 = OK, 1 = decoder error, 2 = unexpected EOF,
- // 3 = trailer error, 4 = unknown marker found.
+// Return value: 0 = OK, 1 = decoder error, 2 = unexpected EOF,
+// 3 = trailer error, 4 = unknown marker found.
int LZ_decoder::decode_member( const Pretty_print & pp )
{
+ Bit_model bm_match[State::states][pos_states];
+ Bit_model bm_rep[State::states];
+ Bit_model bm_rep0[State::states];
+ Bit_model bm_rep1[State::states];
+ Bit_model bm_rep2[State::states];
+ Bit_model bm_len[State::states][pos_states];
+ Bit_model bm_dis_slot[max_dis_states][1<<dis_slot_bits];
+ Bit_model bm_dis[modeled_distances-end_dis_model+1];
+ Bit_model bm_align[dis_align_size];
+
unsigned int rep0 = 0; // rep[0-3] latest four distances
unsigned int rep1 = 0; // used for efficient coding of
unsigned int rep2 = 0; // repeated distances
unsigned int rep3 = 0;
+
+ Len_decoder len_decoder;
+ Len_decoder rep_match_len_decoder;
+ Literal_decoder literal_decoder;
State state;
range_decoder.load();
@@ -150,7 +164,7 @@ int LZ_decoder::decode_member( const Pretty_print & pp )
const int pos_state = data_position() & pos_state_mask;
if( range_decoder.decode_bit( bm_match[state()][pos_state] ) == 0 )
{
- const uint8_t prev_byte = get_byte( 0 );
+ const uint8_t prev_byte = get_prev_byte();
if( state.is_char() )
put_byte( literal_decoder.decode( range_decoder, prev_byte ) );
else
@@ -164,12 +178,7 @@ int LZ_decoder::decode_member( const Pretty_print & pp )
if( range_decoder.decode_bit( bm_rep[state()] ) == 1 )
{
len = 0;
- if( range_decoder.decode_bit( bm_rep0[state()] ) == 0 )
- {
- if( range_decoder.decode_bit( bm_len[state()][pos_state] ) == 0 )
- { len = 1; state.set_short_rep(); }
- }
- else
+ if( range_decoder.decode_bit( bm_rep0[state()] ) == 1 )
{
unsigned int distance;
if( range_decoder.decode_bit( bm_rep1[state()] ) == 0 )
@@ -184,15 +193,20 @@ int LZ_decoder::decode_member( const Pretty_print & pp )
rep1 = rep0;
rep0 = distance;
}
+ else
+ {
+ if( range_decoder.decode_bit( bm_len[state()][pos_state] ) == 0 )
+ { state.set_short_rep(); len = 1; }
+ }
if( len == 0 )
{
- len = min_match_len + rep_match_len_decoder.decode( range_decoder, pos_state );
state.set_rep();
+ len = min_match_len + rep_match_len_decoder.decode( range_decoder, pos_state );
}
}
else
{
- unsigned int rep0_saved = rep0;
+ const unsigned int rep0_saved = rep0;
len = min_match_len + len_decoder.decode( range_decoder, pos_state );
const int dis_slot = range_decoder.decode_tree( bm_dis_slot[get_dis_state(len)], dis_slot_bits );
if( dis_slot < start_dis_model ) rep0 = dis_slot;
@@ -219,19 +233,20 @@ int LZ_decoder::decode_member( const Pretty_print & pp )
{
range_decoder.load(); continue;
}
- if( verbosity >= 0 )
+ if( pp.verbosity() >= 0 )
{
pp();
- std::fprintf( stderr, "unsupported marker code `%d'.\n", len );
+ std::fprintf( stderr, "Unsupported marker code `%d'.\n", len );
}
return 4;
}
- if( rep0 >= (unsigned int)dictionary_size )
- { flush_data(); return 1; }
}
}
rep3 = rep2; rep2 = rep1; rep1 = rep0_saved;
state.set_match();
+ if( rep0 >= (unsigned int)dictionary_size ||
+ ( rep0 >= (unsigned int)pos && !partial_data_pos ) )
+ { flush_data(); return 1; }
}
copy_block( rep0, len );
}
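
The decode_bit() calls that drive decode_member() above perform adaptive
binary range decoding. The following is a rough, self-contained sketch of
that idea, not lzip's actual code; the 11-bit probability scale, the
shift-by-5 adaptation step and the 5-byte load are the usual LZMA-family
constants, assumed here for illustration:

  // Toy adaptive binary range decoder (LZMA-family constants assumed).
  #include <cstddef>
  #include <stdint.h>
  #include <vector>

  struct Toy_bit_model
    {
    int probability;
    Toy_bit_model() : probability( 1 << 10 ) {}   // start at 1/2 of the 11-bit scale
    };

  class Toy_range_decoder
    {
    uint32_t code, range;
    const std::vector<uint8_t> & in;
    std::size_t pos;
    uint8_t next() { return ( pos < in.size() ) ? in[pos++] : 0; }

  public:
    explicit Toy_range_decoder( const std::vector<uint8_t> & buf )
      : code( 0 ), range( 0xFFFFFFFFU ), in( buf ), pos( 0 )
      { for( int i = 0; i < 5; ++i ) code = ( code << 8 ) | next(); }   // like load()

    int decode_bit( Toy_bit_model & bm )
      {
      const uint32_t bound = ( range >> 11 ) * bm.probability;
      int bit;
      if( code < bound )            // bit 0: keep the lower part, raise probability
        { range = bound; bm.probability += ( ( 1 << 11 ) - bm.probability ) >> 5; bit = 0; }
      else                          // bit 1: keep the upper part, lower probability
        { range -= bound; code -= bound; bm.probability -= bm.probability >> 5; bit = 1; }
      if( range < ( 1U << 24 ) )    // renormalize by shifting in another input byte
        { range <<= 8; code = ( code << 8 ) | next(); }
      return bit;
      }
    };

Each bit model is selected by context (coder state, position bits, previous
byte), which is the "segregation of contexts" described in the README above.
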
diff --git a/decoder.h b/decoder.h
index 36090c5..9fd1423 100644
--- a/decoder.h
+++ b/decoder.h
@@ -1,4 +1,4 @@
-/* Lzip - A data compressor based on the LZMA algorithm
+/* Lzip - Data compressor based on the LZMA algorithm
Copyright (C) 2008, 2009, 2010 Antonio Diaz Diaz.
This program is free software: you can redistribute it and/or modify
@@ -20,17 +20,17 @@ class Range_decoder
enum { buffer_size = 16384 };
long long partial_member_pos;
uint8_t * const buffer; // input buffer
- int pos;
+ int pos; // current pos in buffer
int stream_pos; // when reached, a new block must be read
uint32_t code;
uint32_t range;
- const int infd_; // input file descriptor
+ const int infd; // input file descriptor
bool at_stream_end;
bool read_block();
public:
- Range_decoder( const int infd )
+ Range_decoder( const int ifd )
:
partial_member_pos( 0 ),
buffer( new uint8_t[buffer_size] ),
@@ -38,13 +38,13 @@ public:
stream_pos( 0 ),
code( 0 ),
range( 0xFFFFFFFFU ),
- infd_( infd ),
+ infd( ifd ),
at_stream_end( false ) {}
~Range_decoder() { delete[] buffer; }
bool code_is_zero() const throw() { return ( code == 0 ); }
- bool finished() const throw() { return at_stream_end && pos >= stream_pos; }
+ bool finished() { return pos >= stream_pos && !read_block(); }
long long member_position() const throw()
{ return partial_member_pos + pos; }
void reset_member_position() throw()
@@ -52,11 +52,11 @@ public:
uint8_t get_byte()
{
- if( pos >= stream_pos && !read_block() ) return 0;
+ if( finished() ) return 0;
return buffer[pos++];
}
- void load() throw()
+ void load()
{
code = 0;
range = 0xFFFFFFFFU;
@@ -185,36 +185,32 @@ public:
uint8_t decode_matched( Range_decoder & range_decoder,
const uint8_t prev_byte, const uint8_t match_byte )
- { return range_decoder.decode_matched( bm_literal[lstate(prev_byte)], match_byte ); }
+ { return range_decoder.decode_matched( bm_literal[lstate(prev_byte)],
+ match_byte ); }
};
class LZ_decoder
{
long long partial_data_pos;
- const int member_version;
const int dictionary_size;
const int buffer_size;
- uint8_t * const buffer;
- int pos;
+ uint8_t * const buffer; // output buffer
+ int pos; // current pos in buffer
int stream_pos; // first byte not yet written to file
uint32_t crc_;
- const int outfd_; // output file descriptor
-
- Bit_model bm_match[State::states][pos_states];
- Bit_model bm_rep[State::states];
- Bit_model bm_rep0[State::states];
- Bit_model bm_rep1[State::states];
- Bit_model bm_rep2[State::states];
- Bit_model bm_len[State::states][pos_states];
- Bit_model bm_dis_slot[max_dis_states][1<<dis_slot_bits];
- Bit_model bm_dis[modeled_distances-end_dis_model];
- Bit_model bm_align[dis_align_size];
-
+ const int outfd; // output file descriptor
+ const int member_version;
Range_decoder & range_decoder;
- Len_decoder len_decoder;
- Len_decoder rep_match_len_decoder;
- Literal_decoder literal_decoder;
+
+ void flush_data();
+ bool verify_trailer( const Pretty_print & pp ) const;
+
+ uint8_t get_prev_byte() const throw()
+ {
+ const int i = ( ( pos > 0 ) ? pos : buffer_size ) - 1;
+ return buffer[i];
+ }
uint8_t get_byte( const int distance ) const throw()
{
@@ -238,7 +234,7 @@ class LZ_decoder
std::memcpy( buffer + pos, buffer + i, len );
pos += len;
}
- else for( ; len > 0 ; --len )
+ else for( ; len > 0; --len )
{
buffer[pos] = buffer[i];
if( ++pos >= buffer_size ) flush_data();
@@ -246,31 +242,27 @@ class LZ_decoder
}
}
- void flush_data();
- bool verify_trailer( const Pretty_print & pp ) const;
-
public:
- LZ_decoder( const File_header & header, Range_decoder & rdec, const int outfd )
+ LZ_decoder( const File_header & header, Range_decoder & rdec, const int ofd )
:
partial_data_pos( 0 ),
- member_version( header.version() ),
dictionary_size( header.dictionary_size() ),
buffer_size( std::max( 65536, dictionary_size ) ),
buffer( new uint8_t[buffer_size] ),
pos( 0 ),
stream_pos( 0 ),
crc_( 0xFFFFFFFFU ),
- outfd_( outfd ),
+ outfd( ofd ),
+ member_version( header.version() ),
range_decoder( rdec )
{ buffer[buffer_size-1] = 0; } // prev_byte of first_byte
~LZ_decoder() { delete[] buffer; }
uint32_t crc() const throw() { return crc_ ^ 0xFFFFFFFFU; }
- int decode_member( const Pretty_print & pp );
- long long member_position() const throw()
- { return range_decoder.member_position(); }
long long data_position() const throw()
{ return partial_data_pos + pos; }
+
+ int decode_member( const Pretty_print & pp );
};
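
The trailer fields compared in verify_trailer() above (data CRC, data size,
member size) are stored at the end of each member in little endian order.
Below is a minimal sketch of reading them, assuming a 20-byte version-1
trailer laid out as CRC32 (4 bytes), data size (8 bytes) and member size
(8 bytes); the field sizes are an assumption for illustration, the real
layout lives in lzip.h's File_trailer:

  // Toy parser for an assumed 20-byte version-1 member trailer.
  #include <stdint.h>

  struct Toy_trailer { uint32_t data_crc; uint64_t data_size; uint64_t member_size; };

  inline uint64_t le_value( const uint8_t * const p, const int bytes )
    {
    uint64_t v = 0;
    for( int i = bytes - 1; i >= 0; --i ) v = ( v << 8 ) | p[i];   // little endian
    return v;
    }

  inline Toy_trailer parse_trailer_v1( const uint8_t ( & buf )[20] )
    {
    Toy_trailer t;
    t.data_crc    = (uint32_t)le_value( buf, 4 );
    t.data_size   = le_value( buf + 4, 8 );
    t.member_size = le_value( buf + 12, 8 );
    return t;
    }

A decoder can then compare t.data_crc against the running CRC32 of the
decompressed data, and the two sizes against the byte counts it kept while
decoding: the three mismatch checks reported by verify_trailer().
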
diff --git a/doc/lzip.1 b/doc/lzip.1
index 3254f33..670d15e 100644
--- a/doc/lzip.1
+++ b/doc/lzip.1
@@ -1,12 +1,12 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.37.1.
-.TH LZIP "1" "April 2010" "Lzip 1.10" "User Commands"
+.TH LZIP "1" "September 2010" "Lzip 1.11" "User Commands"
.SH NAME
-Lzip \- data compressor based on the LZMA algorithm
+Lzip \- reduces the size of files
.SH SYNOPSIS
.B lzip
[\fIoptions\fR] [\fIfiles\fR]
.SH DESCRIPTION
-Lzip \- A data compressor based on the LZMA algorithm.
+Lzip \- Data compressor based on the LZMA algorithm.
.SH OPTIONS
.TP
\fB\-h\fR, \fB\-\-help\fR
@@ -31,7 +31,7 @@ overwrite existing output files
keep (don't delete) input files
.TP
\fB\-m\fR, \fB\-\-match\-length=\fR<n>
-set match length limit in bytes [80]
+set match length limit in bytes [36]
.TP
\fB\-o\fR, \fB\-\-output=\fR<file>
if reading stdin, place the output into <file>
@@ -51,11 +51,11 @@ test compressed file integrity
\fB\-v\fR, \fB\-\-verbose\fR
be verbose (a 2nd \fB\-v\fR gives more)
.TP
-\fB\-1\fR .. \fB\-9\fR
+\fB\-0\fR .. \fB\-9\fR
set compression level [default 6]
.TP
\fB\-\-fast\fR
-alias for \fB\-1\fR
+alias for \fB\-0\fR
.TP
\fB\-\-best\fR
alias for \fB\-9\fR
diff --git a/doc/lzip.info b/doc/lzip.info
index 7914b91..00cf933 100644
--- a/doc/lzip.info
+++ b/doc/lzip.info
@@ -11,18 +11,19 @@ File: lzip.info, Node: Top, Next: Introduction, Up: (dir)
Lzip Manual
***********
-This manual is for Lzip (version 1.10, 5 April 2010).
+This manual is for Lzip (version 1.11, 16 September 2010).
* Menu:
-* Introduction:: Purpose and features of lzip
-* Algorithm:: How lzip compresses the data
-* Invoking Lzip:: Command line interface
-* File Format:: Detailed format of the compressed file
-* Examples:: A small tutorial with examples
-* Lziprecover:: Recovering data from damaged compressed files
-* Problems:: Reporting bugs
-* Concept Index:: Index of concepts
+* Introduction:: Purpose and features of lzip
+* Algorithm:: How lzip compresses the data
+* Invoking Lzip:: Command line interface
+* File Format:: Detailed format of the compressed file
+* Examples:: A small tutorial with examples
+* Lziprecover:: Recovering data from damaged compressed files
+* Invoking Lziprecover:: Command line interface
+* Problems:: Reporting bugs
+* Concept Index:: Index of concepts
Copyright (C) 2008, 2009, 2010 Antonio Diaz Diaz.
@@ -68,11 +69,14 @@ multivolume compressed tar archives.
The amount of memory required for compression is about 5 MiB plus 1
or 2 times the dictionary size limit (1 if input file size is less than
dictionary size limit, else 2) plus 8 times the dictionary size really
-used. For decompression it is a little more than the dictionary size
-really used. Lzip will automatically use the smallest possible
-dictionary size without exceeding the given limit. It is important to
-appreciate that the decompression memory requirement is affected at
-compression time by the choice of dictionary size limit.
+used. The option `-0' is special and only requires about 1.5 MiB at
+most. The amount of memory required for decompression is a little more
+than the dictionary size really used.
+
+ Lzip will automatically use the smallest possible dictionary size
+without exceeding the given limit. Keep in mind that the decompression
+memory requirement is affected at compression time by the choice of
+dictionary size limit.
When decompressing, lzip attempts to guess the name for the
decompressed file from that of the compressed file as follows:
@@ -104,14 +108,12 @@ File: lzip.info, Node: Algorithm, Next: Invoking Lzip, Prev: Introduction, U
***********
Lzip implements a simplified version of the LZMA (Lempel-Ziv-Markov
-chain-Algorithm) algorithm. The original LZMA algorithm was designed by
-Igor Pavlov.
-
- The high compression of LZMA comes from combining two basic,
-well-proven compression ideas: sliding dictionaries (LZ77/78) and
-markov models (the thing used by every compression algorithm that uses
-a range encoder or similar order-0 entropy coder as its last stage)
-with segregation of contexts according to what the bits are used for.
+chain-Algorithm) algorithm. The high compression of LZMA comes from
+combining two basic, well-proven compression ideas: sliding dictionaries
+(LZ77/78) and markov models (the thing used by every compression
+algorithm that uses a range encoder or similar order-0 entropy coder as
+its last stage) with segregation of contexts according to what the bits
+are used for.
Lzip is a two stage compressor. The first stage is a Lempel-Ziv
coder, which reduces redundancy by translating chunks of data to their
@@ -153,6 +155,13 @@ member or volume size limits are reached.
10) If there are more data to compress, go back to step 1.
+
+The ideas embodied in lzip are due to (at least) the following people:
+Abraham Lempel and Jacob Ziv (for the LZ algorithm), Andrey Markov (for
+the definition of Markov chains), G.N.N. Martin (for the definition of
+range encoding), Igor Pavlov (for putting all the above together in
+LZMA), and Julian Seward (for bzip2's CLI and the idea of unzcrash).
+

File: lzip.info, Node: Invoking Lzip, Next: File Format, Prev: Algorithm, Up: Top
@@ -178,7 +187,7 @@ The format for running lzip is:
Produce a multimember file and set the member size limit to SIZE
bytes. Minimum member size limit is 100kB. Small member size may
degrade compression ratio, so use it only when needed. The default
- is to produce single member files.
+ is to produce single-member files.
`--stdout'
`-c'
@@ -202,9 +211,10 @@ The format for running lzip is:
`--match-length=LENGTH'
`-m LENGTH'
- Set the match length limit in bytes. Valid values range from 5 to
- 273. Larger values usually give better compression ratios but
- longer compression times.
+ Set the match length limit in bytes. After a match this long is
+ found, the search is finished. Valid values range from 5 to 273.
+ Larger values usually give better compression ratios but longer
+ compression times.
`--output=FILE'
`-o FILE'
@@ -227,6 +237,11 @@ The format for running lzip is:
dictionary sizes are quantized. If the specified size does not
match one of the valid sizes, it will be rounded upwards.
+ For maximum compression you should use a dictionary size limit as
+ large as possible, but keep in mind that the decompression memory
+ requirement is affected at compression time by the choice of
+ dictionary size limit.
+
`--volume-size=SIZE'
`-S SIZE'
Split the compressed output into several volume files with names
@@ -240,28 +255,35 @@ The format for running lzip is:
`-t'
Check integrity of the specified file(s), but don't decompress
them. This really performs a trial decompression and throws away
- the result. Use `-tvv' or `-tvvv' to see information about the
- file.
+ the result. Use it together with `-v' to see information about
+ the file.
`--verbose'
`-v'
Verbose mode. Show the compression ratio for each file processed.
Further -v's increase the verbosity level.
-`-1 .. -9'
+`-0 .. -9'
Set the compression parameters (dictionary size and match length
limit) as shown in the table below. Note that `-9' can be much
- slower than `-1'. These options have no effect when decompressing.
+ slower than `-0'. These options have no effect when decompressing.
+
+ The bidimensional parameter space of LZMA can't be mapped to a
+ linear scale optimal for all files. If your files are large, very
+ repetitive, etc, you may need to use the `--match-length' and
+ `--dictionary-size' options directly to achieve optimal
+ performance.
Level Dictionary size Match length limit
- -1 1 MiB 10 bytes
- -2 1.5 MiB 12 bytes
- -3 2 MiB 17 bytes
- -4 3 MiB 26 bytes
- -5 4 MiB 44 bytes
- -6 8 MiB 80 bytes
- -7 16 MiB 108 bytes
- -8 24 MiB 163 bytes
+ -0 64 KiB 16 bytes
+ -1 1 MiB 5 bytes
+ -2 1.5 MiB 6 bytes
+ -3 2 MiB 8 bytes
+ -4 3 MiB 12 bytes
+ -5 4 MiB 20 bytes
+ -6 8 MiB 36 bytes
+ -7 16 MiB 68 bytes
+ -8 24 MiB 132 bytes
-9 32 MiB 273 bytes
`--fast'
@@ -316,7 +338,7 @@ additional information before, between, or after them.
All multibyte values are stored in little endian order.
`ID string'
- A four byte string, identifying the member type, with the value
+ A four byte string, identifying the lzip format, with the value
"LZIP".
`VN (version number, 1 byte)'
@@ -353,9 +375,12 @@ File: lzip.info, Node: Examples, Next: Lziprecover, Prev: File Format, Up: T
5 A small tutorial with examples
********************************
-WARNING! If your data is important, give the `--keep' option to lzip
-and do not remove the original file until you verify the compressed
-file with a command like `lzip -cd file.lz | cmp file -'.
+WARNING! Even if lzip is bug-free, other causes may result in a corrupt
+compressed file (bugs in the system libraries, memory errors, etc).
+Therefore, if the data you are going to compress is important give the
+`--keep' option to lzip and do not remove the original file until you
+verify the compressed file with a command like
+`lzip -cd file.lz | cmp file -'.
Example 1: Replace a regular file with its compressed version file.lz
@@ -365,77 +390,198 @@ and show the compression ratio.
Example 2: Like example 1 but the created file.lz is multimember with a
-member size of 1MiB.
+member size of 1MiB. The compression ratio is not shown.
lzip -b 1MiB file
-Example 3: Compress a whole floppy in /dev/fd0 and send the output to
+Example 3: Restore a regular file from its compressed version file.lz.
+If the operation is successful, file.lz is removed.
+
+ lzip -d file.lz
+
+
+Example 4: Verify the integrity of the compressed file file.lz and show
+status.
+
+ lzip -tv file.lz
+
+
+Example 5: Compress a whole floppy in /dev/fd0 and send the output to
file.lz.
lzip -c /dev/fd0 > file.lz
-Example 4: Create a multivolume compressed tar archive with a volume
+Example 6: Decompress file.lz partially until 10KiB of decompressed data
+are produced.
+
+ lzip -cd file.lz | dd bs=1024 count=10
+
+
+Example 7: Create a multivolume compressed tar archive with a volume
size of 1440KiB.
tar -c some_directory | lzip -S 1440KiB -o volume_name
-Example 5: Extract a multivolume compressed tar archive.
+Example 8: Extract a multivolume compressed tar archive.
lzip -cd volume_name*.lz | tar -xf -
-Example 6: Create a multivolume compressed backup of a big database file
+Example 9: Create a multivolume compressed backup of a big database file
with a volume size of 650MB, where each volume is a multimember file
with a member size of 32MiB.
- lzip -b 32MiB -S 650MB big_database
-
-
-Example 7: Recover the first volume of those created in example 6 from
-two copies, `big_database1_00001.lz' and `big_database2_00001.lz', with
-member 00007 damaged in the first copy and member 00018 damaged in the
-second copy. (Indented lines are lzip error messages).
-
- lziprecover big_database1_00001.lz
- lziprecover big_database2_00001.lz
- lzip -t rec*big_database1_00001.lz
- rec00007big_database1_00001.lz: crc mismatch
- lzip -t rec*big_database2_00001.lz
- rec00018big_database1_00001.lz: crc mismatch
- cp rec00007big_database2_00001.lz rec00007big_database1_00001.lz
- cat rec*big_database1_00001.lz > big_database3_00001.lz
+ lzip -b 32MiB -S 650MB big_db
+
+
+Example 10: Recover a compressed backup from two copies on CD-ROM (see
+the GNU ddrescue manual for details about ddrescue)
+
+ ddrescue -b2048 /dev/cdrom cdimage1 logfile1
+ mount -t iso9660 -o loop,ro cdimage1 /mnt/cdimage
+ cp /mnt/cdimage/backup.tar.lz rescued1.tar.lz
+ umount /mnt/cdimage
+ (insert second copy in the CD drive)
+ ddrescue -b2048 /dev/cdrom cdimage2 logfile2
+ mount -t iso9660 -o loop,ro cdimage2 /mnt/cdimage
+ cp /mnt/cdimage/backup.tar.lz rescued2.tar.lz
+ umount /mnt/cdimage
+ lziprecover -m -o rescued.tar.lz rescued1.tar.lz rescued2.tar.lz
+
+
+Example 11: Recover the first volume of those created in example 9 from
+two copies, `big_db1_00001.lz' and `big_db2_00001.lz', with member
+00007 damaged in the first copy, member 00018 damaged in the second
+copy, and member 00012 damaged in both copies. (Indented lines are
+abridged error messages from lzip/lziprecover). Two correct copies are
+produced and compared.
+
+ lziprecover -s big_db1_00001.lz
+ lziprecover -s big_db2_00001.lz
+ lzip -t rec*big_db1_00001.lz
+ rec00007big_db1_00001.lz: crc mismatch
+ rec00012big_db1_00001.lz: crc mismatch
+ lzip -t rec*big_db2_00001.lz
+ rec00012big_db2_00001.lz: crc mismatch
+ rec00018big_db2_00001.lz: crc mismatch
+ lziprecover -m rec00012big_db1_00001.lz rec00012big_db2_00001.lz
+ Input files merged successfully
+ cp rec00007big_db2_00001.lz rec00007big_db1_00001.lz
+ cp rec00012big_db1_00001_fixed.lz rec00012big_db1_00001.lz
+ cp rec00012big_db1_00001_fixed.lz rec00012big_db2_00001.lz
+ cp rec00018big_db1_00001.lz rec00018big_db2_00001.lz
+ cat rec*big_db1_00001.lz > big_db3_00001.lz
+ cat rec*big_db2_00001.lz > big_db4_00001.lz
+ zcmp big_db3_00001.lz big_db4_00001.lz

-File: lzip.info, Node: Lziprecover, Next: Problems, Prev: Examples, Up: Top
+File: lzip.info, Node: Lziprecover, Next: Invoking Lziprecover, Prev: Examples, Up: Top
6 Lziprecover
*************
-Lziprecover is a program that searches for members in .lz files, and
-writes each member in its own .lz file. You can then use `lzip -t' to
-test the integrity of the resulting files, and decompress those which
-are undamaged.
+Lziprecover is a data recovery tool for lzip compressed files able to
+repair slightly damaged files, recover badly damaged files from two or
+more copies, and extract undamaged members from multi-member files.
+
+ Lziprecover takes as arguments the names of the damaged files and
+writes zero or more recovered files depending on the operation selected
+and whether the recovery succeeded or not. The damaged files themselves
+are never modified.
- Data from damaged members can be partially recovered writing it to
-stdout as shown in the following example (the resulting file may contain
-garbage data at the end):
+ If the files are too damaged for lziprecover to repair them, data
+from damaged members can be partially recovered writing it to stdout as
+shown in the following example (the resulting file may contain garbage
+data at the end):
lzip -cd rec00001file.lz > rec00001file
- Lziprecover takes a single argument, the name of the damaged file,
-and writes a number of files `rec00001file.lz', `rec00002file.lz', etc,
-containing the extracted members. The output filenames are designed so
-that the use of wildcards in subsequent processing, for example,
-`lzip -dc rec*file.lz > recovered_data', processes the files in the
-correct order.
+ If the cause of file corruption is damaged media, the combination GNU
+ddrescue + lziprecover is the best option for recovering data from
+multiple damaged copies. *Note ddrescue-example::, for an example.
+
+
+File: lzip.info, Node: Invoking Lziprecover, Next: Problems, Prev: Lziprecover, Up: Top
+
+7 Invoking Lziprecover
+**********************
+
+The format for running lziprecover is:
+
+ lziprecover [OPTIONS] [FILES]
+
+ Lziprecover supports the following options:
+
+`--help'
+`-h'
+ Print an informative help message describing the options and exit.
+
+`--version'
+`-V'
+ Print the version number of lziprecover on the standard output and
+ exit.
+
+`--force'
+`-f'
+ Force overwrite of output file.
+
+`--merge'
+`-m'
+ Try to produce a correct file merging the good parts of two or more
+ damaged copies. The copies must be single-member files. The merge
+ will fail if the copies have too many damaged areas or if the same
+ byte is damaged in all copies. If successful, a repaired copy is
+ written to the file `FILE_fixed.lz'.
+
+ To give you an idea of its possibilities, when merging two copies
+ each of them with one damaged area affecting 1 percent of the
+ copy, the probability of obtaining a correct file is about 98
+ percent. With three such copies the probability rises to 99.97
+ percent. For large files with small errors, the probability
+ approaches 100 percent even with only two copies.
+
+`--output=FILE'
+`-o FILE'
+ Place the output into `FILE' instead of into `FILE_fixed.lz'.
+
+ If splitting, the names of the files produced are in the form
+ `rec00001FILE', etc.
+
+`--quiet'
+`-q'
+ Quiet operation. Suppress all messages.
+
+`--repair'
+`-R'
+ Try to repair a small error, affecting only one byte, in a
+ single-member FILE. If successful, a repaired copy is written to
+ the file `FILE_fixed.lz'. `FILE' is not modified at all.
+
+`--split'
+`-s'
+ Search for members in `FILE' and write each member in its own
+ `.lz' file. You can then use `lzip -t' to test the integrity of
+ the resulting files, decompress those which are undamaged, and try
+ to repair or partially decompress those which are damaged.
+
+ The names of the files produced are in the form `rec00001FILE.lz',
+ `rec00002FILE.lz', etc, and are designed so that the use of
+ wildcards in subsequent processing, for example,
+ `lzip -cd rec*FILE.lz > recovered_data', processes the files in
+ the correct order.
+
+`--verbose'
+`-v'
+ Verbose mode. Further -v's increase the verbosity level.
+

-File: lzip.info, Node: Problems, Next: Concept Index, Prev: Lziprecover, Up: Top
+File: lzip.info, Node: Problems, Next: Concept Index, Prev: Invoking Lziprecover, Up: Top
-7 Reporting Bugs
+8 Reporting Bugs
****************
There are probably bugs in lzip. There are certainly errors and
@@ -462,7 +608,8 @@ Concept Index
* file format: File Format. (line 6)
* getting help: Problems. (line 6)
* introduction: Introduction. (line 6)
-* invoking: Invoking Lzip. (line 6)
+* invoking lzip: Invoking Lzip. (line 6)
+* invoking lziprecover: Invoking Lziprecover. (line 6)
* lziprecover: Lziprecover. (line 6)
* options: Invoking Lzip. (line 6)
* usage: Invoking Lzip. (line 6)
@@ -472,13 +619,15 @@ Concept Index

Tag Table:
Node: Top224
-Node: Introduction897
-Node: Algorithm4207
-Node: Invoking Lzip6433
-Node: File Format10780
-Node: Examples12734
-Node: Lziprecover14572
-Node: Problems15487
-Node: Concept Index16012
+Node: Introduction1029
+Node: Algorithm4421
+Node: Invoking Lzip6939
+Node: File Format11911
+Node: Examples13865
+Ref: ddrescue-example15619
+Node: Lziprecover17412
+Node: Invoking Lziprecover18465
+Node: Problems20826
+Node: Concept Index21360

End Tag Table
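
As a worked instance of the compression memory formula in the manual above
(about 5 MiB, plus the dictionary size limit once or twice, plus 8 times the
dictionary size really used), take option `-6' (8 MiB dictionary) on an
input larger than 8 MiB, so the full dictionary is used:

  \[ 5\,\mathrm{MiB} + 2 \times 8\,\mathrm{MiB} + 8 \times 8\,\mathrm{MiB}
     \approx 85\,\mathrm{MiB} \]

Decompressing the same file then needs only a little more than the 8 MiB
dictionary really used, and `-0' needs only about 1.5 MiB for compression.
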
diff --git a/doc/lzip.texinfo b/doc/lzip.texinfo
index 9cacd16..5c62d2f 100644
--- a/doc/lzip.texinfo
+++ b/doc/lzip.texinfo
@@ -5,8 +5,8 @@
@finalout
@c %**end of header
-@set UPDATED 5 April 2010
-@set VERSION 1.10
+@set UPDATED 16 September 2010
+@set VERSION 1.11
@dircategory Data Compression
@direntry
@@ -16,7 +16,7 @@
@titlepage
@title Lzip
-@subtitle A data compressor based on the LZMA algorithm
+@subtitle Data compressor based on the LZMA algorithm
@subtitle for Lzip version @value{VERSION}, @value{UPDATED}
@author by Antonio Diaz Diaz
@@ -24,7 +24,9 @@
@vskip 0pt plus 1filll
@end titlepage
+@ifnothtml
@contents
+@end ifnothtml
@node Top
@top
@@ -32,14 +34,15 @@
This manual is for Lzip (version @value{VERSION}, @value{UPDATED}).
@menu
-* Introduction:: Purpose and features of lzip
-* Algorithm:: How lzip compresses the data
-* Invoking Lzip:: Command line interface
-* File Format:: Detailed format of the compressed file
-* Examples:: A small tutorial with examples
-* Lziprecover:: Recovering data from damaged compressed files
-* Problems:: Reporting bugs
-* Concept Index:: Index of concepts
+* Introduction:: Purpose and features of lzip
+* Algorithm:: How lzip compresses the data
+* Invoking Lzip:: Command line interface
+* File Format:: Detailed format of the compressed file
+* Examples:: A small tutorial with examples
+* Lziprecover:: Recovering data from damaged compressed files
+* Invoking Lziprecover:: Command line interface
+* Problems:: Reporting bugs
+* Concept Index:: Index of concepts
@end menu
@sp 1
@@ -85,11 +88,14 @@ compressed tar archives.
The amount of memory required for compression is about 5 MiB plus 1 or 2
times the dictionary size limit (1 if input file size is less than
dictionary size limit, else 2) plus 8 times the dictionary size really
-used. For decompression it is a little more than the dictionary size
-really used. Lzip will automatically use the smallest possible
-dictionary size without exceeding the given limit. It is important to
-appreciate that the decompression memory requirement is affected at
-compression time by the choice of dictionary size limit.
+used. The option @samp{-0} is special and only requires about 1.5 MiB at
+most. The amount of memory required for decompression is a little more
+than the dictionary size really used.
+
+Lzip will automatically use the smallest possible dictionary size
+without exceeding the given limit. Keep in mind that the decompression
+memory requirement is affected at compression time by the choice of
+dictionary size limit.
When decompressing, lzip attempts to guess the name for the decompressed
file from that of the compressed file as follows:
@@ -122,14 +128,12 @@ caused lzip to panic.
@cindex algorithm
Lzip implements a simplified version of the LZMA (Lempel-Ziv-Markov
-chain-Algorithm) algorithm. The original LZMA algorithm was designed by
-Igor Pavlov.
-
-The high compression of LZMA comes from combining two basic, well-proven
-compression ideas: sliding dictionaries (LZ77/78) and markov models (the
-thing used by every compression algorithm that uses a range encoder or
-similar order-0 entropy coder as its last stage) with segregation of
-contexts according to what the bits are used for.
+chain-Algorithm) algorithm. The high compression of LZMA comes from
+combining two basic, well-proven compression ideas: sliding dictionaries
+(LZ77/78) and markov models (the thing used by every compression
+algorithm that uses a range encoder or similar order-0 entropy coder as
+its last stage) with segregation of contexts according to what the bits
+are used for.
Lzip is a two stage compressor. The first stage is a Lempel-Ziv coder,
which reduces redundancy by translating chunks of data to their
@@ -171,10 +175,18 @@ member or volume size limits are reached.
10) If there are more data to compress, go back to step 1.
+@sp 1
+@noindent
+The ideas embodied in lzip are due to (at least) the following people:
+Abraham Lempel and Jacob Ziv (for the LZ algorithm), Andrey Markov (for
+the definition of Markov chains), G.N.N. Martin (for the definition of
+range encoding), Igor Pavlov (for putting all the above together in
+LZMA), and Julian Seward (for bzip2's CLI and the idea of unzcrash).
+
@node Invoking Lzip
@chapter Invoking Lzip
-@cindex invoking
+@cindex invoking lzip
@cindex options
@cindex usage
@cindex version
@@ -201,7 +213,7 @@ Print the version number of lzip on the standard output and exit.
Produce a multimember file and set the member size limit to @var{size}
bytes. Minimum member size limit is 100kB. Small member size may degrade
compression ratio, so use it only when needed. The default is to produce
-single member files.
+single-member files.
@item --stdout
@itemx -c
@@ -223,9 +235,9 @@ Keep (don't delete) input files during compression or decompression.
@item --match-length=@var{length}
@itemx -m @var{length}
-Set the match length limit in bytes. Valid values range from 5 to 273.
-Larger values usually give better compression ratios but longer
-compression times.
+Set the match length limit in bytes. After a match this long is found,
+the search is finished. Valid values range from 5 to 273. Larger values
+usually give better compression ratios but longer compression times.
@item --output=@var{file}
@itemx -o @var{file}
@@ -248,6 +260,10 @@ member without exceeding this limit. Note that dictionary sizes are
quantized. If the specified size does not match one of the valid sizes,
it will be rounded upwards.
+For maximum compression you should use a dictionary size limit as large
+as possible, but keep in mind that the decompression memory requirement
+is affected at compression time by the choice of dictionary size limit.
+
@item --volume-size=@var{size}
@itemx -S @var{size}
Split the compressed output into several volume files with names
@@ -260,28 +276,35 @@ volume size may degrade compression ratio, so use it only when needed.
@itemx -t
Check integrity of the specified file(s), but don't decompress them.
This really performs a trial decompression and throws away the result.
-Use @samp{-tvv} or @samp{-tvvv} to see information about the file.
+Use it together with @samp{-v} to see information about the file.
@item --verbose
@itemx -v
Verbose mode. Show the compression ratio for each file processed.
Further -v's increase the verbosity level.
-@item -1 .. -9
+@item -0 .. -9
Set the compression parameters (dictionary size and match length limit)
as shown in the table below. Note that @samp{-9} can be much slower than
-@samp{-1}. These options have no effect when decompressing.
+@samp{-0}. These options have no effect when decompressing.
+
+The bidimensional parameter space of LZMA can't be mapped to a linear
+scale optimal for all files. If your files are large, very repetitive,
+etc, you may need to use the @samp{--match-length} and
+@samp{--dictionary-size} options directly to achieve optimal
+performance.
@multitable {Level} {Dictionary size} {Match length limit}
@item Level @tab Dictionary size @tab Match length limit
-@item -1 @tab 1 MiB @tab 10 bytes
-@item -2 @tab 1.5 MiB @tab 12 bytes
-@item -3 @tab 2 MiB @tab 17 bytes
-@item -4 @tab 3 MiB @tab 26 bytes
-@item -5 @tab 4 MiB @tab 44 bytes
-@item -6 @tab 8 MiB @tab 80 bytes
-@item -7 @tab 16 MiB @tab 108 bytes
-@item -8 @tab 24 MiB @tab 163 bytes
+@item -0 @tab 64 KiB @tab 16 bytes
+@item -1 @tab 1 MiB @tab 5 bytes
+@item -2 @tab 1.5 MiB @tab 6 bytes
+@item -3 @tab 2 MiB @tab 8 bytes
+@item -4 @tab 3 MiB @tab 12 bytes
+@item -5 @tab 4 MiB @tab 20 bytes
+@item -6 @tab 8 MiB @tab 36 bytes
+@item -7 @tab 16 MiB @tab 68 bytes
+@item -8 @tab 24 MiB @tab 132 bytes
@item -9 @tab 32 MiB @tab 273 bytes
@end multitable
@@ -346,7 +369,7 @@ All multibyte values are stored in little endian order.
@table @samp
@item ID string
-A four byte string, identifying the member type, with the value "LZIP".
+A four byte string, identifying the lzip format, with the value "LZIP".
@item VN (version number, 1 byte)
Just in case something needs to be modified in the future. Valid values
@@ -381,9 +404,12 @@ safe recovery of undamaged members from multimember files.
@chapter A small tutorial with examples
@cindex examples
-WARNING! If your data is important, give the @samp{--keep} option to
-lzip and do not remove the original file until you verify the compressed
-file with a command like @samp{lzip -cd file.lz | cmp file -}.
+WARNING! Even if lzip is bug-free, other causes may result in a corrupt
+compressed file (bugs in the system libraries, memory errors, etc).
+Therefore, if the data you are going to compress is important give the
+@samp{--keep} option to lzip and do not remove the original file until
+you verify the compressed file with a command like @w{@samp{lzip -cd
+file.lz | cmp file -}}.
@sp 1
@noindent
@@ -397,7 +423,7 @@ lzip -v file
@sp 1
@noindent
Example 2: Like example 1 but the created file.lz is multimember with a
-member size of 1MiB.
+member size of 1MiB. The compression ratio is not shown.
@example
lzip -b 1MiB file
@@ -405,7 +431,25 @@ lzip -b 1MiB file
@sp 1
@noindent
-Example 3: Compress a whole floppy in /dev/fd0 and send the output to
+Example 3: Restore a regular file from its compressed version file.lz.
+If the operation is successful, file.lz is removed.
+
+@example
+lzip -d file.lz
+@end example
+
+@sp 1
+@noindent
+Example 4: Verify the integrity of the compressed file file.lz and show
+status.
+
+@example
+lzip -tv file.lz
+@end example
+
+@sp 1
+@noindent
+Example 5: Compress a whole floppy in /dev/fd0 and send the output to
file.lz.
@example
@@ -414,7 +458,16 @@ lzip -c /dev/fd0 > file.lz
@sp 1
@noindent
-Example 4: Create a multivolume compressed tar archive with a volume
+Example 6: Decompress file.lz partially until 10KiB of decompressed data
+are produced.
+
+@example
+lzip -cd file.lz | dd bs=1024 count=10
+@end example
+
+@sp 1
+@noindent
+Example 7: Create a multivolume compressed tar archive with a volume
size of 1440KiB.
@example
@@ -423,7 +476,7 @@ tar -c some_directory | lzip -S 1440KiB -o volume_name
@sp 1
@noindent
-Example 5: Extract a multivolume compressed tar archive.
+Example 8: Extract a multivolume compressed tar archive.
@example
lzip -cd volume_name*.lz | tar -xf -
@@ -431,31 +484,60 @@ lzip -cd volume_name*.lz | tar -xf -
@sp 1
@noindent
-Example 6: Create a multivolume compressed backup of a big database file
+Example 9: Create a multivolume compressed backup of a big database file
with a volume size of 650MB, where each volume is a multimember file
with a member size of 32MiB.
@example
-lzip -b 32MiB -S 650MB big_database
+lzip -b 32MiB -S 650MB big_db
@end example
@sp 1
+@anchor{ddrescue-example}
@noindent
-Example 7: Recover the first volume of those created in example 6 from
-two copies, @samp{big_database1_00001.lz} and
-@samp{big_database2_00001.lz}, with member 00007 damaged in the first
-copy and member 00018 damaged in the second copy. (Indented lines are
-lzip error messages).
+Example 10: Recover a compressed backup from two copies on CD-ROM (see
+the GNU ddrescue manual for details about ddrescue)
@example
-lziprecover big_database1_00001.lz
-lziprecover big_database2_00001.lz
-lzip -t rec*big_database1_00001.lz
- rec00007big_database1_00001.lz: crc mismatch
-lzip -t rec*big_database2_00001.lz
- rec00018big_database1_00001.lz: crc mismatch
-cp rec00007big_database2_00001.lz rec00007big_database1_00001.lz
-cat rec*big_database1_00001.lz > big_database3_00001.lz
+ddrescue -b2048 /dev/cdrom cdimage1 logfile1
+mount -t iso9660 -o loop,ro cdimage1 /mnt/cdimage
+cp /mnt/cdimage/backup.tar.lz rescued1.tar.lz
+umount /mnt/cdimage
+ (insert second copy in the CD drive)
+ddrescue -b2048 /dev/cdrom cdimage2 logfile2
+mount -t iso9660 -o loop,ro cdimage2 /mnt/cdimage
+cp /mnt/cdimage/backup.tar.lz rescued2.tar.lz
+umount /mnt/cdimage
+lziprecover -m -o rescued.tar.lz rescued1.tar.lz rescued2.tar.lz
+@end example
+
+@sp 1
+@noindent
+Example 11: Recover the first volume of those created in example 9 from
+two copies, @samp{big_db1_00001.lz} and @samp{big_db2_00001.lz}, with
+member 00007 damaged in the first copy, member 00018 damaged in the
+second copy, and member 00012 damaged in both copies. (Indented lines
+are abridged error messages from lzip/lziprecover). Two correct copies
+are produced and compared.
+
+@example
+lziprecover -s big_db1_00001.lz
+lziprecover -s big_db2_00001.lz
+lzip -t rec*big_db1_00001.lz
+ rec00007big_db1_00001.lz: crc mismatch
+ rec00012big_db1_00001.lz: crc mismatch
+lzip -t rec*big_db2_00001.lz
+ rec00012big_db2_00001.lz: crc mismatch
+ rec00018big_db2_00001.lz: crc mismatch
+lziprecover -m rec00012big_db1_00001.lz rec00012big_db2_00001.lz
+ Input files merged successfully
+cp rec00007big_db2_00001.lz rec00007big_db1_00001.lz
+cp rec00012big_db1_00001_fixed.lz rec00012big_db1_00001.lz
+cp rec00012big_db1_00001_fixed.lz rec00012big_db2_00001.lz
+cp rec00018big_db1_00001.lz rec00018big_db2_00001.lz
+cat rec*big_db1_00001.lz > big_db3_00001.lz
+cat rec*big_db2_00001.lz > big_db4_00001.lz
+zcmp big_db3_00001.lz big_db4_00001.lz
@end example
@@ -463,25 +545,104 @@ cat rec*big_database1_00001.lz > big_database3_00001.lz
@chapter Lziprecover
@cindex lziprecover
-Lziprecover is a program that searches for members in .lz files, and
-writes each member in its own .lz file. You can then use
-@w{@samp{lzip -t}} to test the integrity of the resulting files, and
-decompress those which are undamaged.
+Lziprecover is a data recovery tool for lzip compressed files, able to
+repair slightly damaged files, recover badly damaged files from two or
+more copies, and extract undamaged members from multimember files.
-Data from damaged members can be partially recovered writing it to
-stdout as shown in the following example (the resulting file may contain
-garbage data at the end):
+Lziprecover takes as arguments the names of the damaged files and writes
+zero or more recovered files depending on the operation selected and
+whether the recovery succeeded or not. The damaged files themselves are
+never modified.
+
+If the files are too damaged for lziprecover to repair them, data from
+damaged members can be partially recovered by writing it to stdout as
+shown in the following example (the resulting file may contain garbage
+data at the end):
@example
lzip -cd rec00001file.lz > rec00001file
@end example
-Lziprecover takes a single argument, the name of the damaged file, and
-writes a number of files @samp{rec00001file.lz}, @samp{rec00002file.lz},
-etc, containing the extracted members. The output filenames are designed
-so that the use of wildcards in subsequent processing, for example,
-@w{@samp{lzip -dc rec*file.lz > recovered_data}}, processes the files in
-the correct order.
+If the cause of file corruption is damaged media, the combination GNU
+ddrescue + lziprecover is the best option for recovering data from
+multiple damaged copies. @xref{ddrescue-example}, for an example.
+
+@node Invoking Lziprecover
+@chapter Invoking Lziprecover
+@cindex invoking lziprecover
+
+The format for running lziprecover is:
+
+@example
+lziprecover [@var{options}] [@var{files}]
+@end example
+
+Lziprecover supports the following options:
+
+@table @samp
+@item --help
+@itemx -h
+Print an informative help message describing the options and exit.
+
+@item --version
+@itemx -V
+Print the version number of lziprecover on the standard output and exit.
+
+@item --force
+@itemx -f
+Force overwrite of output file.
+
+@item --merge
+@itemx -m
+Try to produce a correct file by merging the good parts of two or more
+damaged copies. The copies must be single-member files. The merge will
+fail if the copies have too many damaged areas or if the same byte is
+damaged in all copies. If successful, a repaired copy is written to the
+file @samp{@var{file}_fixed.lz}.
+
+To give you an idea of its possibilities, when merging two copies, each
+with one damaged area affecting 1 percent of the copy, the probability
+of obtaining a correct file is about 98 percent. With three such copies
+the probability rises to 99.97 percent. For large files with small
+errors, the probability approaches 100 percent even with only two
+copies.
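+
+These figures can be checked with a quick simulation. The sketch below
+is only illustrative (it is not part of lzip); it assumes that each copy
+has a single damaged area covering 1 percent of the file at a uniformly
+random position, and that the merge fails exactly when some byte is
+damaged in every copy. With a million trials it comes out close to the
+figures quoted above.
+
+@example
+#include <cstdio>
+#include <random>
+
+int main()
+  {
+  const double p = 0.01;                // damaged fraction per copy
+  std::mt19937_64 gen( 0 );
+  std::uniform_real_distribution< double > start( 0.0, 1.0 - p );
+  for( int copies = 2; copies <= 3; ++copies )
+    {
+    const long trials = 1000000;
+    long failures = 0;
+    for( long t = 0; t < trials; ++t )
+      {
+      double lo = 0.0, hi = 1.0;        // intersection of damaged areas
+      for( int c = 0; c < copies; ++c )
+        {
+        const double s = start( gen );
+        if( s > lo ) lo = s;
+        if( s + p < hi ) hi = s + p;
+        }
+      if( lo < hi ) ++failures;         // some byte damaged in every copy
+      }
+    std::printf( "%d copies: success probability ~ %.4f\n",
+                 copies, 1.0 - (double)failures / trials );
+    }
+  return 0;
+  }
+@end example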
+
+@item --output=@var{file}
+@itemx -o @var{file}
+Place the output into @samp{@var{file}} instead of into
+@samp{@var{file}_fixed.lz}.
+
+If splitting, the names of the files produced are in the form
+@samp{rec00001@var{file}}, etc.
+
+@item --quiet
+@itemx -q
+Quiet operation. Suppress all messages.
+
+@item --repair
+@itemx -R
+Try to repair a small error, affecting only one byte, in a single-member
+@var{file}. If successful, a repaired copy is written to the file
+@samp{@var{file}_fixed.lz}. @samp{@var{file}} is not modified at all.
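+
+Such a repair is feasible because a single wrong byte can be found by
+brute force. The sketch below is only an illustration, not lziprecover's
+actual code; @samp{verify} stands for a caller-supplied test that
+decompresses the member and checks the integrity information stored in
+its trailer, and restricting @samp{begin}/@samp{end} to the bytes around
+the position where decoding first fails is what keeps such a search
+affordable.
+
+@example
+#include <functional>
+#include <vector>
+#include <stdint.h>
+
+typedef std::function< bool ( const std::vector< uint8_t > & ) > Verifier;
+
+// Returns the position of the repaired byte, or -1 if no single-byte
+// change makes 'verify' accept the member.
+long repair_one_byte( std::vector< uint8_t > & member, const Verifier & verify,
+                      const long begin, const long end )
+  {
+  for( long i = begin; i < end && i < (long)member.size(); ++i )
+    {
+    const uint8_t orig = member[i];
+    for( int v = 0; v < 256; ++v )
+      {
+      if( (uint8_t)v == orig ) continue;
+      member[i] = (uint8_t)v;
+      if( verify( member ) ) return i;  // 'member' now holds the fixed copy
+      }
+    member[i] = orig;                   // restore and try the next position
+    }
+  return -1;
+  }
+@end example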
+
+@item --split
+@itemx -s
+Search for members in @samp{@var{file}} and write each member in its own
+@samp{.lz} file. You can then use @samp{lzip -t} to test the integrity
+of the resulting files, decompress those which are undamaged, and try to
+repair or partially decompress those which are damaged.
+
+The names of the files produced are in the form
+@samp{rec00001@var{file}.lz}, @samp{rec00002@var{file}.lz}, etc, and are
+designed so that the use of wildcards in subsequent processing, for
+example, @w{@samp{lzip -cd rec*@var{file}.lz > recovered_data}},
+processes the files in the correct order.
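+
+The ordering works because the member index is fixed-width and
+zero-padded, so lexicographic (wildcard) order matches member order. A
+trivial illustration (the format string below is just an illustration,
+not lziprecover's source):
+
+@example
+#include <cstdio>
+
+int main()
+  {
+  for( int member = 1; member <= 3; ++member )
+    std::printf( "rec%05dfile.lz\n", member );  // rec00001file.lz, ...
+  return 0;
+  }
+@end example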
+
+@item --verbose
+@itemx -v
+Verbose mode. Further -v's increase the verbosity level.
+
+@end table
@node Problems
diff --git a/doc/lziprecover.1 b/doc/lziprecover.1
index 0a60e62..95ddb29 100644
--- a/doc/lziprecover.1
+++ b/doc/lziprecover.1
@@ -1,16 +1,12 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.37.1.
-.TH LZIPRECOVER "1" "April 2010" "Lziprecover 1.10" "User Commands"
+.TH LZIPRECOVER "1" "September 2010" "Lziprecover 1.11" "User Commands"
.SH NAME
-Lziprecover \- recover undamaged members from lzip files
+Lziprecover \- recovers data from damaged lzip files
.SH SYNOPSIS
.B lziprecover
-[\fIoptions\fR] \fIfile\fR
+[\fIoptions\fR] [\fIfiles\fR]
.SH DESCRIPTION
-Lziprecover \- Member recoverer program for lzip compressed files.
-.PP
-Searches for members in .lz files, and writes each member in its own .lz
-file. You can then use `lzip \fB\-t\fR' to test the integrity of the resulting
-files, and decompress those which are undamaged.
+Lziprecover \- Data recovery tool for lzip compressed files.
.SH OPTIONS
.TP
\fB\-h\fR, \fB\-\-help\fR
@@ -19,9 +15,24 @@ display this help and exit
\fB\-V\fR, \fB\-\-version\fR
output version information and exit
.TP
+\fB\-f\fR, \fB\-\-force\fR
+overwrite existing output files
+.TP
+\fB\-m\fR, \fB\-\-merge\fR
+correct errors in file using several copies
+.TP
+\fB\-o\fR, \fB\-\-output=\fR<file>
+place the output into <file>
+.TP
\fB\-q\fR, \fB\-\-quiet\fR
suppress all messages
.TP
+\fB\-R\fR, \fB\-\-repair\fR
+try to repair a small error in file
+.TP
+\fB\-s\fR, \fB\-\-split\fR
+split a multimember file into single\-member files
+.TP
\fB\-v\fR, \fB\-\-verbose\fR
be verbose (a 2nd \fB\-v\fR gives more)
.SH "REPORTING BUGS"
diff --git a/encoder.cc b/encoder.cc
index b197e22..e10142a 100644
--- a/encoder.cc
+++ b/encoder.cc
@@ -1,4 +1,4 @@
-/* Lzip - A data compressor based on the LZMA algorithm
+/* Lzip - Data compressor based on the LZMA algorithm
Copyright (C) 2008, 2009, 2010 Antonio Diaz Diaz.
This program is free software: you can redistribute it and/or modify
@@ -33,42 +33,46 @@ Dis_slots dis_slots;
Prob_prices prob_prices;
-bool Matchfinder::read_block() throw()
+bool Matchfinder::read_block()
{
- const int size = buffer_size - stream_pos;
- const int rd = readblock( infd_, buffer + stream_pos, size );
- stream_pos += rd;
- if( rd < size ) at_stream_end = true;
- return ( rd == size || !errno );
+ if( !at_stream_end && stream_pos < buffer_size )
+ {
+ const int size = buffer_size - stream_pos;
+ const int rd = readblock( infd, buffer + stream_pos, size );
+ stream_pos += rd;
+ if( rd != size && errno ) throw Error( "Read error" );
+ at_stream_end = ( rd < size );
+ }
+ return pos < stream_pos;
}
Matchfinder::Matchfinder( const int dict_size, const int len_limit,
- const int infd )
+ const int ifd )
:
partial_data_pos( 0 ),
+ prev_positions( new int32_t[num_prev_positions] ),
pos( 0 ),
cyclic_pos( 0 ),
stream_pos( 0 ),
- infd_( infd ),
match_len_limit_( len_limit ),
- prev_positions( new int32_t[num_prev_positions] ),
+ cycles( ( len_limit < max_match_len ) ? 16 + ( len_limit / 2 ) : 256 ),
+ infd( ifd ),
at_stream_end( false )
{
const int buffer_size_limit = ( 2 * dict_size ) + before_size + after_size;
buffer_size = std::max( 65536, dict_size );
buffer = (uint8_t *)std::malloc( buffer_size );
if( !buffer ) throw std::bad_alloc();
- if( !read_block() ) throw Error( "read error" );
- if( !at_stream_end && buffer_size < buffer_size_limit )
+ if( read_block() && !at_stream_end && buffer_size < buffer_size_limit )
{
buffer_size = buffer_size_limit;
buffer = (uint8_t *)std::realloc( buffer, buffer_size );
if( !buffer ) throw std::bad_alloc();
- if( !read_block() ) throw Error( "read error" );
+ read_block();
}
if( at_stream_end && stream_pos < dict_size )
- dictionary_size_ = std::max( min_dictionary_size, stream_pos );
+ dictionary_size_ = std::max( (int)min_dictionary_size, stream_pos );
else dictionary_size_ = dict_size;
pos_limit = buffer_size;
if( !at_stream_end ) pos_limit -= after_size;
@@ -77,7 +81,7 @@ Matchfinder::Matchfinder( const int dict_size, const int len_limit,
}
-bool Matchfinder::reset() throw()
+void Matchfinder::reset()
{
const int size = stream_pos - pos;
if( size > 0 ) std::memmove( buffer, buffer + pos, size );
@@ -86,16 +90,17 @@ bool Matchfinder::reset() throw()
pos = 0;
cyclic_pos = 0;
for( int i = 0; i < num_prev_positions; ++i ) prev_positions[i] = -1;
- return ( at_stream_end || read_block() );
+ read_block();
}
-bool Matchfinder::move_pos() throw()
+void Matchfinder::move_pos()
{
if( ++cyclic_pos >= dictionary_size_ ) cyclic_pos = 0;
if( ++pos >= pos_limit )
{
- if( pos > stream_pos ) { pos = stream_pos; return false; }
+ if( pos > stream_pos )
+ internal_error( "pos > stream_pos in Matchfinder::move_pos" );
if( !at_stream_end )
{
const int offset = pos - dictionary_size_ - before_size;
@@ -108,10 +113,9 @@ bool Matchfinder::move_pos() throw()
if( prev_positions[i] >= 0 ) prev_positions[i] -= offset;
for( int i = 0; i < 2 * dictionary_size_; ++i )
if( prev_pos_tree[i] >= 0 ) prev_pos_tree[i] -= offset;
- return read_block();
+ read_block();
}
}
- return true;
}
@@ -154,44 +158,44 @@ int Matchfinder::longest_match_len( int * const distances ) throw()
int newpos = prev_positions[key4];
prev_positions[key4] = pos;
- int idx0 = cyclic_pos << 1;
- int idx1 = idx0 + 1;
+ int32_t * ptr0 = prev_pos_tree + ( cyclic_pos << 1 );
+ int32_t * ptr1 = ptr0 + 1;
int len = 0, len0 = 0, len1 = 0;
- for( int count = 16 + ( match_len_limit_ / 2 ); ; )
+ for( int count = cycles; ; )
{
- if( newpos < min_pos || --count < 0 )
- { prev_pos_tree[idx0] = prev_pos_tree[idx1] = -1; break; }
+ if( newpos < min_pos || --count < 0 ) { *ptr0 = *ptr1 = -1; break; }
const uint8_t * const newdata = buffer + newpos;
while( len < len_limit && newdata[len] == data[len] ) ++len;
const int delta = pos - newpos;
if( distances ) while( maxlen < len ) distances[++maxlen] = delta - 1;
- const int newidx = ( cyclic_pos - delta +
- ( ( cyclic_pos >= delta ) ? 0 : dictionary_size_ ) ) << 1;
+ int32_t * const newptr = prev_pos_tree +
+ ( ( cyclic_pos - delta +
+ ( ( cyclic_pos >= delta ) ? 0 : dictionary_size_ ) ) << 1 );
if( len < len_limit )
{
if( newdata[len] < data[len] )
{
- prev_pos_tree[idx0] = newpos;
- idx0 = newidx + 1;
- newpos = prev_pos_tree[idx0];
+ *ptr0 = newpos;
+ ptr0 = newptr + 1;
+ newpos = *ptr0;
len0 = len; if( len1 < len ) len = len1;
}
else
{
- prev_pos_tree[idx1] = newpos;
- idx1 = newidx;
- newpos = prev_pos_tree[idx1];
+ *ptr1 = newpos;
+ ptr1 = newptr;
+ newpos = *ptr1;
len1 = len; if( len0 < len ) len = len0;
}
}
else
{
- prev_pos_tree[idx0] = prev_pos_tree[newidx];
- prev_pos_tree[idx1] = prev_pos_tree[newidx+1];
+ *ptr0 = newptr[0];
+ *ptr1 = newptr[1];
break;
}
}
@@ -204,6 +208,18 @@ int Matchfinder::longest_match_len( int * const distances ) throw()
}
+void Range_encoder::flush_data()
+ {
+ if( pos > 0 )
+ {
+ if( outfd >= 0 && writeblock( outfd, buffer, pos ) != pos )
+ throw Error( "Write error" );
+ partial_member_pos += pos;
+ pos = 0;
+ }
+ }
+
+
void Len_encoder::encode( Range_encoder & range_encoder, int symbol,
const int pos_state )
{
@@ -241,6 +257,17 @@ void LZ_encoder::fill_align_prices() throw()
void LZ_encoder::fill_distance_prices() throw()
{
+ for( int dis = start_dis_model; dis < modeled_distances; ++dis )
+ {
+ const int dis_slot = dis_slots.table( dis );
+ const int direct_bits = ( dis_slot >> 1 ) - 1;
+ const int base = ( 2 | ( dis_slot & 1 ) ) << direct_bits;
+ const int price =
+ price_symbol_reversed( bm_dis + base - dis_slot, dis - base, direct_bits );
+ for( int dis_state = 0; dis_state < max_dis_states; ++dis_state )
+ dis_prices[dis_state][dis] = price;
+ }
+
for( int dis_state = 0; dis_state < max_dis_states; ++dis_state )
{
int * const dsp = dis_slot_prices[dis_state];
@@ -257,13 +284,7 @@ void LZ_encoder::fill_distance_prices() throw()
for( ; dis < start_dis_model; ++dis )
dp[dis] = dsp[dis];
for( ; dis < modeled_distances; ++dis )
- {
- const int dis_slot = dis_slots[dis];
- const int direct_bits = ( dis_slot >> 1 ) - 1;
- const int base = ( 2 | ( dis_slot & 1 ) ) << direct_bits;
- dp[dis] = dsp[dis_slot] +
- price_symbol_reversed( bm_dis + base - dis_slot, dis - base, direct_bits );
- }
+ dp[dis] += dsp[dis_slots.table( dis )];
}
}
@@ -293,7 +314,7 @@ int LZ_encoder::sequence_optimizer( const int reps[num_rep_distances],
{
trials[0].dis = rep_index;
trials[0].price = replens[rep_index];
- if( !move_pos( replens[rep_index], true ) ) return 0;
+ move_pos( replens[rep_index], true );
return replens[rep_index];
}
@@ -302,7 +323,7 @@ int LZ_encoder::sequence_optimizer( const int reps[num_rep_distances],
trials[0].dis = match_distances[matchfinder.match_len_limit()] +
num_rep_distances;
trials[0].price = main_len;
- if( !move_pos( main_len, true ) ) return 0;
+ move_pos( main_len, true );
return main_len;
}
@@ -332,7 +353,7 @@ int LZ_encoder::sequence_optimizer( const int reps[num_rep_distances],
{
trials[0].dis = trials[1].dis;
trials[0].price = 1;
- if( !matchfinder.move_pos() ) return 0;
+ matchfinder.move_pos();
return 1;
}
@@ -365,7 +386,7 @@ int LZ_encoder::sequence_optimizer( const int reps[num_rep_distances],
int cur = 0;
int num_trials = main_len;
- if( !matchfinder.move_pos() ) return 0;
+ matchfinder.move_pos();
while( true )
{
@@ -406,12 +427,13 @@ int LZ_encoder::sequence_optimizer( const int reps[num_rep_distances],
const uint8_t cur_byte = matchfinder[0];
const uint8_t match_byte = matchfinder[-cur_trial.reps[0]-1];
- int next_price = cur_trial.price + price0( bm_match[cur_trial.state()][pos_state] );
+ int next_price = cur_trial.price +
+ price0( bm_match[cur_trial.state()][pos_state] );
if( cur_trial.state.is_char() )
next_price += literal_encoder.price_symbol( prev_byte, cur_byte );
else
next_price += literal_encoder.price_matched( prev_byte, cur_byte, match_byte );
- if( !matchfinder.move_pos() ) return 0;
+ matchfinder.move_pos();
Trial & next_trial = trials[cur+1];
@@ -425,7 +447,7 @@ int LZ_encoder::sequence_optimizer( const int reps[num_rep_distances],
price_rep_len1( cur_trial.state, pos_state ) );
const int len_limit = std::min( std::min( max_num_trials - 1 - cur,
- matchfinder.available_bytes() ), matchfinder.match_len_limit() );
+ matchfinder.available_bytes() ), matchfinder.match_len_limit() );
if( len_limit < min_match_len ) continue;
for( int rep = 0; rep < num_rep_distances; ++rep )
@@ -492,7 +514,7 @@ LZ_encoder::LZ_encoder( Matchfinder & mf, const File_header & header,
range_encoder( outfd ),
len_encoder( matchfinder.match_len_limit() ),
rep_match_len_encoder( matchfinder.match_len_limit() ),
- num_dis_slots( 2 * File_header::real_bits( matchfinder.dictionary_size() - 1 ) )
+ num_dis_slots( 2 * real_bits( matchfinder.dictionary_size() - 1 ) )
{
fill_align_prices();
@@ -503,30 +525,33 @@ LZ_encoder::LZ_encoder( Matchfinder & mf, const File_header & header,
bool LZ_encoder::encode_member( const long long member_size )
{
- if( range_encoder.member_position() != File_header::size )
- return false; // can be called only once
const long long member_size_limit =
member_size - File_trailer::size() - max_marker_size;
+ const int fill_count = ( matchfinder.match_len_limit() > 12 ) ? 512 : 2048;
int fill_counter = 0;
int rep_distances[num_rep_distances];
State state;
for( int i = 0; i < num_rep_distances; ++i ) rep_distances[i] = 0;
- // encode first byte
- if( matchfinder.data_position() == 0 && !matchfinder.finished() )
+ if( matchfinder.data_position() != 0 ||
+ range_encoder.member_position() != File_header::size )
+ return false; // can be called only once
+
+ if( !matchfinder.finished() ) // encode first byte
{
- range_encoder.encode_bit( bm_match[state()][0], 0 );
const uint8_t prev_byte = 0;
const uint8_t cur_byte = matchfinder[0];
+ range_encoder.encode_bit( bm_match[state()][0], 0 );
literal_encoder.encode( range_encoder, prev_byte, cur_byte );
crc32.update( crc_, cur_byte );
- if( !move_pos( 1 ) ) return false;
+ move_pos( 1 );
}
while( true )
{
if( matchfinder.finished() ) { full_flush( state ); return true; }
- if( fill_counter <= 0 ) { fill_distance_prices(); fill_counter = 512; }
+ if( fill_counter <= 0 )
+ { fill_distance_prices(); fill_counter = fill_count; }
int ahead = sequence_optimizer( rep_distances, state );
if( ahead <= 0 ) return false;
@@ -534,7 +559,8 @@ bool LZ_encoder::encode_member( const long long member_size )
for( int i = 0; ; )
{
- const int pos_state = ( matchfinder.data_position() - ahead ) & pos_state_mask;
+ const int pos_state =
+ ( matchfinder.data_position() - ahead ) & pos_state_mask;
const int dis = trials[i].dis;
const int len = trials[i].price;
@@ -550,7 +576,8 @@ bool LZ_encoder::encode_member( const long long member_size )
else
{
const uint8_t match_byte = matchfinder[-ahead-rep_distances[0]-1];
- literal_encoder.encode_matched( range_encoder, prev_byte, match_byte, cur_byte );
+ literal_encoder.encode_matched( range_encoder,
+ prev_byte, cur_byte, match_byte );
}
state.set_char();
}
diff --git a/encoder.h b/encoder.h
index 9b050c4..cea753e 100644
--- a/encoder.h
+++ b/encoder.h
@@ -1,4 +1,4 @@
-/* Lzip - A data compressor based on the LZMA algorithm
+/* Lzip - Data compressor based on the LZMA algorithm
Copyright (C) 2008, 2009, 2010 Antonio Diaz Diaz.
This program is free software: you can redistribute it and/or modify
@@ -15,8 +15,8 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-const int max_num_trials = 1 << 12;
-const int price_shift = 6;
+enum { max_num_trials = 1 << 12,
+ price_shift = 6 };
class Dis_slots
{
@@ -35,6 +35,8 @@ public:
}
}
+ unsigned char table( const int dis ) const throw() { return data[dis]; }
+
int operator[]( const uint32_t dis ) const throw()
{
if( dis < (1 << 12) ) return data[dis];
@@ -54,13 +56,13 @@ public:
void init() throw()
{
const int num_bits = ( bit_model_total_bits - 2 );
- for( int i = num_bits - 1; i >= 0; --i )
+ int j = 1, end = 2;
+ data[0] = bit_model_total_bits << price_shift;
+ for( int i = num_bits - 1; i >= 0; --i, end <<= 1 )
{
- int start = 1 << ( num_bits - i - 1 );
- int end = 1 << ( num_bits - i);
- for( int j = start; j < end; ++j )
- data[j] = (i << price_shift) +
- ( ((end - j) << price_shift) >> (num_bits - i - 1) );
+ for( ; j < end; ++j )
+ data[j] = ( i << price_shift ) +
+ ( ( (end - j) << price_shift ) >> ( num_bits - i - 1 ) );
}
}
@@ -83,8 +85,8 @@ inline int price_bit( const Bit_model & bm, const int bit ) throw()
inline int price_symbol( const Bit_model bm[], int symbol, const int num_bits ) throw()
{
- symbol |= ( 1 << num_bits );
int price = 0;
+ symbol |= ( 1 << num_bits );
while( symbol > 1 )
{
const int bit = symbol & 1;
@@ -151,23 +153,24 @@ class Matchfinder
num_prev_positions2 };
long long partial_data_pos;
+ uint8_t * buffer; // input buffer
+ int32_t * const prev_positions; // last seen position of key
+ int32_t * prev_pos_tree;
int dictionary_size_; // bytes to keep in buffer before pos
int buffer_size;
- uint8_t * buffer;
- int pos;
- int cyclic_pos;
+ int pos; // current pos in buffer
+ int cyclic_pos; // current pos in dictionary
int stream_pos; // first byte not yet read from file
int pos_limit; // when reached, a new block must be read
- const int infd_; // input file descriptor
const int match_len_limit_;
- int32_t * const prev_positions; // last seen position of key
- int32_t * prev_pos_tree;
+ const int cycles;
+ const int infd; // input file descriptor
bool at_stream_end; // stream_pos shows real end of file
- bool read_block() throw();
+ bool read_block();
public:
- Matchfinder( const int dict_size, const int len_limit, const int infd );
+ Matchfinder( const int dict_size, const int len_limit, const int ifd );
~Matchfinder()
{ delete[] prev_pos_tree; delete[] prev_positions; std::free( buffer ); }
@@ -199,8 +202,8 @@ public:
return i;
}
- bool reset() throw();
- bool move_pos() throw();
+ void reset();
+ void move_pos();
int longest_match_len( int * const distances = 0 ) throw();
};
@@ -210,11 +213,11 @@ class Range_encoder
enum { buffer_size = 65536 };
uint64_t low;
long long partial_member_pos;
- uint8_t * const buffer;
- int pos;
+ uint8_t * const buffer; // output buffer
+ int pos; // current pos in buffer
uint32_t range;
int ff_count;
- const int outfd_; // output file descriptor
+ const int outfd; // output file descriptor
uint8_t cache;
void shift_low()
@@ -231,7 +234,7 @@ class Range_encoder
}
public:
- Range_encoder( const int outfd )
+ Range_encoder( const int ofd )
:
low( 0 ),
partial_member_pos( 0 ),
@@ -239,26 +242,13 @@ public:
pos( 0 ),
range( 0xFFFFFFFFU ),
ff_count( 0 ),
- outfd_( outfd ),
+ outfd( ofd ),
cache( 0 ) {}
~Range_encoder() { delete[] buffer; }
- void flush_data()
- {
- if( pos > 0 )
- {
- if( outfd_ >= 0 )
- {
- const int wr = writeblock( outfd_, buffer, pos );
- if( wr != pos ) throw Error( "write error" );
- }
- partial_member_pos += pos;
- pos = 0;
- }
- }
-
void flush() { for( int i = 0; i < 5; ++i ) shift_low(); }
+ void flush_data();
long long member_position() const throw()
{ return partial_member_pos + pos + ff_count; }
@@ -397,24 +387,28 @@ class Literal_encoder
{ return ( prev_byte >> ( 8 - literal_context_bits ) ); }
public:
- void encode( Range_encoder & range_encoder, uint8_t prev_byte, uint8_t symbol )
+ void encode( Range_encoder & range_encoder,
+ uint8_t prev_byte, uint8_t symbol )
{ range_encoder.encode_tree( bm_literal[lstate(prev_byte)], symbol, 8 ); }
- void encode_matched( Range_encoder & range_encoder, uint8_t prev_byte, uint8_t match_byte, uint8_t symbol )
- { range_encoder.encode_matched( bm_literal[lstate(prev_byte)], symbol, match_byte ); }
-
- int price_matched( uint8_t prev_byte, uint8_t symbol, uint8_t match_byte ) const throw()
- { return ::price_matched( bm_literal[lstate(prev_byte)], symbol, match_byte ); }
+ void encode_matched( Range_encoder & range_encoder,
+ uint8_t prev_byte, uint8_t symbol, uint8_t match_byte )
+ { range_encoder.encode_matched( bm_literal[lstate(prev_byte)],
+ symbol, match_byte ); }
int price_symbol( uint8_t prev_byte, uint8_t symbol ) const throw()
{ return ::price_symbol( bm_literal[lstate(prev_byte)], symbol, 8 ); }
+
+ int price_matched( uint8_t prev_byte, uint8_t symbol,
+ uint8_t match_byte ) const throw()
+ { return ::price_matched( bm_literal[lstate(prev_byte)],
+ symbol, match_byte ); }
};
class LZ_encoder
{
- enum { dis_align_mask = dis_align_size - 1,
- infinite_price = 0x0FFFFFFF,
+ enum { infinite_price = 0x0FFFFFFF,
max_marker_size = 16,
num_rep_distances = 4 }; // must be 4
@@ -426,7 +420,9 @@ class LZ_encoder
int price; // dual use var; cumulative price, match length
int reps[num_rep_distances];
void update( const int d, const int p_i, const int pr ) throw()
- { if( pr < price ) { dis = d; prev_index = p_i; price = pr; } }
+ {
+ if( pr < price ) { dis = d; prev_index = p_i; price = pr; }
+ }
};
int longest_match_found;
@@ -439,7 +435,7 @@ class LZ_encoder
Bit_model bm_rep2[State::states];
Bit_model bm_len[State::states][pos_states];
Bit_model bm_dis_slot[max_dis_states][1<<dis_slot_bits];
- Bit_model bm_dis[modeled_distances-end_dis_model];
+ Bit_model bm_dis[modeled_distances-end_dis_model+1];
Bit_model bm_align[dis_align_size];
Matchfinder & matchfinder;
@@ -509,7 +505,7 @@ class LZ_encoder
price += dis_prices[dis_state][dis];
else
price += dis_slot_prices[dis_state][dis_slots[dis]] +
- align_prices[dis & dis_align_mask];
+ align_prices[dis & (dis_align_size - 1)];
return price;
}
@@ -545,15 +541,14 @@ class LZ_encoder
return len;
}
- bool move_pos( int n, bool skip = false ) throw()
+ void move_pos( int n, bool skip = false )
{
while( --n >= 0 )
{
if( skip ) skip = false;
else matchfinder.longest_match_len();
- if( !matchfinder.move_pos() ) return false;
+ matchfinder.move_pos();
}
- return true;
}
void backward( int cur )
diff --git a/fast_encoder.cc b/fast_encoder.cc
new file mode 100644
index 0000000..f4becde
--- /dev/null
+++ b/fast_encoder.cc
@@ -0,0 +1,378 @@
+/* Lzip - Data compressor based on the LZMA algorithm
+ Copyright (C) 2008, 2009, 2010 Antonio Diaz Diaz.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#define _FILE_OFFSET_BITS 64
+
+#include <algorithm>
+#include <cerrno>
+#include <cstdlib>
+#include <cstring>
+#include <string>
+#include <vector>
+#include <stdint.h>
+
+#include "lzip.h"
+#include "encoder.h"
+#include "fast_encoder.h"
+
+
+bool Fmatchfinder::read_block()
+ {
+ if( !at_stream_end && stream_pos < buffer_size )
+ {
+ const int size = buffer_size - stream_pos;
+ const int rd = readblock( infd, buffer + stream_pos, size );
+ stream_pos += rd;
+ if( rd != size && errno ) throw Error( "Read error" );
+ at_stream_end = ( rd < size );
+ }
+ return pos < stream_pos;
+ }
+
+
+Fmatchfinder::Fmatchfinder( const int ifd )
+ :
+ partial_data_pos( 0 ),
+ prev_positions( new int32_t[num_prev_positions] ),
+ pos( 0 ),
+ cyclic_pos( 0 ),
+ key4( 0 ),
+ stream_pos( 0 ),
+ match_len_limit_( 16 ),
+ infd( ifd ),
+ at_stream_end( false )
+ {
+ const int dict_size = 65536;
+ const int buffer_size_limit = ( 16 * dict_size ) + before_size + after_size;
+ buffer_size = dict_size;
+ buffer = (uint8_t *)std::malloc( buffer_size );
+ if( !buffer ) throw std::bad_alloc();
+ if( read_block() && !at_stream_end && buffer_size < buffer_size_limit )
+ {
+ buffer_size = buffer_size_limit;
+ buffer = (uint8_t *)std::realloc( buffer, buffer_size );
+ if( !buffer ) throw std::bad_alloc();
+ read_block();
+ }
+ if( at_stream_end && stream_pos < dict_size )
+ dictionary_size_ = std::max( (int)min_dictionary_size, stream_pos );
+ else dictionary_size_ = dict_size;
+ pos_limit = buffer_size;
+ if( !at_stream_end ) pos_limit -= after_size;
+ prev_pos_chain = new int32_t[dictionary_size_];
+ for( int i = 0; i < num_prev_positions; ++i ) prev_positions[i] = -1;
+ }
+
+
+void Fmatchfinder::reset()
+ {
+ const int size = stream_pos - pos;
+ if( size > 0 ) std::memmove( buffer, buffer + pos, size );
+ partial_data_pos = 0;
+ stream_pos -= pos;
+ pos = 0;
+ cyclic_pos = 0;
+ key4 = 0;
+ for( int i = 0; i < num_prev_positions; ++i ) prev_positions[i] = -1;
+ read_block();
+ }
+
+
+void Fmatchfinder::move_pos()
+ {
+ if( ++cyclic_pos >= dictionary_size_ ) cyclic_pos = 0;
+ if( ++pos >= pos_limit )
+ {
+ if( pos > stream_pos )
+ internal_error( "pos > stream_pos in Fmatchfinder::move_pos" );
+ if( !at_stream_end )
+ {
+ const int offset = pos - dictionary_size_ - before_size;
+ const int size = stream_pos - offset;
+ std::memmove( buffer, buffer + offset, size );
+ partial_data_pos += offset;
+ pos -= offset;
+ stream_pos -= offset;
+ for( int i = 0; i < num_prev_positions; ++i )
+ if( prev_positions[i] >= 0 ) prev_positions[i] -= offset;
+ for( int i = 0; i < dictionary_size_; ++i )
+ if( prev_pos_chain[i] >= 0 ) prev_pos_chain[i] -= offset;
+ read_block();
+ }
+ }
+ }
+
+
+int Fmatchfinder::longest_match_len( int * const distance ) throw()
+ {
+ int len_limit = match_len_limit_;
+ if( len_limit > available_bytes() )
+ {
+ len_limit = available_bytes();
+ if( len_limit < 4 ) return 0;
+ }
+
+ int maxlen = min_match_len - 1;
+ const int min_pos = (pos >= dictionary_size_) ?
+ (pos - dictionary_size_ + 1) : 0;
+ const uint8_t * const data = buffer + pos;
+ key4 = ( ( key4 << 4 ) ^ data[3] ) & ( num_prev_positions - 1 );
+
+ int newpos = prev_positions[key4];
+ prev_positions[key4] = pos;
+
+ int32_t * ptr0 = prev_pos_chain + cyclic_pos;
+
+ for( int count = 4; ; )
+ {
+ if( newpos < min_pos || --count < 0 ) { *ptr0 = -1; break; }
+ const uint8_t * const newdata = buffer + newpos;
+ int len = 0;
+ while( len < len_limit && newdata[len] == data[len] ) ++len;
+
+ const int delta = pos - newpos;
+ if( maxlen < len ) { maxlen = len; *distance = delta - 1; }
+
+ int32_t * const newptr = prev_pos_chain +
+ ( cyclic_pos - delta +
+ ( ( cyclic_pos >= delta ) ? 0 : dictionary_size_ ) );
+
+ if( len < len_limit )
+ {
+ *ptr0 = newpos;
+ ptr0 = newptr;
+ newpos = *ptr0;
+ }
+ else
+ {
+ *ptr0 = *newptr;
+ break;
+ }
+ }
+ return maxlen;
+ }
+
+
+void Fmatchfinder::longest_match_len() throw()
+ {
+ int len_limit = match_len_limit_;
+ if( len_limit > available_bytes() )
+ {
+ len_limit = available_bytes();
+ if( len_limit < 4 ) return;
+ }
+
+ const int min_pos = (pos >= dictionary_size_) ?
+ (pos - dictionary_size_ + 1) : 0;
+ const uint8_t * const data = buffer + pos;
+ key4 = ( ( key4 << 4 ) ^ data[3] ) & ( num_prev_positions - 1 );
+
+ const int newpos = prev_positions[key4];
+ prev_positions[key4] = pos;
+
+ int32_t * const ptr0 = prev_pos_chain + cyclic_pos;
+
+ if( newpos < min_pos ) *ptr0 = -1;
+ else
+ {
+ const uint8_t * const newdata = buffer + newpos;
+ if( newdata[len_limit-1] != data[len_limit-1] ||
+ std::memcmp( newdata, data, len_limit - 1 ) ) *ptr0 = newpos;
+ else
+ {
+ const int delta = pos - newpos;
+ int idx = cyclic_pos - delta;
+ if( idx < 0 ) idx += dictionary_size_;
+ *ptr0 = prev_pos_chain[idx];
+ }
+ }
+ }
+
+
+// Return value == number of bytes advanced (len).
+// *disp returns the distance to encode.
+// ( *disp == -1 && len == 1 ) means literal.
+int FLZ_encoder::sequence_optimizer( const int reps[num_rep_distances],
+ int * const disp, const State & state )
+ {
+ const int main_len = read_match_distances();
+
+ int replen = 0;
+ int rep_index = 0;
+ for( int i = 0; i < num_rep_distances; ++i )
+ {
+ const int len = fmatchfinder.true_match_len( 0, reps[i] + 1, max_match_len );
+ if( len > replen ) { replen = len; rep_index = i; }
+ }
+ if( replen > min_match_len )
+ {
+ *disp = rep_index;
+ move_pos( replen, true );
+ return replen;
+ }
+
+ if( main_len > min_match_len ||
+ ( main_len == min_match_len && match_distance < modeled_distances ) )
+ {
+ *disp = num_rep_distances + match_distance;
+ move_pos( main_len, true );
+ return main_len;
+ }
+
+ const uint8_t cur_byte = fmatchfinder[0];
+ const uint8_t match_byte = fmatchfinder[-reps[0]-1];
+
+ *disp = -1;
+ if( match_byte == cur_byte )
+ {
+ const uint8_t prev_byte = fmatchfinder[-1];
+ const int pos_state = fmatchfinder.data_position() & pos_state_mask;
+ int price = price0( bm_match[state()][pos_state] );
+ if( state.is_char() )
+ price += literal_encoder.price_symbol( prev_byte, cur_byte );
+ else
+ price += literal_encoder.price_matched( prev_byte, cur_byte, match_byte );
+ const int short_rep_price = price1( bm_match[state()][pos_state] ) +
+ price1( bm_rep[state()] ) +
+ price_rep_len1( state, pos_state );
+ if( short_rep_price < price ) *disp = 0;
+ }
+
+ fmatchfinder.move_pos();
+ return 1;
+ }
+
+
+ // End Of Stream mark => (dis == 0xFFFFFFFFU, len == min_match_len)
+void FLZ_encoder::full_flush( const State & state )
+ {
+ const int pos_state = fmatchfinder.data_position() & pos_state_mask;
+ range_encoder.encode_bit( bm_match[state()][pos_state], 1 );
+ range_encoder.encode_bit( bm_rep[state()], 0 );
+ encode_pair( 0xFFFFFFFFU, min_match_len, pos_state );
+ range_encoder.flush();
+ File_trailer trailer;
+ trailer.data_crc( crc() );
+ trailer.data_size( fmatchfinder.data_position() );
+ trailer.member_size( range_encoder.member_position() + File_trailer::size() );
+ for( int i = 0; i < File_trailer::size(); ++i )
+ range_encoder.put_byte( trailer.data[i] );
+ range_encoder.flush_data();
+ }
+
+
+FLZ_encoder::FLZ_encoder( Fmatchfinder & mf, const File_header & header,
+ const int outfd )
+ :
+ crc_( 0xFFFFFFFFU ),
+ fmatchfinder( mf ),
+ range_encoder( outfd ),
+ len_encoder( fmatchfinder.match_len_limit() ),
+ rep_match_len_encoder( fmatchfinder.match_len_limit() ),
+ num_dis_slots( 2 * real_bits( fmatchfinder.dictionary_size() - 1 ) )
+ {
+ for( int i = 0; i < File_header::size; ++i )
+ range_encoder.put_byte( header.data[i] );
+ }
+
+
+bool FLZ_encoder::encode_member( const long long member_size )
+ {
+ const long long member_size_limit =
+ member_size - File_trailer::size() - max_marker_size;
+ int rep_distances[num_rep_distances];
+ State state;
+ for( int i = 0; i < num_rep_distances; ++i ) rep_distances[i] = 0;
+
+ if( fmatchfinder.data_position() != 0 ||
+ range_encoder.member_position() != File_header::size )
+ return false; // can be called only once
+
+ if( !fmatchfinder.finished() ) // encode first byte
+ {
+ const uint8_t prev_byte = 0;
+ const uint8_t cur_byte = fmatchfinder[0];
+ range_encoder.encode_bit( bm_match[state()][0], 0 );
+ literal_encoder.encode( range_encoder, prev_byte, cur_byte );
+ crc32.update( crc_, cur_byte );
+ move_pos( 1 );
+ }
+
+ while( true )
+ {
+ if( fmatchfinder.finished() ) { full_flush( state ); return true; }
+
+ const int pos_state = fmatchfinder.data_position() & pos_state_mask;
+ int dis;
+ const int len = sequence_optimizer( rep_distances, &dis, state );
+ if( len <= 0 ) return false;
+
+ bool bit = ( dis < 0 && len == 1 );
+ range_encoder.encode_bit( bm_match[state()][pos_state], !bit );
+ if( bit ) // literal byte
+ {
+ const uint8_t prev_byte = fmatchfinder[-len-1];
+ const uint8_t cur_byte = fmatchfinder[-len];
+ crc32.update( crc_, cur_byte );
+ if( state.is_char() )
+ literal_encoder.encode( range_encoder, prev_byte, cur_byte );
+ else
+ {
+ const uint8_t match_byte = fmatchfinder[-len-rep_distances[0]-1];
+ literal_encoder.encode_matched( range_encoder,
+ prev_byte, cur_byte, match_byte );
+ }
+ state.set_char();
+ }
+ else // match or repeated match
+ {
+ crc32.update( crc_, fmatchfinder.ptr_to_current_pos() - len, len );
+ mtf_reps( dis, rep_distances );
+ bit = ( dis < num_rep_distances );
+ range_encoder.encode_bit( bm_rep[state()], bit );
+ if( bit )
+ {
+ bit = ( dis == 0 );
+ range_encoder.encode_bit( bm_rep0[state()], !bit );
+ if( bit )
+ range_encoder.encode_bit( bm_len[state()][pos_state], len > 1 );
+ else
+ {
+ range_encoder.encode_bit( bm_rep1[state()], dis > 1 );
+ if( dis > 1 )
+ range_encoder.encode_bit( bm_rep2[state()], dis > 2 );
+ }
+ if( len == 1 ) state.set_short_rep();
+ else
+ {
+ rep_match_len_encoder.encode( range_encoder, len, pos_state );
+ state.set_rep();
+ }
+ }
+ else
+ {
+ encode_pair( dis - num_rep_distances, len, pos_state );
+ state.set_match();
+ }
+ }
+ if( range_encoder.member_position() >= member_size_limit )
+ {
+ full_flush( state );
+ return true;
+ }
+ }
+ }
diff --git a/fast_encoder.h b/fast_encoder.h
new file mode 100644
index 0000000..4e817d8
--- /dev/null
+++ b/fast_encoder.h
@@ -0,0 +1,176 @@
+/* Lzip - Data compressor based on the LZMA algorithm
+ Copyright (C) 2008, 2009, 2010 Antonio Diaz Diaz.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+class Fmatchfinder
+ {
+ enum { // bytes to keep in buffer before dictionary
+ before_size = max_match_len + 1,
+ // bytes to keep in buffer after pos
+ after_size = max_match_len,
+ num_prev_positions = 1 << 16 };
+
+ long long partial_data_pos;
+ uint8_t * buffer; // input buffer
+ int32_t * const prev_positions; // last seen position of key
+ int32_t * prev_pos_chain;
+ int dictionary_size_; // bytes to keep in buffer before pos
+ int buffer_size;
+ int pos; // current pos in buffer
+ int cyclic_pos; // current pos in dictionary
+ int key4; // key made from latest 4 bytes
+ int stream_pos; // first byte not yet read from file
+ int pos_limit; // when reached, a new block must be read
+ const int match_len_limit_;
+ const int infd; // input file descriptor
+ bool at_stream_end; // stream_pos shows real end of file
+
+ bool read_block();
+
+public:
+ Fmatchfinder( const int ifd );
+
+ ~Fmatchfinder()
+ { delete[] prev_pos_chain; delete[] prev_positions; std::free( buffer ); }
+
+ uint8_t operator[]( const int i ) const throw() { return buffer[pos+i]; }
+ int available_bytes() const throw() { return stream_pos - pos; }
+ long long data_position() const throw() { return partial_data_pos + pos; }
+ int dictionary_size() const throw() { return dictionary_size_; }
+ bool finished() const throw() { return at_stream_end && pos >= stream_pos; }
+ int match_len_limit() const throw() { return match_len_limit_; }
+ const uint8_t * ptr_to_current_pos() const throw() { return buffer + pos; }
+
+ int true_match_len( const int index, const int distance, int len_limit ) const throw()
+ {
+ if( index + len_limit > available_bytes() )
+ len_limit = available_bytes() - index;
+ const uint8_t * const data = buffer + pos + index - distance;
+ int i = 0;
+ while( i < len_limit && data[i] == data[i+distance] ) ++i;
+ return i;
+ }
+
+ void reset();
+ void move_pos();
+ int longest_match_len( int * const distance ) throw();
+ void longest_match_len() throw();
+ };
+
+
+class FLZ_encoder
+ {
+ enum { max_marker_size = 16,
+ num_rep_distances = 4 }; // must be 4
+
+ uint32_t crc_;
+
+ Bit_model bm_match[State::states][pos_states];
+ Bit_model bm_rep[State::states];
+ Bit_model bm_rep0[State::states];
+ Bit_model bm_rep1[State::states];
+ Bit_model bm_rep2[State::states];
+ Bit_model bm_len[State::states][pos_states];
+ Bit_model bm_dis_slot[max_dis_states][1<<dis_slot_bits];
+ Bit_model bm_dis[modeled_distances-end_dis_model+1];
+ Bit_model bm_align[dis_align_size];
+
+ Fmatchfinder & fmatchfinder;
+ Range_encoder range_encoder;
+ Len_encoder len_encoder;
+ Len_encoder rep_match_len_encoder;
+ Literal_encoder literal_encoder;
+
+ const int num_dis_slots;
+ int match_distance;
+
+ uint32_t crc() const throw() { return crc_ ^ 0xFFFFFFFFU; }
+
+ // move-to-front dis in/into reps
+ void mtf_reps( const int dis, int reps[num_rep_distances] ) throw()
+ {
+ if( dis >= num_rep_distances )
+ {
+ for( int i = num_rep_distances - 1; i > 0; --i ) reps[i] = reps[i-1];
+ reps[0] = dis - num_rep_distances;
+ }
+ else if( dis > 0 )
+ {
+ const int distance = reps[dis];
+ for( int i = dis; i > 0; --i ) reps[i] = reps[i-1];
+ reps[0] = distance;
+ }
+ }
+
+ int price_rep_len1( const State & state, const int pos_state ) const throw()
+ {
+ return price0( bm_rep0[state()] ) + price0( bm_len[state()][pos_state] );
+ }
+
+ void encode_pair( const uint32_t dis, const int len, const int pos_state ) throw()
+ {
+ len_encoder.encode( range_encoder, len, pos_state );
+ const int dis_slot = dis_slots[dis];
+ range_encoder.encode_tree( bm_dis_slot[get_dis_state(len)], dis_slot, dis_slot_bits );
+
+ if( dis_slot >= start_dis_model )
+ {
+ const int direct_bits = ( dis_slot >> 1 ) - 1;
+ const uint32_t base = ( 2 | ( dis_slot & 1 ) ) << direct_bits;
+ const uint32_t direct_dis = dis - base;
+
+ if( dis_slot < end_dis_model )
+ range_encoder.encode_tree_reversed( bm_dis + base - dis_slot,
+ direct_dis, direct_bits );
+ else
+ {
+ range_encoder.encode( direct_dis >> dis_align_bits, direct_bits - dis_align_bits );
+ range_encoder.encode_tree_reversed( bm_align, direct_dis, dis_align_bits );
+ }
+ }
+ }
+
+ int read_match_distances() throw()
+ {
+ int len = fmatchfinder.longest_match_len( &match_distance );
+ if( len == fmatchfinder.match_len_limit() )
+ len += fmatchfinder.true_match_len( len, match_distance + 1, max_match_len - len );
+ return len;
+ }
+
+ void move_pos( int n, bool skip = false )
+ {
+ while( --n >= 0 )
+ {
+ if( skip ) skip = false;
+ else fmatchfinder.longest_match_len();
+ fmatchfinder.move_pos();
+ }
+ }
+
+ int sequence_optimizer( const int reps[num_rep_distances],
+ int * const disp, const State & state );
+
+ void full_flush( const State & state );
+
+public:
+ FLZ_encoder( Fmatchfinder & mf, const File_header & header, const int outfd );
+
+ bool encode_member( const long long member_size );
+
+ long long member_position() const throw()
+ { return range_encoder.member_position(); }
+ };
diff --git a/lzip.h b/lzip.h
index 9378a2e..77ba0e8 100644
--- a/lzip.h
+++ b/lzip.h
@@ -1,4 +1,4 @@
-/* Lzip - A data compressor based on the LZMA algorithm
+/* Lzip - Data compressor based on the LZMA algorithm
Copyright (C) 2008, 2009, 2010 Antonio Diaz Diaz.
This program is free software: you can redistribute it and/or modify
@@ -27,56 +27,63 @@ public:
void set_char() throw()
{
- static const unsigned char next[states] = {0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5};
+ static const unsigned char next[states] =
+ { 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5 };
st = next[st];
}
+
void set_match() throw()
{
- static const unsigned char next[states] = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10};
+ static const unsigned char next[states] =
+ { 7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10 };
st = next[st];
}
+
void set_rep() throw()
{
- static const unsigned char next[states] = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11};
+ static const unsigned char next[states] =
+ { 8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11 };
st = next[st];
}
+
void set_short_rep() throw()
{
- static const unsigned char next[states] = {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11};
+ static const unsigned char next[states] =
+ { 9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11 };
st = next[st];
}
};
-
-const int min_dictionary_bits = 12;
-const int min_dictionary_size = 1 << min_dictionary_bits;
-const int max_dictionary_bits = 29;
-const int max_dictionary_size = 1 << max_dictionary_bits;
-const int literal_context_bits = 3;
-const int pos_state_bits = 2;
-const int pos_states = 1 << pos_state_bits;
-const int pos_state_mask = pos_states - 1;
-
-const int dis_slot_bits = 6;
-const int start_dis_model = 4;
-const int end_dis_model = 14;
-const int modeled_distances = 1 << (end_dis_model / 2);
-const int dis_align_bits = 4;
-const int dis_align_size = 1 << dis_align_bits;
-
-const int len_low_bits = 3;
-const int len_mid_bits = 3;
-const int len_high_bits = 8;
-const int len_low_symbols = 1 << len_low_bits;
-const int len_mid_symbols = 1 << len_mid_bits;
-const int len_high_symbols = 1 << len_high_bits;
-const int max_len_symbols = len_low_symbols + len_mid_symbols + len_high_symbols;
-
-const int min_match_len = 2; // must be 2
-const int max_match_len = min_match_len + max_len_symbols - 1; // 273
-const int min_match_len_limit = 5;
-
-const int max_dis_states = 4;
+enum {
+ min_dictionary_bits = 12,
+ min_dictionary_size = 1 << min_dictionary_bits,
+ max_dictionary_bits = 29,
+ max_dictionary_size = 1 << max_dictionary_bits,
+ literal_context_bits = 3,
+ pos_state_bits = 2,
+ pos_states = 1 << pos_state_bits,
+ pos_state_mask = pos_states - 1,
+
+ dis_slot_bits = 6,
+ start_dis_model = 4,
+ end_dis_model = 14,
+ modeled_distances = 1 << (end_dis_model / 2),
+ dis_align_bits = 4,
+ dis_align_size = 1 << dis_align_bits,
+
+ len_low_bits = 3,
+ len_mid_bits = 3,
+ len_high_bits = 8,
+ len_low_symbols = 1 << len_low_bits,
+ len_mid_symbols = 1 << len_mid_bits,
+ len_high_symbols = 1 << len_high_bits,
+ max_len_symbols = len_low_symbols + len_mid_symbols + len_high_symbols,
+
+ min_match_len = 2, // must be 2
+ max_match_len = min_match_len + max_len_symbols - 1, // 273
+ min_match_len_limit = 5,
+
+ max_dis_states = 4 };
inline int get_dis_state( int len ) throw()
{
@@ -86,9 +93,9 @@ inline int get_dis_state( int len ) throw()
}
-const int bit_model_move_bits = 5;
-const int bit_model_total_bits = 11;
-const int bit_model_total = 1 << bit_model_total_bits;
+enum { bit_model_move_bits = 5,
+ bit_model_total_bits = 11,
+ bit_model_total = 1 << bit_model_total_bits };
struct Bit_model
{
@@ -101,12 +108,14 @@ class Pretty_print
{
const char * const stdin_name;
unsigned int longest_name;
+ const int verbosity_;
std::string name_;
mutable bool first_post;
public:
- Pretty_print( const std::vector< std::string > & filenames )
- : stdin_name( "(stdin)" ), longest_name( 0 ), first_post( false )
+ Pretty_print( const std::vector< std::string > & filenames, const int v )
+ : stdin_name( "(stdin)" ), longest_name( 0 ), verbosity_( v ),
+ first_post( false )
{
const unsigned int stdin_name_len = std::strlen( stdin_name );
for( unsigned int i = 0; i < filenames.size(); ++i )
@@ -127,6 +136,7 @@ public:
void reset() const throw() { if( name_.size() ) first_post = true; }
const char * name() const throw() { return name_.c_str(); }
+ int verbosity() const throw() { return verbosity_; }
void operator()( const char * const msg = 0 ) const throw();
};
@@ -160,6 +170,14 @@ public:
extern const CRC32 crc32;
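+// Returns the number of significant bits in 'value', i.e. 1 + the index
+// of its highest set bit; real_bits( 0 ) == 0, real_bits( 0x1F ) == 5.
+// The encoders call it as 2 * real_bits( dictionary_size - 1 ) to obtain
+// the number of distance slots used for the chosen dictionary size.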
+inline int real_bits( const int value ) throw()
+ {
+ int bits = 0;
+ for( int i = 1, mask = 1; mask > 0; ++i, mask <<= 1 )
+ if( value & mask ) bits = i;
+ return bits;
+ }
+
const uint8_t magic_string[4] = { 'L', 'Z', 'I', 'P' };
struct File_header
@@ -178,14 +196,6 @@ struct File_header
uint8_t version() const throw() { return data[4]; }
bool verify_version() const throw() { return ( data[4] <= 1 ); }
- static int real_bits( const int value ) throw()
- {
- int bits = 0;
- for( int i = 1, mask = 1; mask > 0; ++i, mask <<= 1 )
- if( value & mask ) bits = i;
- return bits;
- }
-
int dictionary_size() const throw()
{
int sz = ( 1 << ( data[5] & 0x1F ) );
@@ -261,13 +271,14 @@ struct File_trailer
struct Error
{
- const char * const s;
- Error( const char * const p ) throw() : s( p ) {}
+ const char * const msg;
+ Error( const char * const s ) throw() : msg( s ) {}
};
-extern int verbosity;
-void show_error( const char * const msg, const int errcode = 0, const bool help = false ) throw();
+// defined in main.cc lziprecover.cc
+void show_error( const char * const msg, const int errcode = 0,
+ const bool help = false ) throw();
void internal_error( const char * const msg );
int readblock( const int fd, uint8_t * const buf, const int size ) throw();
int writeblock( const int fd, const uint8_t * const buf, const int size ) throw();
diff --git a/lziprecover.cc b/lziprecover.cc
index 3acd46c..3f1b130 100644
--- a/lziprecover.cc
+++ b/lziprecover.cc
@@ -1,4 +1,4 @@
-/* Lziprecover - Member recoverer program for lzip compressed files
+/* Lziprecover - Data recovery tool for lzip compressed files
Copyright (C) 2008, 2009, 2010 Antonio Diaz Diaz.
This program is free software: you can redistribute it and/or modify
@@ -23,7 +23,9 @@
#define _FILE_OFFSET_BITS 64
+#include <algorithm>
#include <cerrno>
+#include <climits>
#include <cstdio>
#include <cstdlib>
#include <cstring>
@@ -33,9 +35,30 @@
#include <stdint.h>
#include <unistd.h>
#include <sys/stat.h>
+#if defined(__MSVCRT__)
+#define S_IRGRP 0
+#define S_IWGRP 0
+#define S_IROTH 0
+#define S_IWOTH 0
+#endif
#include "arg_parser.h"
#include "lzip.h"
+#include "decoder.h"
+
+#if CHAR_BIT != 8
+#error "Environments where CHAR_BIT != 8 are not supported."
+#endif
+
+#ifndef LLONG_MAX
+#define LLONG_MAX 0x7FFFFFFFFFFFFFFFLL
+#endif
+#ifndef LLONG_MIN
+#define LLONG_MIN (-LLONG_MAX - 1LL)
+#endif
+#ifndef ULLONG_MAX
+#define ULLONG_MAX 0xFFFFFFFFFFFFFFFFULL
+#endif
namespace {
@@ -51,18 +74,43 @@ const int o_binary = O_BINARY;
const int o_binary = 0;
#endif
+int verbosity = 0;
+
+
+class Block
+ {
+ long long pos_, size_; // pos + size <= LLONG_MAX
+
+public:
+ Block( const long long p, const long long s ) throw()
+ : pos_( p ), size_( s ) {}
+
+ long long pos() const throw() { return pos_; }
+ long long size() const throw() { return size_; }
+ long long end() const throw() { return pos_ + size_; }
+
+ void pos( const long long p ) throw() { pos_ = p; }
+ void size( const long long s ) throw() { size_ = s; }
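+  // Extend this block by one byte at its end and remove one byte from
+  // the front of b.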
+ void shift( Block & b ) throw() { ++size_; ++b.pos_; --b.size_; }
+ };
+
void show_help() throw()
{
- std::printf( "%s - Member recoverer program for lzip compressed files.\n", Program_name );
- std::printf( "\nSearches for members in .lz files, and writes each member in its own .lz\n" );
- std::printf( "file. You can then use `lzip -t' to test the integrity of the resulting\n" );
- std::printf( "files, and decompress those which are undamaged.\n" );
- std::printf( "\nUsage: %s [options] file\n", invocation_name );
- std::printf( "Options:\n" );
+ std::printf( "%s - Data recovery tool for lzip compressed files.\n", Program_name );
+ std::printf( "\nUsage: %s [options] [files]\n", invocation_name );
+ std::printf( "\nOptions:\n" );
std::printf( " -h, --help display this help and exit\n" );
std::printf( " -V, --version output version information and exit\n" );
+// std::printf( " -c, --create-recover-file create a recover file\n" );
+ std::printf( " -f, --force overwrite existing output files\n" );
+ std::printf( " -m, --merge correct errors in file using several copies\n" );
+ std::printf( " -o, --output=<file> place the output into <file>\n" );
std::printf( " -q, --quiet suppress all messages\n" );
+// std::printf( " -r, --recover correct errors in file using a recover file\n" );
+ std::printf( " -R, --repair try to repair a small error in file\n" );
+ std::printf( " -s, --split split a multimember file in single-member files\n" );
+// std::printf( " -u, --update convert file from version 0 to version 1\n" );
std::printf( " -v, --verbose be verbose (a 2nd -v gives more)\n" );
std::printf( "\nReport bugs to lzip-bug@nongnu.org\n");
std::printf( "Lzip home page: http://www.nongnu.org/lzip/lzip.html\n" );
@@ -95,7 +143,7 @@ int open_instream( const std::string & input_filename ) throw()
if( i < 0 || !S_ISREG( in_stats.st_mode ) )
{
if( verbosity >= 0 )
- std::fprintf( stderr, "%s: input file `%s' is not a regular file.\n",
+ std::fprintf( stderr, "%s: Input file `%s' is not a regular file.\n",
program_name, input_filename.c_str() );
close( infd );
infd = -1;
@@ -105,14 +153,21 @@ int open_instream( const std::string & input_filename ) throw()
}
-int open_outstream( const std::string & output_filename ) throw()
+int open_outstream( const std::string & output_filename,
+ const bool force ) throw()
{
- int outfd = open( output_filename.c_str(),
- O_CREAT | O_TRUNC | O_WRONLY | o_binary,
+ int flags = O_CREAT | O_RDWR | o_binary;
+ if( force ) flags |= O_TRUNC; else flags |= O_EXCL;
+
+ int outfd = open( output_filename.c_str(), flags,
S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH );
- if( outfd < 0 )
+ if( outfd < 0 && verbosity >= 0 )
{
- if( verbosity >= 0 )
+ if( errno == EEXIST )
+ std::fprintf( stderr, "%s: Output file `%s' already exists."
+ " Use `--force' to overwrite it.\n",
+ program_name, output_filename.c_str() );
+ else
std::fprintf( stderr, "%s: Can't create output file `%s': %s.\n",
program_name, output_filename.c_str(), std::strerror( errno ) );
}
@@ -120,36 +175,22 @@ int open_outstream( const std::string & output_filename ) throw()
}
-bool next_filename( std::string & output_filename )
- {
- for( int i = 7; i >= 3; --i ) // "rec00001"
- {
- if( output_filename[i] < '9' ) { ++output_filename[i]; return true; }
- else output_filename[i] = '0';
- }
- return false;
- }
-
-
-bool verify_header( const uint8_t * const buffer, const int pos )
+bool verify_header( const File_header & header )
{
- File_header header;
- for( int i = 0; i < File_header::size; ++i )
- header.data[i] = buffer[pos+i];
if( !header.verify_magic() )
{
- show_error( "bad magic number (file not in lzip format).\n" );
+ show_error( "Bad magic number (file not in lzip format)." );
return false;
}
if( header.version() == 0 )
{
- show_error( "version 0 member format can't be recovered.\n" );
+ show_error( "Version 0 member format can't be recovered." );
return false;
}
if( header.version() != 1 )
{
if( verbosity >= 0 )
- std::fprintf( stderr, "version %d member format not supported, newer %s needed.\n",
+ std::fprintf( stderr, "Version %d member format not supported, newer %s needed.\n",
header.version(), program_name );
return false;
}
@@ -157,7 +198,396 @@ bool verify_header( const uint8_t * const buffer, const int pos )
}
-int process_file( const std::string & input_filename, uint8_t * & base_buffer )
+bool verify_single_member( const int fd, const long long file_size )
+ {
+ File_header header;
+ if( lseek( fd, 0, SEEK_SET ) < 0 ||
+ readblock( fd, header.data, File_header::size ) != File_header::size )
+ { show_error( "Error reading member header", errno ); return false; }
+ if( !verify_header( header ) ) return false;
+
+ File_trailer trailer;
+ if( lseek( fd, -File_trailer::size(), SEEK_END ) < 0 ||
+ readblock( fd, trailer.data, File_trailer::size() ) != File_trailer::size() )
+ { show_error( "Error reading member trailer", errno ); return false; }
+ const long long member_size = trailer.member_size();
+ if( member_size != file_size )
+ {
+ if( member_size < file_size &&
+ lseek( fd, -member_size, SEEK_END ) > 0 &&
+ readblock( fd, header.data, File_header::size ) == File_header::size &&
+ verify_header( header ) )
+ show_error( "Input file has more than 1 member. Split it first." );
+ else
+ show_error( "Member size in input file trailer is corrupt." );
+ return false;
+ }
+ return true;
+ }
+
+
+bool try_decompress( const int fd, const long long file_size,
+ long long * failure_pos = 0 )
+ {
+ try {
+ Range_decoder rdec( fd );
+ File_header header;
+ rdec.reset_member_position();
+ for( int i = 0; i < File_header::size; ++i )
+ header.data[i] = rdec.get_byte();
+ if( !rdec.finished() && // End Of File
+ header.verify_magic() &&
+ header.version() == 1 &&
+ header.dictionary_size() >= min_dictionary_size &&
+ header.dictionary_size() <= max_dictionary_size )
+ {
+ LZ_decoder decoder( header, rdec, -1 );
+ std::vector< std::string > dummy_filenames;
+ Pretty_print dummy( dummy_filenames, -1 );
+
+ if( decoder.decode_member( dummy ) == 0 &&
+ rdec.member_position() == file_size ) return true;
+ if( failure_pos ) *failure_pos = rdec.member_position();
+ }
+ }
+ catch( std::bad_alloc )
+ {
+ show_error( "Not enough memory. Find a machine with more memory." );
+ std::exit( 1 );
+ }
+ catch( Error e ) {}
+ return false;
+ }
+
+
+bool copy_and_diff_file( const std::vector< int > & infd_vector,
+ const int outfd, std::vector< Block > & block_vector )
+ {
+ const int buffer_size = 65536;
+ std::vector< uint8_t * > buffer_vector( infd_vector.size() );
+ for( unsigned int i = 0; i < infd_vector.size(); ++i )
+ buffer_vector[i] = new uint8_t[buffer_size];
+ Block b( 0, 0 );
+ long long partial_pos = 0;
+ int equal_bytes = 0;
+ bool error = false;
+
+ while( !error )
+ {
+ const int rd = readblock( infd_vector[0], buffer_vector[0], buffer_size );
+ if( rd != buffer_size && errno )
+ { show_error( "Error reading input file", errno ); error = true; }
+ if( rd > 0 )
+ {
+ for( unsigned int i = 1; i < infd_vector.size(); ++i )
+ if( readblock( infd_vector[i], buffer_vector[i], rd ) != rd )
+ { show_error( "Error reading input file", errno ); error = true; }
+ const int wr = writeblock( outfd, buffer_vector[0], rd );
+ if( wr != rd )
+ { show_error( "Error writing output file", errno ); error = true; }
+ for( int i = 0; i < rd; ++i )
+ {
+ while( i < rd && b.pos() == 0 )
+ {
+ for( unsigned int j = 1; j < infd_vector.size(); ++j )
+ if( buffer_vector[0][i] != buffer_vector[j][i] )
+ { b.pos( partial_pos + i ); break; } // begin block
+ ++i;
+ }
+ while( i < rd && b.pos() > 0 )
+ {
+ ++equal_bytes;
+ for( unsigned int j = 1; j < infd_vector.size(); ++j )
+ if( buffer_vector[0][i] != buffer_vector[j][i] )
+ { equal_bytes = 0; break; }
+ if( equal_bytes >= 2 ) // end block
+ {
+ b.size( partial_pos + i - ( equal_bytes - 1 ) - b.pos() );
+ block_vector.push_back( b );
+ b.pos( 0 );
+ equal_bytes = 0;
+ }
+ ++i;
+ }
+ }
+ partial_pos += rd;
+ }
+ if( rd < buffer_size ) break; // EOF
+ }
+ if( b.pos() > 0 ) // finish last block
+ {
+ b.size( partial_pos - b.pos() );
+ block_vector.push_back( b );
+ }
+ for( unsigned int i = 0; i < infd_vector.size(); ++i )
+ delete[] buffer_vector[i];
+ return !error;
+ }
+
+
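+// Copies up to 'size' bytes from infd to outfd.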
+bool copy_file( const int infd, const int outfd,
+ const long long size = LLONG_MAX )
+ {
+ long long rest = size;
+ const int buffer_size = 65536;
+ uint8_t * const buffer = new uint8_t[buffer_size];
+ bool error = false;
+
+ while( !error )
+ {
+ const int block_size = std::min( (long long)buffer_size, rest );
+ if( block_size <= 0 ) break;
+ const int rd = readblock( infd, buffer, block_size );
+ if( rd != block_size && errno )
+ { show_error( "Error reading input file", errno ); error = true; }
+ if( rd > 0 )
+ {
+ const int wr = writeblock( outfd, buffer, rd );
+ if( wr != rd )
+ { show_error( "Error writing output file", errno ); error = true; }
+ rest -= rd;
+ }
+ if( rd < block_size ) break; // EOF
+ }
+ delete[] buffer;
+ return !error;
+ }
+
+
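+// Inserts "_fixed" before the ".lz" or ".tlz" extension of 'name', or
+// appends "_fixed.lz" if it has no such extension.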
+std::string insert_fixed( std::string name ) throw()
+ {
+ if( name.size() > 4 && name.compare( name.size() - 4, 4, ".tlz" ) == 0 )
+ name.insert( name.size() - 4, "_fixed" );
+ else if( name.size() > 3 && name.compare( name.size() - 3, 3, ".lz" ) == 0 )
+ name.insert( name.size() - 3, "_fixed" );
+ else name += "_fixed.lz";
+ return name;
+ }
+
+
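+// Integer exponentiation, saturating at INT_MAX on overflow.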
+int ipow( const unsigned int base, const unsigned int exponent ) throw()
+ {
+ int result = 1;
+ for( unsigned int i = 0; i < exponent; ++i )
+ {
+ if( INT_MAX / base >= (unsigned int)result ) result *= base;
+ else { result = INT_MAX; break; }
+ }
+ return result;
+ }
+
+
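+// Merges the copies named in 'filenames' into 'output_filename', trying
+// combinations of the differing areas from the copies until the result
+// decompresses correctly.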
+int merge_files( const std::vector< std::string > & filenames,
+ const std::string & output_filename, const bool force )
+ {
+ std::vector< int > infd_vector( filenames.size() );
+ for( unsigned int i = 0; i < filenames.size(); ++i )
+ {
+ infd_vector[i] = open_instream( filenames[i] );
+ if( infd_vector[i] < 0 ) return 1;
+ }
+ long long isize = 0;
+ for( unsigned int i = 0; i < filenames.size(); ++i )
+ {
+ const long long tmp = lseek( infd_vector[i], 0, SEEK_END );
+ if( tmp < 0 )
+ {
+ if( verbosity >= 0 )
+ std::fprintf( stderr, "File `%s' is not seekable.\n", filenames[i].c_str() );
+ return 1;
+ }
+ if( i == 0 ) isize = tmp;
+ else if( isize != tmp )
+ { show_error( "Sizes of input files are different." ); return 1; }
+ }
+ if( isize < 36 )
+ { show_error( "Input file is too short." ); return 2; }
+ for( unsigned int i = 0; i < filenames.size(); ++i )
+ if( !verify_single_member( infd_vector[i], isize ) )
+ return 2;
+ for( unsigned int i = 0; i < filenames.size(); ++i )
+ if( lseek( infd_vector[i], 0, SEEK_SET ) < 0 )
+ { show_error( "Seek error in input file", errno ); return 1; }
+ for( unsigned int i = 0; i < filenames.size(); ++i )
+ if( try_decompress( infd_vector[i], isize ) )
+ {
+ if( verbosity >= 0 )
+ std::printf( "File `%s' has no errors. Recovery is not needed.\n",
+ filenames[i].c_str() );
+ return 0;
+ }
+
+ const int outfd = open_outstream( output_filename, force );
+ if( outfd < 0 ) return 1;
+ for( unsigned int i = 0; i < filenames.size(); ++i )
+ if( lseek( infd_vector[i], 0, SEEK_SET ) < 0 )
+ { show_error( "Seek error in input file", errno ); return 1; }
+
+ // vector of data blocks differing among the copies of the input file.
+ std::vector< Block > block_vector;
+ if( !copy_and_diff_file( infd_vector, outfd, block_vector ) ) return 1;
+
+ if( !block_vector.size() )
+ { show_error( "Input files are identical. Recovery is not possible." );
+ return 1; }
+
+ const bool single_block = ( block_vector.size() == 1 );
+ if( single_block && block_vector[0].size() < 2 )
+ { show_error( "Input files have the same byte damaged."
+ " Try repairing one of them." );
+ return 1; }
+
+ if( ipow( filenames.size(), block_vector.size() ) >= INT_MAX ||
+ ( single_block &&
+ ipow( filenames.size(), 2 ) >= INT_MAX / block_vector[0].size() ) )
+ { show_error( "Input files are too damaged. Recovery is not possible." );
+ return 1; }
+
+ const int shifts = ( single_block ? block_vector[0].size() - 1 : 1 );
+ if( single_block )
+ {
+ Block b( block_vector[0].pos() + 1, block_vector[0].size() - 1 );
+ block_vector[0].size( 1 );
+ block_vector.push_back( b );
+ }
+
+ const int base_variations = ipow( filenames.size(), block_vector.size() );
+ const int variations = ( base_variations * shifts ) - 2;
+ bool done = false;
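+  // Try each combination of differing blocks taken from the input copies.
+  // With a single differing block, the split point between its two parts is
+  // also shifted after every full round of combinations.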
+ for( int var = 1; var <= variations; ++var )
+ {
+ if( verbosity >= 0 )
+ {
+ std::printf( "Trying variation %d of %d \r", var, variations );
+ std::fflush( stdout );
+ }
+ int tmp = var;
+ for( unsigned int i = 0; i < block_vector.size(); ++i )
+ {
+ const int infd = infd_vector[tmp % filenames.size()];
+ tmp /= filenames.size();
+ if( lseek( infd, block_vector[i].pos(), SEEK_SET ) < 0 ||
+ lseek( outfd, block_vector[i].pos(), SEEK_SET ) < 0 ||
+ !copy_file( infd, outfd, block_vector[i].size() ) )
+ { show_error( "Error reading output file", errno ); return 1; }
+ }
+ if( lseek( outfd, 0, SEEK_SET ) < 0 )
+ { show_error( "Seek error in output file", errno ); return 1; }
+ if( try_decompress( outfd, isize ) )
+ { done = true; break; }
+ if( var % base_variations == 0 ) block_vector[0].shift( block_vector[1] );
+ }
+ if( verbosity >= 0 ) std::printf( "\n" );
+
+ if( close( outfd ) != 0 )
+ { show_error( "Error closing output file", errno ); return 1; }
+ if( done )
+ {
+ if( verbosity >= 0 )
+ std::printf( "Input files merged successfully.\n" );
+ return 0;
+ }
+ else
+ {
+ std::remove( output_filename.c_str() );
+ show_error( "Some error areas overlap. Can't recover input file." );
+ return 2;
+ }
+ }
+
+
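+// Writes a copy of 'input_filename' to 'output_filename' and tries to repair
+// a one-byte error near the position where decoding first fails.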
+int repair_file( const std::string & input_filename,
+ const std::string & output_filename, const bool force )
+ {
+ const int infd = open_instream( input_filename );
+ if( infd < 0 ) return 1;
+ const long long isize = lseek( infd, 0, SEEK_END );
+ if( isize < 0 )
+ { show_error( "Input file is not seekable", errno ); return 1; }
+ if( isize < 36 )
+ { show_error( "Input file is too short." ); return 2; }
+ if( !verify_single_member( infd, isize ) ) return 2;
+ if( lseek( infd, 0, SEEK_SET ) < 0 )
+ { show_error( "Seek error in input file", errno ); return 1; }
+ long long failure_pos = 0;
+ if( try_decompress( infd, isize, &failure_pos ) )
+ {
+ if( verbosity >= 0 )
+ std::printf( "Input file has no errors. Recovery is not needed.\n" );
+ return 0;
+ }
+ if( failure_pos >= isize - 8 ) failure_pos = isize - 8 - 1;
+ if( failure_pos < File_header::size )
+ { show_error( "Can't repair error in input file." ); return 2; }
+
+ const int outfd = open_outstream( output_filename, force );
+ if( outfd < 0 ) { close( infd ); return 1; }
+ if( lseek( infd, 0, SEEK_SET ) < 0 )
+ { show_error( "Seek error in input file", errno ); return 1; }
+ if( !copy_file( infd, outfd ) ) return 1;
+
+ const long long min_pos =
+ std::max( (long long)File_header::size, failure_pos - 1000 );
+ bool done = false;
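+  // Starting at the failure position and moving backwards up to 1000 bytes,
+  // try the 255 alternative values of each byte until the copy decompresses.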
+ for( long long pos = failure_pos; pos >= min_pos; --pos )
+ {
+ if( verbosity >= 0 )
+ {
+ std::printf( "Trying position %lld \r", pos );
+ std::fflush( stdout );
+ }
+ uint8_t byte;
+ if( lseek( outfd, pos, SEEK_SET ) < 0 ||
+ readblock( outfd, &byte, 1 ) != 1 )
+ { show_error( "Error reading output file", errno ); return 1; }
+ for( int i = 0; i < 255; ++i )
+ {
+ ++byte;
+ if( lseek( outfd, pos, SEEK_SET ) < 0 ||
+ writeblock( outfd, &byte, 1 ) != 1 ||
+ lseek( outfd, 0, SEEK_SET ) < 0 )
+ { show_error( "Error writing output file", errno ); return 1; }
+ if( try_decompress( outfd, isize ) )
+ { done = true; break; }
+ }
+ if( done ) break;
+ ++byte;
+ if( lseek( outfd, pos, SEEK_SET ) < 0 ||
+ writeblock( outfd, &byte, 1 ) != 1 )
+ { show_error( "Error writing output file", errno ); return 1; }
+ }
+ if( verbosity >= 0 ) std::printf( "\n" );
+
+ if( close( outfd ) != 0 )
+ { show_error( "Error closing output file", errno ); return 1; }
+ if( done )
+ {
+ if( verbosity >= 0 )
+ std::printf( "Copy of input file repaired successfully.\n" );
+ return 0;
+ }
+ else
+ {
+ std::remove( output_filename.c_str() );
+ show_error( "Error is larger than 1 byte. Can't repair input file." );
+ return 2;
+ }
+ }
+
+
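+// Increments the "recNNNNN" counter at the start of output_filename.
+// Returns false if the counter overflows.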
+bool next_filename( std::string & output_filename )
+ {
+ for( int i = 7; i >= 3; --i ) // "rec00001"
+ {
+ if( output_filename[i] < '9' ) { ++output_filename[i]; return true; }
+ else output_filename[i] = '0';
+ }
+ return false;
+ }
+
+
+int do_split_file( const std::string & input_filename, uint8_t * & base_buffer,
+ const std::string & default_output_filename, const bool force )
{
const int hsize = File_header::size;
const int tsize = File_trailer::size();
@@ -171,13 +601,17 @@ int process_file( const std::string & input_filename, uint8_t * & base_buffer )
int size = readblock( infd, buffer, buffer_size + hsize ) - hsize;
bool at_stream_end = ( size < buffer_size );
if( size != buffer_size && errno )
- { show_error( "read error", errno ); return 1; }
+ { show_error( "Read error", errno ); return 1; }
if( size <= tsize )
- { show_error( "file too short" ); return 2; }
- if( !verify_header( buffer, 0 ) ) return 2;
+ { show_error( "Input file is too short." ); return 2; }
+ File_header header;
+ for( int i = 0; i < File_header::size; ++i )
+ header.data[i] = buffer[i];
+ if( !verify_header( header ) ) return 2;
- std::string output_filename( "rec00001" ); output_filename += input_filename;
- int outfd = open_outstream( output_filename );
+ std::string output_filename( "rec00001" );
+ output_filename += default_output_filename;
+ int outfd = open_outstream( output_filename, force );
if( outfd < 0 ) { close( infd ); return 1; }
long long partial_member_size = 0;
@@ -197,12 +631,12 @@ int process_file( const std::string & input_filename, uint8_t * & base_buffer )
{ // header found
const int wr = writeblock( outfd, buffer + pos, newpos - pos );
if( wr != newpos - pos )
- { show_error( "write error", errno ); return 1; }
+ { show_error( "Write error", errno ); return 1; }
if( close( outfd ) != 0 )
- { show_error( "error closing output file", errno ); return 1; }
+ { show_error( "Error closing output file", errno ); return 1; }
if( !next_filename( output_filename ) )
- { show_error( "too many members in file" ); close( infd ); return 1; }
- outfd = open_outstream( output_filename );
+ { show_error( "Too many members in file." ); close( infd ); return 1; }
+ outfd = open_outstream( output_filename, force );
if( outfd < 0 ) { close( infd ); return 1; }
partial_member_size = 0;
pos = newpos;
@@ -213,7 +647,7 @@ int process_file( const std::string & input_filename, uint8_t * & base_buffer )
{
const int wr = writeblock( outfd, buffer + pos, size + hsize - pos );
if( wr != size + hsize - pos )
- { show_error( "write error", errno ); return 1; }
+ { show_error( "Write error", errno ); return 1; }
break;
}
if( pos < buffer_size )
@@ -221,46 +655,72 @@ int process_file( const std::string & input_filename, uint8_t * & base_buffer )
partial_member_size += buffer_size - pos;
const int wr = writeblock( outfd, buffer + pos, buffer_size - pos );
if( wr != buffer_size - pos )
- { show_error( "write error", errno ); return 1; }
+ { show_error( "Write error", errno ); return 1; }
}
std::memcpy( base_buffer, base_buffer + buffer_size, tsize + hsize );
size = readblock( infd, buffer + hsize, buffer_size );
at_stream_end = ( size < buffer_size );
if( size != buffer_size && errno )
- { show_error( "read error", errno ); return 1; }
+ { show_error( "Read error", errno ); return 1; }
}
close( infd );
if( close( outfd ) != 0 )
- { show_error( "error closing output file", errno ); return 1; }
+ { show_error( "Error closing output file", errno ); return 1; }
return 0;
}
+
+int split_file( const std::string & input_filename,
+ const std::string & default_output_filename, const bool force )
+ {
+ uint8_t * base_buffer;
+ const int retval = do_split_file( input_filename, base_buffer,
+ default_output_filename, force );
+ delete[] base_buffer;
+ return retval;
+ }
+
} // end namespace
-int verbosity = 0;
+void Pretty_print::operator()( const char * const msg ) const throw()
+ {
+ if( verbosity_ >= 0 )
+ {
+ if( first_post )
+ {
+ first_post = false;
+ std::fprintf( stderr, " %s: ", name_.c_str() );
+ for( unsigned int i = 0; i < longest_name - name_.size(); ++i )
+ std::fprintf( stderr, " " );
+ if( !msg ) std::fflush( stderr );
+ }
+ if( msg ) std::fprintf( stderr, "%s.\n", msg );
+ }
+ }
void show_error( const char * const msg, const int errcode, const bool help ) throw()
{
if( verbosity >= 0 )
{
- if( msg && msg[0] != 0 )
+ if( msg && msg[0] )
{
std::fprintf( stderr, "%s: %s", program_name, msg );
- if( errcode > 0 ) std::fprintf( stderr, ": %s", std::strerror( errcode ) );
+ if( errcode > 0 )
+ std::fprintf( stderr, ": %s", std::strerror( errcode ) );
std::fprintf( stderr, "\n" );
}
- if( help && invocation_name && invocation_name[0] != 0 )
- std::fprintf( stderr, "Try `%s --help' for more information.\n", invocation_name );
+ if( help && invocation_name && invocation_name[0] )
+ std::fprintf( stderr, "Try `%s --help' for more information.\n",
+ invocation_name );
}
}
void internal_error( const char * const msg )
{
- std::string s( "internal error: " ); s += msg;
- show_error( s.c_str() );
+ std::fprintf( stderr, "%s: internal error: %s.\n", program_name, msg );
std::exit( 3 );
}
@@ -304,12 +764,22 @@ int writeblock( const int fd, const uint8_t * const buf, const int size ) throw(
int main( const int argc, const char * const argv[] )
{
+ enum Mode
+ { m_none, m_create, m_merge, m_recover, m_repair, m_split, m_update };
+ Mode program_mode = m_none;
+ bool force = false;
+ std::string default_output_filename;
invocation_name = argv[0];
const Arg_parser::Option options[] =
{
+ { 'f', "force", Arg_parser::no },
{ 'h', "help", Arg_parser::no },
+ { 'm', "merge", Arg_parser::no },
+ { 'o', "output", Arg_parser::yes },
{ 'q', "quiet", Arg_parser::no },
+ { 'R', "repair", Arg_parser::no },
+ { 's', "split", Arg_parser::no },
{ 'v', "verbose", Arg_parser::no },
{ 'V', "version", Arg_parser::no },
{ 0 , 0, Arg_parser::no } };
@@ -325,20 +795,49 @@ int main( const int argc, const char * const argv[] )
if( !code ) break; // no more options
switch( code )
{
+ case 'f': force = true; break;
case 'h': show_help(); return 0;
+ case 'm': program_mode = m_merge; break;
+ case 'o': default_output_filename = parser.argument( argind ); break;
case 'q': verbosity = -1; break;
+ case 'R': program_mode = m_repair; break;
+ case 's': program_mode = m_split; break;
case 'v': if( verbosity < 4 ) ++verbosity; break;
case 'V': show_version(); return 0;
default : internal_error( "uncaught option" );
}
}
+ if( program_mode == m_merge )
+ {
+ std::vector< std::string > filenames;
+ for( ; argind < parser.arguments(); ++argind )
+ filenames.push_back( parser.argument( argind ) );
+ if( filenames.size() < 2 )
+ { show_error( "You must specify at least 2 files.", 0, true ); return 1; }
+ if( !default_output_filename.size() )
+ default_output_filename = insert_fixed( filenames[0] );
+ return merge_files( filenames, default_output_filename, force );
+ }
+
if( argind + 1 != parser.arguments() )
- { show_error( "you must specify exactly 1 file", 0, true ); return 1; }
+ { show_error( "You must specify exactly 1 file.", 0, true ); return 1; }
- uint8_t * base_buffer;
- const int retval = process_file( parser.argument( argind ), base_buffer );
+ if( program_mode == m_repair )
+ {
+ if( !default_output_filename.size() )
+ default_output_filename = insert_fixed( parser.argument( argind ) );
+ return repair_file( parser.argument( argind ), default_output_filename, force );
+ }
- delete[] base_buffer;
- return retval;
+ if( program_mode == m_split )
+ {
+ if( !default_output_filename.size() )
+ default_output_filename = parser.argument( argind );
+ return split_file( parser.argument( argind ), default_output_filename, force );
+ }
+
+  show_error( "You must specify the operation to be performed on the file.",
+ 0, true );
+ return 1;
}
diff --git a/main.cc b/main.cc
index 4191d6b..193f4ac 100644
--- a/main.cc
+++ b/main.cc
@@ -1,4 +1,4 @@
-/* Lzip - A data compressor based on the LZMA algorithm
+/* Lzip - Data compressor based on the LZMA algorithm
Copyright (C) 2008, 2009, 2010 Antonio Diaz Diaz.
This program is free software: you can redistribute it and/or modify
@@ -37,11 +37,26 @@
#include <unistd.h>
#include <utime.h>
#include <sys/stat.h>
+#if defined(__MSVCRT__)
+#include <io.h>
+#define fchmod(x,y) 0
+#define fchown(x,y,z) 0
+#define SIGHUP SIGTERM
+#define S_ISSOCK(x) 0
+#define S_IRGRP 0
+#define S_IWGRP 0
+#define S_IROTH 0
+#define S_IWOTH 0
+#endif
+#if defined(__OS2__)
+#include <io.h>
+#endif
#include "arg_parser.h"
#include "lzip.h"
#include "decoder.h"
#include "encoder.h"
+#include "fast_encoder.h"
#if CHAR_BIT != 8
#error "Environments where CHAR_BIT != 8 are not supported."
@@ -82,17 +97,18 @@ struct Lzma_options
int match_len_limit; // 5..273
};
-enum Mode { m_compress = 0, m_decompress, m_test };
+enum Mode { m_compress, m_decompress, m_test };
std::string output_filename;
int outfd = -1;
+int verbosity = 0;
mode_t outfd_mode = S_IRUSR | S_IWUSR;
bool delete_output_on_interrupt = false;
void show_help() throw()
{
- std::printf( "%s - A data compressor based on the LZMA algorithm.\n", Program_name );
+ std::printf( "%s - Data compressor based on the LZMA algorithm.\n", Program_name );
std::printf( "\nUsage: %s [options] [files]\n", invocation_name );
std::printf( "\nOptions:\n" );
std::printf( " -h, --help display this help and exit\n" );
@@ -102,15 +118,15 @@ void show_help() throw()
std::printf( " -d, --decompress decompress\n" );
std::printf( " -f, --force overwrite existing output files\n" );
std::printf( " -k, --keep keep (don't delete) input files\n" );
- std::printf( " -m, --match-length=<n> set match length limit in bytes [80]\n" );
+ std::printf( " -m, --match-length=<n> set match length limit in bytes [36]\n" );
std::printf( " -o, --output=<file> if reading stdin, place the output into <file>\n" );
std::printf( " -q, --quiet suppress all messages\n" );
std::printf( " -s, --dictionary-size=<n> set dictionary size limit in bytes [8MiB]\n" );
std::printf( " -S, --volume-size=<n> set volume size limit in bytes\n" );
std::printf( " -t, --test test compressed file integrity\n" );
std::printf( " -v, --verbose be verbose (a 2nd -v gives more)\n" );
- std::printf( " -1 .. -9 set compression level [default 6]\n" );
- std::printf( " --fast alias for -1\n" );
+ std::printf( " -0 .. -9 set compression level [default 6]\n" );
+ std::printf( " --fast alias for -0\n" );
std::printf( " --best alias for -9\n" );
std::printf( "If no file names are given, %s compresses or decompresses\n", program_name );
std::printf( "from standard input to standard output.\n" );
@@ -131,26 +147,19 @@ void show_version() throw()
}
-const char * format_num( long long num, long long limit = 9999,
- const int set_prefix = 0 ) throw()
+const char * format_num( long long num ) throw()
{
- const char * const si_prefix[8] =
- { "k", "M", "G", "T", "P", "E", "Z", "Y" };
- const char * const binary_prefix[8] =
+ const char * const prefix[8] =
{ "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi" };
- static bool si = false;
- static char buf[16];
-
- if( set_prefix ) si = ( set_prefix > 0 );
- const int factor = ( si ) ? 1000 : 1024;
- const char * const *prefix = ( si ) ? si_prefix : binary_prefix;
+ enum { buf_size = 16 };
+ static char buf[buf_size];
const char *p = "";
- limit = std::max( 999LL, std::min( 999999LL, limit ) );
+ const int factor = 1024;
- for( int i = 0; i < 8 && ( llabs( num ) > limit ||
+ for( int i = 0; i < 8 && ( llabs( num ) > 9999 ||
( llabs( num ) >= factor && num % factor == 0 ) ); ++i )
{ num /= factor; p = prefix[i]; }
- snprintf( buf, sizeof buf, "%lld %s", num, p );
+ snprintf( buf, buf_size, "%lld %s", num, p );
return buf;
}
@@ -164,7 +173,7 @@ long long getnum( const char * const ptr, const int bs = 0,
long long result = strtoll( ptr, &tail, 0 );
if( tail == ptr )
{
- show_error( "bad or missing numerical argument", 0, true );
+ show_error( "Bad or missing numerical argument.", 0, true );
std::exit( 1 );
}
@@ -194,7 +203,7 @@ long long getnum( const char * const ptr, const int bs = 0,
}
if( bad_multiplier )
{
- show_error( "bad multiplier in numerical argument", 0, true );
+ show_error( "Bad multiplier in numerical argument.", 0, true );
std::exit( 1 );
}
for( int i = 0; i < exponent; ++i )
@@ -206,7 +215,7 @@ long long getnum( const char * const ptr, const int bs = 0,
if( !errno && ( result < llimit || result > ulimit ) ) errno = ERANGE;
if( errno )
{
- show_error( "numerical argument out of limits" );
+ show_error( "Numerical argument out of limits." );
std::exit( 1 );
}
return result;
@@ -245,7 +254,7 @@ int open_instream( const std::string & name, struct stat * const in_statsp,
if( program_mode == m_compress && !force && eindex >= 0 )
{
if( verbosity >= 0 )
- std::fprintf( stderr, "%s: input file `%s' already has `%s' suffix.\n",
+ std::fprintf( stderr, "%s: Input file `%s' already has `%s' suffix.\n",
program_name, name.c_str(),
known_extensions[eindex].from );
}
@@ -267,7 +276,7 @@ int open_instream( const std::string & name, struct stat * const in_statsp,
S_ISBLK( mode ) || S_ISCHR( mode ) ) ) ) )
{
if( verbosity >= 0 )
- std::fprintf( stderr, "%s: input file `%s' is not a regular file%s.\n",
+ std::fprintf( stderr, "%s: Input file `%s' is not a regular file%s.\n",
program_name, name.c_str(),
to_stdout ? "" : " and `--stdout' was not specified" );
close( infd );
@@ -301,7 +310,7 @@ void set_d_outname( const std::string & name, const int i ) throw()
}
output_filename = name; output_filename += ".out";
if( verbosity >= 0 )
- std::fprintf( stderr, "%s: can't guess original name for `%s' -- using `%s'.\n",
+ std::fprintf( stderr, "%s: Can't guess original name for `%s' -- using `%s'.\n",
program_name, name.c_str(), output_filename.c_str() );
}
@@ -312,18 +321,14 @@ bool open_outstream( const bool force ) throw()
if( force ) flags |= O_TRUNC; else flags |= O_EXCL;
outfd = open( output_filename.c_str(), flags, outfd_mode );
- if( outfd < 0 )
+ if( outfd < 0 && verbosity >= 0 )
{
- if( errno == EEXIST ) outfd = -2; else outfd = -1;
- if( verbosity >= 0 )
- {
- if( outfd == -2 )
- std::fprintf( stderr, "%s: Output file %s already exists, skipping.\n",
- program_name, output_filename.c_str() );
- else
- std::fprintf( stderr, "%s: Can't create output file `%s': %s.\n",
- program_name, output_filename.c_str(), std::strerror( errno ) );
- }
+ if( errno == EEXIST )
+ std::fprintf( stderr, "%s: Output file `%s' already exists, skipping.\n",
+ program_name, output_filename.c_str() );
+ else
+ std::fprintf( stderr, "%s: Can't create output file `%s': %s.\n",
+ program_name, output_filename.c_str(), std::strerror( errno ) );
}
return ( outfd >= 0 );
}
@@ -331,7 +336,7 @@ bool open_outstream( const bool force ) throw()
bool check_tty( const int infd, const Mode program_mode ) throw()
{
- if( program_mode == m_compress && isatty( outfd ) )
+ if( program_mode == m_compress && outfd >= 0 && isatty( outfd ) )
{
show_error( "I won't write compressed data to a terminal.", 0, true );
return false;
@@ -368,8 +373,9 @@ void close_and_set_permissions( const struct stat * const in_statsp )
bool error = false;
if( in_statsp )
{
- if( fchmod( outfd, in_statsp->st_mode ) != 0 ) error = true;
- else (void)fchown( outfd, in_statsp->st_uid, in_statsp->st_gid );
+ if( fchmod( outfd, in_statsp->st_mode ) != 0 ||
+ ( fchown( outfd, in_statsp->st_uid, in_statsp->st_gid ) != 0 &&
+ errno != EPERM ) ) error = true;
// fchown will in many cases return with EPERM, which can be safely ignored.
}
if( close( outfd ) == 0 ) outfd = -1;
@@ -385,7 +391,7 @@ void close_and_set_permissions( const struct stat * const in_statsp )
}
if( error )
{
- show_error( "I can't change output file attributes." );
+ show_error( "Can't change output file attributes." );
cleanup_and_fail( 1 );
}
}
@@ -429,7 +435,7 @@ int compress( const long long member_size, const long long volume_size,
const long long size =
std::min( member_size, volume_size - partial_volume_size );
if( !encoder.encode_member( size ) )
- { pp(); show_error( "read error", errno ); retval = 1; break; }
+ { pp( "Encoder error" ); retval = 1; break; }
in_size += matchfinder.data_position();
out_size += encoder.member_position();
if( matchfinder.finished() ) break;
@@ -441,20 +447,18 @@ int compress( const long long member_size, const long long volume_size,
{
close_and_set_permissions( in_statsp );
if( !next_filename() )
- { pp(); show_error( "too many volume files" ); retval = 1; break; }
+ { pp( "Too many volume files" ); retval = 1; break; }
if( !open_outstream( true ) ) { retval = 1; break; }
delete_output_on_interrupt = true;
}
}
- if( !matchfinder.reset() )
- { pp(); show_error( "can't reset matchfinder", errno );
- retval = 1; break; }
+ matchfinder.reset();
}
if( retval == 0 && verbosity >= 1 )
{
if( in_size <= 0 || out_size <= 0 )
- std::fprintf( stderr, "no data compressed.\n" );
+ std::fprintf( stderr, "No data compressed.\n" );
else
std::fprintf( stderr, "%6.3f:1, %6.3f bits/byte, "
"%5.2f%% saved, %lld in, %lld out.\n",
@@ -466,10 +470,73 @@ int compress( const long long member_size, const long long volume_size,
}
catch( std::bad_alloc )
{
- pp( "not enough memory. Try a smaller dictionary size" );
+ pp( "Not enough memory. Try a smaller dictionary size" );
retval = 1;
}
- catch( Error e ) { pp(); show_error( e.s, errno ); retval = 1; }
+ catch( Error e ) { pp(); show_error( e.msg, errno ); retval = 1; }
+ return retval;
+ }
+
+
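+// Like compress() above, but uses the fast encoder selected by option '-0'.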
+int fcompress( const long long member_size, const long long volume_size,
+ const int infd,
+ const Pretty_print & pp, const struct stat * const in_statsp )
+ {
+ if( verbosity >= 1 ) pp();
+ File_header header;
+ header.set_magic();
+ int retval = 0;
+
+ try {
+ Fmatchfinder fmatchfinder( infd );
+ header.dictionary_size( fmatchfinder.dictionary_size() );
+
+ long long in_size = 0, out_size = 0, partial_volume_size = 0;
+ while( true ) // encode one member per iteration
+ {
+ FLZ_encoder encoder( fmatchfinder, header, outfd );
+ const long long size =
+ std::min( member_size, volume_size - partial_volume_size );
+ if( !encoder.encode_member( size ) )
+ { pp( "Encoder error" ); retval = 1; break; }
+ in_size += fmatchfinder.data_position();
+ out_size += encoder.member_position();
+ if( fmatchfinder.finished() ) break;
+ partial_volume_size += encoder.member_position();
+ if( partial_volume_size >= volume_size - min_dictionary_size )
+ {
+ partial_volume_size = 0;
+ if( delete_output_on_interrupt )
+ {
+ close_and_set_permissions( in_statsp );
+ if( !next_filename() )
+ { pp( "Too many volume files" ); retval = 1; break; }
+ if( !open_outstream( true ) ) { retval = 1; break; }
+ delete_output_on_interrupt = true;
+ }
+ }
+ fmatchfinder.reset();
+ }
+
+ if( retval == 0 && verbosity >= 1 )
+ {
+ if( in_size <= 0 || out_size <= 0 )
+ std::fprintf( stderr, "No data compressed.\n" );
+ else
+ std::fprintf( stderr, "%6.3f:1, %6.3f bits/byte, "
+ "%5.2f%% saved, %lld in, %lld out.\n",
+ (double)in_size / out_size,
+ ( 8.0 * out_size ) / in_size,
+ 100.0 * ( 1.0 - ( (double)out_size / in_size ) ),
+ in_size, out_size );
+ }
+ }
+ catch( std::bad_alloc )
+ {
+ pp( "Not enough memory. Try a smaller dictionary size" );
+ retval = 1;
+ }
+ catch( Error e ) { pp(); show_error( e.msg, errno ); retval = 1; }
return retval;
}
@@ -489,26 +556,26 @@ int decompress( const int infd, const Pretty_print & pp, const bool testing )
header.data[i] = rdec.get_byte();
if( rdec.finished() ) // End Of File
{
- if( first_member ) { pp( "error reading member header" ); retval = 1; }
+ if( first_member ) { pp( "Error reading member header" ); retval = 1; }
break;
}
if( !header.verify_magic() )
{
if( !first_member ) break; // trailing garbage
- pp( "bad magic number (file not in lzip format)" );
+ pp( "Bad magic number (file not in lzip format)" );
retval = 2; break;
}
if( !header.verify_version() )
{
if( verbosity >= 0 )
{ pp();
- std::fprintf( stderr, "version %d member format not supported, newer %s needed.\n",
- header.version(), program_name ); }
+ std::fprintf( stderr, "Version %d member format not supported.\n",
+ header.version() ); }
retval = 2; break;
}
if( header.dictionary_size() < min_dictionary_size ||
header.dictionary_size() > max_dictionary_size )
- { pp( "invalid dictionary size in member header" ); retval = 2; break; }
+ { pp( "Invalid dictionary size in member header" ); retval = 2; break; }
if( verbosity >= 1 )
{
@@ -528,10 +595,10 @@ int decompress( const int infd, const Pretty_print & pp, const bool testing )
{
pp();
if( result == 2 )
- std::fprintf( stderr, "file ends unexpectedly at pos %lld\n",
+ std::fprintf( stderr, "File ends unexpectedly at pos %lld\n",
partial_file_pos );
else
- std::fprintf( stderr, "decoder error at pos %lld\n",
+ std::fprintf( stderr, "Decoder error at pos %lld\n",
partial_file_pos );
}
retval = 2; break;
@@ -543,10 +610,10 @@ int decompress( const int infd, const Pretty_print & pp, const bool testing )
}
catch( std::bad_alloc )
{
- pp( "not enough memory. Find a machine with more memory" );
+ pp( "Not enough memory. Find a machine with more memory" );
retval = 1;
}
- catch( Error e ) { pp(); show_error( e.s, errno ); retval = 1; }
+ catch( Error e ) { pp(); show_error( e.msg, errno ); retval = 1; }
return retval;
}
@@ -568,12 +635,9 @@ void set_signals() throw()
} // end namespace
-int verbosity = 0;
-
-
void Pretty_print::operator()( const char * const msg ) const throw()
{
- if( verbosity >= 0 )
+ if( verbosity_ >= 0 )
{
if( first_post )
{
@@ -592,22 +656,23 @@ void show_error( const char * const msg, const int errcode, const bool help ) th
{
if( verbosity >= 0 )
{
- if( msg && msg[0] != 0 )
+ if( msg && msg[0] )
{
std::fprintf( stderr, "%s: %s", program_name, msg );
- if( errcode > 0 ) std::fprintf( stderr, ": %s", std::strerror( errcode ) );
+ if( errcode > 0 )
+ std::fprintf( stderr, ": %s", std::strerror( errcode ) );
std::fprintf( stderr, "\n" );
}
- if( help && invocation_name && invocation_name[0] != 0 )
- std::fprintf( stderr, "Try `%s --help' for more information.\n", invocation_name );
+ if( help && invocation_name && invocation_name[0] )
+ std::fprintf( stderr, "Try `%s --help' for more information.\n",
+ invocation_name );
}
}
void internal_error( const char * const msg )
{
- std::string s( "internal error: " ); s += msg;
- show_error( s.c_str() );
+ std::fprintf( stderr, "%s: internal error: %s.\n", program_name, msg );
std::exit( 3 );
}
@@ -655,15 +720,15 @@ int main( const int argc, const char * const argv[] )
// to the corresponding LZMA compression modes.
const Lzma_options option_mapping[] =
{
- { 1 << 16, 5 }, // -0
- { 1 << 20, 10 }, // -1
- { 3 << 19, 12 }, // -2
- { 1 << 21, 17 }, // -3
- { 3 << 20, 26 }, // -4
- { 1 << 22, 44 }, // -5
- { 1 << 23, 80 }, // -6
- { 1 << 24, 108 }, // -7
- { 3 << 23, 163 }, // -8
+ { 1 << 16, 16 }, // -0 entry values not used
+ { 1 << 20, 5 }, // -1
+ { 3 << 19, 6 }, // -2
+ { 1 << 21, 8 }, // -3
+ { 3 << 20, 12 }, // -4
+ { 1 << 22, 20 }, // -5
+ { 1 << 23, 36 }, // -6
+ { 1 << 24, 68 }, // -7
+ { 3 << 23, 132 }, // -8
{ 1 << 25, 273 } }; // -9
Lzma_options encoder_options = option_mapping[6]; // default = "-6"
long long member_size = LLONG_MAX;
@@ -673,16 +738,16 @@ int main( const int argc, const char * const argv[] )
bool force = false;
bool keep_input_files = false;
bool to_stdout = false;
+ bool zero = false;
std::string input_filename;
std::string default_output_filename;
std::vector< std::string > filenames;
invocation_name = argv[0];
-// std::setvbuf( stderr, 0, _IONBF, 0 );
const Arg_parser::Option options[] =
{
- { '0', 0, Arg_parser::no },
- { '1', "fast", Arg_parser::no },
+ { '0', "fast", Arg_parser::no },
+ { '1', 0, Arg_parser::no },
{ '2', 0, Arg_parser::no },
{ '3', 0, Arg_parser::no },
{ '4', 0, Arg_parser::no },
@@ -720,9 +785,11 @@ int main( const int argc, const char * const argv[] )
const char * const arg = parser.argument( argind ).c_str();
switch( code )
{
- case '0': case '1': case '2': case '3': case '4':
+ case '0': zero = true; break;
+ case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
- encoder_options = option_mapping[code-'0']; break;
+ encoder_options = option_mapping[code-'0'];
+ zero = false; break;
case 'b': member_size = getnum( arg, 0, 100000, LLONG_MAX / 2 ); break;
case 'c': to_stdout = true; break;
case 'd': program_mode = m_decompress; break;
@@ -731,11 +798,12 @@ int main( const int argc, const char * const argv[] )
case 'h': show_help(); return 0;
case 'k': keep_input_files = true; break;
case 'm': encoder_options.match_len_limit =
- getnum( arg, 0, min_match_len_limit, max_match_len ); break;
+ getnum( arg, 0, min_match_len_limit, max_match_len );
+ zero = false; break;
case 'o': default_output_filename = arg; break;
case 'q': verbosity = -1; break;
case 's': encoder_options.dictionary_size = get_dict_size( arg );
- break;
+ zero = false; break;
case 'S': volume_size = getnum( arg, 0, 100000, LLONG_MAX / 2 ); break;
case 't': program_mode = m_test; break;
case 'v': if( verbosity < 4 ) ++verbosity; break;
@@ -744,6 +812,11 @@ int main( const int argc, const char * const argv[] )
}
}
+#if defined(__MSVCRT__) || defined(__OS2__)
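+  // switch the standard streams to binary mode to avoid CRLF translation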
+ _setmode( STDIN_FILENO, O_BINARY );
+ _setmode( STDOUT_FILENO, O_BINARY );
+#endif
+
bool filenames_given = false;
for( ; argind < parser.arguments(); ++argind )
{
@@ -756,7 +829,7 @@ int main( const int argc, const char * const argv[] )
( filenames_given || default_output_filename.size() ) )
set_signals();
- Pretty_print pp( filenames );
+ Pretty_print pp( filenames, verbosity );
if( program_mode == m_test )
outfd = -1;
else if( program_mode == m_compress )
@@ -828,8 +901,13 @@ int main( const int argc, const char * const argv[] )
pp.set_name( input_filename );
int tmp = 0;
if( program_mode == m_compress )
- tmp = compress( member_size, volume_size, encoder_options, infd,
- pp, in_statsp );
+ {
+ if( zero )
+ tmp = fcompress( member_size, volume_size, infd, pp, in_statsp );
+ else
+ tmp = compress( member_size, volume_size, encoder_options, infd,
+ pp, in_statsp );
+ }
else
tmp = decompress( infd, pp, program_mode == m_test );
if( tmp > retval ) retval = tmp;
@@ -846,9 +924,7 @@ int main( const int argc, const char * const argv[] )
}
if( outfd >= 0 && close( outfd ) != 0 )
{
- if( verbosity >= 0 )
- std::fprintf( stderr, "%s: Can't close stdout: %s.\n",
- program_name, std::strerror( errno ) );
+ show_error( "Can't close stdout", errno );
if( retval < 1 ) retval = 1;
}
return retval;
diff --git a/testsuite/check.sh b/testsuite/check.sh
index 56ebc2b..710f0b8 100755
--- a/testsuite/check.sh
+++ b/testsuite/check.sh
@@ -1,5 +1,5 @@
#! /bin/sh
-# check script for Lzip - A data compressor based on the LZMA algorithm
+# check script for Lzip - Data compressor based on the LZMA algorithm
# Copyright (C) 2008, 2009, 2010 Antonio Diaz Diaz.
#
# This script is free software: you have unlimited permission
@@ -17,17 +17,36 @@ if [ ! -x "${LZIP}" ] ; then
echo "${LZIP}: cannot execute"
exit 1
fi
+if [ ! -x "${LZIPRECOVER}" ] ; then
+ echo "${LZIPRECOVER}: cannot execute"
+ exit 1
+fi
if [ -d tmp ] ; then rm -rf tmp ; fi
mkdir tmp
-printf "testing lzip..."
+printf "testing lzip-%s..." "$2"
cd "${objdir}"/tmp
-cat "${testdir}"/test1 > in || framework_failure
+cat "${testdir}"/test.txt > in || framework_failure
fail=0
-"${LZIP}" -cd "${testdir}"/test1.lz > copy || fail=1
+"${LZIP}" -t "${testdir}"/test_v0.lz || fail=1
+printf .
+"${LZIP}" -cd "${testdir}"/test_v0.lz > copy || fail=1
+cmp in copy || fail=1
+printf .
+
+"${LZIP}" -t "${testdir}"/test_v1.lz || fail=1
+printf .
+"${LZIP}" -cd "${testdir}"/test_v1.lz > copy || fail=1
+cmp in copy || fail=1
+printf .
+
+"${LZIP}" -t "${testdir}"/test_sync.lz || fail=1
+printf .
+"${LZIP}" -cd "${testdir}"/test_sync.lz > copy || fail=1
cmp in copy || fail=1
+printf .
for i in s4Ki 0 1 2 3 4 5 6 7 8 9 ; do
"${LZIP}" -k -$i in || fail=1
@@ -60,15 +79,70 @@ for i in s4Ki 0 1 2 3 4 5 6 7 8 9 ; do
printf .
done
-"${LZIP}" -ce in in in > out || fail=1
+# Description of test files for lziprecover:
+# test_bad1.lz: byte at offset 67 changed from 0xCC to 0x33
+# test_bad2.lz: [ 34- 66) --> copy of bytes [ 68- 100)
+# test_bad3.lz: [ 512-1536) --> zeroed; [2560-3584) --> zeroed
+# test_bad4.lz: [3072-4096) --> random data; [4608-5632) --> zeroed
+# test_bad5.lz: [1024-2048) --> random data; [5120-6144) --> random data
+
+printf "\ntesting lziprecover-%s..." "$2"
+
+"${LZIP}" -c in in in > out || fail=1
printf "garbage" >> out || fail=1
-"${LZIPRECOVER}" out || fail=1
+"${LZIPRECOVER}" -s out -o out.lz || fail=1
for i in 1 2 3 ; do
- "${LZIP}" -cd rec0000${i}out > copy || fail=1
+ "${LZIP}" -cd rec0000${i}out.lz > copy || fail=1
cmp in copy || fail=1
printf .
done
+"${LZIP}" -0kf -$i in || fail=1
+"${LZIPRECOVER}" -R in.lz > /dev/null || fail=1
+printf .
+"${LZIPRECOVER}" -R "${testdir}"/test_v1.lz > /dev/null || fail=1
+printf .
+
+"${LZIPRECOVER}" -R -o copy.lz "${testdir}"/test_bad1.lz > /dev/null || fail=1
+"${LZIP}" -df copy.lz || fail=1
+cmp in copy || fail=1
+printf .
+
+"${LZIPRECOVER}" -m -o copy.lz "${testdir}"/test_bad1.lz "${testdir}"/test_bad2.lz > /dev/null || fail=1
+"${LZIP}" -df copy.lz || fail=1
+cmp in copy || fail=1
+printf .
+"${LZIPRECOVER}" -m -o copy.lz "${testdir}"/test_bad2.lz "${testdir}"/test_bad1.lz > /dev/null || fail=1
+"${LZIP}" -df copy.lz || fail=1
+cmp in copy || fail=1
+printf .
+
+for i in 1 2 ; do
+ for j in 3 4 5 ; do
+ "${LZIPRECOVER}" -m -o copy.lz "${testdir}"/test_bad${i}.lz "${testdir}"/test_bad${j}.lz > /dev/null || fail=1
+ "${LZIP}" -df copy.lz || fail=1
+ cmp in copy || fail=1
+ printf .
+ "${LZIPRECOVER}" -m -o copy.lz "${testdir}"/test_bad${j}.lz "${testdir}"/test_bad${i}.lz > /dev/null || fail=1
+ "${LZIP}" -df copy.lz || fail=1
+ cmp in copy || fail=1
+ printf .
+ done
+done
+
+"${LZIPRECOVER}" -m -o copy.lz "${testdir}"/test_bad3.lz "${testdir}"/test_bad4.lz "${testdir}"/test_bad5.lz > /dev/null || fail=1
+"${LZIP}" -df copy.lz || fail=1
+cmp in copy || fail=1
+printf .
+"${LZIPRECOVER}" -m -o copy.lz "${testdir}"/test_bad4.lz "${testdir}"/test_bad5.lz "${testdir}"/test_bad3.lz > /dev/null || fail=1
+"${LZIP}" -df copy.lz || fail=1
+cmp in copy || fail=1
+printf .
+"${LZIPRECOVER}" -m -o copy.lz "${testdir}"/test_bad5.lz "${testdir}"/test_bad3.lz "${testdir}"/test_bad4.lz > /dev/null || fail=1
+"${LZIP}" -df copy.lz || fail=1
+cmp in copy || fail=1
+printf .
+
echo
if [ ${fail} = 0 ] ; then
echo "tests completed successfully."
diff --git a/testsuite/test1 b/testsuite/test.txt
index 5b244d5..5b244d5 100644
--- a/testsuite/test1
+++ b/testsuite/test.txt
diff --git a/testsuite/test_bad1.lz b/testsuite/test_bad1.lz
new file mode 100644
index 0000000..0b84883
--- /dev/null
+++ b/testsuite/test_bad1.lz
Binary files differ
diff --git a/testsuite/test_bad2.lz b/testsuite/test_bad2.lz
new file mode 100644
index 0000000..cce6a3c
--- /dev/null
+++ b/testsuite/test_bad2.lz
Binary files differ
diff --git a/testsuite/test_bad3.lz b/testsuite/test_bad3.lz
new file mode 100644
index 0000000..a1676bb
--- /dev/null
+++ b/testsuite/test_bad3.lz
Binary files differ
diff --git a/testsuite/test_bad4.lz b/testsuite/test_bad4.lz
new file mode 100644
index 0000000..a8f89a3
--- /dev/null
+++ b/testsuite/test_bad4.lz
Binary files differ
diff --git a/testsuite/test_bad5.lz b/testsuite/test_bad5.lz
new file mode 100644
index 0000000..73e0142
--- /dev/null
+++ b/testsuite/test_bad5.lz
Binary files differ
diff --git a/testsuite/test_sync.lz b/testsuite/test_sync.lz
new file mode 100644
index 0000000..419fa97
--- /dev/null
+++ b/testsuite/test_sync.lz
Binary files differ
diff --git a/testsuite/test1.lz b/testsuite/test_v0.lz
index a09b1e8..a09b1e8 100644
--- a/testsuite/test1.lz
+++ b/testsuite/test_v0.lz
Binary files differ
diff --git a/testsuite/test_v1.lz b/testsuite/test_v1.lz
new file mode 100644
index 0000000..f1c79eb
--- /dev/null
+++ b/testsuite/test_v1.lz
Binary files differ
diff --git a/testsuite/unzcrash.cc b/testsuite/unzcrash.cc
index f61f8a8..7d10b28 100644
--- a/testsuite/unzcrash.cc
+++ b/testsuite/unzcrash.cc
@@ -16,7 +16,7 @@
a bug.
Compile this file with the command:
- g++ -O2 -Wall -W -o unzcrash testsuite/unzcrash.cc
+ g++ -Wall -W -O2 -o unzcrash testsuite/unzcrash.cc
*/
#include <csignal>
@@ -26,9 +26,9 @@
#include <unistd.h>
-int main( const int argc, const char * argv[] )
+int main( const int argc, const char * const argv[] )
{
- if( argc < 3 )
+ if( argc != 3 )
{
std::fprintf( stderr, "Usage: unzcrash \"lzip -tv\" filename.lz\n" );
return 1;
@@ -41,8 +41,8 @@ int main( const int argc, const char * argv[] )
return 1;
}
- const int buffer_size = 1 << 20;
- uint8_t buffer[buffer_size];
+ const int buffer_size = 1 << 21;
+ uint8_t * const buffer = new uint8_t[buffer_size];
const int size = std::fread( buffer, 1, buffer_size, f );
if( size >= buffer_size )
{
@@ -64,7 +64,7 @@ int main( const int argc, const char * argv[] )
return 1;
}
- signal( SIGPIPE, SIG_IGN );
+ std::signal( SIGPIPE, SIG_IGN );
for( int byte = 0; byte < size; ++byte )
{
@@ -81,5 +81,6 @@ int main( const int argc, const char * argv[] )
++buffer[byte];
}
+ delete[] buffer;
return 0;
}