path: root/create_lz.cc
author     Daniel Baumann <daniel.baumann@progress-linux.org>   2019-02-27 19:14:36 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2019-02-27 19:14:36 +0000
commit     0733fb1a9e3c0c586f9a958aa7f26fcfa7292b78 (patch)
tree       52531825e3cbefa80bad43b2635fa77a3ec5a366 /create_lz.cc
parent     Adding upstream version 0.12. (diff)
Adding upstream version 0.13. (upstream/0.13)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'create_lz.cc')
-rw-r--r--  create_lz.cc  44
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/create_lz.cc b/create_lz.cc
index 797427b..e2aaf3d 100644
--- a/create_lz.cc
+++ b/create_lz.cc
@@ -368,7 +368,11 @@ void loop_encode( const uint8_t * const ibuf, const int isize,
courier.collect_packet( new Opacket( obuf, opos ), worker_id );
opos = 0; obuf = new( std::nothrow ) uint8_t[max_packet_size];
if( !obuf ) { show_error( mem_msg2 ); cleanup_and_fail(); }
- if( LZ_compress_finished( encoder ) == 1 ) break;
+ if( LZ_compress_finished( encoder ) == 1 )
+ {
+ if( LZ_compress_restart_member( encoder, LLONG_MAX ) >= 0 ) break;
+ show_error( "LZ_compress_restart_member failed." ); cleanup_and_fail();
+ }
}
}
if( ipos > isize ) internal_error( "ipacket size exceeded in worker." );
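
Note: the hunk above keeps one long-lived LZ_Encoder per worker and, when lzlib reports that the current member is finished, restarts the member in place instead of breaking out of the loop. A minimal, self-contained sketch of that lzlib pattern (illustrative only, not tarlz code; names and error handling are simplified):

#include <algorithm>    // std::min
#include <climits>      // LLONG_MAX
#include <cstdio>
#include <stdint.h>
#include <lzlib.h>

/* Illustrative sketch, not tarlz code: compress one buffer into the current
   lzip member, then restart the member so the same encoder can be reused.
   Mirrors the LZ_compress_finished / LZ_compress_restart_member pattern
   introduced in the hunk above. */
bool compress_one_member( LZ_Encoder * const encoder,
                          const uint8_t * const ibuf, const int isize,
                          std::FILE * const outfile )
  {
  enum { obuf_size = 65536 };
  uint8_t obuf[obuf_size];
  int ipos = 0;
  bool finish_called = false;
  while( true )
    {
    if( ipos < isize )                          // feed pending input
      {
      const int wr = LZ_compress_write( encoder, ibuf + ipos,
                       std::min( isize - ipos, LZ_compress_write_size( encoder ) ) );
      if( wr < 0 ) return false;
      ipos += wr;
      }
    if( ipos >= isize && !finish_called )       // mark end of member data
      { LZ_compress_finish( encoder ); finish_called = true; }
    const int rd = LZ_compress_read( encoder, obuf, obuf_size );
    if( rd < 0 ) return false;                  // encoder error
    if( rd > 0 && std::fwrite( obuf, 1, rd, outfile ) != (std::size_t)rd )
      return false;                             // write error
    if( LZ_compress_finished( encoder ) == 1 )  // member complete; reuse encoder
      return LZ_compress_restart_member( encoder, LLONG_MAX ) >= 0;
    }
  }
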
@@ -401,27 +405,27 @@ extern "C" void * cworker( void * arg )
if( !rbuf.size() ) { show_error( mem_msg2 ); cleanup_and_fail(); }
int opos = 0;
+ bool flushed = true; // avoid producing empty lzip members
while( true )
{
const Ipacket * const ipacket = courier.distribute_packet( worker_id );
if( !ipacket ) break; // no more packets to process
- if( ipacket->filename.empty() ) // end of group, flush encoder
+ if( ipacket->filename.empty() ) // end of group
{
- if( !encoder ) { delete ipacket; continue; } // nothing to flush
- loop_encode( 0, 0, data, opos, courier, encoder, worker_id, true );
+ if( !flushed ) // this lzip member is not empty
+ loop_encode( 0, 0, data, opos, courier, encoder, worker_id, true );
courier.collect_packet( new Opacket, worker_id ); // end of member token
- if( LZ_compress_restart_member( encoder, LLONG_MAX ) < 0 )
- { show_error( "LZ_compress_restart_member failed." ); cleanup_and_fail(); }
- delete ipacket; continue;
+ flushed = true; delete ipacket; continue;
}
const int infd =
ipacket->file_size ? open_instream( ipacket->filename.c_str() ) : -1;
- if( ipacket->file_size && infd < 0 )
+ if( ipacket->file_size && infd < 0 ) // can't read file data
{ delete[] ipacket->header; delete ipacket->extended; delete ipacket;
- set_error_status( 1 ); continue; }
+ set_error_status( 1 ); continue; } // skip file
- if( !encoder )
+ flushed = false;
+ if( !encoder ) // init encoder just before using it
{
data = new( std::nothrow ) uint8_t[max_packet_size];
encoder = LZ_compress_open( dictionary_size, match_len_limit, LLONG_MAX );
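
Note: the hunk above replaces "flush on every end-of-group packet" with a 'flushed' flag plus lazy encoder creation, so an empty group no longer produces an empty lzip member, and restarting the member is now handled inside loop_encode (see the previous hunk). A compilable skeleton of that control flow (not tarlz code; the Ipacket shape and the helper declarations are hypothetical stand-ins):

#include <climits>      // LLONG_MAX
#include <string>
#include <lzlib.h>

struct Ipacket { std::string filename; };                           // hypothetical
const Ipacket * next_packet();                                      // hypothetical
void compress_file_into_member( LZ_Encoder *, const Ipacket * );    // hypothetical
void flush_member( LZ_Encoder * );                                  // hypothetical
void emit_end_of_member_token();                                    // hypothetical

void worker_skeleton( const int dictionary_size, const int match_len_limit )
  {
  LZ_Encoder * encoder = 0;
  bool flushed = true;                  // current member holds no data yet
  while( const Ipacket * const ipacket = next_packet() )
    {
    if( ipacket->filename.empty() )     // end of group
      {
      if( !flushed ) flush_member( encoder );      // skip empty members
      emit_end_of_member_token();
      flushed = true; delete ipacket; continue;
      }
    flushed = false;                    // this member will contain data
    if( !encoder )                      // create the encoder on first use
      {
      encoder = LZ_compress_open( dictionary_size, match_len_limit, LLONG_MAX );
      if( !encoder ) return;            // real code reports the error and exits
      }
    compress_file_into_member( encoder, ipacket );
    delete ipacket;
    }
  }
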
@@ -494,17 +498,15 @@ extern "C" void * cworker( void * arg )
/* Get from courier the processed and sorted packets, and write
their contents to the output archive. */
-void muxer( Packet_courier & courier, const char * const archive_name,
- const int outfd )
+void muxer( Packet_courier & courier, const int outfd )
{
while( true )
{
const Opacket * const opacket = courier.deliver_packet();
if( !opacket ) break; // queue is empty. all workers exited
- if( writeblock( outfd, opacket->data, opacket->size ) != opacket->size )
- { show_file_error( archive_name, "Write error", errno );
- cleanup_and_fail(); }
+ if( !writeblock_wrapper( outfd, opacket->data, opacket->size ) )
+ cleanup_and_fail();
delete[] opacket->data;
delete opacket;
}
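
Note: muxer now delegates write-error reporting to writeblock_wrapper, which is why the archive_name parameter can be dropped here and from encode_lz below. The wrapper itself is not part of this diff; the following is a hypothetical sketch of what such a wrapper does (retry interrupted and partial writes, report the error once, return false on failure):

#include <cerrno>
#include <cstdio>
#include <cstring>
#include <stdint.h>
#include <unistd.h>

/* Hypothetical sketch only; the real writeblock_wrapper is defined elsewhere
   in tarlz and is not shown in this diff.  It writes 'size' bytes to 'fd',
   retrying interrupted and partial writes, and reports the error itself so
   callers no longer need the archive name. */
bool writeblock_wrapper( const int fd, const uint8_t * const buffer,
                         const int size )
  {
  int rest = size;
  const uint8_t * p = buffer;
  while( rest > 0 )
    {
    const ssize_t n = write( fd, p, rest );
    if( n > 0 ) { p += n; rest -= (int)n; }
    else if( n < 0 && errno != EINTR )
      {
      std::fprintf( stderr, "tarlz: Write error: %s\n", std::strerror( errno ) );
      return false;
      }
    }
  return true;
  }
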
@@ -514,9 +516,9 @@ void muxer( Packet_courier & courier, const char * const archive_name,
// init the courier, then start the grouper and the workers and call the muxer
-int encode_lz( const char * const archive_name, const Arg_parser & parser,
- const int dictionary_size, const int match_len_limit,
- const int num_workers, const int outfd, const int debug_level )
+int encode_lz( const Arg_parser & parser, const int dictionary_size,
+ const int match_len_limit, const int num_workers,
+ const int outfd, const int debug_level )
{
const int in_slots = 65536; // max small files (<=512B) in 64 MiB
const int total_in_slots = ( INT_MAX / num_workers >= in_slots ) ?
@@ -552,7 +554,7 @@ int encode_lz( const char * const archive_name, const Arg_parser & parser,
{ show_error( "Can't create worker threads", errcode ); cleanup_and_fail(); }
}
- muxer( courier, archive_name, outfd );
+ muxer( courier, outfd );
for( int i = num_workers - 1; i >= 0; --i )
{
@@ -575,9 +577,7 @@ int encode_lz( const char * const archive_name, const Arg_parser & parser,
0xA3, 0xB7, 0x80, 0x0C, 0x82, 0xDB, 0xFF, 0xFF, 0x9F, 0xF0, 0x00, 0x00,
0x2E, 0xAF, 0xB5, 0xEF, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x2C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
- if( writeblock( outfd, eof_member, eof_member_size ) != eof_member_size )
- { show_error( "Error writing end-of-archive blocks", errno );
- retval = 1; }
+ if( !writeblock_wrapper( outfd, eof_member, eof_member_size ) ) retval = 1;
if( close( outfd ) != 0 && !retval )
{ show_error( "Error closing archive", errno ); retval = 1; }