path: root/dec_stream.cc
author     Daniel Baumann <mail@daniel-baumann.ch>   2015-11-07 15:37:35 +0000
committer  Daniel Baumann <mail@daniel-baumann.ch>   2015-11-07 15:37:35 +0000
commit     06821cd88178d7d9a1fbcb4027a2bf939dd42cd1 (patch)
tree       d545383354b3edeb26edcba1fd6e52d0e9b68e0d /dec_stream.cc
parent     Adding debian version 1.2~pre1-2. (diff)
download   plzip-06821cd88178d7d9a1fbcb4027a2bf939dd42cd1.tar.xz
           plzip-06821cd88178d7d9a1fbcb4027a2bf939dd42cd1.zip

Merging upstream version 1.2~rc1.

Signed-off-by: Daniel Baumann <mail@daniel-baumann.ch>
Diffstat (limited to 'dec_stream.cc')
-rw-r--r--  dec_stream.cc  30
1 file changed, 15 insertions, 15 deletions
diff --git a/dec_stream.cc b/dec_stream.cc
index 2897002..713a5bd 100644
--- a/dec_stream.cc
+++ b/dec_stream.cc
@@ -249,7 +249,7 @@ extern "C" void * dsplitter_s( void * arg )
const int buffer_size = max_packet_size;
const int base_buffer_size = tsize + buffer_size + hsize;
uint8_t * const base_buffer = new( std::nothrow ) uint8_t[base_buffer_size];
- if( !base_buffer ) { pp( "Not enough memory" ); cleanup_and_fail(); }
+ if( !base_buffer ) { pp( "Not enough memory." ); cleanup_and_fail(); }
uint8_t * const buffer = base_buffer + tsize;
int size = readblock( infd, buffer, buffer_size + hsize ) - hsize;
@@ -257,10 +257,10 @@ extern "C" void * dsplitter_s( void * arg )
if( size != buffer_size && errno )
{ pp(); show_error( "Read error", errno ); cleanup_and_fail(); }
if( size + hsize < min_member_size )
- { pp( "Input file is too short" ); cleanup_and_fail( 2 ); }
+ { pp( "Input file is too short." ); cleanup_and_fail( 2 ); }
const File_header & header = *(File_header *)buffer;
if( !header.verify_magic() )
- { pp( "Bad magic number (file not in lzip format)" ); cleanup_and_fail( 2 ); }
+ { pp( "Bad magic number (file not in lzip format)." ); cleanup_and_fail( 2 ); }
if( !header.verify_version() )
{
if( verbosity >= 0 )
@@ -293,7 +293,7 @@ extern "C" void * dsplitter_s( void * arg )
cleanup_and_fail( 2 );
}
uint8_t * const data = new( std::nothrow ) uint8_t[newpos - pos];
- if( !data ) { pp( "Not enough memory" ); cleanup_and_fail(); }
+ if( !data ) { pp( "Not enough memory." ); cleanup_and_fail(); }
std::memcpy( data, buffer + pos, newpos - pos );
courier.receive_packet( data, newpos - pos );
courier.receive_packet( 0, 0 ); // end of member token
@@ -306,7 +306,7 @@ extern "C" void * dsplitter_s( void * arg )
if( at_stream_end )
{
uint8_t * data = new( std::nothrow ) uint8_t[size + hsize - pos];
- if( !data ) { pp( "Not enough memory" ); cleanup_and_fail(); }
+ if( !data ) { pp( "Not enough memory." ); cleanup_and_fail(); }
std::memcpy( data, buffer + pos, size + hsize - pos );
courier.receive_packet( data, size + hsize - pos );
courier.receive_packet( 0, 0 ); // end of member token
@@ -316,7 +316,7 @@ extern "C" void * dsplitter_s( void * arg )
{
partial_member_size += buffer_size - pos;
uint8_t * data = new( std::nothrow ) uint8_t[buffer_size - pos];
- if( !data ) { pp( "Not enough memory" ); cleanup_and_fail(); }
+ if( !data ) { pp( "Not enough memory." ); cleanup_and_fail(); }
std::memcpy( data, buffer + pos, buffer_size - pos );
courier.receive_packet( data, buffer_size - pos );
}
@@ -352,7 +352,7 @@ extern "C" void * dworker_s( void * arg )
uint8_t * new_data = new( std::nothrow ) uint8_t[max_packet_size];
LZ_Decoder * const decoder = LZ_decompress_open();
if( !new_data || !decoder || LZ_decompress_errno( decoder ) != LZ_ok )
- { pp( "Not enough memory" ); cleanup_and_fail(); }
+ { pp( "Not enough memory." ); cleanup_and_fail(); }
int new_pos = 0;
bool trailing_garbage_found = false;
@@ -369,10 +369,10 @@ extern "C" void * dworker_s( void * arg )
{
const int wr = LZ_decompress_write( decoder, ipacket->data + written,
ipacket->size - written );
- if( wr < 0 ) internal_error( "library error (LZ_decompress_write)" );
+ if( wr < 0 ) internal_error( "library error (LZ_decompress_write)." );
written += wr;
if( written > ipacket->size )
- internal_error( "ipacket size exceeded in worker" );
+ internal_error( "ipacket size exceeded in worker." );
}
while( !trailing_garbage_found ) // read and pack decompressed data
{
@@ -387,7 +387,7 @@ extern "C" void * dworker_s( void * arg )
}
else new_pos += rd;
if( new_pos > max_packet_size )
- internal_error( "opacket size exceeded in worker" );
+ internal_error( "opacket size exceeded in worker." );
if( new_pos == max_packet_size || trailing_garbage_found ||
LZ_decompress_finished( decoder ) == 1 )
{
@@ -399,7 +399,7 @@ extern "C" void * dworker_s( void * arg )
courier.collect_packet( opacket, worker_id );
new_pos = 0;
new_data = new( std::nothrow ) uint8_t[max_packet_size];
- if( !new_data ) { pp( "Not enough memory" ); cleanup_and_fail(); }
+ if( !new_data ) { pp( "Not enough memory." ); cleanup_and_fail(); }
}
if( trailing_garbage_found ||
LZ_decompress_finished( decoder ) == 1 )
@@ -422,9 +422,9 @@ extern "C" void * dworker_s( void * arg )
delete[] new_data;
if( LZ_decompress_member_position( decoder ) != 0 )
- { pp( "Error, some data remains in decoder" ); cleanup_and_fail(); }
+ { pp( "Error, some data remains in decoder." ); cleanup_and_fail(); }
if( LZ_decompress_close( decoder ) < 0 )
- { pp( "LZ_decompress_close failed" ); cleanup_and_fail(); }
+ { pp( "LZ_decompress_close failed." ); cleanup_and_fail(); }
return 0;
}
@@ -481,7 +481,7 @@ int dec_stream( const int num_workers, const int infd, const int outfd,
Worker_arg * worker_args = new( std::nothrow ) Worker_arg[num_workers];
pthread_t * worker_threads = new( std::nothrow ) pthread_t[num_workers];
if( !worker_args || !worker_threads )
- { pp( "Not enough memory" ); cleanup_and_fail(); }
+ { pp( "Not enough memory." ); cleanup_and_fail(); }
for( int i = 0; i < num_workers; ++i )
{
worker_args[i].courier = &courier;
@@ -529,6 +529,6 @@ int dec_stream( const int num_workers, const int infd, const int outfd,
courier.ocheck_counter,
courier.owait_counter );
- if( !courier.finished() ) internal_error( "courier not finished" );
+ if( !courier.finished() ) internal_error( "courier not finished." );
return 0;
}