summary | refs | log | tree | commit | diff | stats
path: root/dec_stdout.cc
diff options
context:
space:
mode:
author: Daniel Baumann <mail@daniel-baumann.ch> 2015-11-07 15:37:29 +0000
committer: Daniel Baumann <mail@daniel-baumann.ch> 2015-11-07 15:37:29 +0000
commit: 1f9c10664fb33107c4d175e1c5553b23a1f34550 (patch)
tree: 31e3e20c66e2e5fe93ff4522e05bad9882b49afa /dec_stdout.cc
parent: Adding upstream version 1.2~pre1. (diff)
download: plzip-1f9c10664fb33107c4d175e1c5553b23a1f34550.tar.xz
          plzip-1f9c10664fb33107c4d175e1c5553b23a1f34550.zip
Adding upstream version 1.2~rc1. (tag: upstream/1.2_rc1)
Signed-off-by: Daniel Baumann <mail@daniel-baumann.ch>
Diffstat (limited to 'dec_stdout.cc')
-rw-r--r-- dec_stdout.cc | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/dec_stdout.cc b/dec_stdout.cc
index 6c750c6..d2a0288 100644
--- a/dec_stdout.cc
+++ b/dec_stdout.cc
@@ -172,10 +172,10 @@ extern "C" void * dworker_o( void * arg )
LZ_Decoder * const decoder = LZ_decompress_open();
if( !new_data || !ibuffer || !decoder ||
LZ_decompress_errno( decoder ) != LZ_ok )
- { pp( "Not enough memory" ); cleanup_and_fail(); }
+ { pp( "Not enough memory." ); cleanup_and_fail(); }
int new_pos = 0;
- for( int i = worker_id; i < file_index.members(); i += num_workers )
+ for( long i = worker_id; i < file_index.members(); i += num_workers )
{
long long member_pos = file_index.mblock( i ).pos();
long long member_rest = file_index.mblock( i ).size();
@@ -193,7 +193,7 @@ extern "C" void * dworker_o( void * arg )
member_pos += size;
member_rest -= size;
if( LZ_decompress_write( decoder, ibuffer, size ) != size )
- internal_error( "library error (LZ_decompress_write)" );
+ internal_error( "library error (LZ_decompress_write)." );
}
if( member_rest <= 0 ) { LZ_decompress_finish( decoder ); break; }
}
@@ -205,7 +205,7 @@ extern "C" void * dworker_o( void * arg )
cleanup_and_fail( decompress_read_error( decoder, pp, worker_id ) );
new_pos += rd;
if( new_pos > max_packet_size )
- internal_error( "opacket size exceeded in worker" );
+ internal_error( "opacket size exceeded in worker." );
if( new_pos == max_packet_size ||
LZ_decompress_finished( decoder ) == 1 )
{
@@ -217,7 +217,7 @@ extern "C" void * dworker_o( void * arg )
courier.collect_packet( opacket, worker_id );
new_pos = 0;
new_data = new( std::nothrow ) uint8_t[max_packet_size];
- if( !new_data ) { pp( "Not enough memory" ); cleanup_and_fail(); }
+ if( !new_data ) { pp( "Not enough memory." ); cleanup_and_fail(); }
}
if( LZ_decompress_finished( decoder ) == 1 )
{
@@ -236,9 +236,9 @@ extern "C" void * dworker_o( void * arg )
delete[] ibuffer; delete[] new_data;
if( LZ_decompress_member_position( decoder ) != 0 )
- { pp( "Error, some data remains in decoder" ); cleanup_and_fail(); }
+ { pp( "Error, some data remains in decoder." ); cleanup_and_fail(); }
if( LZ_decompress_close( decoder ) < 0 )
- { pp( "LZ_decompress_close failed" ); cleanup_and_fail(); }
+ { pp( "LZ_decompress_close failed." ); cleanup_and_fail(); }
courier.worker_finished();
return 0;
}
@@ -278,7 +278,7 @@ int dec_stdout( const int num_workers, const int infd, const int outfd,
Worker_arg * worker_args = new( std::nothrow ) Worker_arg[num_workers];
pthread_t * worker_threads = new( std::nothrow ) pthread_t[num_workers];
if( !worker_args || !worker_threads )
- { pp( "Not enough memory" ); cleanup_and_fail(); }
+ { pp( "Not enough memory." ); cleanup_and_fail(); }
for( int i = 0; i < num_workers; ++i )
{
worker_args[i].file_index = &file_index;
@@ -324,6 +324,6 @@ int dec_stdout( const int num_workers, const int infd, const int outfd,
courier.ocheck_counter,
courier.owait_counter );
- if( !courier.finished() ) internal_error( "courier not finished" );
+ if( !courier.finished() ) internal_error( "courier not finished." );
return 0;
}