/* Plzip - A parallel version of the lzip data compressor
Copyright (C) 2009 Laszlo Ersek.
Copyright (C) 2009, 2010 Antonio Diaz Diaz.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _FILE_OFFSET_BITS 64
#include <algorithm>
#include <cassert>
#include <cerrno>
#include <climits>
#include <cstdio>
#include <cstdlib>
#include <new>
#include <vector>
#include <stdint.h>
#include <unistd.h>
#include <pthread.h>
#include <lzlib.h>
#include "plzip.h"
#ifndef LLONG_MAX
#define LLONG_MAX 0x7FFFFFFFFFFFFFFFLL
#endif
#ifndef LLONG_MIN
#define LLONG_MIN (-LLONG_MAX - 1LL)
#endif
#ifndef ULLONG_MAX
#define ULLONG_MAX 0xFFFFFFFFFFFFFFFFULL
#endif
namespace {
long long in_size = 0;
long long out_size = 0;
void *(*mallocf)( size_t size );
void (*freef)( void *ptr );
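// Tracing allocator, installed when debug_level & 2: logs every
// allocation and deallocation to stderr. trace_malloc preserves errno
// across the fprintf so callers still see why malloc failed.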
void * trace_malloc( size_t size )
{
int save_errno = 0;
void * ret = malloc( size );
if( ret == 0 ) save_errno = errno;
std::fprintf( stderr, "malloc(%lu) == %p\n", (unsigned long)size, ret );
if( ret == 0 ) errno = save_errno;
return ret;
}
void trace_free( void *ptr )
{
std::fprintf( stderr, "free(%p)\n", ptr );
free( ptr );
}
void * xalloc( size_t size )
{
void *ret = (*mallocf)( size );
if( ret == 0 ) { show_error( "not enough memory", errno ); fatal(); }
return ret;
}
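// Fail-fast wrappers around the pthread primitives: any error is
// reported via show_error() and aborts the program through fatal().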
void xinit( pthread_cond_t * cond, pthread_mutex_t * mutex )
{
int ret = pthread_mutex_init( mutex, 0 );
if( ret != 0 ) { show_error( "pthread_mutex_init", ret ); fatal(); }
ret = pthread_cond_init( cond, 0 );
if( ret != 0 ) { show_error( "pthread_cond_init", ret ); fatal(); }
}
void xdestroy( pthread_cond_t * cond, pthread_mutex_t * mutex )
{
int ret = pthread_cond_destroy( cond );
if( ret != 0 ) { show_error( "pthread_cond_destroy", ret ); fatal(); }
ret = pthread_mutex_destroy( mutex );
if( ret != 0 ) { show_error( "pthread_mutex_destroy", ret ); fatal(); }
}
void xlock( pthread_mutex_t * mutex )
{
int ret = pthread_mutex_lock( mutex );
if( ret != 0 ) { show_error( "pthread_mutex_lock", ret ); fatal(); }
}
void xunlock( pthread_mutex_t * mutex )
{
int ret = pthread_mutex_unlock( mutex );
if( ret != 0 ) { show_error( "pthread_mutex_unlock", ret ); fatal(); }
}
void xwait( pthread_cond_t * cond, pthread_mutex_t * mutex )
{
int ret = pthread_cond_wait( cond, mutex );
if( ret != 0 ) { show_error( "pthread_cond_wait", ret ); fatal(); }
}
void xsignal( pthread_cond_t * cond )
{
int ret = pthread_cond_signal( cond );
if( ret != 0 ) { show_error( "pthread_cond_signal", ret ); fatal(); }
}
void xbroadcast( pthread_cond_t * cond )
{
int ret = pthread_cond_broadcast( cond );
if( ret != 0 ) { show_error( "pthread_cond_broadcast", ret ); fatal(); }
}
void xcreate( pthread_t *thread, void *(*routine)(void *), void *arg )
{
int ret = pthread_create( thread, 0, routine, arg );
if( ret != 0 ) { show_error( "pthread_create", ret ); fatal(); }
}
void xjoin( pthread_t thread )
{
int ret = pthread_join( thread, 0 );
if( ret != 0 ) { show_error( "pthread_join", ret ); fatal(); }
}
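// Backpressure between splitter and muxer: the splitter must take a free
// slot before queueing a block and stalls when none are left; the muxer
// returns one slot per block written. This bounds the number of blocks
// in flight to num_slots.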
struct Slot_tally // Synchronizes splitter to muxer
{
unsigned long check_counter;
unsigned long wait_counter;
int num_free; // Number of free slots
pthread_mutex_t mutex;
pthread_cond_t slot_av; // Free slot available
Slot_tally( const int slots )
: check_counter( 0 ), wait_counter( 0 ), num_free( slots )
{ xinit( &slot_av, &mutex ); }
~Slot_tally() { xdestroy( &slot_av, &mutex ); }
};
struct S2w_blk // Splitter to worker data block
{
unsigned long long id; // Block serial number as read from infd
S2w_blk *next; // Next in queue
  int loaded;               // Number of bytes in plain; may be 0 for the first block
uint8_t plain[1]; // Data read from infd, allocated: data_size
};
struct S2w_queue
{
S2w_blk * head; // Next ready worker shall compress this
S2w_blk * tail; // Splitter will append here
unsigned long check_counter;
unsigned long wait_counter;
pthread_mutex_t mutex;
pthread_cond_t av_or_eof; // New block available or splitter done
bool eof; // Splitter done
S2w_queue()
: head( 0 ), tail( 0 ), check_counter( 0 ), wait_counter( 0 ), eof( false )
{ xinit( &av_or_eof, &mutex ); }
~S2w_queue() { xdestroy( &av_or_eof, &mutex ); }
};
struct W2m_blk // Worker to muxer data block
{
unsigned long long id; // Block index as read from infd
W2m_blk *next; // Next block in list (unordered)
int produced; // Number of bytes in compr
uint8_t compr[1]; // Data to write to outfd, alloc.: compr_size
};
struct W2m_queue
{
unsigned long long needed_id; // Block needed for resuming writing
W2m_blk *head; // Block list (unordered)
unsigned long check_counter;
unsigned long wait_counter;
int num_working; // Number of workers still running
pthread_mutex_t mutex;
pthread_cond_t av_or_exit; // New block available or all workers exited
W2m_queue( const int num_workers )
: needed_id( 0 ), head( 0 ), check_counter( 0 ), wait_counter( 0 ),
num_working( num_workers )
{ xinit( &av_or_exit, &mutex ); }
~W2m_queue() { xdestroy( &av_or_exit, &mutex ); }
};
struct Splitter_arg
{
Slot_tally * slot_tally;
S2w_queue * s2w_queue;
int infd;
int data_size;
int s2w_blk_size;
};
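// Splitter thread: reads blocks of up to data_size bytes from infd,
// stamps each with a serial id and appends it to s2w_queue. A short read
// with errno set is fatal; a zero-byte read (except for block 0) marks
// EOF and wakes all waiting workers.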
void * splitter( void * arg )
{
const Splitter_arg & tmp = *(Splitter_arg *)arg;
Slot_tally & slot_tally = *tmp.slot_tally;
S2w_queue & s2w_queue = *tmp.s2w_queue;
const int infd = tmp.infd;
const int data_size = tmp.data_size;
const int s2w_blk_size = tmp.s2w_blk_size;
for( unsigned long long id = 0; ; ++id )
{
S2w_blk * s2w_blk = (S2w_blk *)xalloc( s2w_blk_size );
// Fill block
const int rd = readblock( infd, s2w_blk->plain, data_size );
if( rd != data_size && errno ) { show_error( "read", errno ); fatal(); }
if( rd > 0 || id == 0 ) // first block can be empty
{
s2w_blk->id = id;
s2w_blk->next = 0;
s2w_blk->loaded = rd;
in_size += rd;
xlock( &slot_tally.mutex ); // Grab a free slot
++slot_tally.check_counter;
while( slot_tally.num_free == 0 )
{
++slot_tally.wait_counter;
xwait( &slot_tally.slot_av, &slot_tally.mutex );
}
--slot_tally.num_free;
xunlock( &slot_tally.mutex );
}
else
{ (*freef)( s2w_blk ); s2w_blk = 0; }
xlock( &s2w_queue.mutex );
if( s2w_blk != 0 )
{
if( s2w_queue.tail == 0 ) s2w_queue.head = s2w_blk;
else s2w_queue.tail->next = s2w_blk;
s2w_queue.tail = s2w_blk;
xsignal( &s2w_queue.av_or_eof );
}
else
{
s2w_queue.eof = true;
xbroadcast( &s2w_queue.av_or_eof );
}
xunlock( &s2w_queue.mutex );
if( s2w_blk == 0 ) break;
}
return 0;
}
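// Compress one block into a complete, independent lzip member and push
// the result onto the (unordered) w2m_queue. The muxer is signalled only
// when the block it is currently waiting for arrives.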
void work_compr( const int dictionary_size, const int match_len_limit,
const S2w_blk & s2w_blk, W2m_queue & w2m_queue,
const int compr_size, const int w2m_blk_size )
{
assert( s2w_blk.loaded > 0 || s2w_blk.id == 0 );
W2m_blk * w2m_blk = (W2m_blk *)xalloc( w2m_blk_size );
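  // Shrink the dictionary to the block size when the block is smaller,
  // but never below the minimum accepted by lzlib.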
const int dict_size = std::max( LZ_min_dictionary_size(),
std::min( dictionary_size, s2w_blk.loaded ) );
LZ_Encoder * const encoder =
LZ_compress_open( dict_size, match_len_limit, LLONG_MAX );
if( !encoder || LZ_compress_errno( encoder ) != LZ_ok )
{ show_error( "LZ_compress_open failed." ); fatal(); }
int written = 0;
w2m_blk->produced = 0;
while( true )
{
if( LZ_compress_write_size( encoder ) > 0 )
{
if( written < s2w_blk.loaded )
{
const int wr = LZ_compress_write( encoder, s2w_blk.plain + written,
s2w_blk.loaded - written );
if( wr < 0 ) { show_error( "LZ_compress_write failed." ); fatal(); }
written += wr;
}
if( written >= s2w_blk.loaded ) LZ_compress_finish( encoder );
}
assert( w2m_blk->produced < compr_size );
const int rd = LZ_compress_read( encoder, w2m_blk->compr + w2m_blk->produced,
compr_size - w2m_blk->produced );
if( rd < 0 ) { show_error( "LZ_compress_read failed." ); fatal(); }
w2m_blk->produced += rd;
if( LZ_compress_finished( encoder ) == 1 ) break;
}
if( LZ_compress_close( encoder ) < 0 )
{ show_error( "LZ_compress_close failed." ); fatal(); }
w2m_blk->id = s2w_blk.id;
// Push block to muxer queue
xlock( &w2m_queue.mutex );
w2m_blk->next = w2m_queue.head;
w2m_queue.head = w2m_blk;
if( w2m_blk->id == w2m_queue.needed_id ) xsignal( &w2m_queue.av_or_exit );
xunlock( &w2m_queue.mutex );
}
struct Worker_arg
{
int dictionary_size;
int match_len_limit;
S2w_queue * s2w_queue;
W2m_queue * w2m_queue;
int compr_size;
int w2m_blk_size;
};
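// Worker thread: repeatedly takes the next block from s2w_queue,
// compresses it and frees it. Exits when the queue is empty and the
// splitter has signalled EOF; the last worker to exit wakes the muxer
// if no output is left pending.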
void * worker( void * arg )
{
const Worker_arg & tmp = *(Worker_arg *)arg;
const int dictionary_size = tmp.dictionary_size;
const int match_len_limit = tmp.match_len_limit;
S2w_queue & s2w_queue = *tmp.s2w_queue;
W2m_queue & w2m_queue = *tmp.w2m_queue;
const int compr_size = tmp.compr_size;
const int w2m_blk_size = tmp.w2m_blk_size;
while( true )
{
S2w_blk *s2w_blk;
// Grab a block to work on
xlock( &s2w_queue.mutex );
++s2w_queue.check_counter;
while( s2w_queue.head == 0 && !s2w_queue.eof )
{
++s2w_queue.wait_counter;
xwait( &s2w_queue.av_or_eof, &s2w_queue.mutex );
}
if( s2w_queue.head == 0 ) // No blocks available and splitter exited
{
xunlock( &s2w_queue.mutex );
break;
}
s2w_blk = s2w_queue.head;
s2w_queue.head = s2w_blk->next;
if( s2w_queue.head == 0 ) s2w_queue.tail = 0;
xunlock( &s2w_queue.mutex );
work_compr( dictionary_size, match_len_limit, *s2w_blk, w2m_queue,
compr_size, w2m_blk_size );
(*freef)( s2w_blk );
}
// Notify muxer when last worker exits
xlock( &w2m_queue.mutex );
if( --w2m_queue.num_working == 0 && w2m_queue.head == 0 )
xsignal( &w2m_queue.av_or_exit );
xunlock( &w2m_queue.mutex );
return 0;
}
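// Muxer: drains compressed blocks from w2m_queue (they arrive in
// arbitrary order), parks them in a circular buffer indexed by
// id % num_slots, and writes them to outfd in id order, returning one
// slot to the tally per block written.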
void muxer( Slot_tally & slot_tally, W2m_queue & w2m_queue,
const int num_slots, const int outfd )
{
unsigned long long needed_id = 0;
std::vector< W2m_blk * > circular_buffer( num_slots, (W2m_blk *)0 );
xlock( &w2m_queue.mutex );
while( true )
{
// Grab all available compressed blocks in one step
++w2m_queue.check_counter;
while( w2m_queue.head == 0 && w2m_queue.num_working > 0 )
{
++w2m_queue.wait_counter;
xwait( &w2m_queue.av_or_exit, &w2m_queue.mutex );
}
    if( w2m_queue.head == 0 ) break; // Queue empty and all workers exited
W2m_blk * w2m_blk = w2m_queue.head;
w2m_queue.head = 0;
xunlock( &w2m_queue.mutex );
// Merge blocks fetched this time into circular buffer
do {
// id collision shouldn't happen
assert( circular_buffer[w2m_blk->id%num_slots] == 0 );
circular_buffer[w2m_blk->id%num_slots] = w2m_blk;
W2m_blk * next = w2m_blk->next;
w2m_blk->next = 0;
w2m_blk = next;
} while( w2m_blk != 0 );
    // Write out the initial contiguous run of reordered blocks
while( true )
{
w2m_blk = circular_buffer[needed_id%num_slots];
if( w2m_blk == 0 ) break;
out_size += w2m_blk->produced;
if( outfd >= 0 )
{
const int wr = writeblock( outfd, w2m_blk->compr, w2m_blk->produced );
if( wr != w2m_blk->produced )
{ show_error( "write", errno ); fatal(); }
}
circular_buffer[needed_id%num_slots] = 0;
++needed_id;
xlock( &slot_tally.mutex );
if( slot_tally.num_free++ == 0 ) xsignal( &slot_tally.slot_av );
xunlock( &slot_tally.mutex );
(*freef)( w2m_blk );
}
xlock( &w2m_queue.mutex );
w2m_queue.needed_id = needed_id;
}
xunlock( &w2m_queue.mutex );
for( int i = 0; i < num_slots; ++i )
if( circular_buffer[i] != 0 )
{ show_error( "circular buffer not empty" ); fatal(); }
}
} // end namespace
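// Parallel compression driver: starts the splitter and num_workers
// workers, runs the muxer in the calling thread, then joins all threads
// and reports compression statistics.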
int compress( const int data_size, const int dictionary_size,
const int match_len_limit, const int num_workers,
const int num_slots, const int infd, const int outfd,
const int debug_level )
{
if( debug_level & 2 ) { mallocf = trace_malloc; freef = trace_free; }
else { mallocf = malloc; freef = free; }
Slot_tally slot_tally( num_slots );
S2w_queue s2w_queue;
W2m_queue w2m_queue( num_workers );
Splitter_arg splitter_arg;
splitter_arg.slot_tally = &slot_tally;
splitter_arg.s2w_queue = &s2w_queue;
splitter_arg.infd = infd;
splitter_arg.data_size = data_size;
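  // plain[1] acts as a C89-style flexible array member: allocate the struct
  // with room for a data_size byte payload. W2m_blk uses the same trick.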
splitter_arg.s2w_blk_size = sizeof (S2w_blk) + data_size - 1;
pthread_t splitter_thread;
xcreate( &splitter_thread, splitter, &splitter_arg );
Worker_arg worker_arg;
worker_arg.dictionary_size = dictionary_size;
worker_arg.match_len_limit = match_len_limit;
worker_arg.s2w_queue = &s2w_queue;
worker_arg.w2m_queue = &w2m_queue;
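  // Worst-case size of one compressed member: 6-byte lzip header plus
  // 20-byte trailer, plus the data expanded by at most 9 bytes per 8
  // input bytes to cover incompressible input.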
worker_arg.compr_size = 6 + 20 + ( ( data_size / 8 ) * 9 );
worker_arg.w2m_blk_size = sizeof (W2m_blk) + worker_arg.compr_size - 1;
pthread_t * worker_threads = new( std::nothrow ) pthread_t[num_workers];
if( worker_threads == 0 )
{ show_error( "not enough memory.", errno ); fatal(); }
for( int i = 0; i < num_workers; ++i )
xcreate( &worker_threads[i], worker, &worker_arg );
muxer( slot_tally, w2m_queue, num_slots, outfd );
for( int i = num_workers - 1; i >= 0; --i )
    xjoin( worker_threads[i] );
delete[] worker_threads; worker_threads = 0;
xjoin( splitter_thread );
if( verbosity >= 1 )
{
if( in_size <= 0 || out_size <= 0 )
std::fprintf( stderr, "no data compressed.\n" );
else
std::fprintf( stderr, "%6.3f:1, %6.3f bits/byte, "
"%5.2f%% saved, %lld in, %lld out.\n",
(double)in_size / out_size,
( 8.0 * out_size ) / in_size,
100.0 * ( 1.0 - ( (double)out_size / in_size ) ),
in_size, out_size );
}
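  // Field width wide enough to print any unsigned long in decimal.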
const int FW = ( sizeof (unsigned long) * 8 ) / 3 + 1;
if( debug_level & 1 )
std::fprintf( stderr,
"any worker tried to consume from splitter: %*lu\n"
"any worker stalled : %*lu\n"
"muxer tried to consume from workers : %*lu\n"
"muxer stalled : %*lu\n"
"splitter tried to fill a block : %*lu\n"
"splitter stalled : %*lu\n",
FW, s2w_queue.check_counter,
FW, s2w_queue.wait_counter,
FW, w2m_queue.check_counter,
FW, w2m_queue.wait_counter,
FW, slot_tally.check_counter,
FW, slot_tally.wait_counter );
assert( slot_tally.num_free == num_slots );
assert( s2w_queue.eof );
assert( s2w_queue.head == 0 );
assert( s2w_queue.tail == 0 );
assert( w2m_queue.num_working == 0 );
assert( w2m_queue.head == 0 );
return 0;
}