author    Daniel Baumann <daniel.baumann@progress-linux.org>    2021-07-23 11:24:09 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2021-07-23 11:24:09 +0000
commit    e36b37583bebd229102f46c4ed7d2f6fad8697d4 (patch)
tree      73937b6f051fcaaa1ccbdfbaa9f3a1f36bbedb9e
parent    Initial commit. (diff)
Adding upstream version 0.6.0. (tag: upstream/0.6.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--.gitignore187
-rw-r--r--LICENSE54
-rw-r--r--Makefile.in103
-rw-r--r--README21
-rw-r--r--build/ck.build.aarch641
-rw-r--r--build/ck.build.arm1
-rw-r--r--build/ck.build.in10
-rw-r--r--build/ck.build.ppc1
-rw-r--r--build/ck.build.ppc642
-rw-r--r--build/ck.build.sparcv91
-rw-r--r--build/ck.build.x862
-rw-r--r--build/ck.build.x86_642
-rw-r--r--build/ck.pc.in10
-rw-r--r--build/ck.spec.in74
-rw-r--r--build/regressions.build.in10
-rwxr-xr-xconfigure796
-rw-r--r--doc/CK_ARRAY_FOREACH79
-rw-r--r--doc/CK_COHORT_INIT66
-rw-r--r--doc/CK_COHORT_INSTANCE59
-rw-r--r--doc/CK_COHORT_LOCK61
-rw-r--r--doc/CK_COHORT_PROTOTYPE76
-rw-r--r--doc/CK_COHORT_TRYLOCK69
-rw-r--r--doc/CK_COHORT_TRYLOCK_PROTOTYPE90
-rw-r--r--doc/CK_COHORT_UNLOCK61
-rw-r--r--doc/CK_HS_HASH71
-rw-r--r--doc/CK_RHS_HASH71
-rw-r--r--doc/CK_RWCOHORT_INIT61
-rw-r--r--doc/CK_RWCOHORT_INSTANCE64
-rw-r--r--doc/CK_RWCOHORT_PROTOTYPE65
-rw-r--r--doc/CK_RWCOHORT_READ_LOCK66
-rw-r--r--doc/CK_RWCOHORT_READ_UNLOCK65
-rw-r--r--doc/CK_RWCOHORT_WRITE_LOCK66
-rw-r--r--doc/CK_RWCOHORT_WRITE_UNLOCK65
-rw-r--r--doc/Makefile.in218
-rw-r--r--doc/ck_array_buffer60
-rw-r--r--doc/ck_array_commit58
-rw-r--r--doc/ck_array_deinit62
-rw-r--r--doc/ck_array_init69
-rw-r--r--doc/ck_array_initialized62
-rw-r--r--doc/ck_array_length57
-rw-r--r--doc/ck_array_put65
-rw-r--r--doc/ck_array_put_unique67
-rw-r--r--doc/ck_array_remove64
-rw-r--r--doc/ck_bitmap_base58
-rw-r--r--doc/ck_bitmap_bits56
-rw-r--r--doc/ck_bitmap_bts61
-rw-r--r--doc/ck_bitmap_buffer65
-rw-r--r--doc/ck_bitmap_clear56
-rw-r--r--doc/ck_bitmap_init84
-rw-r--r--doc/ck_bitmap_iterator_init70
-rw-r--r--doc/ck_bitmap_next90
-rw-r--r--doc/ck_bitmap_reset57
-rw-r--r--doc/ck_bitmap_set57
-rw-r--r--doc/ck_bitmap_size62
-rw-r--r--doc/ck_bitmap_test62
-rw-r--r--doc/ck_bitmap_union58
-rw-r--r--doc/ck_brlock121
-rw-r--r--doc/ck_cohort211
-rw-r--r--doc/ck_elide252
-rw-r--r--doc/ck_epoch_barrier120
-rw-r--r--doc/ck_epoch_begin73
-rw-r--r--doc/ck_epoch_call136
-rw-r--r--doc/ck_epoch_end64
-rw-r--r--doc/ck_epoch_init69
-rw-r--r--doc/ck_epoch_poll71
-rw-r--r--doc/ck_epoch_reclaim92
-rw-r--r--doc/ck_epoch_recycle102
-rw-r--r--doc/ck_epoch_register67
-rw-r--r--doc/ck_epoch_synchronize119
-rw-r--r--doc/ck_epoch_unregister65
-rw-r--r--doc/ck_hs_apply86
-rw-r--r--doc/ck_hs_count70
-rw-r--r--doc/ck_hs_destroy77
-rw-r--r--doc/ck_hs_fas98
-rw-r--r--doc/ck_hs_gc88
-rw-r--r--doc/ck_hs_get88
-rw-r--r--doc/ck_hs_grow81
-rw-r--r--doc/ck_hs_init169
-rw-r--r--doc/ck_hs_iterator_init78
-rw-r--r--doc/ck_hs_move90
-rw-r--r--doc/ck_hs_next92
-rw-r--r--doc/ck_hs_put98
-rw-r--r--doc/ck_hs_put_unique98
-rw-r--r--doc/ck_hs_rebuild76
-rw-r--r--doc/ck_hs_remove92
-rw-r--r--doc/ck_hs_reset77
-rw-r--r--doc/ck_hs_reset_size80
-rw-r--r--doc/ck_hs_set102
-rw-r--r--doc/ck_hs_stat81
-rw-r--r--doc/ck_ht_count77
-rw-r--r--doc/ck_ht_destroy87
-rw-r--r--doc/ck_ht_entry_empty90
-rw-r--r--doc/ck_ht_entry_key88
-rw-r--r--doc/ck_ht_entry_key_direct91
-rw-r--r--doc/ck_ht_entry_key_length88
-rw-r--r--doc/ck_ht_entry_key_set93
-rw-r--r--doc/ck_ht_entry_key_set_direct88
-rw-r--r--doc/ck_ht_entry_set95
-rw-r--r--doc/ck_ht_entry_set_direct94
-rw-r--r--doc/ck_ht_entry_value88
-rw-r--r--doc/ck_ht_entry_value_direct89
-rw-r--r--doc/ck_ht_gc96
-rw-r--r--doc/ck_ht_get_spmc177
-rw-r--r--doc/ck_ht_grow_spmc98
-rw-r--r--doc/ck_ht_hash90
-rw-r--r--doc/ck_ht_hash_direct90
-rw-r--r--doc/ck_ht_init188
-rw-r--r--doc/ck_ht_iterator_init88
-rw-r--r--doc/ck_ht_next107
-rw-r--r--doc/ck_ht_put_spmc146
-rw-r--r--doc/ck_ht_remove_spmc117
-rw-r--r--doc/ck_ht_reset_size_spmc84
-rw-r--r--doc/ck_ht_reset_spmc81
-rw-r--r--doc/ck_ht_set_spmc140
-rw-r--r--doc/ck_ht_stat85
-rw-r--r--doc/ck_pflock95
-rw-r--r--doc/ck_pr71
-rw-r--r--doc/ck_pr_add93
-rw-r--r--doc/ck_pr_and93
-rw-r--r--doc/ck_pr_barrier66
-rw-r--r--doc/ck_pr_btc90
-rw-r--r--doc/ck_pr_btr90
-rw-r--r--doc/ck_pr_bts90
-rw-r--r--doc/ck_pr_cas147
-rw-r--r--doc/ck_pr_dec124
-rw-r--r--doc/ck_pr_faa99
-rw-r--r--doc/ck_pr_fas100
-rw-r--r--doc/ck_pr_fence_acquire72
-rw-r--r--doc/ck_pr_fence_atomic111
-rw-r--r--doc/ck_pr_fence_atomic_load108
-rw-r--r--doc/ck_pr_fence_atomic_store109
-rw-r--r--doc/ck_pr_fence_load113
-rw-r--r--doc/ck_pr_fence_load_atomic113
-rw-r--r--doc/ck_pr_fence_load_depends75
-rw-r--r--doc/ck_pr_fence_load_store113
-rw-r--r--doc/ck_pr_fence_memory113
-rw-r--r--doc/ck_pr_fence_release71
-rw-r--r--doc/ck_pr_fence_store112
-rw-r--r--doc/ck_pr_fence_store_atomic108
-rw-r--r--doc/ck_pr_fence_store_load107
-rw-r--r--doc/ck_pr_inc124
-rw-r--r--doc/ck_pr_load96
-rw-r--r--doc/ck_pr_neg122
-rw-r--r--doc/ck_pr_not92
-rw-r--r--doc/ck_pr_or93
-rw-r--r--doc/ck_pr_rtm112
-rw-r--r--doc/ck_pr_stall86
-rw-r--r--doc/ck_pr_store96
-rw-r--r--doc/ck_pr_sub93
-rw-r--r--doc/ck_pr_xor93
-rw-r--r--doc/ck_queue147
-rw-r--r--doc/ck_rhs_apply86
-rw-r--r--doc/ck_rhs_count70
-rw-r--r--doc/ck_rhs_destroy77
-rw-r--r--doc/ck_rhs_fas98
-rw-r--r--doc/ck_rhs_gc73
-rw-r--r--doc/ck_rhs_get88
-rw-r--r--doc/ck_rhs_grow81
-rw-r--r--doc/ck_rhs_init166
-rw-r--r--doc/ck_rhs_iterator_init78
-rw-r--r--doc/ck_rhs_move90
-rw-r--r--doc/ck_rhs_next92
-rw-r--r--doc/ck_rhs_put98
-rw-r--r--doc/ck_rhs_put_unique98
-rw-r--r--doc/ck_rhs_rebuild76
-rw-r--r--doc/ck_rhs_remove92
-rw-r--r--doc/ck_rhs_reset77
-rw-r--r--doc/ck_rhs_reset_size80
-rw-r--r--doc/ck_rhs_set102
-rw-r--r--doc/ck_rhs_set_load_factor72
-rw-r--r--doc/ck_rhs_stat80
-rw-r--r--doc/ck_ring_capacity58
-rw-r--r--doc/ck_ring_dequeue_spmc117
-rw-r--r--doc/ck_ring_dequeue_spsc115
-rw-r--r--doc/ck_ring_enqueue_spmc115
-rw-r--r--doc/ck_ring_enqueue_spmc_size127
-rw-r--r--doc/ck_ring_enqueue_spsc113
-rw-r--r--doc/ck_ring_enqueue_spsc_size128
-rw-r--r--doc/ck_ring_init62
-rw-r--r--doc/ck_ring_size55
-rw-r--r--doc/ck_ring_trydequeue_spmc126
-rw-r--r--doc/ck_rwcohort203
-rw-r--r--doc/ck_rwlock143
-rw-r--r--doc/ck_sequence144
-rw-r--r--doc/ck_spinlock259
-rw-r--r--doc/ck_swlock138
-rw-r--r--doc/ck_tflock95
-rwxr-xr-xdoc/refcheck.pl27
-rw-r--r--include/ck_array.h100
-rw-r--r--include/ck_backoff.h57
-rw-r--r--include/ck_barrier.h164
-rw-r--r--include/ck_bitmap.h515
-rw-r--r--include/ck_brlock.h279
-rw-r--r--include/ck_bytelock.h196
-rw-r--r--include/ck_cc.h180
-rw-r--r--include/ck_cohort.h161
-rw-r--r--include/ck_elide.h321
-rw-r--r--include/ck_epoch.h207
-rw-r--r--include/ck_fifo.h478
-rw-r--r--include/ck_hp.h121
-rw-r--r--include/ck_hp_fifo.h215
-rw-r--r--include/ck_hp_stack.h110
-rw-r--r--include/ck_hs.h134
-rw-r--r--include/ck_ht.h271
-rw-r--r--include/ck_limits.h48
-rw-r--r--include/ck_malloc.h39
-rw-r--r--include/ck_md.h.in65
-rw-r--r--include/ck_pflock.h142
-rw-r--r--include/ck_pr.h1219
-rw-r--r--include/ck_queue.h428
-rw-r--r--include/ck_rhs.h134
-rw-r--r--include/ck_ring.h656
-rw-r--r--include/ck_rwcohort.h317
-rw-r--r--include/ck_rwlock.h302
-rw-r--r--include/ck_sequence.h125
-rw-r--r--include/ck_spinlock.h61
-rw-r--r--include/ck_stack.h357
-rw-r--r--include/ck_stdbool.h31
-rw-r--r--include/ck_stddef.h31
-rw-r--r--include/ck_stdint.h34
-rw-r--r--include/ck_stdlib.h31
-rw-r--r--include/ck_string.h31
-rw-r--r--include/ck_swlock.h218
-rw-r--r--include/ck_tflock.h136
-rw-r--r--include/gcc/aarch64/ck_f_pr.h167
-rw-r--r--include/gcc/aarch64/ck_pr.h227
-rw-r--r--include/gcc/aarch64/ck_pr_llsc.h352
-rw-r--r--include/gcc/aarch64/ck_pr_lse.h298
-rw-r--r--include/gcc/arm/ck_f_pr.h162
-rw-r--r--include/gcc/arm/ck_pr.h563
-rw-r--r--include/gcc/ck_cc.h142
-rw-r--r--include/gcc/ck_f_pr.h105
-rw-r--r--include/gcc/ck_pr.h297
-rw-r--r--include/gcc/ppc/ck_f_pr.h79
-rw-r--r--include/gcc/ppc/ck_pr.h327
-rw-r--r--include/gcc/ppc64/ck_f_pr.h97
-rw-r--r--include/gcc/ppc64/ck_pr.h427
-rw-r--r--include/gcc/sparcv9/ck_f_pr.h26
-rw-r--r--include/gcc/sparcv9/ck_pr.h228
-rw-r--r--include/gcc/x86/ck_f_pr.h152
-rw-r--r--include/gcc/x86/ck_pr.h390
-rw-r--r--include/gcc/x86_64/ck_f_pr.h202
-rw-r--r--include/gcc/x86_64/ck_pr.h585
-rw-r--r--include/gcc/x86_64/ck_pr_rtm.h109
-rw-r--r--include/spinlock/anderson.h167
-rw-r--r--include/spinlock/cas.h119
-rw-r--r--include/spinlock/clh.h122
-rw-r--r--include/spinlock/dec.h143
-rw-r--r--include/spinlock/fas.h118
-rw-r--r--include/spinlock/hclh.h145
-rw-r--r--include/spinlock/mcs.h155
-rw-r--r--include/spinlock/ticket.h296
-rw-r--r--regressions/Makefile128
-rw-r--r--regressions/Makefile.unsupported9
-rw-r--r--regressions/ck_array/validate/Makefile17
-rw-r--r--regressions/ck_array/validate/serial.c178
-rw-r--r--regressions/ck_backoff/validate/Makefile15
-rw-r--r--regressions/ck_backoff/validate/validate.c60
-rw-r--r--regressions/ck_barrier/benchmark/Makefile14
-rw-r--r--regressions/ck_barrier/benchmark/throughput.c136
-rw-r--r--regressions/ck_barrier/validate/Makefile34
-rw-r--r--regressions/ck_barrier/validate/barrier_centralized.c121
-rw-r--r--regressions/ck_barrier/validate/barrier_combining.c143
-rw-r--r--regressions/ck_barrier/validate/barrier_dissemination.c144
-rw-r--r--regressions/ck_barrier/validate/barrier_mcs.c131
-rw-r--r--regressions/ck_barrier/validate/barrier_tournament.c142
-rw-r--r--regressions/ck_bitmap/validate/Makefile17
-rw-r--r--regressions/ck_bitmap/validate/serial.c372
-rw-r--r--regressions/ck_brlock/benchmark/Makefile17
-rw-r--r--regressions/ck_brlock/benchmark/latency.c103
-rw-r--r--regressions/ck_brlock/benchmark/throughput.c164
-rw-r--r--regressions/ck_brlock/validate/Makefile17
-rw-r--r--regressions/ck_brlock/validate/validate.c155
-rw-r--r--regressions/ck_bytelock/benchmark/Makefile14
-rw-r--r--regressions/ck_bytelock/benchmark/latency.c99
-rw-r--r--regressions/ck_bytelock/validate/Makefile17
-rw-r--r--regressions/ck_bytelock/validate/validate.c166
-rw-r--r--regressions/ck_cohort/benchmark/Makefile17
-rw-r--r--regressions/ck_cohort/benchmark/ck_cohort.c8
-rw-r--r--regressions/ck_cohort/benchmark/throughput.c239
-rw-r--r--regressions/ck_cohort/ck_cohort.h35
-rw-r--r--regressions/ck_cohort/validate/Makefile17
-rw-r--r--regressions/ck_cohort/validate/validate.c205
-rw-r--r--regressions/ck_epoch/validate/Makefile42
-rw-r--r--regressions/ck_epoch/validate/ck_epoch_call.c64
-rw-r--r--regressions/ck_epoch/validate/ck_epoch_poll.c236
-rw-r--r--regressions/ck_epoch/validate/ck_epoch_section.c311
-rw-r--r--regressions/ck_epoch/validate/ck_epoch_section_2.c195
-rw-r--r--regressions/ck_epoch/validate/ck_epoch_synchronize.c249
-rw-r--r--regressions/ck_epoch/validate/ck_stack.c164
-rw-r--r--regressions/ck_epoch/validate/torture.c234
-rw-r--r--regressions/ck_fifo/benchmark/Makefile14
-rw-r--r--regressions/ck_fifo/benchmark/latency.c157
-rw-r--r--regressions/ck_fifo/validate/Makefile29
-rw-r--r--regressions/ck_fifo/validate/ck_fifo_mpmc.c168
-rw-r--r--regressions/ck_fifo/validate/ck_fifo_mpmc_iterator.c90
-rw-r--r--regressions/ck_fifo/validate/ck_fifo_spsc.c177
-rw-r--r--regressions/ck_fifo/validate/ck_fifo_spsc_iterator.c83
-rw-r--r--regressions/ck_hp/benchmark/Makefile17
-rw-r--r--regressions/ck_hp/benchmark/fifo_latency.c94
-rw-r--r--regressions/ck_hp/benchmark/stack_latency.c95
-rw-r--r--regressions/ck_hp/validate/Makefile33
-rw-r--r--regressions/ck_hp/validate/ck_hp_fifo.c187
-rw-r--r--regressions/ck_hp/validate/ck_hp_fifo_donner.c213
-rw-r--r--regressions/ck_hp/validate/ck_hp_stack.c165
-rw-r--r--regressions/ck_hp/validate/nbds_haz_test.c226
-rw-r--r--regressions/ck_hp/validate/serial.c127
-rw-r--r--regressions/ck_hs/benchmark/Makefile23
-rw-r--r--regressions/ck_hs/benchmark/apply.c260
-rw-r--r--regressions/ck_hs/benchmark/parallel_bytestring.c602
-rw-r--r--regressions/ck_hs/benchmark/serial.c517
-rw-r--r--regressions/ck_hs/validate/Makefile17
-rw-r--r--regressions/ck_hs/validate/serial.c315
-rw-r--r--regressions/ck_ht/benchmark/Makefile27
-rw-r--r--regressions/ck_ht/benchmark/parallel_bytestring.c559
-rw-r--r--regressions/ck_ht/benchmark/parallel_direct.c545
-rw-r--r--regressions/ck_ht/benchmark/serial.c387
-rw-r--r--regressions/ck_ht/validate/Makefile21
-rw-r--r--regressions/ck_ht/validate/serial.c309
-rw-r--r--regressions/ck_pflock/benchmark/Makefile17
-rw-r--r--regressions/ck_pflock/benchmark/latency.c72
-rw-r--r--regressions/ck_pflock/benchmark/throughput.c163
-rw-r--r--regressions/ck_pflock/validate/Makefile17
-rw-r--r--regressions/ck_pflock/validate/validate.c151
-rw-r--r--regressions/ck_pr/benchmark/Makefile31
-rw-r--r--regressions/ck_pr/benchmark/benchmark.h130
-rw-r--r--regressions/ck_pr/benchmark/ck_pr_add_64.c16
-rw-r--r--regressions/ck_pr/benchmark/ck_pr_cas_64.c16
-rw-r--r--regressions/ck_pr/benchmark/ck_pr_cas_64_2.c17
-rw-r--r--regressions/ck_pr/benchmark/ck_pr_faa_64.c16
-rw-r--r--regressions/ck_pr/benchmark/ck_pr_fas_64.c17
-rw-r--r--regressions/ck_pr/benchmark/ck_pr_neg_64.c16
-rw-r--r--regressions/ck_pr/benchmark/fp.c66
-rw-r--r--regressions/ck_pr/validate/Makefile84
-rw-r--r--regressions/ck_pr/validate/ck_pr_add.c151
-rw-r--r--regressions/ck_pr/validate/ck_pr_and.c147
-rw-r--r--regressions/ck_pr/validate/ck_pr_bin.c94
-rw-r--r--regressions/ck_pr/validate/ck_pr_btc.c96
-rw-r--r--regressions/ck_pr/validate/ck_pr_btr.c97
-rw-r--r--regressions/ck_pr/validate/ck_pr_bts.c97
-rw-r--r--regressions/ck_pr/validate/ck_pr_btx.c112
-rw-r--r--regressions/ck_pr/validate/ck_pr_cas.c158
-rw-r--r--regressions/ck_pr/validate/ck_pr_dec.c143
-rw-r--r--regressions/ck_pr/validate/ck_pr_faa.c152
-rw-r--r--regressions/ck_pr/validate/ck_pr_fas.c148
-rw-r--r--regressions/ck_pr/validate/ck_pr_fax.c121
-rw-r--r--regressions/ck_pr/validate/ck_pr_inc.c143
-rw-r--r--regressions/ck_pr/validate/ck_pr_load.c149
-rw-r--r--regressions/ck_pr/validate/ck_pr_n.c90
-rw-r--r--regressions/ck_pr/validate/ck_pr_or.c149
-rw-r--r--regressions/ck_pr/validate/ck_pr_store.c150
-rw-r--r--regressions/ck_pr/validate/ck_pr_sub.c151
-rw-r--r--regressions/ck_pr/validate/ck_pr_unary.c117
-rw-r--r--regressions/ck_pr/validate/ck_pr_xor.c147
-rw-r--r--regressions/ck_queue/validate/Makefile26
-rw-r--r--regressions/ck_queue/validate/ck_list.c236
-rw-r--r--regressions/ck_queue/validate/ck_slist.c217
-rw-r--r--regressions/ck_queue/validate/ck_stailq.c256
-rw-r--r--regressions/ck_rhs/benchmark/Makefile17
-rw-r--r--regressions/ck_rhs/benchmark/parallel_bytestring.c599
-rw-r--r--regressions/ck_rhs/benchmark/serial.c517
-rw-r--r--regressions/ck_rhs/validate/Makefile17
-rw-r--r--regressions/ck_rhs/validate/serial.c310
-rw-r--r--regressions/ck_ring/benchmark/Makefile14
-rw-r--r--regressions/ck_ring/benchmark/latency.c142
-rw-r--r--regressions/ck_ring/validate/Makefile40
-rw-r--r--regressions/ck_ring/validate/ck_ring_mpmc.c448
-rw-r--r--regressions/ck_ring/validate/ck_ring_mpmc_template.c349
-rw-r--r--regressions/ck_ring/validate/ck_ring_spmc.c340
-rw-r--r--regressions/ck_ring/validate/ck_ring_spmc_template.c350
-rw-r--r--regressions/ck_ring/validate/ck_ring_spsc.c213
-rw-r--r--regressions/ck_rwcohort/benchmark/Makefile32
-rw-r--r--regressions/ck_rwcohort/benchmark/ck_neutral.c7
-rw-r--r--regressions/ck_rwcohort/benchmark/ck_rp.c7
-rw-r--r--regressions/ck_rwcohort/benchmark/ck_wp.c7
-rw-r--r--regressions/ck_rwcohort/benchmark/latency.h106
-rw-r--r--regressions/ck_rwcohort/benchmark/throughput.h245
-rw-r--r--regressions/ck_rwcohort/ck_neutral.h8
-rw-r--r--regressions/ck_rwcohort/ck_rp.h8
-rw-r--r--regressions/ck_rwcohort/ck_wp.h8
-rw-r--r--regressions/ck_rwcohort/validate/Makefile25
-rw-r--r--regressions/ck_rwcohort/validate/ck_neutral.c2
-rw-r--r--regressions/ck_rwcohort/validate/ck_rp.c2
-rw-r--r--regressions/ck_rwcohort/validate/ck_wp.c2
-rw-r--r--regressions/ck_rwcohort/validate/validate.h209
-rw-r--r--regressions/ck_rwlock/benchmark/Makefile17
-rw-r--r--regressions/ck_rwlock/benchmark/latency.c134
-rw-r--r--regressions/ck_rwlock/benchmark/throughput.c254
-rw-r--r--regressions/ck_rwlock/validate/Makefile17
-rw-r--r--regressions/ck_rwlock/validate/validate.c447
-rw-r--r--regressions/ck_sequence/benchmark/Makefile18
-rw-r--r--regressions/ck_sequence/benchmark/ck_sequence.c91
-rw-r--r--regressions/ck_sequence/validate/Makefile17
-rw-r--r--regressions/ck_sequence/validate/ck_sequence.c171
-rw-r--r--regressions/ck_spinlock/benchmark/Makefile87
-rw-r--r--regressions/ck_spinlock/benchmark/ck_anderson.c8
-rw-r--r--regressions/ck_spinlock/benchmark/ck_cas.c8
-rw-r--r--regressions/ck_spinlock/benchmark/ck_clh.c7
-rw-r--r--regressions/ck_spinlock/benchmark/ck_dec.c7
-rw-r--r--regressions/ck_spinlock/benchmark/ck_fas.c7
-rw-r--r--regressions/ck_spinlock/benchmark/ck_hclh.c7
-rw-r--r--regressions/ck_spinlock/benchmark/ck_mcs.c7
-rw-r--r--regressions/ck_spinlock/benchmark/ck_spinlock.c7
-rw-r--r--regressions/ck_spinlock/benchmark/ck_ticket.c8
-rw-r--r--regressions/ck_spinlock/benchmark/ck_ticket_pb.c7
-rw-r--r--regressions/ck_spinlock/benchmark/latency.h76
-rw-r--r--regressions/ck_spinlock/benchmark/linux_spinlock.c7
-rw-r--r--regressions/ck_spinlock/benchmark/throughput.h218
-rw-r--r--regressions/ck_spinlock/ck_anderson.h11
-rw-r--r--regressions/ck_spinlock/ck_cas.h6
-rw-r--r--regressions/ck_spinlock/ck_clh.h9
-rw-r--r--regressions/ck_spinlock/ck_dec.h6
-rw-r--r--regressions/ck_spinlock/ck_fas.h6
-rw-r--r--regressions/ck_spinlock/ck_hclh.h16
-rw-r--r--regressions/ck_spinlock/ck_mcs.h7
-rw-r--r--regressions/ck_spinlock/ck_spinlock.h6
-rw-r--r--regressions/ck_spinlock/ck_ticket.h11
-rw-r--r--regressions/ck_spinlock/ck_ticket_pb.h6
-rw-r--r--regressions/ck_spinlock/linux_spinlock.h39
-rw-r--r--regressions/ck_spinlock/validate/Makefile57
-rw-r--r--regressions/ck_spinlock/validate/ck_anderson.c2
-rw-r--r--regressions/ck_spinlock/validate/ck_cas.c2
-rw-r--r--regressions/ck_spinlock/validate/ck_clh.c2
-rw-r--r--regressions/ck_spinlock/validate/ck_dec.c2
-rw-r--r--regressions/ck_spinlock/validate/ck_fas.c2
-rw-r--r--regressions/ck_spinlock/validate/ck_hclh.c2
-rw-r--r--regressions/ck_spinlock/validate/ck_mcs.c2
-rw-r--r--regressions/ck_spinlock/validate/ck_spinlock.c2
-rw-r--r--regressions/ck_spinlock/validate/ck_ticket.c2
-rw-r--r--regressions/ck_spinlock/validate/ck_ticket_pb.c2
-rw-r--r--regressions/ck_spinlock/validate/linux_spinlock.c14
-rw-r--r--regressions/ck_spinlock/validate/validate.h180
-rw-r--r--regressions/ck_stack/benchmark/Makefile14
-rw-r--r--regressions/ck_stack/benchmark/latency.c176
-rw-r--r--regressions/ck_stack/validate/Makefile56
-rw-r--r--regressions/ck_stack/validate/pair.c249
-rw-r--r--regressions/ck_stack/validate/pop.c269
-rw-r--r--regressions/ck_stack/validate/push.c248
-rw-r--r--regressions/ck_stack/validate/serial.c84
-rw-r--r--regressions/ck_swlock/benchmark/Makefile17
-rw-r--r--regressions/ck_swlock/benchmark/latency.c86
-rw-r--r--regressions/ck_swlock/benchmark/throughput.c183
-rw-r--r--regressions/ck_swlock/validate/Makefile17
-rw-r--r--regressions/ck_swlock/validate/validate.c455
-rw-r--r--regressions/ck_tflock/benchmark/Makefile17
-rw-r--r--regressions/ck_tflock/benchmark/latency.c73
-rw-r--r--regressions/ck_tflock/benchmark/throughput.c182
-rw-r--r--regressions/ck_tflock/validate/Makefile17
-rw-r--r--regressions/ck_tflock/validate/validate.c158
-rw-r--r--regressions/common.h471
-rw-r--r--src/Makefile.in64
-rw-r--r--src/ck_array.c240
-rw-r--r--src/ck_barrier_centralized.c59
-rw-r--r--src/ck_barrier_combining.c207
-rw-r--r--src/ck_barrier_dissemination.c130
-rw-r--r--src/ck_barrier_mcs.c141
-rw-r--r--src/ck_barrier_tournament.c184
-rw-r--r--src/ck_epoch.c545
-rw-r--r--src/ck_hp.c323
-rw-r--r--src/ck_hs.c941
-rw-r--r--src/ck_ht.c1036
-rw-r--r--src/ck_ht_hash.h269
-rw-r--r--src/ck_internal.h119
-rw-r--r--src/ck_rhs.c1480
-rwxr-xr-xtools/feature.sh5
465 files changed, 60158 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..8a1806c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,187 @@
+/Makefile
+build/ck.build
+build/ck.pc
+build/regressions.build
+build/ck.spec
+include/ck_md.h
+src/Makefile
+doc/Makefile
+doc/*.3
+build/Makefile
+.DS_Store
+LOG
+*.log
+*.html
+*.gz
+*.o
+*.a
+*.so
+*.dSYM
+.*.sw[op]
+GPATH
+GRTAGS
+GTAGS
+ID
+regressions/ck_array/validate/serial
+regressions/ck_backoff/validate/validate
+regressions/ck_bag/validate/order
+regressions/ck_barrier/benchmark/throughput
+regressions/ck_barrier/validate/barrier_centralized
+regressions/ck_barrier/validate/barrier_combining
+regressions/ck_barrier/validate/barrier_dissemination
+regressions/ck_barrier/validate/barrier_mcs
+regressions/ck_barrier/validate/barrier_tournament
+regressions/ck_bitmap/validate/serial
+regressions/ck_brlock/benchmark/latency
+regressions/ck_brlock/benchmark/throughput
+regressions/ck_brlock/validate/validate
+regressions/ck_bytelock/benchmark/latency
+regressions/ck_bytelock/validate/validate
+regressions/ck_cohort/benchmark/ck_cohort.LATENCY
+regressions/ck_cohort/benchmark/ck_cohort.THROUGHPUT
+regressions/ck_cohort/validate/validate
+regressions/ck_epoch/validate/ck_epoch_call
+regressions/ck_epoch/validate/ck_epoch_poll
+regressions/ck_epoch/validate/ck_epoch_section
+regressions/ck_epoch/validate/ck_epoch_section_2
+regressions/ck_epoch/validate/torture
+regressions/ck_epoch/validate/ck_epoch_synchronize
+regressions/ck_epoch/validate/ck_stack
+regressions/ck_epoch/validate/ck_stack_read
+regressions/ck_fifo/benchmark/latency
+regressions/ck_fifo/validate/ck_fifo_mpmc
+regressions/ck_fifo/validate/ck_fifo_mpmc_iterator
+regressions/ck_fifo/validate/ck_fifo_spsc
+regressions/ck_fifo/validate/ck_fifo_spsc_iterator
+regressions/ck_hp/benchmark/fifo_latency
+regressions/ck_hp/benchmark/stack_latency
+regressions/ck_hp/validate/ck_hp_fifo
+regressions/ck_hp/validate/ck_hp_fifo_donner
+regressions/ck_hp/validate/ck_hp_stack
+regressions/ck_hp/validate/nbds_haz_test
+regressions/ck_hp/validate/serial
+regressions/ck_hs/benchmark/apply
+regressions/ck_hs/benchmark/parallel_bytestring
+regressions/ck_hs/benchmark/parallel_bytestring.delete
+regressions/ck_hs/benchmark/serial
+regressions/ck_hs/validate/serial
+regressions/ck_ht/benchmark/parallel_bytestring
+regressions/ck_ht/benchmark/parallel_bytestring.delete
+regressions/ck_ht/benchmark/parallel_direct
+regressions/ck_ht/benchmark/serial
+regressions/ck_ht/benchmark/serial.delete
+regressions/ck_ht/validate/serial
+regressions/ck_ht/validate/serial.delete
+regressions/ck_pflock/benchmark/latency
+regressions/ck_pflock/benchmark/throughput
+regressions/ck_pflock/validate/validate
+regressions/ck_pr/benchmark/ck_pr_cas_64
+regressions/ck_pr/benchmark/ck_pr_cas_64_2
+regressions/ck_pr/benchmark/ck_pr_fas_64
+regressions/ck_pr/benchmark/fp
+regressions/ck_pr/validate/ck_pr_add
+regressions/ck_pr/validate/ck_pr_and
+regressions/ck_pr/validate/ck_pr_bin
+regressions/ck_pr/validate/ck_pr_btc
+regressions/ck_pr/validate/ck_pr_btr
+regressions/ck_pr/validate/ck_pr_bts
+regressions/ck_pr/validate/ck_pr_btx
+regressions/ck_pr/validate/ck_pr_cas
+regressions/ck_pr/validate/ck_pr_dec
+regressions/ck_pr/validate/ck_pr_faa
+regressions/ck_pr/validate/ck_pr_fas
+regressions/ck_pr/validate/ck_pr_fax
+regressions/ck_pr/validate/ck_pr_inc
+regressions/ck_pr/validate/ck_pr_load
+regressions/ck_pr/validate/ck_pr_n
+regressions/ck_pr/validate/ck_pr_or
+regressions/ck_pr/validate/ck_pr_store
+regressions/ck_pr/validate/ck_pr_sub
+regressions/ck_pr/validate/ck_pr_unary
+regressions/ck_pr/validate/ck_pr_xor
+regressions/ck_queue/validate/ck_list
+regressions/ck_queue/validate/ck_slist
+regressions/ck_queue/validate/ck_stailq
+regressions/ck_rhs/benchmark/parallel_bytestring
+regressions/ck_rhs/benchmark/serial
+regressions/ck_rhs/validate/serial
+regressions/ck_ring/benchmark/latency
+regressions/ck_ring/validate/ck_ring_spmc
+regressions/ck_ring/validate/ck_ring_spmc_template
+regressions/ck_ring/validate/ck_ring_spsc
+regressions/ck_ring/validate/ck_ring_spsc_template
+regressions/ck_ring/validate/ck_ring_mpmc
+regressions/ck_ring/validate/ck_ring_mpmc_template
+regressions/ck_rwcohort/benchmark/ck_neutral.LATENCY
+regressions/ck_rwcohort/benchmark/ck_neutral.THROUGHPUT
+regressions/ck_rwcohort/benchmark/ck_rp.LATENCY
+regressions/ck_rwcohort/benchmark/ck_rp.THROUGHPUT
+regressions/ck_rwcohort/benchmark/ck_wp.LATENCY
+regressions/ck_rwcohort/benchmark/ck_wp.THROUGHPUT
+regressions/ck_rwcohort/validate/ck_neutral
+regressions/ck_rwcohort/validate/ck_rp
+regressions/ck_rwcohort/validate/ck_wp
+regressions/ck_rwlock/benchmark/latency
+regressions/ck_rwlock/benchmark/throughput
+regressions/ck_rwlock/validate/validate
+regressions/ck_sequence/benchmark/ck_sequence
+regressions/ck_sequence/validate/ck_sequence
+regressions/ck_spinlock/benchmark/ck_anderson.LATENCY
+regressions/ck_spinlock/benchmark/ck_anderson.THROUGHPUT
+regressions/ck_spinlock/benchmark/ck_cas.LATENCY
+regressions/ck_spinlock/benchmark/ck_cas.THROUGHPUT
+regressions/ck_spinlock/benchmark/ck_clh.LATENCY
+regressions/ck_spinlock/benchmark/ck_clh.THROUGHPUT
+regressions/ck_spinlock/benchmark/ck_dec.LATENCY
+regressions/ck_spinlock/benchmark/ck_dec.THROUGHPUT
+regressions/ck_spinlock/benchmark/ck_fas.LATENCY
+regressions/ck_spinlock/benchmark/ck_fas.THROUGHPUT
+regressions/ck_spinlock/benchmark/ck_hclh.LATENCY
+regressions/ck_spinlock/benchmark/ck_hclh.THROUGHPUT
+regressions/ck_spinlock/benchmark/ck_mcs.LATENCY
+regressions/ck_spinlock/benchmark/ck_mcs.THROUGHPUT
+regressions/ck_spinlock/benchmark/ck_spinlock.LATENCY
+regressions/ck_spinlock/benchmark/ck_spinlock.THROUGHPUT
+regressions/ck_spinlock/benchmark/ck_ticket.LATENCY
+regressions/ck_spinlock/benchmark/ck_ticket.THROUGHPUT
+regressions/ck_spinlock/benchmark/ck_ticket_pb.LATENCY
+regressions/ck_spinlock/benchmark/ck_ticket_pb.THROUGHPUT
+regressions/ck_spinlock/benchmark/linux_spinlock.LATENCY
+regressions/ck_spinlock/benchmark/linux_spinlock.THROUGHPUT
+regressions/ck_spinlock/validate/ck_anderson
+regressions/ck_spinlock/validate/ck_cas
+regressions/ck_spinlock/validate/ck_clh
+regressions/ck_spinlock/validate/ck_dec
+regressions/ck_spinlock/validate/ck_fas
+regressions/ck_spinlock/validate/ck_hclh
+regressions/ck_spinlock/validate/ck_mcs
+regressions/ck_spinlock/validate/ck_spinlock
+regressions/ck_spinlock/validate/ck_ticket
+regressions/ck_spinlock/validate/ck_ticket_pb
+regressions/ck_spinlock/validate/linux_spinlock
+regressions/ck_stack/benchmark/latency
+regressions/ck_stack/validate/mpmc_pair
+regressions/ck_stack/validate/mpmc_pop
+regressions/ck_stack/validate/mpmc_push
+regressions/ck_stack/validate/mpmc_trypair
+regressions/ck_stack/validate/mpmc_trypop
+regressions/ck_stack/validate/mpmc_trypush
+regressions/ck_stack/validate/mpnc_push
+regressions/ck_stack/validate/pthreads_pair
+regressions/ck_stack/validate/serial
+regressions/ck_stack/validate/spinlock_eb_pair
+regressions/ck_stack/validate/spinlock_eb_pop
+regressions/ck_stack/validate/spinlock_eb_push
+regressions/ck_stack/validate/spinlock_pair
+regressions/ck_stack/validate/spinlock_pop
+regressions/ck_stack/validate/spinlock_push
+regressions/ck_stack/validate/upmc_pop
+regressions/ck_stack/validate/upmc_push
+regressions/ck_stack/validate/upmc_trypop
+regressions/ck_stack/validate/upmc_trypush
+regressions/ck_swlock/benchmark/latency
+regressions/ck_swlock/benchmark/throughput
+regressions/ck_swlock/validate/validate
+regressions/ck_tflock/benchmark/latency
+regressions/ck_tflock/benchmark/throughput
+regressions/ck_tflock/validate/validate
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..73b0eeb
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,54 @@
+Copyright 2010-2014 Samy Al Bahra.
+Copyright 2011-2013 AppNexus, Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+
+Hazard Pointers (src/ck_hp.c) also includes this license:
+
+(c) Copyright 2008, IBM Corporation.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+ck_pr_rtm leverages work from Andi Kleen:
+Copyright (c) 2012,2013 Intel Corporation
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that: (1) source code distributions
+retain the above copyright notice and this paragraph in its entirety, (2)
+distributions including binary code include the above copyright notice and
+this paragraph in its entirety in the documentation or other materials
+provided with the distribution
+
+THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+
diff --git a/Makefile.in b/Makefile.in
new file mode 100644
index 0000000..7e73f70
--- /dev/null
+++ b/Makefile.in
@@ -0,0 +1,103 @@
+.PHONY: all check clean dist distclean doc install install-headers regressions uninstall
+
+BUILD_DIR=@BUILD_DIR@
+SRC_DIR=@SRC_DIR@
+CFLAGS=@CFLAGS@
+VERSION=@VERSION@
+VERSION_MAJOR=@VERSION_MAJOR@
+PREFIX=@PREFIX@
+LIBRARY=@LIBRARY@
+HEADERS=@HEADERS@
+PKGCONFIG_DATA=@LIBRARY@/pkgconfig
+LDNAME=@LDNAME@
+LDNAME_VERSION=@LDNAME_VERSION@
+LDNAME_MAJOR=@LDNAME_MAJOR@
+
+all: doc
+ $(MAKE) -C src all || exit
+ @echo
+ @echo
+ @echo ---[ Concurrency Kit has built successfully. You may now \"make install\".
+
+doc:
+ $(MAKE) -C doc all || exit
+ @echo ---[ Manual pages are ready for installation.
+
+regressions:
+ $(MAKE) -C regressions all || exit
+ @echo ---[ Regressions have built successfully.
+
+check: regressions
+ @echo ---[ Executing unit tests...
+ $(MAKE) -C regressions check
+ @echo ---[ Unit tests have completed successfully.
+
+install-headers:
+ mkdir -p $(DESTDIR)/$(HEADERS) || exit
+ cp $(SRC_DIR)/include/*.h $(DESTDIR)/$(HEADERS) || exit
+ chmod 644 $(DESTDIR)/$(HEADERS)/ck_*.h || exit
+ mkdir -p $(DESTDIR)$(HEADERS)/gcc || exit
+ cp -r $(SRC_DIR)/include/gcc/* $(DESTDIR)/$(HEADERS)/gcc || exit
+ cp include/ck_md.h $(DESTDIR)/$(HEADERS)/ck_md.h || exit
+ chmod 755 $(DESTDIR)/$(HEADERS)/gcc
+ chmod 644 $(DESTDIR)/$(HEADERS)/gcc/ck_*.h $(DESTDIR)/$(HEADERS)/gcc/*/ck_*.h || exit
+ mkdir -p $(DESTDIR)$(HEADERS)/spinlock || exit
+ cp -r $(SRC_DIR)/include/spinlock/* $(DESTDIR)/$(HEADERS)/spinlock || exit
+ chmod 755 $(DESTDIR)/$(HEADERS)/spinlock
+ chmod 644 $(DESTDIR)/$(HEADERS)/spinlock/*.h || exit
+
+install-so:
+ mkdir -p $(DESTDIR)/$(LIBRARY)
+ cp src/libck.so $(DESTDIR)/$(LIBRARY)/$(LDNAME_VERSION)
+ ln -sf $(LDNAME_VERSION) $(DESTDIR)/$(LIBRARY)/$(LDNAME)
+ ln -sf $(LDNAME_VERSION) $(DESTDIR)/$(LIBRARY)/$(LDNAME_MAJOR)
+ chmod 744 $(DESTDIR)/$(LIBRARY)/$(LDNAME_VERSION) \
+ $(DESTDIR)/$(LIBRARY)/$(LDNAME) \
+ $(DESTDIR)/$(LIBRARY)/$(LDNAME_MAJOR)
+
+install-lib:
+ mkdir -p $(DESTDIR)/$(LIBRARY)
+ cp src/libck.a $(DESTDIR)/$(LIBRARY)/libck.a
+ chmod 644 $(DESTDIR)/$(LIBRARY)/libck.a
+
+install: all install-headers @INSTALL_LIBS@
+ $(MAKE) -C doc install
+ mkdir -p $(DESTDIR)/$(LIBRARY) || exit
+ mkdir -p $(DESTDIR)/$(PKGCONFIG_DATA) || exit
+ chmod 755 $(DESTDIR)/$(PKGCONFIG_DATA)
+ cp build/ck.pc $(DESTDIR)/$(PKGCONFIG_DATA)/ck.pc || exit
+ @echo
+ @echo
+ @echo ---[ Concurrency Kit has installed successfully.
+
+uninstall:
+ $(MAKE) -C doc uninstall
+ rm -f $(DESTDIR)/$(LIBRARY)/$(LDNAME_VERSION) \
+ $(DESTDIR)/$(LIBRARY)/$(LDNAME) \
+ $(DESTDIR)/$(LIBRARY)/$(LDNAME_MAJOR)
+ rm -f $(DESTDIR)/$(LIBRARY)/libck.so*
+ rm -f $(DESTDIR)/$(LIBRARY)/libck.a
+ rm -f $(DESTDIR)/$(HEADERS)/ck_*.h
+ rm -f $(DESTDIR)/$(HEADERS)/spinlock/*.h
+ rm -f $(DESTDIR)/$(HEADERS)/gcc/ck_*.h
+ rm -f $(DESTDIR)/$(HEADERS)/gcc/*/ck_*.h
+ rm -f $(DESTDIR)/$(PKGCONFIG_DATA)/ck.pc
+
+clean:
+ $(MAKE) -C doc clean
+ $(MAKE) -C src clean
+ $(MAKE) -C regressions clean
+ rm -f $(BUILD_DIR)/*~ $(BUILD_DIR)/*.o $(BUILD_DIR)/*.tar.gz
+
+dist:
+ git archive --remote=$(SRC_DIR) --format=tar --prefix=ck-$(VERSION)/ HEAD \
+ | gzip > $(BUILD_DIR)/ck-$(VERSION).tar.gz
+
+distclean: clean
+ rm -f $(BUILD_DIR)/include/ck_md.h
+ rm -f $(BUILD_DIR)/build/regressions.build
+ rm -f $(BUILD_DIR)/build/ck.build
+ rm -f $(BUILD_DIR)/build/ck.pc
+ rm -f $(BUILD_DIR)/Makefile
+ rm -f $(BUILD_DIR)/doc/Makefile
+
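
The install targets above all honour DESTDIR, so a staged install into a scratch root is possible before packaging. A minimal sketch (the /tmp/ck-stage path is only an example, not something the build mandates):

	./configure --prefix=/usr/local
	make
	make DESTDIR=/tmp/ck-stage install   # stages headers, libck and man pages under /tmp/ck-stage
	find /tmp/ck-stage -maxdepth 3       # inspect the staged tree
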
diff --git a/README b/README
new file mode 100644
index 0000000..81fb5ac
--- /dev/null
+++ b/README
@@ -0,0 +1,21 @@
+ ____ _ ___ _
+ / ___|___ _ __ ___ _ _ _ __ _ __ ___ _ __ ___ _ _ | |/ (_) |_
+| | / _ \| '_ \ / __| | | | '__| '__/ _ \ '_ \ / __| | | | | ' /| | __|
+| |__| (_) | | | | (__| |_| | | | | | __/ | | | (__| |_| | | . \| | |_
+ \____\___/|_| |_|\___|\__,_|_| |_| \___|_| |_|\___|\__, | |_|\_\_|\__|
+ |___/
+
+Step 1.
+ ./configure
+ For additional options try ./configure --help
+
+Step 2.
+ In order to compile regressions (requires POSIX threads) use
+ "make regressions". In order to compile libck use "make all" or "make".
+
+Step 3.
+ In order to install use "make install"
+ To uninstall use "make uninstall".
+
+See http://concurrencykit.org/ for more information.
+
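
Taken together, the three steps in the README amount to the following shell session (a sketch, not part of the upstream file; run from the source or build directory):

	./configure            # Step 1: detect the platform and generate build files
	make                   # Step 2: build libck ("make all" is equivalent)
	make regressions       # optional: build the regression tests (requires POSIX threads)
	make check             # optional: run the unit tests (see the Makefile.in check target above)
	make install           # Step 3: install headers, libraries and manual pages
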
diff --git a/build/ck.build.aarch64 b/build/ck.build.aarch64
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/build/ck.build.aarch64
@@ -0,0 +1 @@
+
diff --git a/build/ck.build.arm b/build/ck.build.arm
new file mode 100644
index 0000000..3fa739c
--- /dev/null
+++ b/build/ck.build.arm
@@ -0,0 +1 @@
+CFLAGS+=-D__arm__
diff --git a/build/ck.build.in b/build/ck.build.in
new file mode 100644
index 0000000..1d6bfe3
--- /dev/null
+++ b/build/ck.build.in
@@ -0,0 +1,10 @@
+CC=@CC@
+MAKE=make
+SRC_DIR=@SRC_DIR@
+BUILD_DIR=@BUILD_DIR@
+CFLAGS+=@CFLAGS@ -I$(SRC_DIR)/include -I$(BUILD_DIR)/include
+LDFLAGS+=@LDFLAGS@
+ALL_LIBS=@ALL_LIBS@
+LD=@LD@
+
+include $(BUILD_DIR)/build/ck.build.@PROFILE@
diff --git a/build/ck.build.ppc b/build/ck.build.ppc
new file mode 100644
index 0000000..bd0c2fd
--- /dev/null
+++ b/build/ck.build.ppc
@@ -0,0 +1 @@
+CFLAGS+=-m32 -D__ppc__
diff --git a/build/ck.build.ppc64 b/build/ck.build.ppc64
new file mode 100644
index 0000000..51003f4
--- /dev/null
+++ b/build/ck.build.ppc64
@@ -0,0 +1,2 @@
+CFLAGS+=-m64 -D__ppc64__
+LDFLAGS+=-m64
diff --git a/build/ck.build.sparcv9 b/build/ck.build.sparcv9
new file mode 100644
index 0000000..d866841
--- /dev/null
+++ b/build/ck.build.sparcv9
@@ -0,0 +1 @@
+CFLAGS+=-m64 -D__sparcv9__
diff --git a/build/ck.build.x86 b/build/ck.build.x86
new file mode 100644
index 0000000..6e12783
--- /dev/null
+++ b/build/ck.build.x86
@@ -0,0 +1,2 @@
+CFLAGS+=-m32 -D__x86__ -msse -msse2
+LDFLAGS+=-m32
diff --git a/build/ck.build.x86_64 b/build/ck.build.x86_64
new file mode 100644
index 0000000..81b378a
--- /dev/null
+++ b/build/ck.build.x86_64
@@ -0,0 +1,2 @@
+CFLAGS+=-m64 -D__x86_64__
+LDFLAGS+=-m64
diff --git a/build/ck.pc.in b/build/ck.pc.in
new file mode 100644
index 0000000..0f1e93b
--- /dev/null
+++ b/build/ck.pc.in
@@ -0,0 +1,10 @@
+prefix=@PREFIX@
+includedir=@HEADERS@
+libdir=@LIBRARY@
+
+Name: Concurrency Kit
+Description: Toolkit for well-specified design and implementation of concurrent systems
+URL: http://concurrencykit.org/
+Version: @VERSION@
+Libs: -L${libdir} -lck
+Cflags: -D__@PROFILE@__ -I${includedir} @PC_CFLAGS@
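
Once installed, the generated ck.pc lands in the pkgconfig directory referenced by Makefile.in, and consumers pick up the flags above through pkg-config. A hedged sketch, where hello.c stands in for any program using the library:

	# If ck was installed under a non-default prefix, point pkg-config at it first:
	#   export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig
	cc $(pkg-config --cflags ck) -o hello hello.c $(pkg-config --libs ck)
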
diff --git a/build/ck.spec.in b/build/ck.spec.in
new file mode 100644
index 0000000..e486d53
--- /dev/null
+++ b/build/ck.spec.in
@@ -0,0 +1,74 @@
+Name: ck
+Version: @VERSION@
+Release: 1%{?dist}
+Group: Development/Libraries
+Summary: Concurrency Kit
+License: Simplified BSD License
+URL: http://concurrencykit.org
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
+
+Source: http://concurrencykit.org/releases/ck-%{version}.tar.gz
+
+%description
+Concurrency Kit provides a plethora of concurrency primitives, safe memory
+reclamation mechanisms and lock-less and lock-free data structures designed to
+aid in the design and implementation of high performance concurrent systems. It
+is designed to minimize dependencies on operating system-specific interfaces
+and most of the interface relies only on a strict subset of the standard
+library and more popular compiler extensions.
+
+%package devel
+Group: Development/Libraries
+Summary: Header files and libraries for CK development
+Requires: %{name} = %{version}-%{release}
+
+%description devel
+Concurrency Kit provides a plethora of concurrency primitives, safe memory
+reclamation mechanisms and lock-less and lock-free data structures designed to
+aid in the design and implementation of high performance concurrent systems. It
+is designed to minimize dependencies on operating system-specific interfaces
+and most of the interface relies only on a strict subset of the standard
+library and more popular compiler extensions.
+
+This package provides the libraries, include files, and other
+resources needed for developing Concurrency Kit applications.
+
+%prep
+%setup -q
+
+%build
+CFLAGS=$RPM_OPT_FLAGS ./configure \
+ --libdir=%{_libdir} \
+ --includedir=%{_includedir}/%{name} \
+ --mandir=%{_mandir} \
+ --prefix=%{_prefix}
+make %{?_smp_mflags}
+
+%install
+rm -rf $RPM_BUILD_ROOT
+make DESTDIR=$RPM_BUILD_ROOT install
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%files
+%defattr(-,root,root,-)
+%{_libdir}/libck.so.@VERSION@
+%{_libdir}/libck.so.@VERSION_MAJOR@
+
+%files devel
+%defattr(-,root,root)
+%{_libdir}/libck.so
+%{_includedir}/%{name}/*.h
+%{_includedir}/%{name}/*/*.h
+%{_includedir}/%{name}/*/*/*.h
+%{_libdir}/libck.a
+%{_libdir}/pkgconfig/%{name}.pc
+%{_mandir}/man3/*.3.gz
+
+%post
+/sbin/ldconfig
+
+%postun
+/sbin/ldconfig
+
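
The spec template above is filled in by configure (it generates build/ck.spec alongside the other build files, as the configure diff below shows). A binary RPM build from the generated spec then looks roughly as follows; this is only a sketch and assumes the ck-0.6.0 release tarball named in the Source: line has already been placed in the rpmbuild SOURCES directory:

	./configure --prefix=/usr
	rpmbuild -bb build/ck.spec    # produces the ck and ck-devel packages described above
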
diff --git a/build/regressions.build.in b/build/regressions.build.in
new file mode 100644
index 0000000..6d79a8b
--- /dev/null
+++ b/build/regressions.build.in
@@ -0,0 +1,10 @@
+CC=@CC@
+MAKE=make
+CORES=@CORES@
+CFLAGS=@CFLAGS@ -I../../../include -DCORES=@CORES@
+LD=@LD@
+LDFLAGS=@LDFLAGS@
+PTHREAD_CFLAGS=@PTHREAD_CFLAGS@
+BUILD_DIR=@BUILD_DIR@
+
+include $(BUILD_DIR)/build/ck.build.@PROFILE@
diff --git a/configure b/configure
new file mode 100755
index 0000000..e840d41
--- /dev/null
+++ b/configure
@@ -0,0 +1,796 @@
+#!/bin/sh
+#
+# Copyright © 2009-2013 Samy Al Bahra.
+# Copyright © 2011 Devon H. O'Dell <devon.odell@gmail.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+
+REQUIRE_HEADER="stdbool.h stddef.h stdint.h string.h"
+
+EXIT_SUCCESS=0
+EXIT_FAILURE=1
+WANT_PIC=yes
+
+P_PWD=`pwd`
+MAINTAINER='sbahra@repnop.org'
+VERSION=${VERSION:-'0.6.0'}
+VERSION_MAJOR='0'
+BUILD="$PWD/build/ck.build"
+PREFIX=${PREFIX:-"/usr/local"}
+LDNAME="libck.so"
+LDNAME_VERSION="libck.so.$VERSION"
+LDNAME_MAJOR="libck.so.$VERSION_MAJOR"
+
+OPTION_CHECKING=1
+
+export CFLAGS
+export PREFIX
+LC_ALL=C
+export LC_ALL
+
+if test -n "${BASH_VERSION+set}" && (set -o posix) >/dev/null 2>&1; then
+ set -o posix
+fi
+
+trap epilog 1 2 3 6
+
+epilog()
+{
+ rm -f .1.c .1
+}
+
+assert()
+{
+
+ if test "$#" -eq 2; then
+ fail=$2
+ print=true
+ elif test "$#" -eq 3; then
+ fail=$3
+ print=echo
+ else
+ echo "Usage: assert <test> <fail string> or assert <test> <success string> <fail string>" 1>&2
+ exit $EXIT_FAILURE
+ fi
+
+ if test -z "$1"; then
+ echo "failed [$fail]"
+ exit $EXIT_FAILURE
+ else
+ ${print} "success [$1]"
+ fi
+}
+
+get_git_sha()
+{
+ # return a short SHA for the current HEAD
+ GIT_SHA=""
+ GIT_MSG="success"
+ gitcmd=`which git`
+ if test -n "$gitcmd"; then
+ GIT_SHA=`git rev-parse --short HEAD 2>/dev/null`
+ if ! test -n "$GIT_SHA"; then
+ GIT_MSG="not within a git repo"
+ fi
+ else
+ GIT_MSG="git not installed or executable"
+ fi
+}
+
+generate()
+{
+ sed -e "s#@PROFILE@#$PROFILE#g" \
+ -e "s#@VERSION@#$VERSION#g" \
+ -e "s#@VERSION_MAJOR@#$VERSION_MAJOR#g" \
+ -e "s#@CC@#$CC#g" \
+ -e "s#@CFLAGS@#$CFLAGS#g" \
+ -e "s#@HEADERS@#$HEADERS#g" \
+ -e "s#@LIBRARY@#$LIBRARY#g" \
+ -e "s#@PREFIX@#$PREFIX#g" \
+ -e "s#@CORES@#$CORES#g" \
+ -e "s#@ALL_LIBS@#$ALL_LIBS#g" \
+ -e "s#@INSTALL_LIBS@#$INSTALL_LIBS#g" \
+ -e "s#@LD@#$LD#g" \
+ -e "s#@LDFLAGS@#$LDFLAGS#g" \
+ -e "s#@PTHREAD_CFLAGS@#$PTHREAD_CFLAGS#g" \
+ -e "s#@MANDIR@#$MANDIR#g" \
+ -e "s#@GZIP@#$GZIP#g" \
+ -e "s#@GZIP_SUFFIX@#$GZIP_SUFFIX#g" \
+ -e "s#@POINTER_PACK_ENABLE@#$POINTER_PACK_ENABLE#g" \
+ -e "s#@DISABLE_DOUBLE@#$DISABLE_DOUBLE#g" \
+ -e "s#@RTM_ENABLE@#$RTM_ENABLE#g" \
+ -e "s#@LSE_ENABLE@#$LSE_ENABLE#g" \
+ -e "s#@VMA_BITS@#$VMA_BITS_R#g" \
+ -e "s#@VMA_BITS_VALUE@#$VMA_BITS_VALUE_R#g" \
+ -e "s#@MM@#$MM#g" \
+ -e "s#@BUILD_DIR@#$P_PWD#g" \
+ -e "s#@SRC_DIR@#$BUILD_DIR#g" \
+ -e "s#@LDNAME@#$LDNAME#g" \
+ -e "s#@LDNAME_MAJOR@#$LDNAME_MAJOR#g" \
+ -e "s#@LDNAME_VERSION@#$LDNAME_VERSION#g" \
+ -e "s#@PC_CFLAGS@#$PC_CFLAGS#g" \
+ -e "s#@GIT_SHA@#$GIT_SHA#g" \
+ $1 > $2
+}
+
+generate_stdout()
+{
+
+ echo
+ echo " VERSION = $VERSION"
+ echo " GIT_SHA = $GIT_SHA"
+ echo " BUILD_DIR = $P_PWD"
+ echo " SRC_DIR = $BUILD_DIR"
+ echo " SYSTEM = $SYSTEM"
+ echo " PROFILE = $PROFILE"
+ echo " CC = $CC"
+ echo " COMPILER = $COMPILER"
+ echo " CFLAGS = $CFLAGS"
+ echo " PTHREAD_CFLAGS = $PTHREAD_CFLAGS"
+ echo " LD = $LD"
+ echo " LDNAME = $LDNAME"
+ echo " LDNAME_VERSION = $LDNAME_VERSION"
+ echo " LDNAME_MAJOR = $LDNAME_MAJOR"
+ echo " LDFLAGS = $LDFLAGS"
+ echo " GZIP = $GZIP"
+ echo " CORES = $CORES"
+ echo " POINTER_PACK = $POINTER_PACK_ENABLE"
+ echo " VMA_BITS = $VMA_BITS"
+ echo " MEMORY_MODEL = $MM"
+ echo " RTM = $RTM_ENABLE"
+ echo " LSE = $LSE_ENABLE"
+ echo
+ echo "Headers will be installed in $HEADERS"
+ echo "Libraries will be installed in $LIBRARY"
+ echo "Documentation will be installed in $MANDIR"
+}
+
+for option; do
+ case "$option" in
+ *=?*)
+ value=`expr -- "$option" : '[^=]*=\(.*\)'`
+ ;;
+ *=)
+ value=
+ ;;
+ *)
+ value=yes
+ ;;
+ esac
+
+ case "$option" in
+ --help)
+ echo "Usage: $0 [OPTIONS]"
+ echo
+ echo "The following options may be used for cross-building."
+ echo " --profile=N Use custom build profile (use in conjunction with \$CC)"
+ echo
+ echo "The following options may be used to modify installation behavior."
+ echo " --includedir=N Headers directory (default is ${PREFIX}/include)"
+ echo " --libdir=N Libraries directory (default is ${PREFIX}/lib)"
+ echo " --mandir=N Manual pages directory (default is ${PREFIX}/man)"
+ echo " --prefix=N Installs library files in N (default is $PREFIX)"
+ echo
+ echo "The following options will affect generated code."
+ echo " --enable-pointer-packing Assumes address encoding is subset of pointer range"
+ echo " --enable-rtm Enable restricted transactional memory (power, x86_64)"
+ echo " --enable-lse Enable large system extensions (arm64)"
+ echo " --memory-model=N Specify memory model (currently tso, pso or rmo)"
+ echo " --vma-bits=N Specify valid number of VMA bits"
+ echo " --platform=N Force the platform type, instead of relying on autodetection"
+	echo "		--use-cc-builtins		Use the compiler atomic builtin functions, instead of the CK implementation"
+ echo " --disable-double Don't generate any of the functions using the \"double\" type"
+ echo
+ echo "The following options affect regression testing."
+ echo " --cores=N Specify number of cores available on target machine"
+ echo
+ echo "The following environment variables may be used:"
+ echo " CC C compiler command"
+ echo " CFLAGS C compiler flags"
+ echo " LDFLAGS Linker flags"
+ echo " GZIP GZIP compression tool"
+ echo
+ echo "Report bugs to ${MAINTAINER}."
+ exit $EXIT_SUCCESS
+ ;;
+ --memory-model=*)
+ case "$value" in
+ "tso")
+ MM="CK_MD_TSO"
+ ;;
+ "rmo")
+ MM="CK_MD_RMO"
+ ;;
+ "pso")
+ MM="CK_MD_PSO"
+ ;;
+ *)
+ echo "./configure [--help]"
+ exit $EXIT_FAILURE
+ ;;
+ esac
+ ;;
+ --vma-bits=*)
+ VMA_BITS=$value
+ ;;
+ --enable-pointer-packing)
+ POINTER_PACK_ENABLE="CK_MD_POINTER_PACK_ENABLE"
+ ;;
+ --enable-rtm)
+ RTM_ENABLE_SET="CK_MD_RTM_ENABLE"
+ ;;
+ --enable-lse)
+ LSE_ENABLE_SET="CK_MD_LSE_ENABLE"
+ ;;
+ --cores=*)
+ CORES=$value
+ ;;
+ --profile=*)
+ PROFILE=$value
+ ;;
+ --prefix=*)
+ PREFIX=$value
+ ;;
+ --includedir=*)
+ HEADERS=$value
+ ;;
+ --libdir=*)
+ LIBRARY=$value
+ ;;
+ --mandir=*)
+ MANDIR=$value
+ ;;
+ --with-pic)
+ WANT_PIC=yes
+ ;;
+ --without-pic)
+ WANT_PIC=no
+ ;;
+ --disable-option-checking)
+ OPTION_CHECKING=0
+ ;;
+ --use-cc-builtins)
+ USE_CC_BUILTINS=1
+ ;;
+ --disable-double)
+ DISABLE_DOUBLE="CK_PR_DISABLE_DOUBLE"
+ ;;
+ --platform=*)
+ PLATFORM=$value
+ ;;
+ --build=*|--host=*|--target=*|--exec-prefix=*|--bindir=*|--sbindir=*|\
+ --sysconfdir=*|--datadir=*|--libexecdir=*|--localstatedir=*|\
+ --enable-static|\
+ --sharedstatedir=*|--infodir=*|--enable-shared|--disable-shared|\
+ --cache-file=*|--srcdir=*)
+ # ignore for compat with regular configure
+ ;;
+ --*)
+ if test "$OPTION_CHECKING" -eq 1; then
+ echo "$0 [--help]"
+ echo "Unknown option $option"
+ exit $EXIT_FAILURE
+ fi
+ ;;
+ *=*)
+ NAME=`expr -- "$option" : '\([^=]*\)='`
+ eval "$NAME='$value'"
+ export $NAME
+ ;;
+ *)
+ echo "$0 [--help]"
+ echo "Unknown option $option"
+ exit $EXIT_FAILURE
+ ;;
+ esac
+done
+
+HEADERS=${HEADERS:-"${PREFIX}/include"}
+LIBRARY=${LIBRARY:-"${PREFIX}/lib"}
+MANDIR=${MANDIR:-"${PREFIX}/share/man"}
+GZIP=${GZIP:-"gzip -c"}
+POINTER_PACK_ENABLE=${POINTER_PACK_ENABLE:-"CK_MD_POINTER_PACK_DISABLE"}
+DISABLE_DOUBLE=${DISABLE_DOUBLE:-"CK_PR_ENABLE_DOUBLE"}
+RTM_ENABLE=${RTM_ENABLE_SET:-"CK_MD_RTM_DISABLE"}
+LSE_ENABLE=${LSE_ENABLE_SET:-"CK_MD_LSE_DISABLE"}
+VMA_BITS=${VMA_BITS:-"unknown"}
+
+DCORES=2
+printf "Detecting operating system......."
+SYSTEM=`uname -s 2> /dev/null`
+case "$SYSTEM" in
+ "SunOS")
+ SYSTEM=solaris
+ ;;
+ "Linux"|"uClinux")
+ DCORES=`egrep '(^CPU[0-9]+|^processor.*:.*)' /proc/cpuinfo|wc -l`
+ SYSTEM=linux
+ ;;
+ "FreeBSD"|"GNU/kFreeBSD")
+ DCORES=`sysctl -n hw.ncpu`
+ SYSTEM=freebsd
+ ;;
+ "NetBSD")
+ DCORES=`sysctl -n hw.ncpu`
+ SYSTEM=netbsd
+ ;;
+ "OpenBSD")
+ DCORES=`sysctl -n hw.ncpu`
+ SYSTEM=openbsd
+ ;;
+ "DragonFly")
+ DCORES=`sysctl -n hw.ncpu`
+ SYSTEM=dragonflybsd
+ ;;
+ "Darwin")
+ DCORES=`sysctl -n hw.ncpu`
+ SYSTEM=darwin
+ ;;
+ MINGW32*)
+ SYSTEM=mingw32
+ LDFLAGS="-mthreads $LDFLAGS"
+ ;;
+ CYGWIN_NT*)
+ SYSTEM=cygwin
+ LDFLAGS="-mthreads $LDFLAGS"
+ ;;
+ *)
+ SYSTEM=
+ ;;
+esac
+
+assert "$SYSTEM" "$SYSTEM" "unsupported"
+
+CORES=${CORES:-${DCORES}}
+printf "Detecting machine architecture..."
+if test "x$PLATFORM" = "x"; then
+ PLATFORM=`uname -m 2> /dev/null`
+fi
+
+case $PLATFORM in
+ "macppc"|"Power Macintosh"|"powerpc")
+ RTM_ENABLE="CK_MD_RTM_DISABLE"
+ LSE_ENABLE="CK_MD_LSE_DISABLE"
+ MM="${MM:-"CK_MD_RMO"}"
+ PLATFORM=ppc
+ ENVIRONMENT=32
+ LDFLAGS="-m32 $LDFLAGS"
+ ;;
+ "sun4u"|"sun4v"|"sparc64")
+ RTM_ENABLE="CK_MD_RTM_DISABLE"
+ LSE_ENABLE="CK_MD_LSE_DISABLE"
+ MM="${MM:-"CK_MD_TSO"}"
+ PLATFORM=sparcv9
+ ENVIRONMENT=64
+ LDFLAGS="-m64 $LDFLAGS"
+ ;;
+ i386|i486|i586|i686|i586_i686|pentium*|athlon*|k5|k6|k6_2|k6_3)
+ LSE_ENABLE="CK_MD_LSE_DISABLE"
+ MM="${MM:-"CK_MD_TSO"}"
+ case $SYSTEM in
+ darwin)
+ ENVIRONMENT=64
+ PLATFORM=x86_64
+ ;;
+ freebsd)
+ PLATFORM=x86
+ ENVIRONMENT=32
+
+ # FreeBSD doesn't give us a nice way to determine the CPU
+ # class of the running system, reporting any 32-bit x86
+ # architecture as i386. 486 is its minimum supported CPU
+ # class and cmpxchg8b was implemented first in i586.
+ dmesg | grep -q "486-class"
+ if test "$?" -eq 0; then
+ assert "" "" "Must have an i586 class or higher CPU"
+ fi
+
+ # FreeBSD still generates code for 486-class CPUs as its
+ # default 32-bit target, but we need 586 at the least.
+ echo "$CFLAGS" | grep -q 'march='
+ if test "$?" -ne 0; then
+ # Needed for cmpxchg8b
+ CFLAGS="$CFLAGS -march=i586"
+ fi
+ ;;
+ linux)
+ case $PLATFORM in
+ i386|i486)
+ assert "" "" "Must have an i586 class or higher CPU"
+ ;;
+ esac
+
+ PLATFORM=x86
+ ENVIRONMENT=32
+ ;;
+
+ *)
+ PLATFORM=x86
+ ENVIRONMENT=32
+ assert "$PLATFORM $ENVIRONMENT" "$PLATFORM $ENVIRONMENT" "unsupported"
+ ;;
+ esac
+ ;;
+ "amd64"|"x86_64")
+ LSE_ENABLE="CK_MD_LSE_DISABLE"
+ PLATFORM=x86_64
+ ENVIRONMENT=64
+ LDFLAGS="-m64 $LDFLAGS"
+ MM="${MM:-"CK_MD_TSO"}"
+ ;;
+ "i86pc")
+ RTM_ENABLE="CK_MD_RTM_DISABLE"
+ LSE_ENABLE="CK_MD_LSE_DISABLE"
+ MM="${MM:-"CK_MD_TSO"}"
+ if test -z "$ISA"; then ISA=`isainfo -n 2> /dev/null || echo i386` ; fi
+ case "$ISA" in
+ "amd64")
+ RTM_ENABLE=${RTM_ENABLE_SET:-"CK_MD_RTM_DISABLE"}
+ PLATFORM=x86_64
+ ENVIRONMENT=64
+ ;;
+ *)
+ PLATFORM=x86
+ ENVIRONMENT=32
+ assert "$PLATFORM $ENVIRONMENT" "$PLATFORM $ENVIRONMENT" "unsupported"
+ ;;
+ esac
+ ;;
+ "ppc64"|"ppc64le")
+ RTM_ENABLE="CK_MD_RTM_DISABLE"
+ LSE_ENABLE="CK_MD_LSE_DISABLE"
+ MM="${MM:-"CK_MD_RMO"}"
+ PLATFORM=ppc64
+ ENVIRONMENT=64
+ ;;
+ arm|armv6l|armv7l)
+ if test "$PLATFORM" = "armv6l"; then
+ CFLAGS="$CFLAGS -march=armv6k";
+ elif test "$PLATFORM" = "armv7l"; then
+ CFLAGS="$CFLAGS -march=armv7-a";
+ fi
+ RTM_ENABLE="CK_MD_RTM_DISABLE"
+ LSE_ENABLE="CK_MD_LSE_DISABLE"
+ MM="${MM:-"CK_MD_RMO"}"
+ PLATFORM=arm
+ ENVIRONMENT=32
+ ;;
+ "arm64"|"aarch64")
+ RTM_ENABLE="CK_MD_RTM_DISABLE"
+ MM="${MM:-"CK_MD_RMO"}"
+ PLATFORM=aarch64
+ ENVIRONMENT=64
+ ;;
+ *)
+ RTM_ENABLE="CK_MD_RTM_DISABLE"
+ LSE_ENABLE="CK_MD_LSE_DISABLE"
+ PLATFORM=
+ MM="${MM:-"CK_MD_RMO"}"
+ ;;
+esac
+
+assert "$PLATFORM" "$PLATFORM" "unsupported"
+
+if test "$VMA" = "unknown"; then
+ VMA_BITS_R="CK_MD_VMA_BITS_UNKNOWN"
+ VMA_BITS_VALUE_R=""
+ POINTER_PACK_ENABLE="CK_MD_POINTER_PACK_DISABLE"
+else
+ VMA_BITS_R="CK_MD_VMA_BITS"
+ VMA_BITS_VALUE_R="${VMA_BITS}ULL"
+fi
+
+if test "$USE_CC_BUILTINS"; then
+ CFLAGS="$CFLAGS -DCK_CC_BUILTINS"
+	PC_CFLAGS="-DCK_CC_BUILTINS"
+fi
+
+# `which` on Solaris sucks
+pathsearch()
+{
+ what=$1
+ oldFS="$IFS"
+ IFS=":"
+ for d in $PATH ; do
+ if test -x "$d/$what" ; then
+ echo "$d/$what";
+ IFS="$oldFS"
+ return
+ fi
+ done
+ IFS="$oldFS"
+}
+
+printf "Finding dirname command.........."
+DIRNAME=`pathsearch "${DIRNAME:-dirname}"`
+if test -z "$DIRNAME" -o ! -x "$DIRNAME"; then
+ DIRNAME=`pathsearch "${DIRNAME:-dirname}"`
+ DIRNAME="$DIRNAME"
+else
+ echo "success [$DIRNAME]"
+fi
+
+if test -z "$DIRNAME"; then
+ echo "not found (out of source build unsupported)"
+else
+ printf "Determining build directory......"
+
+ BUILD_DIR=`$DIRNAME $0`
+ cd `$DIRNAME $0`
+ BUILD_DIR=`pwd`
+
+ echo "success [$BUILD_DIR]"
+fi
+
+printf "Finding gzip tool................"
+GZIP=`pathsearch "${GZIP:-gzip}"`
+if test -z "$GZIP" -o ! -x "$GZIP"; then
+ GZIP=`pathsearch "${GZIP:-gzip}"`
+ GZIP="$GZIP"
+fi
+
+if test -z "$GZIP"; then
+ echo "not found"
+ GZIP=cat
+ GZIP_SUFFIX=""
+else
+ echo "success [$GZIP]"
+ GZIP="$GZIP -c"
+ GZIP_SUFFIX=".gz"
+fi
+
+printf "Finding suitable compiler........"
+CC=`pathsearch "${CC:-cc}"`
+if test -z "$CC" -o ! -x "$CC"; then
+ CC=`pathsearch "${CC:-gcc}"`
+fi
+assert "$CC" "not found"
+
+cat << EOF > .1.c
+#include <stdio.h>
+int main(void) {
+#if defined(_WIN32)
+#if defined(__MINGW64__)
+ puts("mingw64");
+ return (0);
+#elif defined(__MINGW32__) && (__MINGW32_MAJOR_VERSION >= 3)
+ puts("mingw32");
+ return (0);
+#else
+ return (1);
+#endif /* __MINGW32__ && __MINGW32_MAJOR_VERSION >= 3 */
+#elif defined(__clang__) && (__clang_major__ >= 3)
+ puts("clang");
+ return (0);
+#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x5110)
+ puts("suncc");
+ return (0);
+#elif defined(__GNUC__) && (__GNUC__ >= 4)
+ puts("gcc");
+ return (0);
+#else
+ return (1);
+#endif
+}
+EOF
+
+$CC -o .1 .1.c
+COMPILER=`./.1`
+r=$?
+rm -f .1.c .1
+
+if test "$r" -ne 0; then
+ assert "" "update compiler"
+else
+ echo "success [$CC]"
+fi
+
+if test "$COMPILER" = "suncc"; then
+ LD=/bin/ld
+ LDFLAGS="-G -z text -h libck.so.$VERSION_MAJOR $LDFLAGS"
+ CFLAGS="-xO5 $CFLAGS"
+ PTHREAD_CFLAGS="-mt -lpthread"
+elif test "$COMPILER" = "gcc" || test "$COMPILER" = "clang" || test "$COMPILER" = "mingw32" || test "$COMPILER" = "mingw64"; then
+ LD=$CC
+ SONAME="$LDNAME_MAJOR"
+ if test "$SYSTEM" = "darwin"; then
+ CC_WL_OPT="-install_name"
+ LDNAME="libck.dylib"
+ LDNAME_VERSION="libck.$VERSION.dylib"
+ LDNAME_MAJOR="libck.$VERSION_MAJOR.dylib"
+ SONAME="$LIBRARY/$LDNAME_MAJOR"
+ else
+ CC_WL_OPT="-soname"
+ fi
+
+ LDFLAGS="-Wl,$CC_WL_OPT,$SONAME $LDFLAGS"
+ if test "$WANT_PIC" = "yes"; then
+ LDFLAGS="$LDFLAGS -shared -fPIC"
+ CFLAGS="$CFLAGS -fPIC"
+ ALL_LIBS="libck.so libck.a"
+ INSTALL_LIBS="install-so install-lib"
+ else
+ LDFLAGS="$LDFLAGS -fno-PIC"
+ CFLAGS="$CFLAGS -fno-PIC"
+ ALL_LIBS="libck.a"
+ INSTALL_LIBS="install-lib"
+ fi
+
+ CFLAGS="-D_XOPEN_SOURCE=600 -D_BSD_SOURCE -D_DEFAULT_SOURCE -std=gnu99 -pedantic -Wall -W -Wundef -Wendif-labels -Wshadow -Wpointer-arith -Wcast-align -Wcast-qual -Wwrite-strings -Wstrict-prototypes -Wmissing-prototypes -Wnested-externs -Winline -Wdisabled-optimization -fstrict-aliasing -O2 -pipe -Wno-parentheses $CFLAGS"
+ PTHREAD_CFLAGS="-pthread"
+ if test "$COMPILER" = "mingw64"; then
+ ENVIRONMENT=64
+ PLATFORM=x86_64
+ fi
+else
+ assert "" "unknown compiler"
+fi
+
+printf "Detecting VMA bits..............."
+VMA="unknown"
+if test "$VMA_BITS" = "unknown"; then
+ if test "$PLATFORM" = "x86" || test $PLATFORM = "x86_64"; then
+ case $SYSTEM in
+ darwin)
+ VMA=`sysctl -n machdep.cpu.address_bits.virtual`
+ ;;
+ linux)
+ VMA=`awk '/address sizes/ {print $7;exit}' /proc/cpuinfo`
+ ;;
+ *)
+ if test "$PLATFORM" = "x86"; then
+ VMA="32"
+ else
+ cat << EOF > .1.c
+ #include <stdio.h>
+
+ int main(int argc, char *argv[])
+ {
+ unsigned long ret = 0x80000000;
+
+ __asm __volatile("cpuid\n"
+ : "+a" (ret));
+ if (ret >= 0x80000008) {
+ ret = 0x80000008;
+ __asm __volatile("cpuid\n"
+ : "+a" (ret));
+ printf("%lu\n", (ret >> 8) & 0xff);
+ } else {
+ return (1);
+ }
+ return (0);
+ }
+EOF
+
+ $CC -o .1 .1.c 2>/dev/null
+ VMA=`./.1 2>/dev/null`
+ if test $? -ne 0; then
+ VMA="unknown"
+ fi
+ rm -f .1.c .1
+ fi
+ esac
+ fi
+
+ VMA_BITS=$VMA
+else
+ VMA=$VMA_BITS
+fi
+
+if test "$VMA" = "unknown"; then
+ echo "unknown"
+ VMA_BITS_R="CK_MD_VMA_BITS_UNKNOWN"
+ VMA_BITS_VALUE_R=""
+ POINTER_PACK_ENABLE="CK_MD_POINTER_PACK_DISABLE"
+else
+ echo "success [$VMA]"
+ VMA_BITS_R="CK_MD_VMA_BITS"
+ VMA_BITS_VALUE_R="${VMA_BITS}ULL"
+fi
+
+for i in $REQUIRE_HEADER; do
+ printf "Checking header file usability..."
+
+ cat << EOF > .1.c
+#include <$i>
+int main(void){return(0);}
+EOF
+ $CC -o .1 .1.c 2> /dev/null
+ hf_s=$?
+
+ rm -f .1 .1.c
+ if test $hf_s -eq 0; then
+ echo "success [$i]"
+ else
+ echo "failed [$i]"
+ exit $EXIT_FAILURE
+ fi
+done
+
+printf "Detecting git SHA................"
+get_git_sha
+echo "$GIT_MSG [$GIT_SHA]"
+
+if test "$PROFILE"; then
+ printf "Using user-specified profile....."
+
+ if test -z "$CC"; then
+ echo "failed [specify compiler]"
+ exit $EXIT_FAILURE
+ fi
+
+ if test ! -f build/ck.build.$PROFILE; then
+ echo "failed [$PROFILE]"
+ exit $EXIT_FAILURE
+ fi
+
+ echo "success [$PROFILE]"
+ printf "Generating header files.........."
+ generate include/ck_md.h.in include/ck_md.h
+ echo "success"
+ printf "Generating build files..........."
+ generate src/Makefile.in src/Makefile
+ generate doc/Makefile.in doc/Makefile
+ generate build/ck.build.in build/ck.build
+ generate build/regressions.build.in build/regressions.build
+ generate build/ck.pc.in build/ck.pc
+ generate build/ck.spec.in build/ck.spec
+ generate Makefile.in Makefile
+ echo "success"
+ generate_stdout
+ exit $EXIT_SUCCESS
+fi
+
+# Platform will be used as a macro.
+PROFILE="${PROFILE:-$PLATFORM}"
+PLATFORM="__${PLATFORM}__"
+
+printf "Generating header files.........."
+generate include/ck_md.h.in include/ck_md.h
+echo "success"
+
+printf "Generating build files..........."
+
+mkdir -p $P_PWD/doc
+mkdir -p $P_PWD/build
+mkdir -p $P_PWD/include
+mkdir -p $P_PWD/src
+
+if test "$P_PWD" '!=' "$BUILD_DIR"; then
+ mkdir -p $P_PWD/regressions
+	cp $BUILD_DIR/regressions/Makefile.unsupported $P_PWD/regressions/Makefile > /dev/null 2>&1
+	cp $BUILD_DIR/build/ck.build.$PROFILE $P_PWD/build/ck.build.$PROFILE > /dev/null 2>&1
+	cp $BUILD_DIR/include/ck_md.h $P_PWD/include/ck_md.h > /dev/null 2>&1
+fi
+
+generate src/Makefile.in $P_PWD/src/Makefile
+generate doc/Makefile.in $P_PWD/doc/Makefile
+generate build/ck.build.in $P_PWD/build/ck.build
+generate build/regressions.build.in $P_PWD/build/regressions.build
+generate build/ck.pc.in $P_PWD/build/ck.pc
+generate build/ck.spec.in $P_PWD/build/ck.spec
+generate Makefile.in $P_PWD/Makefile
+touch src/*.c
+echo "success"
+generate_stdout
diff --git a/doc/CK_ARRAY_FOREACH b/doc/CK_ARRAY_FOREACH
new file mode 100644
index 0000000..d85b767
--- /dev/null
+++ b/doc/CK_ARRAY_FOREACH
@@ -0,0 +1,79 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd October 18, 2013
+.Dt CK_ARRAY_FOREACH 3
+.Sh NAME
+.Nm CK_ARRAY_FOREACH
+.Nd iterate through an array
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_array.h
+.Fn CK_ARRAY_FOREACH "ck_array_t *array" "ck_array_iterator_t *iterator" "void **b"
+.Sh DESCRIPTION
+The
+.Fn CK_ARRAY_FOREACH 3
+macro iterates through the array pointed to by
+.Fa array .
+A pointer to an iterator object must be specified by
+.Fa iterator
+and
+.Fa b
+must point to a void pointer.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <ck_array.h>
+
+/* Assume this was already previously initialized. */
+ck_array_t array;
+
+void
+example(void)
+{
+ ck_array_iterator_t iterator;
+ void *pointer;
+
+ CK_ARRAY_FOREACH(&array, &iterator, &pointer) {
+ do_something(pointer);
+ }
+}
+.Ed
+.Sh RETURN VALUES
+This macro has no return value.
+.Sh SEE ALSO
+.Xr ck_array_init 3 ,
+.Xr ck_array_commit 3 ,
+.Xr ck_array_put 3 ,
+.Xr ck_array_put_unique 3 ,
+.Xr ck_array_remove 3 ,
+.Xr ck_array_deinit 3 ,
+.Xr ck_array_length 3 ,
+.Xr ck_array_buffer 3 ,
+.Xr ck_array_initialized 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/CK_COHORT_INIT b/doc/CK_COHORT_INIT
new file mode 100644
index 0000000..94454d9
--- /dev/null
+++ b/doc/CK_COHORT_INIT
@@ -0,0 +1,66 @@
+.\"
+.\" Copyright 2013 Brendon Scheinman.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd February 24, 2013.
+.Dt CK_COHORT_INIT 3
+.Sh NAME
+.Nm CK_COHORT_INIT
+.Nd initialize instance of a cohort type
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_cohort.h
+.Fn CK_COHORT_INIT "COHORT_NAME cohort_name" "COHORT *cohort" "void *global_lock" \
+"void *local_lock" "unsigned int pass_limit"
+.Sh DESCRIPTION
+Until a cohort instance is initialized using the CK_COHORT_INIT macro, any operations
+involving it will have undefined behavior. After this macro has been called, the cohort
+pointed to by the
+.Fa cohort
+argument will use the lock pointed to by
+.Fa global_lock
+as its global lock and the lock pointed to by
+.Fa local_lock
+as its local lock.
+.Pp
+The cohort will relinquish its global lock after
+.Fa pass_limit
+consecutive acquisitions of its local lock, even if there are other threads waiting.
+If you are unsure of a value to use for the
+.Fa pass_limit
+argument, you should use CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT.
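+.Sh EXAMPLE
+The following is a minimal initialization sketch. It assumes a cohort type
+named "foo" was already defined in the same translation unit with
+.Xr CK_COHORT_PROTOTYPE 3
+around ck_spinlock locks; the two lock objects below are hypothetical
+placeholders for whatever locks the application actually shares:
+.Bd -literal -offset indent
+#include <ck_cohort.h>
+#include <ck_spinlock.h>
+
+/* Locks shared by every cohort in this (hypothetical) system. */
+static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER;
+static ck_spinlock_t local_lock = CK_SPINLOCK_INITIALIZER;
+
+static CK_COHORT_INSTANCE(foo) cohort;
+
+void
+setup(void)
+{
+
+	CK_COHORT_INIT(foo, &cohort, &global_lock, &local_lock,
+	    CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
+	return;
+}
+.Ed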
+.Sh SEE ALSO
+.Xr ck_cohort 3 ,
+.Xr CK_COHORT_PROTOTYPE 3 ,
+.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3 ,
+.Xr CK_COHORT_INSTANCE 3 ,
+.Xr CK_COHORT_INITIALIZER 3 ,
+.Xr CK_COHORT_LOCK 3 ,
+.Xr CK_COHORT_UNLOCK 3 ,
+.Xr CK_COHORT_LOCKED 3 ,
+.Xr CK_COHORT_TRYLOCK 3 ,
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/CK_COHORT_INSTANCE b/doc/CK_COHORT_INSTANCE
new file mode 100644
index 0000000..cec1617
--- /dev/null
+++ b/doc/CK_COHORT_INSTANCE
@@ -0,0 +1,59 @@
+.\"
+.\" Copyright 2013 Brendon Scheinman.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd February 24, 2013.
+.Dt CK_COHORT_INSTANCE 3
+.Sh NAME
+.Nm CK_COHORT_INSTANCE
+.Nd declare an instance of a cohort type
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_cohort.h
+.Fn CK_COHORT_INSTANCE "COHORT_NAME cohort_name"
+.Sh DESCRIPTION
+The user must use this macro to declare instances of cohort types that they have
+defined. For instance, if they have used the CK_COHORT_PROTOTYPE macro to define
+a cohort type with name foo, they would create an instance of this type as follows:
+.br
+CK_COHORT_INSTANCE(foo) cohort;
+.Pp
+This macro should also be used when allocating memory for cohorts. For instance,
+to allocate a block of 4 cohorts:
+.br
+CK_COHORT_INSTANCE(foo) *cohorts = malloc(4 * sizeof(CK_COHORT_INSTANCE(foo)));
+.Sh SEE ALSO
+.Xr ck_cohort 3 ,
+.Xr CK_COHORT_PROTOTYPE 3 ,
+.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3 ,
+.Xr CK_COHORT_INSTANCE 3 ,
+.Xr CK_COHORT_INITIALIZER 3 ,
+.Xr CK_COHORT_LOCK 3 ,
+.Xr CK_COHORT_UNLOCK 3 ,
+.Xr CK_COHORT_LOCKED 3 ,
+.Xr CK_COHORT_TRYLOCK 3 ,
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/CK_COHORT_LOCK b/doc/CK_COHORT_LOCK
new file mode 100644
index 0000000..22475f8
--- /dev/null
+++ b/doc/CK_COHORT_LOCK
@@ -0,0 +1,61 @@
+.\"
+.\" Copyright 2013 Brendon Scheinman.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd February 24, 2013.
+.Dt CK_COHORT_LOCK 3
+.Sh NAME
+.Nm CK_COHORT_LOCK
+.Nd acquire cohort lock
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_cohort.h
+.Fn CK_COHORT_LOCK "COHORT_NAME cohort_name" "COHORT *cohort" "void *global_context" \
+"void *local_context"
+.Sh DESCRIPTION
+This call attempts to acquire both the local and global (if necessary) locks from
+.Fa cohort .
+The call will block until both locks have been acquired.
+.Fa global_context
+will be passed as the second argument to the function that was provided as the
+.Fa global_lock_method
+argument to CK_COHORT_PROTOTYPE if that method is called, and
+.Fa local_context
+will be passed to the function specified by
+.Fa local_lock_method .
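+.Sh EXAMPLE
+The following is a brief usage sketch. It assumes a cohort type named "foo"
+and an already-initialized instance; NULL contexts are passed on the
+assumption that the underlying lock methods ignore their context argument:
+.Bd -literal -offset indent
+#include <ck_cohort.h>
+
+/* Assume this was defined and initialized elsewhere. */
+extern CK_COHORT_INSTANCE(foo) cohort;
+
+void
+critical_section(void)
+{
+
+	CK_COHORT_LOCK(foo, &cohort, NULL, NULL);
+	/* Operations protected by the cohort lock go here. */
+	CK_COHORT_UNLOCK(foo, &cohort, NULL, NULL);
+	return;
+}
+.Ed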
+.Sh SEE ALSO
+.Xr ck_cohort 3 ,
+.Xr CK_COHORT_PROTOTYPE 3 ,
+.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3 ,
+.Xr CK_COHORT_INSTANCE 3 ,
+.Xr CK_COHORT_INITIALIZER 3 ,
+.Xr CK_COHORT_INIT 3 ,
+.Xr CK_COHORT_UNLOCK 3 ,
+.Xr CK_COHORT_LOCKED 3 ,
+.Xr CK_COHORT_TRYLOCK 3 ,
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/CK_COHORT_PROTOTYPE b/doc/CK_COHORT_PROTOTYPE
new file mode 100644
index 0000000..7a7b1a7
--- /dev/null
+++ b/doc/CK_COHORT_PROTOTYPE
@@ -0,0 +1,76 @@
+.\"
+.\" Copyright 2013 Brendon Scheinman.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd February 24, 2013.
+.Dt CK_COHORT_PROTOTYPE 3
+.Sh NAME
+.Nm CK_COHORT_PROTOTYPE
+.Nd define cohort type with specified lock types
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_cohort.h
+.Fn CK_COHORT_PROTOTYPE "COHORT_NAME cohort_name" "LOCK_FXN global_lock_method" \
+"LOCK_FXN global_unlock_method" "LOCK_FXN local_lock_method" "LOCK_FXN local_unlock_method"
+.Sh DESCRIPTION
+The ck_cohort.h header file does not define any cohort types. Instead, the user must use
+the CK_COHORT_PROTOTYPE or
+.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3
+macros to define any types they want to use. They must use CK_COHORT_TRYLOCK_PROTOTYPE
+if they want their cohort type to support trylock operations.
+The CK_COHORT_PROTOTYPE macro takes the following arguments:
+.Pp
+.Fa cohort_name
+: An identifier used for this cohort type. This will have to be passed to each
+of the other CK_COHORT macros.
+.br
+.Fa global_lock_method
+: The method that should be called to acquire the global lock
+.br
+.Fa global_unlock_method
+: The method that should be called to relinquish the global lock
+.br
+.Fa local_lock_method
+: The method that should be called to acquire the local lock
+.br
+.Fa local_unlock_method
+: The method that should be called to relinquish the local lock
+.Pp
+Instances of the defined cohort type can be declared as:
+.br
+ CK_COHORT_INSTANCE(cohort_name) cohort;
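+.Sh EXAMPLE
+The following sketch defines a cohort type named "foo" built from
+ck_spinlock locks. The wrapper functions, and the (lock, context) calling
+convention they use, are an assumption of this example; any lock methods
+with a compatible signature may be supplied instead:
+.Bd -literal -offset indent
+#include <ck_cohort.h>
+#include <ck_spinlock.h>
+
+static void
+ck_spinlock_lock_with_context(ck_spinlock_t *lock, void *context)
+{
+
+	(void)context;
+	ck_spinlock_lock(lock);
+	return;
+}
+
+static void
+ck_spinlock_unlock_with_context(ck_spinlock_t *lock, void *context)
+{
+
+	(void)context;
+	ck_spinlock_unlock(lock);
+	return;
+}
+
+CK_COHORT_PROTOTYPE(foo,
+    ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context,
+    ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context)
+
+static CK_COHORT_INSTANCE(foo) cohort;
+.Ed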
+.Sh SEE ALSO
+.Xr ck_cohort 3 ,
+.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3 ,
+.Xr CK_COHORT_INSTANCE 3 ,
+.Xr CK_COHORT_INITIALIZER 3 ,
+.Xr CK_COHORT_INIT 3 ,
+.Xr CK_COHORT_LOCK 3 ,
+.Xr CK_COHORT_UNLOCK 3 ,
+.Xr CK_COHORT_LOCKED 3 ,
+.Xr CK_COHORT_TRYLOCK 3 ,
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/CK_COHORT_TRYLOCK b/doc/CK_COHORT_TRYLOCK
new file mode 100644
index 0000000..22bb4b5
--- /dev/null
+++ b/doc/CK_COHORT_TRYLOCK
@@ -0,0 +1,69 @@
+.\"
+.\" Copyright 2013 Brendon Scheinman.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 9, 2013.
+.Dt CK_COHORT_TRYLOCK 3
+.Sh NAME
+.Nm CK_COHORT_TRYLOCK
+.Nd try to acquire cohort lock
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_cohort.h
+.Fn CK_COHORT_TRYLOCK "COHORT_NAME cohort_name" "COHORT *cohort" "void *global_trylock_context" \
+"void *local_trylock_context" "void *local_unlock_context"
+.Sh DESCRIPTION
+This call attempts to acquire both the local and global (if necessary) locks from
+.Fa cohort .
+It can only be used with cohort types that were defined using the
+.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3
+macro. The call will not block and will return a bool that will evaluate to true iff
+the cohort was successfully acquired.
+.Fa global_trylock_context
+will be passed as the second argument to the function that was provided as the
+.Fa global_trylock_method
+argument to CK_COHORT_TRYLOCK_PROTOTYPE if that method is called, and
+.Fa local_trylock_context
+will be passed to the function specified by
+.Fa local_trylock_method .
+If the global lock acquisition fails, then the cohort will immediately release its
+local lock as well, and
+.Fa local_unlock_context
+will be passed to the function specified by
+.Fa local_unlock_method
+when this call is made.
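+.Sh EXAMPLE
+The following is a brief sketch. It assumes a trylock-capable cohort type
+named "foo" (defined with CK_COHORT_TRYLOCK_PROTOTYPE) and an
+already-initialized instance; NULL contexts are passed on the assumption
+that the underlying lock methods ignore them:
+.Bd -literal -offset indent
+#include <ck_cohort.h>
+#include <stdbool.h>
+
+/* Assume this was defined and initialized elsewhere. */
+extern CK_COHORT_INSTANCE(foo) cohort;
+
+bool
+try_critical_section(void)
+{
+
+	if (CK_COHORT_TRYLOCK(foo, &cohort, NULL, NULL, NULL) == false)
+		return false;
+
+	/* Operations protected by the cohort lock go here. */
+	CK_COHORT_UNLOCK(foo, &cohort, NULL, NULL);
+	return true;
+}
+.Ed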
+.Sh SEE ALSO
+.Xr ck_cohort 3 ,
+.Xr CK_COHORT_PROTOTYPE 3 ,
+.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3 ,
+.Xr CK_COHORT_INSTANCE 3 ,
+.Xr CK_COHORT_INITIALIZER 3 ,
+.Xr CK_COHORT_INIT 3 ,
+.Xr CK_COHORT_LOCK 3 ,
+.Xr CK_COHORT_UNLOCK 3 ,
+.Xr CK_COHORT_LOCKED 3 ,
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/CK_COHORT_TRYLOCK_PROTOTYPE b/doc/CK_COHORT_TRYLOCK_PROTOTYPE
new file mode 100644
index 0000000..dd97ad4
--- /dev/null
+++ b/doc/CK_COHORT_TRYLOCK_PROTOTYPE
@@ -0,0 +1,90 @@
+.\"
+.\" Copyright 2013 Brendon Scheinman.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 9, 2013.
+.Dt CK_COHORT_TRYLOCK_PROTOTYPE 3
+.Sh NAME
+.Nm CK_COHORT_TRYLOCK_PROTOTYPE
+.Nd define cohort type with specified lock types
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_cohort.h
+.Fn CK_COHORT_TRYLOCK_PROTOTYPE "COHORT_NAME cohort_name" "LOCK_FXN global_lock_method" \
+"LOCK_FXN global_unlock_method" "BOOL_LOCK_FXN global_locked_method" \
+"BOOL_LOCK_FXN global_trylock_method" "LOCK_FXN local_lock_method" \
+"LOCK_FXN local_unlock_method" "BOOL_LOCK_FXN local_locked_method" "BOOL_LOCK_FXN local_trylock_method"
+.Sh DESCRIPTION
+The ck_cohort.h header file does not define any cohort types. Instead, the user must use
+the CK_COHORT_PROTOTYPE or CK_COHORT_TRYLOCK_PROTOTYPE macros to define any types
+they want to use. They must use CK_COHORT_TRYLOCK_PROTOTYPE if they want their cohort type to have support
+for trylock operations. The CK_COHORT_TRYLOCK_PROTOTYPE macro takes the following arguments:
+.Pp
+.Fa cohort_name
+: An identifier used for this cohort type. This will have to be passed to each
+of the other CK_COHORT macros.
+.br
+.Fa global_lock_method
+: The method that should be called to acquire the global lock
+.br
+.Fa global_unlock_method
+: The method that should be called to relinquish the global lock
+.br
+.Fa global_locked_method
+: This method should return true iff the global lock is acquired by a thread.
+.br
+.Fa global_trylock_method
+: The method that should be called to try to acquire the global lock.
+It should not block and return true iff the lock was successfully acquired.
+.br
+.Fa local_lock_method
+: The method that should be called to acquire the local lock
+.br
+.Fa local_unlock_method
+: The method that should be called to relinquish the local lock
+.br
+.Fa local_locked_method
+: This method should return true iff the local lock is acquired by a thread.
+.br
+.Fa local_trylock_method
+: The method that should be called to try to acquire the local lock.
+It should not block and return true iff the lock was successfully acquired.
+.Pp
+Instances of the defined cohort type can be declared as:
+.br
+ CK_COHORT_INSTANCE(cohort_name) cohort;
+.Sh SEE ALSO
+.Xr ck_cohort 3 ,
+.Xr CK_COHORT_PROTOTYPE 3 ,
+.Xr CK_COHORT_INSTANCE 3 ,
+.Xr CK_COHORT_INITIALIZER 3 ,
+.Xr CK_COHORT_INIT 3 ,
+.Xr CK_COHORT_LOCK 3 ,
+.Xr CK_COHORT_UNLOCK 3 ,
+.Xr CK_COHORT_LOCKED 3 ,
+.Xr CK_COHORT_TRYLOCK 3 ,
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/CK_COHORT_UNLOCK b/doc/CK_COHORT_UNLOCK
new file mode 100644
index 0000000..a9f302f
--- /dev/null
+++ b/doc/CK_COHORT_UNLOCK
@@ -0,0 +1,61 @@
+.\"
+.\" Copyright 2013 Brendon Scheinman.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd February 24, 2013.
+.Dt CK_COHORT_UNLOCK 3
+.Sh NAME
+.Nm CK_COHORT_UNLOCK
+.Nd release cohort lock
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_cohort.h
+.Fn CK_COHORT_UNLOCK "COHORT_NAME cohort_name" "COHORT *cohort" "void *global_context" \
+"void *local_context"
+.Sh DESCRIPTION
+This call instructs
+.Fa cohort
+to relinquish its local lock and potentially its global lock as well.
+.Fa global_context
+will be passed as the second argument to the function that was provided as the
+.Fa global_unlock_method
+argument to CK_COHORT_PROTOTYPE if that method is called, and
+.Fa local_context
+will be passed to the function specified by
+.Fa local_unlock_method .
+.Sh SEE ALSO
+.Xr ck_cohort 3 ,
+.Xr CK_COHORT_PROTOTYPE 3 ,
+.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3 ,
+.Xr CK_COHORT_INSTANCE 3 ,
+.Xr CK_COHORT_INITIALIZER 3 ,
+.Xr CK_COHORT_INIT 3 ,
+.Xr CK_COHORT_LOCK 3 ,
+.Xr CK_COHORT_LOCKED 3 ,
+.Xr CK_COHORT_TRYLOCK 3 ,
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/CK_HS_HASH b/doc/CK_HS_HASH
new file mode 100644
index 0000000..6d8dc75
--- /dev/null
+++ b/doc/CK_HS_HASH
@@ -0,0 +1,71 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 28, 2012
+.Dt CK_HS_HASH 3
+.Sh NAME
+.Nm CK_HS_HASH
+.Nd invoke hash function with hash set seed
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft unsigned long
+.Fn CK_HS_HASH "ck_hs_t *hs" "ck_hs_hash_cb_t *hf" "const void *key"
+.Sh DESCRIPTION
+The
+.Fn CK_HS_HASH 3
+macro will invoke the hash function pointed to by the
+.Fa hf
+argument with the seed value associated with
+.Fa hs
+and the key pointer specified by the
+.Fa key
+argument.
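+.Sh EXAMPLE
+The following is a brief sketch. The hash function shown is only a
+placeholder and assumes the ck_hs_hash_cb_t calling convention of
+(object, seed); a real application would use the hash function it
+registered at ck_hs_init time:
+.Bd -literal -offset indent
+#include <ck_hs.h>
+#include <string.h>
+
+/* Assume hs was initialized elsewhere with hs_hash as its hash function. */
+extern ck_hs_t hs;
+
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+	const char *c = object;
+
+	/* Placeholder hash for illustration only. */
+	return (strlen(c) ^ seed);
+}
+
+void
+example(const char *key)
+{
+	unsigned long h;
+
+	h = CK_HS_HASH(&hs, hs_hash, key);
+	/* h now holds the hash of key under the seed associated with hs. */
+	(void)h;
+	return;
+}
+.Ed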
+.Sh RETURN VALUES
+This function will return the value returned by the
+.Fa hf
+function.
+.Sh ERRORS
+It is expected
+.Fa hs
+was previously initialized via
+.Fn ck_hs_init 3 .
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/CK_RHS_HASH b/doc/CK_RHS_HASH
new file mode 100644
index 0000000..43b8859
--- /dev/null
+++ b/doc/CK_RHS_HASH
@@ -0,0 +1,71 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 28, 2012
+.Dt CK_RHS_HASH 3
+.Sh NAME
+.Nm CK_RHS_HASH
+.Nd invoke hash function with hash set seed
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft unsigned long
+.Fn CK_RHS_HASH "ck_rhs_t *hs" "ck_rhs_hash_cb_t *hf" "const void *key"
+.Sh DESCRIPTION
+The
+.Fn CK_RHS_HASH 3
+macro will invoke the hash function pointed to by the
+.Fa hf
+argument with the seed value associated with
+.Fa hs
+and the key pointer specified by the
+.Fa key
+argument.
+.Sh RETURN VALUES
+This function will return the value returned by the
+.Fa hf
+function.
+.Sh ERRORS
+It is expected
+.Fa hs
+was previously initialized via
+.Fn ck_rhs_init 3 .
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/CK_RWCOHORT_INIT b/doc/CK_RWCOHORT_INIT
new file mode 100644
index 0000000..18d1b33
--- /dev/null
+++ b/doc/CK_RWCOHORT_INIT
@@ -0,0 +1,61 @@
+.\"
+.\" Copyright 2013 Brendon Scheinman.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd February 24, 2013.
+.Dt CK_RWCOHORT_INIT 3
+.Sh NAME
+.Nm CK_RWCOHORT_INIT
+.Nd initialize instance of a cohort-based reader-writer lock type
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rwcohort.h
+.Fn CK_RWCOHORT_NEUTRAL_INIT "COHORT_NAME cohort_name" "LOCK *lock"
+.Fn CK_RWCOHORT_RP_INIT "COHORT_NAME cohort_name" "LOCK *lock" "unsigned int wait_limit"
+.Fn CK_RWCOHORT_WP_INIT "COHORT_NAME cohort_name" "LOCK *lock" "unsigned int wait_limit"
+.Sh DESCRIPTION
+This macro initializes the lock instance pointed to by the
+.Fa lock
+argument. Until a lock instance is initialized using the CK_RWCOHORT_INIT macro, any operations
+involving it will have undefined behavior. Note that the
+.Fa wait_limit
+argument should only be used with reader-preference or writer-preference locks. For neutral
+locks, this argument should be excluded.
+If you are unsure of a value to use for the
+.Fa wait_limit
+argument, you should use CK_RWCOHORT_STRATEGY_DEFAULT_LOCAL_WAIT_LIMIT.
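+.Sh EXAMPLE
+The following is a minimal sketch for a writer-preference lock. It assumes
+a cohort type named "foo" was already defined and that
+CK_RWCOHORT_WP_PROTOTYPE was emitted for it in the same translation unit;
+the literal wait limit is arbitrary and only for illustration:
+.Bd -literal -offset indent
+#include <ck_rwcohort.h>
+
+static CK_RWCOHORT_WP_INSTANCE(foo) rw_lock;
+
+void
+setup(void)
+{
+
+	/* 10 is an arbitrary wait limit chosen for this sketch. */
+	CK_RWCOHORT_WP_INIT(foo, &rw_lock, 10);
+	return;
+}
+.Ed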
+.Sh SEE ALSO
+.Xr ck_rwcohort 3 ,
+.Xr CK_RWCOHORT_PROTOTYPE 3 ,
+.Xr CK_RWCOHORT_TRYLOCK_PROTOTYPE 3 ,
+.Xr CK_RWCOHORT_INSTANCE 3 ,
+.Xr CK_RWCOHORT_INITIALIZER 3 ,
+.Xr CK_RWCOHORT_LOCK 3 ,
+.Xr CK_RWCOHORT_UNLOCK 3 ,
+.Xr CK_RWCOHORT_LOCKED 3 ,
+.Xr CK_RWCOHORT_TRYLOCK 3 ,
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/CK_RWCOHORT_INSTANCE b/doc/CK_RWCOHORT_INSTANCE
new file mode 100644
index 0000000..10251a3
--- /dev/null
+++ b/doc/CK_RWCOHORT_INSTANCE
@@ -0,0 +1,64 @@
+.\"
+.\" Copyright 2013 Brendon Scheinman.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd February 24, 2013.
+.Dt CK_RWCOHORT_INSTANCE 3
+.Sh NAME
+.Nm CK_RWCOHORT_INSTANCE
+.Nd declare an instance of a cohort-based reader-writer lock type
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rwcohort.h
+.Fn CK_RWCOHORT_NEUTRAL_INSTANCE "COHORT_NAME cohort_name"
+.Fn CK_RWCOHORT_RP_INSTANCE "COHORT_NAME cohort_name"
+.Fn CK_RWCOHORT_WP_INSTANCE "COHORT_NAME cohort_name"
+.Sh DESCRIPTION
+The user must use this macro to declare instances of lock types that they have
+defined using the
+.Xr CK_RWCOHORT_PROTOTYPE 3
+macro. The cohort_name must be the same as the one used in the prototype macro.
+For instance, if CK_RWCOHORT_PROTOTYPE was called with the name "foo", the
+CK_RWCOHORT_INSTANCE macro should be called as
+.br
+CK_RWCOHORT_INSTANCE(foo) cohort;
+.Pp
+This macro should also be used when allocating memory for cohorts. For instance,
+to allocate a block of 4 cohorts:
+.br
+CK_RWCOHORT_WP_INSTANCE(foo) *cohorts = malloc(4 * sizeof(CK_RWCOHORT_WP_INSTANCE(foo)));
+.Sh SEE ALSO
+.Xr ck_rwcohort 3 ,
+.Xr CK_RWCOHORT_PROTOTYPE 3 ,
+.Xr CK_RWCOHORT_TRYLOCK_PROTOTYPE 3 ,
+.Xr CK_RWCOHORT_INSTANCE 3 ,
+.Xr CK_RWCOHORT_INITIALIZER 3 ,
+.Xr CK_RWCOHORT_LOCK 3 ,
+.Xr CK_RWCOHORT_UNLOCK 3 ,
+.Xr CK_RWCOHORT_LOCKED 3 ,
+.Xr CK_RWCOHORT_TRYLOCK 3 ,
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/CK_RWCOHORT_PROTOTYPE b/doc/CK_RWCOHORT_PROTOTYPE
new file mode 100644
index 0000000..a2705b6
--- /dev/null
+++ b/doc/CK_RWCOHORT_PROTOTYPE
@@ -0,0 +1,65 @@
+.\"
+.\" Copyright 2013 Brendon Scheinman.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd February 24, 2013.
+.Dt CK_RWCOHORT_PROTOTYPE 3
+.Sh NAME
+.Nm CK_RWCOHORT_PROTOTYPE
+.Nd define reader-writer cohort-based lock using the specified cohort type
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rwcohort.h
+.Fn CK_RWCOHORT_NEUTRAL_PROTOTYPE "COHORT_NAME cohort_name"
+.Fn CK_RWCOHORT_RP_PROTOTYPE "COHORT_NAME cohort_name"
+.Fn CK_RWCOHORT_WP_PROTOTYPE "COHORT_NAME cohort_name"
+.Sh DESCRIPTION
+The ck_rwcohort.h header file does not define any cohort types. Instead, the user must use
+the CK_RWCOHORT_PROTOTYPE macro to define any types they want to use.
+This macro takes a single argument which corresponds to the type of the cohort lock that
+the reader-writer lock should use. A cohort type must have already been defined with that name
+using the
+.Xr CK_COHORT_PROTOTYPE 3
+or
+.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3
+macros.
+.Pp
+Instances of the defined lock type can be declared as:
+.br
+ CK_RWCOHORT_INSTANCE(cohort_name) lock;
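+.Sh EXAMPLE
+The following is a brief sketch. It assumes a cohort type named "foo" was
+previously defined with
+.Xr CK_COHORT_PROTOTYPE 3
+in the same translation unit:
+.Bd -literal -offset indent
+#include <ck_rwcohort.h>
+
+/* Emit writer-preference reader-writer lock functions for cohort type "foo". */
+CK_RWCOHORT_WP_PROTOTYPE(foo)
+
+static CK_RWCOHORT_WP_INSTANCE(foo) rw_lock;
+.Ed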
+.Sh SEE ALSO
+.Xr ck_rwcohort 3 ,
+.Xr CK_COHORT_PROTOTYPE 3 ,
+.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3 ,
+.Xr CK_RWCOHORT_INSTANCE 3 ,
+.Xr CK_RWCOHORT_INITIALIZER 3 ,
+.Xr CK_RWCOHORT_INIT 3 ,
+.Xr CK_RWCOHORT_READ_LOCK 3 ,
+.Xr CK_RWCOHORT_READ_UNLOCK 3 ,
+.Xr CK_RWCOHORT_WRITE_LOCK 3 ,
+.Xr CK_RWCOHORT_WRITE_UNLOCK 3 ,
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/CK_RWCOHORT_READ_LOCK b/doc/CK_RWCOHORT_READ_LOCK
new file mode 100644
index 0000000..62831ea
--- /dev/null
+++ b/doc/CK_RWCOHORT_READ_LOCK
@@ -0,0 +1,66 @@
+.\"
+.\" Copyright 2013 Brendon Scheinman.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd February 24, 2013.
+.Dt CK_RWCOHORT_READ_LOCK 3
+.Sh NAME
+.Nm CK_RWCOHORT_READ_LOCK
+.Nd acquire read-only permission for cohort-based reader-writer lock
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rwcohort.h
+.Fn CK_RWCOHORT_NEUTRAL_READ_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\
+"void *global_context" "void *local_context"
+.Fn CK_RWCOHORT_RP_READ_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\
+"void *global_context" "void *local_context"
+.Fn CK_RWCOHORT_WP_READ_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\
+"void *global_context" "void *local_context"
+.Sh DESCRIPTION
+This call will acquire read-only permission from
+.Fa lock .
+The call will block until this permission has been acquired.
+.Fa cohort
+must point to a cohort whose global lock is the same as all other cohorts used with
+.Fa lock .
+The
+.Fa global_context
+and
+.Fa local_context
+arguments will be passed along as the context arguments to any calls to
+.Fa cohort .
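+.Sh EXAMPLE
+The following is a minimal read-side sketch using the writer-preference
+variant. It assumes a lock and cohort of type "foo" that were initialized
+elsewhere; NULL contexts are passed on the assumption that the underlying
+lock methods ignore them:
+.Bd -literal -offset indent
+#include <ck_cohort.h>
+#include <ck_rwcohort.h>
+
+/* Assume these were defined and initialized elsewhere. */
+extern CK_RWCOHORT_WP_INSTANCE(foo) rw_lock;
+extern CK_COHORT_INSTANCE(foo) cohort;
+
+void
+reader(void)
+{
+
+	CK_RWCOHORT_WP_READ_LOCK(foo, &rw_lock, &cohort, NULL, NULL);
+	/* Read-only operations go here. */
+	CK_RWCOHORT_WP_READ_UNLOCK(foo, &rw_lock, &cohort, NULL, NULL);
+	return;
+}
+.Ed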
+.Sh SEE ALSO
+.Xr ck_cohort 3 ,
+.Xr CK_RWCOHORT_PROTOTYPE 3 ,
+.Xr CK_RWCOHORT_INSTANCE 3 ,
+.Xr CK_RWCOHORT_INITIALIZER 3 ,
+.Xr CK_RWCOHORT_INIT 3 ,
+.Xr CK_RWCOHORT_READ_UNLOCK 3 ,
+.Xr CK_RWCOHORT_WRITE_LOCK 3 ,
+.Xr CK_RWCOHORT_WRITE_UNLOCK 3 ,
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/CK_RWCOHORT_READ_UNLOCK b/doc/CK_RWCOHORT_READ_UNLOCK
new file mode 100644
index 0000000..1c81801
--- /dev/null
+++ b/doc/CK_RWCOHORT_READ_UNLOCK
@@ -0,0 +1,65 @@
+.\"
+.\" Copyright 2013 Brendon Scheinman.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd February 24, 2013.
+.Dt CK_RWCOHORT_READ_UNLOCK 3
+.Sh NAME
+.Nm CK_RWCOHORT_READ_UNLOCK
+.Nd relinquish read-only access to cohort-based reader-writer lock
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rwcohort.h
+.Fn CK_RWCOHORT_NEUTRAL_READ_UNLOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\
+"void *global_context" "void *local_context"
+.Fn CK_RWCOHORT_RP_READ_UNLOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\
+"void *global_context" "void *local_context"
+.Fn CK_RWCOHORT_WP_READ_UNLOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\
+"void *global_context" "void *local_context"
+.Sh DESCRIPTION
+This call will relinquish read-only permission to
+.Fa lock .
+.Fa cohort
+must point to a cohort whose global lock is the same as all other cohorts used with
+.Fa lock .
+The
+.Fa global_context
+and
+.Fa local_context
+arguments will be passed along as the context arguments to any calls to
+.Fa cohort .
+.Sh SEE ALSO
+.Xr ck_cohort 3 ,
+.Xr CK_RWCOHORT_PROTOTYPE 3 ,
+.Xr CK_RWCOHORT_INSTANCE 3 ,
+.Xr CK_RWCOHORT_INITIALIZER 3 ,
+.Xr CK_RWCOHORT_INIT 3 ,
+.Xr CK_RWCOHORT_READ_LOCK 3 ,
+.Xr CK_RWCOHORT_WRITE_LOCK 3 ,
+.Xr CK_RWCOHORT_WRITE_UNLOCK 3 ,
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/CK_RWCOHORT_WRITE_LOCK b/doc/CK_RWCOHORT_WRITE_LOCK
new file mode 100644
index 0000000..161c7bb
--- /dev/null
+++ b/doc/CK_RWCOHORT_WRITE_LOCK
@@ -0,0 +1,66 @@
+.\"
+.\" Copyright 2013 Brendon Scheinman.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd February 24, 2013.
+.Dt CK_RWCOHORT_WRITE_LOCK 3
+.Sh NAME
+.Nm CK_RWCOHORT_WRITE_LOCK
+.Nd acquire write access for a cohort-based reader-writer lock
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rwcohort.h
+.Fn CK_RWCOHORT_NEUTRAL_WRITE_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\
+"void *global_context" "void *local_context"
+.Fn CK_RWCOHORT_RP_WRITE_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\
+"void *global_context" "void *local_context"
+.Fn CK_RWCOHORT_WP_WRITE_LOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\
+"void *global_context" "void *local_context"
+.Sh DESCRIPTION
+This call will acquire write permission for
+.Fa lock .
+The call will block until this permission has been acquired.
+.Fa cohort
+must point to a cohort whose global lock is the same as all other cohorts used with
+.Fa lock .
+The
+.Fa global_context
+and
+.Fa local_context
+arguments will be passed along as the context arguments to any calls to
+.Fa cohort .
+.Sh SEE ALSO
+.Xr ck_cohort 3 ,
+.Xr CK_RWCOHORT_PROTOTYPE 3 ,
+.Xr CK_RWCOHORT_INSTANCE 3 ,
+.Xr CK_RWCOHORT_INITIALIZER 3 ,
+.Xr CK_RWCOHORT_INIT 3 ,
+.Xr CK_RWCOHORT_READ_LOCK 3 ,
+.Xr CK_RWCOHORT_READ_UNLOCK 3 ,
+.Xr CK_RWCOHORT_WRITE_UNLOCK 3 ,
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/CK_RWCOHORT_WRITE_UNLOCK b/doc/CK_RWCOHORT_WRITE_UNLOCK
new file mode 100644
index 0000000..5772a9f
--- /dev/null
+++ b/doc/CK_RWCOHORT_WRITE_UNLOCK
@@ -0,0 +1,65 @@
+.\"
+.\" Copyright 2013 Brendon Scheinman.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd February 24, 2013.
+.Dt CK_RWCOHORT_WRITE_UNLOCK 3
+.Sh NAME
+.Nm CK_RWCOHORT_WRITE_UNLOCK
+.Nd relinquish write access for cohort-based reader-writer lock
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rwcohort.h
+.Fn CK_RWCOHORT_NEUTRAL_WRITE_UNLOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\
+"void *global_context" "void *local_context"
+.Fn CK_RWCOHORT_RP_WRITE_UNLOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\
+"void *global_context" "void *local_context"
+.Fn CK_RWCOHORT_WP_WRITE_UNLOCK "COHORT_NAME cohort_name" "LOCK *lock" "COHORT *cohort"\
+"void *global_context" "void *local_context"
+.Sh DESCRIPTION
+This call will relinquish write permission for
+.Fa lock .
+.Fa cohort
+must point to a cohort whose global lock is the same as all other cohorts used with
+.Fa lock .
+The
+.Fa global_context
+and
+.Fa local_context
+arguments will be passed along as the context arguments to any calls to
+.Fa cohort .
+.Sh SEE ALSO
+.Xr ck_cohort 3 ,
+.Xr CK_RWCOHORT_PROTOTYPE 3 ,
+.Xr CK_RWCOHORT_INSTANCE 3 ,
+.Xr CK_RWCOHORT_INITIALIZER 3 ,
+.Xr CK_RWCOHORT_INIT 3 ,
+.Xr CK_RWCOHORT_READ_LOCK 3 ,
+.Xr CK_RWCOHORT_READ_UNLOCK 3 ,
+.Xr CK_RWCOHORT_WRITE_LOCK 3 ,
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/Makefile.in b/doc/Makefile.in
new file mode 100644
index 0000000..cbad704
--- /dev/null
+++ b/doc/Makefile.in
@@ -0,0 +1,218 @@
+.PHONY: clean install uninstall
+
+MANDIR=@MANDIR@
+GZIP=@GZIP@
+GZIP_SUFFIX=.3@GZIP_SUFFIX@
+BUILD_DIR=@BUILD_DIR@
+SRC_DIR=@SRC_DIR@
+HTML_SUFFIX=.html
+
+OBJECTS=CK_ARRAY_FOREACH \
+ ck_array_buffer \
+ ck_array_commit \
+ ck_array_deinit \
+ ck_array_init \
+ ck_array_initialized \
+ ck_array_length \
+ ck_array_put \
+ ck_array_put_unique \
+ ck_array_remove \
+ ck_brlock \
+ ck_ht_count \
+ ck_ht_destroy \
+ ck_ht_gc \
+ ck_ht_get_spmc \
+ ck_ht_grow_spmc \
+ ck_ht_hash \
+ ck_ht_hash_direct \
+ ck_ht_init \
+ ck_ht_put_spmc \
+ ck_ht_remove_spmc \
+ ck_ht_reset_spmc \
+ ck_ht_reset_size_spmc \
+ ck_ht_set_spmc \
+ ck_ht_entry_empty \
+ ck_ht_entry_key \
+ ck_ht_entry_key_direct \
+ ck_ht_entry_key_length \
+ ck_ht_entry_key_set \
+ ck_ht_entry_key_set_direct \
+ ck_ht_entry_set \
+ ck_ht_entry_set_direct \
+ ck_ht_entry_value_direct \
+ ck_ht_entry_value \
+ ck_ht_iterator_init \
+ ck_ht_next \
+ ck_ht_stat \
+ ck_bitmap_init \
+ ck_bitmap_reset \
+ ck_bitmap_set \
+ ck_bitmap_bts \
+ ck_bitmap_test \
+ ck_bitmap_base \
+ ck_bitmap_union \
+ ck_bitmap_size \
+ ck_bitmap_clear \
+ ck_bitmap_bits \
+ ck_bitmap_buffer \
+ ck_bitmap_next \
+ ck_bitmap_iterator_init \
+ ck_elide \
+ ck_epoch_barrier \
+ ck_epoch_begin \
+ ck_epoch_call \
+ ck_epoch_end \
+ ck_epoch_init \
+ ck_epoch_poll \
+ ck_epoch_recycle \
+ ck_epoch_register \
+ ck_epoch_reclaim \
+ ck_epoch_synchronize \
+ ck_epoch_unregister \
+ ck_hs_gc \
+ ck_hs_init \
+ ck_hs_destroy \
+ CK_HS_HASH \
+ ck_hs_apply \
+ ck_hs_iterator_init \
+ ck_hs_next \
+ ck_hs_get \
+ ck_hs_put \
+ ck_hs_put_unique \
+ ck_hs_set \
+ ck_hs_fas \
+ ck_hs_remove \
+ ck_hs_move \
+ ck_hs_grow \
+ ck_hs_rebuild \
+ ck_hs_count \
+ ck_hs_reset \
+ ck_hs_reset_size \
+ ck_hs_stat \
+ ck_rhs_gc \
+ ck_rhs_init \
+ ck_rhs_destroy \
+ CK_RHS_HASH \
+ ck_rhs_apply \
+ ck_rhs_iterator_init \
+ ck_rhs_next \
+ ck_rhs_get \
+ ck_rhs_put \
+ ck_rhs_put_unique \
+ ck_rhs_set \
+ ck_rhs_fas \
+ ck_rhs_remove \
+ ck_rhs_move \
+ ck_rhs_grow \
+ ck_rhs_rebuild \
+ ck_rhs_count \
+ ck_rhs_reset \
+ ck_rhs_reset_size \
+ ck_rhs_stat \
+ ck_rwcohort \
+ CK_RWCOHORT_INIT \
+ CK_RWCOHORT_INSTANCE \
+ CK_RWCOHORT_PROTOTYPE \
+ CK_RWCOHORT_READ_LOCK \
+ CK_RWCOHORT_READ_UNLOCK \
+ CK_RWCOHORT_WRITE_LOCK \
+ CK_RWCOHORT_WRITE_UNLOCK \
+ ck_cohort \
+ CK_COHORT_PROTOTYPE \
+ CK_COHORT_TRYLOCK_PROTOTYPE \
+ CK_COHORT_INSTANCE \
+ CK_COHORT_INIT \
+ CK_COHORT_LOCK \
+ CK_COHORT_UNLOCK \
+ CK_COHORT_TRYLOCK \
+ ck_pr \
+ ck_pr_fence_acquire \
+ ck_pr_fence_release \
+ ck_pr_barrier \
+ ck_pr_fas \
+ ck_pr_fence_atomic \
+ ck_pr_fence_atomic_load \
+ ck_pr_fence_atomic_store \
+ ck_pr_fence_load \
+ ck_pr_fence_load_atomic \
+ ck_pr_fence_load_store \
+ ck_pr_fence_load_depends \
+ ck_pr_fence_memory \
+ ck_pr_fence_store \
+ ck_pr_fence_store_atomic \
+ ck_pr_fence_store_load \
+ ck_pr_stall \
+ ck_pr_faa \
+ ck_pr_inc \
+ ck_pr_dec \
+ ck_pr_not \
+ ck_pr_neg \
+ ck_pr_add \
+ ck_pr_sub \
+ ck_pr_and \
+ ck_pr_xor \
+ ck_pr_or \
+ ck_pr_cas \
+ ck_pr_bts \
+ ck_pr_btc \
+ ck_pr_btr \
+ ck_pr_store \
+ ck_pr_load \
+ ck_pr_rtm \
+ ck_queue \
+ ck_ring_init \
+ ck_ring_dequeue_spmc \
+ ck_ring_enqueue_spmc \
+ ck_ring_enqueue_spmc_size \
+ ck_ring_trydequeue_spmc \
+ ck_ring_dequeue_spsc \
+ ck_ring_enqueue_spsc \
+ ck_ring_enqueue_spsc_size \
+ ck_ring_size \
+ ck_ring_capacity \
+ ck_tflock \
+ ck_rwlock \
+ ck_pflock \
+ ck_swlock \
+ ck_sequence \
+ ck_spinlock
+
+all:
+ for target in $(OBJECTS); do \
+ $(GZIP) $(SRC_DIR)/doc/$$target > $(BUILD_DIR)/doc/$$target$(GZIP_SUFFIX); \
+ done
+
+html:
+ for target in $(OBJECTS); do \
+ echo $$target; \
+ groff -man -Tascii $(SRC_DIR)/doc/$$target | col -bx > \
+ $(BUILD_DIR)/doc/$$target$(HTML_SUFFIX); \
+ sed -i.bk 's/\&/\&amp\;/g;s/>/\&gt\;/g;s/</\&lt\;/g;' \
+ $(BUILD_DIR)/doc/$$target$(HTML_SUFFIX); \
+ done
+
+# check for entries that are missing in OBJECTS
+objcheck: all
+ for file in `ls * | egrep '(ck|CK)_' | egrep -v "($(GZIP_SUFFIX)|$(HTML_SUFFIX))$$"`; do \
+ if [ ! -f $${file}$(GZIP_SUFFIX) ]; then \
+ echo "$$file is missing from OBJECTS" >&2; \
+ fi; \
+ done
+
+# check for stale references
+refcheck:
+ @./refcheck.pl $(OBJECTS)
+
+install:
+ mkdir -p $(DESTDIR)/$(MANDIR)/man3 || exit
+ cp *$(GZIP_SUFFIX) $(DESTDIR)/$(MANDIR)/man3 || exit
+
+uninstall:
+ for target in $(OBJECTS); do \
+ rm -f $(DESTDIR)/$(MANDIR)/man3/$$target$(GZIP_SUFFIX); \
+ done
+
+clean:
+ rm -f $(BUILD_DIR)/doc/*~ $(BUILD_DIR)/doc/*$(GZIP_SUFFIX) $(BUILD_DIR)/doc/*$(HTML_SUFFIX)
+
diff --git a/doc/ck_array_buffer b/doc/ck_array_buffer
new file mode 100644
index 0000000..7a8ded3
--- /dev/null
+++ b/doc/ck_array_buffer
@@ -0,0 +1,60 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd October 18, 2013
+.Dt CK_ARRAY_BUFFER 3
+.Sh NAME
+.Nm ck_array_buffer
+.Nd return length and pointer to array of reader-visible pointers
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_array.h
+.Ft void *
+.Fn ck_array_buffer "ck_array_t *array" "unsigned int *length"
+.Sh DESCRIPTION
+The
+.Fn ck_array_buffer 3
+function returns a pointer to the array of pointers currently visible
+to readers after the last commit operation in
+.Fa array .
+The unsigned integer pointed to by
+.Fa length
+is updated to reflect the length of the array.
+.Sh RETURN VALUES
+This function returns a pointer to an array of pointers.
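+.Sh EXAMPLE
+The following is a minimal sketch that reads a snapshot of the
+committed contents of an array; the
+.Fn process
+callback is an illustrative placeholder and the array is assumed to
+have been initialized with
+.Xr ck_array_init 3 .
+.Bd -literal -offset indent
+#include <ck_array.h>
+
+void
+dump(ck_array_t *array, void (*process)(void *))
+{
+        unsigned int i, length;
+        void **buffer;
+
+        /* Pointer array and length as of the last commit operation. */
+        buffer = ck_array_buffer(array, &length);
+        for (i = 0; i < length; i++)
+                process(buffer[i]);
+
+        return;
+}
+.Ed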
+.Sh SEE ALSO
+.Xr ck_array_commit 3 ,
+.Xr ck_array_put 3 ,
+.Xr ck_array_put_unique 3 ,
+.Xr ck_array_remove 3 ,
+.Xr ck_array_init 3 ,
+.Xr ck_array_deinit 3 ,
+.Xr ck_array_length 3 ,
+.Xr ck_array_initialized 3 ,
+.Xr CK_ARRAY_FOREACH 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_array_commit b/doc/ck_array_commit
new file mode 100644
index 0000000..0fc1192
--- /dev/null
+++ b/doc/ck_array_commit
@@ -0,0 +1,58 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd October 18, 2013
+.Dt CK_ARRAY_COMMIT 3
+.Sh NAME
+.Nm ck_array_commit
+.Nd linearization point for mutations before commit call
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_array.h
+.Ft bool
+.Fn ck_array_commit "ck_array_t *array"
+.Sh DESCRIPTION
+The
+.Fn ck_array_commit 3
+function will commit any pending put or remove operations associated
+with the array. The function may end up requesting the safe reclamation
+of memory actively being iterated upon by other threads.
+.Sh RETURN VALUES
+This function returns true if the commit operation succeeded. It will
+return false otherwise, and pending operations will not be applied.
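+.Sh EXAMPLE
+The following is a minimal single-writer sketch: several pointers are
+staged with
+.Xr ck_array_put 3
+and made visible to readers by one commit call. The array is assumed
+to have been initialized with
+.Xr ck_array_init 3 .
+.Bd -literal -offset indent
+#include <stdbool.h>
+#include <ck_array.h>
+
+bool
+publish(ck_array_t *array, void **objects, unsigned int n)
+{
+        unsigned int i;
+
+        /* Stage the insertions; readers do not observe them yet. */
+        for (i = 0; i < n; i++) {
+                if (ck_array_put(array, objects[i]) == false)
+                        return false;
+        }
+
+        /* Linearization point: all staged puts become visible. */
+        return ck_array_commit(array);
+}
+.Ed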
+.Sh SEE ALSO
+.Xr ck_array_init 3 ,
+.Xr ck_array_put 3 ,
+.Xr ck_array_put_unique 3 ,
+.Xr ck_array_remove 3 ,
+.Xr ck_array_deinit 3 ,
+.Xr ck_array_length 3 ,
+.Xr ck_array_buffer 3 ,
+.Xr ck_array_initialized 3 ,
+.Xr CK_ARRAY_FOREACH 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_array_deinit b/doc/ck_array_deinit
new file mode 100644
index 0000000..3a5e5ab
--- /dev/null
+++ b/doc/ck_array_deinit
@@ -0,0 +1,62 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd October 18, 2013
+.Dt CK_ARRAY_DEINIT 3
+.Sh NAME
+.Nm ck_array_deinit
+.Nd destroy and deinitialize a pointer array
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_array.h
+.Ft void
+.Fn ck_array_deinit "ck_array_t *array" "bool defer"
+.Sh DESCRIPTION
+The
+.Fn ck_array_deinit 3
+function destroys the memory associated with the array pointed
+to by
+.Fa array .
+The
+.Fa defer
+argument is true if the allocator must destroy
+the memory using safe memory reclamation or false
+if the allocator can destroy this memory immediately.
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_array_commit 3 ,
+.Xr ck_array_put 3 ,
+.Xr ck_array_put_unique 3 ,
+.Xr ck_array_remove 3 ,
+.Xr ck_array_init 3 ,
+.Xr ck_array_length 3 ,
+.Xr ck_array_buffer 3 ,
+.Xr ck_array_initialized 3 ,
+.Xr CK_ARRAY_FOREACH 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_array_init b/doc/ck_array_init
new file mode 100644
index 0000000..ad8a9fe
--- /dev/null
+++ b/doc/ck_array_init
@@ -0,0 +1,69 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd October 18, 2013
+.Dt CK_ARRAY_INIT 3
+.Sh NAME
+.Nm ck_array_init
+.Nd initialize a pointer array
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_array.h
+.Ft bool
+.Fn ck_array_init "ck_array_t *array" "unsigned int mode" "struct ck_malloc *allocator" "unsigned int initial_length"
+.Sh DESCRIPTION
+The
+.Fn ck_array_init 3
+function initializes the array pointed to by the argument
+.Fa array .
+The mode value must be
+.Dv CK_ARRAY_MODE_SPMC .
+The
+.Fa allocator
+argument must point to a ck_malloc data structure with valid non-NULL function pointers
+initialized for malloc, free and realloc. The
+.Fa initial_length
+specifies the initial length of the array. The value of
+.Fa initial_length
+must be greater than or equal to 2. An array allows for one concurrent put or remove operation
+in the presence of any number of concurrent CK_ARRAY_FOREACH operations.
+.Sh RETURN VALUES
+This function returns true if the array was successfully created. It returns
+false if the creation failed. Failure may occur due to internal memory allocation
+failures or invalid arguments.
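+.Sh EXAMPLE
+The following is a minimal initialization sketch that adapts the
+standard C allocator to the ck_malloc interface; the wrapper names and
+the initial length of 8 are illustrative only.
+.Bd -literal -offset indent
+#include <stdbool.h>
+#include <stdlib.h>
+#include <ck_array.h>
+
+static void *
+a_malloc(size_t size)
+{
+
+        return malloc(size);
+}
+
+static void *
+a_realloc(void *p, size_t old_size, size_t new_size, bool defer)
+{
+
+        (void)old_size;
+        (void)defer;
+        return realloc(p, new_size);
+}
+
+static void
+a_free(void *p, size_t size, bool defer)
+{
+
+        (void)size;
+        (void)defer;
+        free(p);
+        return;
+}
+
+static struct ck_malloc allocator = {
+        .malloc = a_malloc,
+        .realloc = a_realloc,
+        .free = a_free
+};
+
+static ck_array_t array;
+
+bool
+array_setup(void)
+{
+
+        /* SPMC mode: one concurrent writer, any number of readers. */
+        return ck_array_init(&array, CK_ARRAY_MODE_SPMC, &allocator, 8);
+}
+.Ed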
+.Sh SEE ALSO
+.Xr ck_array_commit 3 ,
+.Xr ck_array_put 3 ,
+.Xr ck_array_put_unique 3 ,
+.Xr ck_array_remove 3 ,
+.Xr ck_array_deinit 3 ,
+.Xr ck_array_length 3 ,
+.Xr ck_array_buffer 3 ,
+.Xr ck_array_initialized 3 ,
+.Xr CK_ARRAY_FOREACH 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_array_initialized b/doc/ck_array_initialized
new file mode 100644
index 0000000..3a06413
--- /dev/null
+++ b/doc/ck_array_initialized
@@ -0,0 +1,62 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd October 18, 2013
+.Dt CK_ARRAY_INITIALIZED 3
+.Sh NAME
+.Nm ck_array_initialized
+.Nd indicates whether an array was recently initialized or deinitialized
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_array.h
+.Ft bool
+.Fn ck_array_initialized "ck_array_t *array"
+.Sh DESCRIPTION
+The
+.Fn ck_array_initialized 3
+function can be used to determine whether an array was recently initialized
+with
+.Fn ck_array_init 3
+or deinitialized with
+.Fn ck_array_deinit 3 .
+Behavior is undefined if a user allocates internal allocator data
+through other means.
+.Sh RETURN VALUES
+This function returns true if the array is initialized, and false
+otherwise.
+.Sh SEE ALSO
+.Xr ck_array_commit 3 ,
+.Xr ck_array_put 3 ,
+.Xr ck_array_put_unique 3 ,
+.Xr ck_array_remove 3 ,
+.Xr ck_array_init 3 ,
+.Xr ck_array_deinit 3 ,
+.Xr ck_array_length 3 ,
+.Xr ck_array_buffer 3 ,
+.Xr CK_ARRAY_FOREACH 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_array_length b/doc/ck_array_length
new file mode 100644
index 0000000..e60c6c3
--- /dev/null
+++ b/doc/ck_array_length
@@ -0,0 +1,57 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd October 18, 2013
+.Dt CK_ARRAY_LENGTH 3
+.Sh NAME
+.Nm ck_array_length
+.Nd returns the number of pointers committed to an array
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_array.h
+.Ft unsigned int
+.Fn ck_array_length "ck_array_t *array"
+.Sh DESCRIPTION
+The
+.Fn ck_array_length 3
+function returns the number of items a concurrent
+traversal operation would encounter at completion
+time.
+.Sh RETURN VALUES
+The number of traversal-visible pointers is returned.
+.Sh SEE ALSO
+.Xr ck_array_commit 3 ,
+.Xr ck_array_put 3 ,
+.Xr ck_array_put_unique 3 ,
+.Xr ck_array_remove 3 ,
+.Xr ck_array_init 3 ,
+.Xr ck_array_deinit 3 ,
+.Xr ck_array_buffer 3 ,
+.Xr ck_array_initialized 3 ,
+.Xr CK_ARRAY_FOREACH 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_array_put b/doc/ck_array_put
new file mode 100644
index 0000000..0f74eb0
--- /dev/null
+++ b/doc/ck_array_put
@@ -0,0 +1,65 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd October 18, 2013
+.Dt CK_ARRAY_PUT 3
+.Sh NAME
+.Nm ck_array_put
+.Nd attempt immediate or deferred insertion of a pointer into array
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_array.h
+.Ft bool
+.Fn ck_array_put "ck_array_t *array" "void *pointer"
+.Sh DESCRIPTION
+The
+.Fn ck_array_put 3
+function will attempt to insert the value of
+.Fa pointer
+into the array pointed to by
+.Fa array .
+This function may incur additional memory allocations
+if not enough memory has been allocated in the array
+for a new entry. The function is also free to apply
+the insertion immediately if there is an opportunity
+for elimination with a pending (uncommitted) remove
+operation.
+.Sh RETURN VALUES
+This function returns true if the put operation succeeded. It will
+return false otherwise due to internal allocation failures.
+.Sh SEE ALSO
+.Xr ck_array_init 3 ,
+.Xr ck_array_commit 3 ,
+.Xr ck_array_put_unique 3 ,
+.Xr ck_array_remove 3 ,
+.Xr ck_array_deinit 3 ,
+.Xr ck_array_length 3 ,
+.Xr ck_array_buffer 3 ,
+.Xr ck_array_initialized 3 ,
+.Xr CK_ARRAY_FOREACH 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_array_put_unique b/doc/ck_array_put_unique
new file mode 100644
index 0000000..bb355fe
--- /dev/null
+++ b/doc/ck_array_put_unique
@@ -0,0 +1,67 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd October 18, 2013
+.Dt CK_ARRAY_PUT_UNIQUE 3
+.Sh NAME
+.Nm ck_array_put_unique
+.Nd attempt immediate or deferred insertion of a unique pointer into array
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_array.h
+.Ft int
+.Fn ck_array_put_unique "ck_array_t *array" "void *pointer"
+.Sh DESCRIPTION
+The
+.Fn ck_array_put_unique 3
+function will attempt to insert the value of
+.Fa pointer
+into the array pointed to by
+.Fa array .
+This function may incur additional memory allocations
+if not enough memory has been allocated in the array
+for a new entry. The function is also free to apply
+the insertion immediately if there is an opportunity
+for elimination with a pending (uncommitted) remove
+operation. The function will not make any modifications
+if the pointer already exists in the array.
+.Sh RETURN VALUES
+This function returns 1 if the pointer already exists in the array.
+It returns 0 if the put operation succeeded. It returns -1 on
+error due to internal memory allocation failures.
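+.Sh EXAMPLE
+The following sketch distinguishes the three return values; the array
+is assumed to have been initialized with
+.Xr ck_array_init 3 .
+.Bd -literal -offset indent
+#include <stdbool.h>
+#include <ck_array.h>
+
+/*
+ * Returns true if the pointer was already present or has been
+ * inserted and committed, false on failure.
+ */
+bool
+add_once(ck_array_t *array, void *pointer)
+{
+        int r;
+
+        r = ck_array_put_unique(array, pointer);
+        if (r == -1)
+                return false;   /* Internal allocation failure. */
+
+        if (r == 1)
+                return true;    /* Pointer was already in the array. */
+
+        /* r == 0: the insertion is pending until the next commit. */
+        return ck_array_commit(array);
+}
+.Ed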
+.Sh SEE ALSO
+.Xr ck_array_init 3 ,
+.Xr ck_array_commit 3 ,
+.Xr ck_array_put 3 ,
+.Xr ck_array_remove 3 ,
+.Xr ck_array_deinit 3 ,
+.Xr ck_array_length 3 ,
+.Xr ck_array_buffer 3 ,
+.Xr ck_array_initialized 3 ,
+.Xr CK_ARRAY_FOREACH 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_array_remove b/doc/ck_array_remove
new file mode 100644
index 0000000..8df454d
--- /dev/null
+++ b/doc/ck_array_remove
@@ -0,0 +1,64 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd October 18, 2013
+.Dt CK_ARRAY_REMOVE 3
+.Sh NAME
+.Nm ck_array_remove
+.Nd attempt immediate or deferred removal of a pointer from an array
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_array.h
+.Ft bool
+.Fn ck_array_remove "ck_array_t *array" "void *pointer"
+.Sh DESCRIPTION
+The
+.Fn ck_array_remove 3
+function will attempt to remove the value of
+.Fa pointer
+from the array pointed to by
+.Fa array .
+The function is also free to apply the removal
+immediately if there is an opportunity for elimination
+with a pending (uncommitted) put operation. If no
+elimination is possible, the function may need to
+allocate additional memory.
+.Sh RETURN VALUES
+This function returns true if the remove operation succeeded. It will
+return false otherwise due to internal allocation failures or because
+the value did not exist.
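+.Sh EXAMPLE
+The following sketch stages a removal and commits it; the array is
+assumed to have been initialized with
+.Xr ck_array_init 3 .
+.Bd -literal -offset indent
+#include <stdbool.h>
+#include <ck_array.h>
+
+bool
+retire(ck_array_t *array, void *pointer)
+{
+
+        /* Stage the removal; readers may still observe the pointer. */
+        if (ck_array_remove(array, pointer) == false)
+                return false;
+
+        /* After a successful commit, readers no longer observe it. */
+        return ck_array_commit(array);
+}
+.Ed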
+.Sh SEE ALSO
+.Xr ck_array_init 3 ,
+.Xr ck_array_commit 3 ,
+.Xr ck_array_put 3 ,
+.Xr ck_array_put_unique 3 ,
+.Xr ck_array_deinit 3 ,
+.Xr ck_array_length 3 ,
+.Xr ck_array_buffer 3 ,
+.Xr ck_array_initialized 3 ,
+.Xr CK_ARRAY_FOREACH 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_bitmap_base b/doc/ck_bitmap_base
new file mode 100644
index 0000000..e9342bf
--- /dev/null
+++ b/doc/ck_bitmap_base
@@ -0,0 +1,58 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 22, 2012
+.Dt CK_BITMAP_BASE 3
+.Sh NAME
+.Nm ck_bitmap_base
+.Nd determine the size of a bit array in bytes
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_bitmap.h
+.Ft unsigned int
+.Fn ck_bitmap_base "unsigned int n_bits"
+.Sh DESCRIPTION
+The
+.Fn ck_bitmap_base
+function returns the number of bytes that would be used
+to store the number of bits specified by
+.Fa n_bits .
+.Sh RETURN VALUES
+This function returns a non-zero value that is guaranteed to
+be a multiple of
+.Dv sizeof(CK_BITMAP_WORD) .
+.Sh SEE ALSO
+.Xr ck_bitmap_size 3 ,
+.Xr ck_bitmap_init 3 ,
+.Xr ck_bitmap_set 3 ,
+.Xr ck_bitmap_reset 3 ,
+.Xr ck_bitmap_test 3 ,
+.Xr ck_bitmap_clear 3 ,
+.Xr ck_bitmap_bits 3 ,
+.Xr ck_bitmap_buffer 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_bitmap_bits b/doc/ck_bitmap_bits
new file mode 100644
index 0000000..efd5eb2
--- /dev/null
+++ b/doc/ck_bitmap_bits
@@ -0,0 +1,56 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 22, 2012
+.Dt CK_BITMAP_BITS 3
+.Sh NAME
+.Nm ck_bitmap_bits
+.Nd return number of addressable bits in bitmap
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_bitmap.h
+.Ft unsigned int
+.Fn ck_bitmap_bits "ck_bitmap_t *bitmap"
+.Sh DESCRIPTION
+The
+.Fn ck_bitmap_bits
+function returns the maximum number of addressable bits in
+the object pointed to by
+.Fa bitmap .
+.Sh RETURN VALUES
+This function returns a non-zero value.
+.Sh SEE ALSO
+.Xr ck_bitmap_base 3 ,
+.Xr ck_bitmap_size 3 ,
+.Xr ck_bitmap_init 3 ,
+.Xr ck_bitmap_set 3 ,
+.Xr ck_bitmap_reset 3 ,
+.Xr ck_bitmap_test 3 ,
+.Xr ck_bitmap_clear 3 ,
+.Xr ck_bitmap_buffer 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_bitmap_bts b/doc/ck_bitmap_bts
new file mode 100644
index 0000000..872284c
--- /dev/null
+++ b/doc/ck_bitmap_bts
@@ -0,0 +1,61 @@
+.\"
+.\" Copyright 2014 David Joseph.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd August 22, 2014
+.Dt CK_BITMAP_BTS 3
+.Sh NAME
+.Nm ck_bitmap_bts
+.Nd set the bit at the specified index and fetch its original value
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_bitmap.h
+.Ft bool
+.Fn ck_bitmap_bts "ck_bitmap_t *bitmap" "unsigned int n"
+.Sh DESCRIPTION
+.Fn ck_bitmap_bts
+sets the bit at the offset specified by the argument
+.Fa n
+to
+.Dv 1
+and fetches its original value.
+.Sh RETURN VALUES
+This function returns the original value of the bit at offset
+.Fa n
+in
+.Fa bitmap .
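+.Sh EXAMPLE
+The following sketch uses the returned value to detect whether the bit
+was already set; treating bits as claimable slots is illustrative.
+.Bd -literal -offset indent
+#include <stdbool.h>
+#include <ck_bitmap.h>
+
+/*
+ * Returns true if the bit was previously unset (this call set it),
+ * false if the bit was already set.
+ */
+bool
+claim_slot(ck_bitmap_t *bitmap, unsigned int n)
+{
+
+        return ck_bitmap_bts(bitmap, n) == false;
+}
+.Ed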
+.Sh SEE ALSO
+.Xr ck_bitmap_base 3 ,
+.Xr ck_bitmap_size 3 ,
+.Xr ck_bitmap_init 3 ,
+.Xr ck_bitmap_reset 3 ,
+.Xr ck_bitmap_clear 3 ,
+.Xr ck_bitmap_set 3 ,
+.Xr ck_bitmap_test 3 ,
+.Xr ck_bitmap_bits 3 ,
+.Xr ck_bitmap_buffer 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_bitmap_buffer b/doc/ck_bitmap_buffer
new file mode 100644
index 0000000..206df03
--- /dev/null
+++ b/doc/ck_bitmap_buffer
@@ -0,0 +1,65 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 22, 2012
+.Dt CK_BITMAP_BUFFER 3
+.Sh NAME
+.Nm ck_bitmap_buffer
+.Nd returns pointer to bit array
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_bitmap.h
+.Ft void *
+.Fn ck_bitmap_buffer "ck_bitmap_t *bitmap"
+.Sh DESCRIPTION
+The
+.Fn ck_bitmap_buffer
+function returns a pointer to the actual bit array.
+For ck_bitmap pointers, the bit array is of type
+CK_BITMAP_WORD[] and consists of
+ck_bitmap_base(bitmap) / sizeof(CK_BITMAP_WORD) elements.
+On currently supported 64-bit platforms
+.Dv CK_BITMAP_WORD
+is
+.Dv uint64_t .
+On currently supported 32-bit platforms
+.Dv CK_BITMAP_WORD
+is
+.Dv uint32_t .
+.Sh RETURN VALUES
+This function returns a non-NULL value.
+.Sh SEE ALSO
+.Xr ck_bitmap_base 3 ,
+.Xr ck_bitmap_size 3 ,
+.Xr ck_bitmap_init 3 ,
+.Xr ck_bitmap_set 3 ,
+.Xr ck_bitmap_reset 3 ,
+.Xr ck_bitmap_test 3 ,
+.Xr ck_bitmap_clear 3 ,
+.Xr ck_bitmap_bits 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_bitmap_clear b/doc/ck_bitmap_clear
new file mode 100644
index 0000000..f94dca2
--- /dev/null
+++ b/doc/ck_bitmap_clear
@@ -0,0 +1,56 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 22, 2012
+.Dt CK_BITMAP_CLEAR 3
+.Sh NAME
+.Nm ck_bitmap_clear
+.Nd reset all bits
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_bitmap.h
+.Ft void
+.Fn ck_bitmap_clear "ck_bitmap_t *bitmap"
+.Sh DESCRIPTION
+The
+.Fn ck_bitmap_clear
+function sets all bits in the bitmap pointed to by
+.Fa bitmap
+to 0.
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_bitmap_base 3 ,
+.Xr ck_bitmap_size 3 ,
+.Xr ck_bitmap_init 3 ,
+.Xr ck_bitmap_set 3 ,
+.Xr ck_bitmap_reset 3 ,
+.Xr ck_bitmap_test 3 ,
+.Xr ck_bitmap_bits 3 ,
+.Xr ck_bitmap_buffer 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_bitmap_init b/doc/ck_bitmap_init
new file mode 100644
index 0000000..a238374
--- /dev/null
+++ b/doc/ck_bitmap_init
@@ -0,0 +1,84 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 22, 2012
+.Dt CK_BITMAP_INIT 3
+.Sh NAME
+.Nm ck_bitmap_init
+.Nd initialize a bitmap
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_bitmap.h
+.Ft void
+.Fn ck_bitmap_init "ck_bitmap_t *bitmap" "unsigned int n_bits" "bool set"
+.Sh DESCRIPTION
+The
+.Fn ck_bitmap_init
+function initializes the bitmap pointed to by the
+.Fa bitmap
+pointer. The argument
+.Fa n_bits
+specifies the number of bits that are to be stored in the bitmap.
+The argument
+.Fa set
+determines whether the values of the bits in
+.Fa bitmap
+are to be initialized to
+.Dv 1
+or
+.Dv 0 .
+.Pp
+It is expected that
+.Fa bitmap
+points to a contiguous region of memory containing at least
+the number of bytes specified by
+.Xr ck_bitmap_size 3 .
+.Sh RETURN VALUES
+This function has no return value.
+.Sh ERRORS
+.Bl -tag -width Er
+.Pp
+The behavior of
+.Fn ck_bitmap_init
+is undefined if
+.Fa bitmap
+is not a pointer to a region of bytes
+of at least
+.Xr ck_bitmap_size 3
+length.
+.El
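+.Sh EXAMPLE
+The following is a minimal allocation sketch that uses
+.Xr ck_bitmap_size 3
+to compute the required storage; the use of
+.Xr malloc 3
+is illustrative.
+.Bd -literal -offset indent
+#include <stdbool.h>
+#include <stdlib.h>
+#include <ck_bitmap.h>
+
+ck_bitmap_t *
+bitmap_create(unsigned int n_bits, bool set)
+{
+        ck_bitmap_t *bitmap;
+
+        /* ck_bitmap_size reports the bytes required for n_bits bits. */
+        bitmap = malloc(ck_bitmap_size(n_bits));
+        if (bitmap == NULL)
+                return NULL;
+
+        ck_bitmap_init(bitmap, n_bits, set);
+        return bitmap;
+}
+.Ed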
+.Sh SEE ALSO
+.Xr ck_bitmap_base 3 ,
+.Xr ck_bitmap_size 3 ,
+.Xr ck_bitmap_set 3 ,
+.Xr ck_bitmap_reset 3 ,
+.Xr ck_bitmap_clear 3 ,
+.Xr ck_bitmap_test 3 ,
+.Xr ck_bitmap_bits 3 ,
+.Xr ck_bitmap_buffer 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_bitmap_iterator_init b/doc/ck_bitmap_iterator_init
new file mode 100644
index 0000000..d67c659
--- /dev/null
+++ b/doc/ck_bitmap_iterator_init
@@ -0,0 +1,70 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" Copyright 2012-2013 Shreyas Prasad.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 27, 2012
+.Dt CK_BITMAP_ITERATOR_INIT 3
+.Sh NAME
+.Nm ck_bitmap_iterator_init
+.Nd initialize bitmap iterator
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_bitmap.h
+.Pp
+.Ft void
+.Fn ck_bitmap_iterator_init "ck_bitmap_iterator_t *iterator" "ck_bitmap_t *bitmap"
+.Sh DESCRIPTION
+The
+.Fn ck_bitmap_iterator_init
+function will initialize the object pointed to by
+the
+.Fa iterator
+argument for use with
+.Fa bitmap .
+.Pp
+An iterator is used to iterate through set bitmap bits
+with the
+.Xr ck_bitmap_next 3
+function.
+.Sh RETURN VALUES
+The
+.Fn ck_bitmap_iterator_init
+function does not return a value.
+.Sh ERRORS
+This function will not fail.
+.Sh SEE ALSO
+.Xr ck_bitmap_base 3 ,
+.Xr ck_bitmap_size 3 ,
+.Xr ck_bitmap_init 3 ,
+.Xr ck_bitmap_set 3 ,
+.Xr ck_bitmap_reset 3 ,
+.Xr ck_bitmap_clear 3 ,
+.Xr ck_bitmap_bits 3 ,
+.Xr ck_bitmap_buffer 3 ,
+.Xr ck_bitmap_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_bitmap_next b/doc/ck_bitmap_next
new file mode 100644
index 0000000..3e93c43
--- /dev/null
+++ b/doc/ck_bitmap_next
@@ -0,0 +1,90 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" Copyright 2012-2013 Shreyas Prasad.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 27, 2012
+.Dt CK_BITMAP_NEXT 3
+.Sh NAME
+.Nm ck_bitmap_next
+.Nd iterate to the next set bit in bitmap
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_bitmap.h
+.Ft bool
+.Fn ck_bitmap_next "ck_bitmap_t *bitmap" "ck_bitmap_iterator_t *iterator" "unsigned int *bit"
+.Sh DESCRIPTION
+The
+.Fn ck_bitmap_next
+function will increment the iterator object pointed to by
+.Fa iterator
+to point to the next set bit in the bitmap. If
+.Fn ck_bitmap_next
+returns
+.Dv true
+then the pointer pointed to by
+.Fa bit
+is initialized to the number of the current set bit pointed to by the
+.Fa iterator
+object.
+.Pp
+It is expected that
+.Fa iterator
+has been initialized using the
+.Xr ck_bitmap_iterator_init 3
+function.
+.Sh RETURN VALUES
+If
+.Fn ck_bitmap_next
+returns
+.Dv true
+then the object pointed to by
+.Fa bit
+contains a set bit. If
+.Fn ck_bitmap_next
+returns
+.Dv false
+then the value of the object pointed to by
+.Fa bit
+is undefined.
+.Sh ERRORS
+Behavior is undefined if
+.Fa iterator
+or
+.Fa bitmap
+are uninitialized.
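+.Sh EXAMPLE
+The following sketch visits every set bit in a bitmap; the
+.Fn visit
+callback is an illustrative placeholder.
+.Bd -literal -offset indent
+#include <ck_bitmap.h>
+
+void
+for_each_set_bit(ck_bitmap_t *bitmap, void (*visit)(unsigned int))
+{
+        ck_bitmap_iterator_t iterator;
+        unsigned int bit;
+
+        ck_bitmap_iterator_init(&iterator, bitmap);
+
+        /* ck_bitmap_next returns false once no set bits remain. */
+        while (ck_bitmap_next(bitmap, &iterator, &bit) == true)
+                visit(bit);
+
+        return;
+}
+.Ed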
+.Sh SEE ALSO
+.Xr ck_bitmap_base 3 ,
+.Xr ck_bitmap_size 3 ,
+.Xr ck_bitmap_init 3 ,
+.Xr ck_bitmap_set 3 ,
+.Xr ck_bitmap_reset 3 ,
+.Xr ck_bitmap_clear 3 ,
+.Xr ck_bitmap_bits 3 ,
+.Xr ck_bitmap_buffer 3 ,
+.Xr ck_bitmap_iterator_init 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_bitmap_reset b/doc/ck_bitmap_reset
new file mode 100644
index 0000000..c6b8ee5
--- /dev/null
+++ b/doc/ck_bitmap_reset
@@ -0,0 +1,57 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 22, 2012
+.Dt CK_BITMAP_RESET 3
+.Sh NAME
+.Nm ck_bitmap_reset
+.Nd resets the bit at the specified index
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_bitmap.h
+.Ft void
+.Fn ck_bitmap_reset "ck_bitmap_t *bitmap" "unsigned int n"
+.Sh DESCRIPTION
+The
+.Fn ck_bitmap_reset
+function resets the bit at the offset specified by the argument
+.Fa n
+to
+.Dv 0 .
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_bitmap_base 3 ,
+.Xr ck_bitmap_size 3 ,
+.Xr ck_bitmap_init 3 ,
+.Xr ck_bitmap_set 3 ,
+.Xr ck_bitmap_clear 3 ,
+.Xr ck_bitmap_test 3 ,
+.Xr ck_bitmap_bits 3 ,
+.Xr ck_bitmap_buffer 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_bitmap_set b/doc/ck_bitmap_set
new file mode 100644
index 0000000..e92ba24
--- /dev/null
+++ b/doc/ck_bitmap_set
@@ -0,0 +1,57 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 22, 2012
+.Dt CK_BITMAP_SET 3
+.Sh NAME
+.Nm ck_bitmap_set
+.Nd set the bit at the specified index
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_bitmap.h
+.Ft void
+.Fn ck_bitmap_set "ck_bitmap_t *bitmap" "unsigned int n"
+.Sh DESCRIPTION
+The
+.Fn ck_bitmap_set
+function sets the bit at the offset specified by the argument
+.Fa n
+to
+.Dv 1 .
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_bitmap_base 3 ,
+.Xr ck_bitmap_size 3 ,
+.Xr ck_bitmap_init 3 ,
+.Xr ck_bitmap_reset 3 ,
+.Xr ck_bitmap_clear 3 ,
+.Xr ck_bitmap_test 3 ,
+.Xr ck_bitmap_bits 3 ,
+.Xr ck_bitmap_buffer 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_bitmap_size b/doc/ck_bitmap_size
new file mode 100644
index 0000000..03e5892
--- /dev/null
+++ b/doc/ck_bitmap_size
@@ -0,0 +1,62 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 22, 2012
+.Dt CK_BITMAP_SIZE 3
+.Sh NAME
+.Nm ck_bitmap_size
+.Nd returns necessary number of bytes for bitmap
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_bitmap.h
+.Ft unsigned int
+.Fn ck_bitmap_size "unsigned int n_bits"
+.Sh DESCRIPTION
+The
+.Fn ck_bitmap_size
+function returns the number of bytes that are necessary
+to allocate for a bitmap that will contain the number
+of bits specified by
+.Fa n_bits .
+.Pp
+This function is used to determine how many bytes to
+allocate for dynamically created bitmap objects. The
+allocated object must still be initialized using
+.Xr ck_bitmap_init 3 .
+.Sh RETURN VALUES
+This function returns a non-zero value.
+.Sh SEE ALSO
+.Xr ck_bitmap_base 3 ,
+.Xr ck_bitmap_init 3 ,
+.Xr ck_bitmap_set 3 ,
+.Xr ck_bitmap_reset 3 ,
+.Xr ck_bitmap_test 3 ,
+.Xr ck_bitmap_clear 3 ,
+.Xr ck_bitmap_bits 3 ,
+.Xr ck_bitmap_buffer 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_bitmap_test b/doc/ck_bitmap_test
new file mode 100644
index 0000000..9eb8936
--- /dev/null
+++ b/doc/ck_bitmap_test
@@ -0,0 +1,62 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 22, 2012
+.Dt CK_BITMAP_TEST 3
+.Sh NAME
+.Nm ck_bitmap_test
+.Nd determine if the bit at the specified index is set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_bitmap.h
+.Ft bool
+.Fn ck_bitmap_test "ck_bitmap_t *bitmap" "unsigned int n"
+.Sh DESCRIPTION
+The
+.Fn ck_bitmap_test
+function determines if the bit at the offset specified by the argument
+.Fa n
+is set to
+.Dv 1 .
+.Sh RETURN VALUES
+This function returns
+.Dv true
+if the bit at the specified offset is set to
+.Dv 1
+and otherwise returns
+.Dv false .
+.Sh SEE ALSO
+.Xr ck_bitmap_base 3 ,
+.Xr ck_bitmap_size 3 ,
+.Xr ck_bitmap_init 3 ,
+.Xr ck_bitmap_set 3 ,
+.Xr ck_bitmap_reset 3 ,
+.Xr ck_bitmap_clear 3 ,
+.Xr ck_bitmap_bits 3 ,
+.Xr ck_bitmap_buffer 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_bitmap_union b/doc/ck_bitmap_union
new file mode 100644
index 0000000..b0ab8e8
--- /dev/null
+++ b/doc/ck_bitmap_union
@@ -0,0 +1,58 @@
+.\"
+.\" Copyright 2012-2014 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd February 23, 2013
+.Dt CK_BITMAP_UNION 3
+.Sh NAME
+.Nm ck_bitmap_union
+.Nd generates union of two bitmaps
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_bitmap.h
+.Ft void
+.Fn ck_bitmap_union "ck_bitmap_t *dst" "ck_bitmap_t *src"
+.Sh DESCRIPTION
+The
+.Fn ck_bitmap_union
+function sets, in the bitmap pointed to by
+.Fa dst ,
+every bit that is set in the bitmap pointed to by
+.Fa src .
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_bitmap_base 3 ,
+.Xr ck_bitmap_size 3 ,
+.Xr ck_bitmap_init 3 ,
+.Xr ck_bitmap_reset 3 ,
+.Xr ck_bitmap_set 3 ,
+.Xr ck_bitmap_clear 3 ,
+.Xr ck_bitmap_test 3 ,
+.Xr ck_bitmap_bits 3 ,
+.Xr ck_bitmap_buffer 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_brlock b/doc/ck_brlock
new file mode 100644
index 0000000..7972ee4
--- /dev/null
+++ b/doc/ck_brlock
@@ -0,0 +1,121 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd July 26, 2013
+.Dt ck_brlock 3
+.Sh NAME
+.Nm ck_brlock_init ,
+.Nm ck_brlock_write_lock ,
+.Nm ck_brlock_write_unlock ,
+.Nm ck_brlock_write_trylock ,
+.Nm ck_brlock_read_register ,
+.Nm ck_brlock_read_unregister ,
+.Nm ck_brlock_read_lock ,
+.Nm ck_brlock_read_trylock ,
+.Nm ck_brlock_read_unlock
+.Nd big-reader locks
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_brlock.h
+.Pp
+.Dv ck_brlock_t brlock = CK_BRLOCK_INITIALIZER;
+.Pp
+.Dv ck_brlock_reader_t reader = CK_BRLOCK_READER_INITIALIZER;
+.Pp
+.Ft void
+.Fn ck_brlock_init "ck_brlock_t *br"
+.Ft void
+.Fn ck_brlock_write_lock "ck_brlock_t *br"
+.Ft void
+.Fn ck_brlock_write_unlock "ck_brlock_t *br"
+.Ft bool
+.Fn ck_brlock_write_trylock "ck_brlock_t *br" "unsigned int factor"
+.Ft void
+.Fn ck_brlock_read_register "ck_brlock_t *br" "ck_brlock_reader_t *reader"
+.Ft void
+.Fn ck_brlock_read_unregister "ck_brlock_t *br" "ck_brlock_reader_t *reader"
+.Ft void
+.Fn ck_brlock_read_lock "ck_brlock_t *br" "ck_brlock_reader_t *reader"
+.Ft bool
+.Fn ck_brlock_read_trylock "ck_brlock_t *br" "ck_brlock_reader_t *reader" \
+"unsigned int factor"
+.Ft void
+.Fn ck_brlock_read_unlock "ck_brlock_reader_t *reader"
+.Sh DESCRIPTION
+Big-reader locks are distributed reader-writer locks in which read-side
+acquisition is a low-latency, constant-time operation with respect to the
+number of concurrent readers. Write-side acquisition, on the other hand, is a
+relatively expensive O(n) operation, where n is the number of registered
+readers. This is a write-biased lock.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+static ck_brlock_t lock = CK_BRLOCK_INITIALIZER;
+static __thread ck_brlock_reader_t reader;
+
+static void
+reader_thread(void)
+{
+
+ /* Add our thread as a lock participant. */
+ ck_brlock_read_register(&lock, &reader);
+
+ for (;;) {
+ ck_brlock_read_lock(&lock, &reader);
+ /* Read-side critical section. */
+ ck_brlock_read_unlock(&reader);
+
+ if (ck_brlock_read_trylock(&lock, &reader, 1) == true) {
+ /* Read-side critical section. */
+ ck_brlock_read_unlock(&reader);
+ }
+ }
+
+ return;
+}
+
+static void
+writer(void)
+{
+
+ for (;;) {
+ ck_brlock_write_lock(&lock);
+ /* Write-side critical section. */
+ ck_brlock_write_unlock(&lock);
+
+ if (ck_brlock_write_trylock(&lock, 1) == true) {
+ /* Write-side critical section. */
+ ck_brlock_write_unlock(&lock);
+ }
+ }
+
+ return;
+}
+.Ed
+.Sh SEE ALSO
+.Xr ck_bytelock 3 ,
+.Xr ck_rwlock 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_cohort b/doc/ck_cohort
new file mode 100644
index 0000000..4905418
--- /dev/null
+++ b/doc/ck_cohort
@@ -0,0 +1,211 @@
+.\"
+.\" Copyright 2013 Brendon Scheinman.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd February 24, 2013
+.Dt CK_COHORT 3
+.Sh NAME
+.Nm ck_cohort
+.Nd generalized interface for lock cohorts
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_cohort.h
+.Fn CK_COHORT_PROTOTYPE "COHORT_NAME cohort_name" "LOCK_FXN global_lock_method" \
+"LOCK_FXN global_unlock_method" "BOOL_LOCK_FXN global_locked_method" \
+"LOCK_FXN local_lock_method" "LOCK_FXN local_unlock_method" \
+"BOOL_LOCK_FXN local_locked_method"
+.Fn CK_COHORT_TRYLOCK_PROTOTYPE "COHORT_NAME cohort_name" \
+"LOCK_FXN global_lock_method" "LOCK_FXN global_unlock_method" \
+"BOOL_LOCK_FXN global_locked_method" "BOOL_LOCK_FXN global_trylock_method" \
+"LOCK_FXN local_lock_method" "LOCK_FXN local_unlock_method" \
+"BOOL_LOCK_FXN local_locked_method" "BOOL_LOCK_FXN local_trylock_method"
+.Fn CK_COHORT_INSTANCE "COHORT_NAME cohort_name"
+.Fn CK_COHORT_INIT "COHORT_NAME cohort_name" "ck_cohort *cohort" \
+"void *global_lock" "void *local_lock" "unsigned int pass_limit"
+.Fn CK_COHORT_LOCK "COHORT_NAME cohort_name" "ck_cohort *cohort" \
+"void *global_context" "void *local_context"
+.Fn CK_COHORT_UNLOCK "COHORT_NAME cohort_name" "ck_cohort *cohort" \
+"void *global_context" "void *local_context"
+.Pp
+Where LOCK_FXN refers to a method with the signature
+.br
+void(void *lock, void *context)
+.br
+and BOOL_LOCK_FXN refers to a method with the signature
+.br
+bool(void *lock, void *context)
+.Pp
+The
+.Fa context
+argument in each signature is used to pass along any additional information that
+the lock might need for its lock, unlock and trylock methods. The values for this
+argument are provided to each call to
+.Xr CK_COHORT_LOCK 3 ,
+.Xr CK_COHORT_UNLOCK 3 ,
+.Xr CK_COHORT_LOCKED 3 ,
+and
+.Xr CK_COHORT_TRYLOCK 3 .
+.Sh DESCRIPTION
+ck_cohort.h provides an interface for defining lock cohorts with
+arbitrary lock types. Cohorts are a mechanism for coordinating
+threads on NUMA architectures in order to reduce the frequency
+with which a lock is passed between threads on different clusters.
+.Pp
+Before using a cohort, the user must define a cohort type using
+either the
+.Fn CK_COHORT_PROTOTYPE
+or the
+.Fn CK_COHORT_TRYLOCK_PROTOTYPE
+macros. These macros allow the user to specify the lock methods that
+they would like the cohort to use. See the
+.Xr CK_COHORT_PROTOTYPE 3
+and
+.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3
+man pages for more details.
+.Pp
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <stdlib.h>
+#include <pthread.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+#include <ck_cohort.h>
+#include <ck_spinlock.h>
+
+/*
+ * Create cohort methods with signatures that match
+ * the required signature
+ */
+static void
+ck_spinlock_lock_with_context(ck_spinlock_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_lock(lock);
+ return;
+}
+
+static void
+ck_spinlock_unlock_with_context(ck_spinlock_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_unlock(lock);
+ return;
+}
+
+static bool
+ck_spinlock_locked_with_context(ck_spinlock_t *lock, void *context)
+{
+ (void)context;
+ return ck_spinlock_locked(lock);
+}
+
+/*
+ * define a cohort type named "test_cohort" that will use
+ * the above methods for both its global and local locks
+ */
+CK_COHORT_PROTOTYPE(test_cohort,
+ ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context, ck_spinlock_locked_with_context,
+ ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context, ck_spinlock_locked_with_context)
+
+static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER;
+static unsigned int ready;
+
+static void *
+function(void *context)
+{
+ CK_COHORT_INSTANCE(test_cohort) *cohort = context;
+
+ while (ready == 0);
+
+ while (ready > 0) {
+ /*
+ * acquire the cohort lock before performing critical section.
+ * note that we pass NULL for both the global and local context
+ * arguments because neither the lock nor unlock functions
+ * will use them.
+ */
+ CK_COHORT_LOCK(test_cohort, cohort, NULL, NULL);
+
+ /* perform critical section */
+
+ /* relinquish cohort lock */
+ CK_COHORT_UNLOCK(test_cohort, cohort, NULL, NULL);
+ }
+
+ return NULL;
+}
+
+int
+main(void)
+{
+ unsigned int nthr = 4;
+ unsigned int n_cohorts = 2;
+ unsigned int i;
+
+ /* allocate 2 cohorts of the defined type */
+ CK_COHORT_INSTANCE(test_cohort) *cohorts =
+ calloc(n_cohorts, sizeof(CK_COHORT_INSTANCE(test_cohort)));
+
+ /* create local locks to use with each cohort */
+ ck_spinlock_t *local_locks =
+ calloc(n_cohorts, sizeof(ck_spinlock_t));
+
+ pthread_t *threads =
+ calloc(nthr, sizeof(pthread_t));
+
+ /* initialize each of the cohorts before using them */
+ for (i = 0 ; i < n_cohorts ; ++i) {
+ CK_COHORT_INIT(test_cohort, cohorts + i, &global_lock, local_locks + i,
+ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
+ }
+
+ /* start each thread and assign cohorts equally */
+ for (i = 0 ; i < nthr ; ++i) {
+ pthread_create(threads + i, NULL, function, cohorts + (i % n_cohorts));
+ }
+
+ ck_pr_store_uint(&ready, 1);
+ sleep(10);
+ ck_pr_store_uint(&ready, 0);
+
+ for (i = 0 ; i < nthr ; ++i) {
+ pthread_join(threads[i], NULL);
+ }
+
+ return 0;
+}
+.Ed
+.Sh SEE ALSO
+.Xr CK_COHORT_PROTOTYPE 3 ,
+.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3 ,
+.Xr CK_COHORT_INSTANCE 3 ,
+.Xr CK_COHORT_INITIALIZER 3 ,
+.Xr CK_COHORT_INIT 3 ,
+.Xr CK_COHORT_LOCK 3 ,
+.Xr CK_COHORT_UNLOCK 3 ,
+.Xr CK_COHORT_LOCKED 3 ,
+.Xr CK_COHORT_TRYLOCK 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_elide b/doc/ck_elide
new file mode 100644
index 0000000..c068567
--- /dev/null
+++ b/doc/ck_elide
@@ -0,0 +1,252 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd July 13, 2013
+.Dt CK_ELIDE 3
+.Sh NAME
+.Nm CK_ELIDE_PROTOTYPE ,
+.Nm CK_ELIDE_LOCK_ADAPTIVE ,
+.Nm CK_ELIDE_UNLOCK_ADAPTIVE ,
+.Nm CK_ELIDE_LOCK ,
+.Nm CK_ELIDE_UNLOCK ,
+.Nm CK_ELIDE_TRYLOCK_PROTOTYPE ,
+.Nm CK_ELIDE_TRYLOCK
+.Nd lock elision wrappers
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_elide.h
+.Pp
+.Dv ck_elide_stat_t stat = CK_ELIDE_STAT_INITIALIZER;
+.Pp
+.Ft void
+.Fn ck_elide_stat_init "ck_elide_stat_t *"
+.Pp
+.Dv struct ck_elide_config config = CK_ELIDE_CONFIG_DEFAULT_INITIALIZER;
+.Pp
+.Bd -literal -offset indent
+struct ck_elide_config {
+ unsigned short skip_busy;
+ short retry_busy;
+ unsigned short skip_other;
+ short retry_other;
+ unsigned short skip_conflict;
+ short retry_conflict;
+};
+.Ed
+.Pp
+.Fn CK_ELIDE_PROTOTYPE "NAME" "TYPE" "LOCK_PREDICATE" "LOCK_FUNCTION" "UNLOCK_PREDICATE" "UNLOCK_FUNCTION"
+.Fn CK_ELIDE_LOCK_ADAPTIVE "NAME" "ck_elide_stat_t *" "struct ck_elide_config *" "TYPE *"
+.Fn CK_ELIDE_UNLOCK_ADAPTIVE "NAME" "ck_elide_stat_t *" "TYPE *"
+.Fn CK_ELIDE_LOCK "NAME" "TYPE *"
+.Fn CK_ELIDE_UNLOCK "NAME" "TYPE *"
+.Fn CK_ELIDE_TRYLOCK_PROTOTYPE "NAME" "TYPE" "LOCK_PREDICATE" "TRYLOCK_FUNCTION"
+.Sh DESCRIPTION
+These macros implement lock elision wrappers for a user-specified single-argument
+lock interface. The wrappers will attempt to elide lock acquisition, allowing
+concurrent execution of critical sections that do not issue conflicting memory
+operations. If any thread has successfully elided a lock acquisition, a
+conflicting memory operation will roll back any side-effects of the critical
+section and force the threads involved to retry the lock acquisition through
+the regular, non-elided path.
+.Pp
+.Fn CK_ELIDE_LOCK ,
+.Fn CK_ELIDE_UNLOCK ,
+.Fn CK_ELIDE_LOCK_ADAPTIVE ,
+and
+.Fn CK_ELIDE_UNLOCK_ADAPTIVE
+macros require
+a previous
+.Fn CK_ELIDE_PROTOTYPE
+with the same
+.Fa NAME .
+Elision is attempted if the
+.Fa LOCK_PREDICATE
+function returns false. If
+.Fa LOCK_PREDICATE
+returns true then elision is aborted and
+.Fa LOCK_FUNCTION
+is executed instead. If any threads are in an elided critical section,
+.Fa LOCK_FUNCTION
+must force them to roll back through a conflicting memory operation.
+The
+.Fa UNLOCK_PREDICATE
+function must return true if the lock is acquired by the caller, meaning
+that the lock was not successfully elided. If
+.Fa UNLOCK_PREDICATE
+returns true, then the
+.Fa UNLOCK_FUNCTION
+is executed. If RTM is unsupported (no CK_F_PR_RTM macro) then
+.Fn CK_ELIDE_LOCK
+and
+.Fn CK_ELIDE_LOCK_ADAPTIVE
+will immediately call
+.Fn LOCK_FUNCTION .
+.Fn CK_ELIDE_UNLOCK
+and
+.Fn CK_ELIDE_UNLOCK_ADAPTIVE
+will immediately call
+.Fn UNLOCK_FUNCTION .
+.Pp
+.Fn CK_ELIDE_TRYLOCK
+requires a previous
+.Fn CK_ELIDE_TRYLOCK_PROTOTYPE
+with the same name.
+Elision is attempted if the
+.Fa LOCK_PREDICATE
+function returns false. If
+.Fa LOCK_PREDICATE
+returns true or if elision fails then the
+operation is aborted. If RTM is unsupported
+(no CK_F_PR_RTM macro) then
+.Fn CK_ELIDE_TRYLOCK
+will immediately call
+.Fn TRYLOCK_FUNCTION .
+.Pp
+.Fn CK_ELIDE_LOCK_ADAPTIVE
+and
+.Fn CK_ELIDE_UNLOCK_ADAPTIVE
+will adapt the elision behavior associated with lock operations
+according to the run-time behavior of the program. This behavior
+is defined by the ck_elide_config structure pointer passed to
+.Fn CK_ELIDE_LOCK_ADAPTIVE .
+A thread-local ck_elide_stat structure must be passed to both
+.Fn CK_ELIDE_LOCK_ADAPTIVE
+and
+.Fn CK_ELIDE_UNLOCK_ADAPTIVE .
+This structure is expected to be unique for different workloads,
+may not be re-used in recursive acquisitions and must match the
+lifetime of the lock it is associated with. It is safe to mix
+adaptive calls with best-effort calls.
+.Pp
+Both ck_spinlock.h and ck_rwlock.h define ck_elide wrappers under
+the ck_spinlock and ck_rwlock namespace, respectively.
+.Sh EXAMPLES
+This example utilizes built-in lock elision facilities in ck_rwlock and ck_spinlock.
+.Bd -literal -offset indent
+#include <ck_rwlock.h>
+#include <ck_spinlock.h>
+
+static ck_rwlock_t rw = CK_RWLOCK_INITIALIZER;
+static struct ck_elide_config rw_config =
+ CK_ELIDE_CONFIG_DEFAULT_INITIALIZER;
+static __thread ck_elide_stat_t rw_stat =
+ CK_ELIDE_STAT_INITIALIZER;
+
+static ck_spinlock_t spinlock = CK_SPINLOCK_INITIALIZER;
+static struct ck_elide_config spinlock_config =
+ CK_ELIDE_CONFIG_DEFAULT_INITIALIZER;
+static __thread ck_elide_stat_t spinlock_stat =
+ CK_ELIDE_STAT_INITIALIZER;
+
+void
+function(void)
+{
+
+ /* Lock-unlock write-side lock in weak best-effort manner. */
+ CK_ELIDE_LOCK(ck_rwlock_write, &rw);
+ CK_ELIDE_UNLOCK(ck_rwlock_write, &rw);
+
+ /* Attempt to acquire the write-side lock. */
+ if (CK_ELIDE_TRYLOCK(ck_rwlock_write, &rw) == true)
+ CK_ELIDE_UNLOCK(ck_rwlock_write, &rw);
+
+ /* Lock-unlock read-side lock in weak best-effort manner. */
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw);
+
+ /* Attempt to acquire the read-side lock. */
+ if (CK_ELIDE_TRYLOCK(ck_rwlock_read, &rw) == true)
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw);
+
+ /* Lock-unlock write-side lock in an adaptive manner. */
+ CK_ELIDE_LOCK_ADAPTIVE(ck_rwlock_write, &rw_stat,
+ &rw_config, &rw);
+ CK_ELIDE_UNLOCK_ADAPTIVE(ck_rwlock_write, &rw_stat,
+ &rw_config, &rw);
+
+ /* Lock-unlock read-side lock in an adaptive manner. */
+ CK_ELIDE_LOCK_ADAPTIVE(ck_rwlock_read, &rw_stat,
+ &rw_config, &rw);
+ CK_ELIDE_UNLOCK_ADAPTIVE(ck_rwlock_read, &rw_stat,
+ &rw_config, &rw);
+
+ /* Lock-unlock spinlock in weak best-effort manner. */
+ CK_ELIDE_LOCK(ck_spinlock, &spinlock);
+ CK_ELIDE_UNLOCK(ck_spinlock, &spinlock);
+
+ /* Attempt to acquire the lock. */
+	if (CK_ELIDE_TRYLOCK(ck_spinlock, &spinlock) == true)
+ CK_ELIDE_UNLOCK(ck_spinlock, &spinlock);
+
+ /* Lock-unlock spinlock in an adaptive manner. */
+ CK_ELIDE_LOCK_ADAPTIVE(ck_spinlock, &spinlock_stat,
+ &spinlock_config, &spinlock);
+ CK_ELIDE_UNLOCK_ADAPTIVE(ck_spinlock, &spinlock_stat,
+ &spinlock_config, &spinlock);
+}
+.Ed
+.Pp
+In this example, an elision wrapper is generated for user-defined locking
+functions.
+.Bd -literal -offset indent
+/* Assume lock_t has been previously defined. */
+#include <ck_elide.h>
+
+/*
+ * This function returns true if the lock is unavailable at the time
+ * it was called or false if the lock is available.
+ */
+bool is_locked(lock_t *);
+
+/*
+ * This function acquires the supplied lock.
+ */
+void lock(lock_t *);
+
+/*
+ * This function releases the lock.
+ */
+void unlock(lock_t *);
+
+CK_ELIDE_PROTOTYPE(my_lock, lock_t, is_locked, lock, is_locked, unlock)
+
+static lock_t lock;
+
+void
+function(void)
+{
+
+ CK_ELIDE_LOCK(my_lock, &lock);
+ CK_ELIDE_UNLOCK(my_lock, &lock);
+}
+.Ed
+.Sh SEE ALSO
+.Xr ck_rwlock 3 ,
+.Xr ck_spinlock 3
+.Pp
+Ravi Rajwar and James R. Goodman. 2001. Speculative lock elision: enabling highly concurrent multithreaded execution. In Proceedings of the 34th annual ACM/IEEE international symposium on Microarchitecture (MICRO 34). IEEE Computer Society, Washington, DC, USA, 294-305.
+.Pp
+Additional information available at http://en.wikipedia.org/wiki/Transactional_Synchronization_Extensions and http://concurrencykit.org/
diff --git a/doc/ck_epoch_barrier b/doc/ck_epoch_barrier
new file mode 100644
index 0000000..a586145
--- /dev/null
+++ b/doc/ck_epoch_barrier
@@ -0,0 +1,120 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 2, 2012
+.Dt CK_EPOCH_BARRIER 3
+.Sh NAME
+.Nm ck_epoch_barrier
+.Nd block until a grace period and all callbacks have been dispatched
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_epoch.h
+.Ft void
+.Fn ck_epoch_barrier "ck_epoch_record_t *record"
+.Sh DESCRIPTION
+The
+.Fn ck_epoch_barrier 3
+function will block the caller until a grace period has been
+detected, according to the semantics of epoch reclamation.
+Any objects requiring safe memory reclamation which are logically
+deleted are safe for physical deletion following a call to
+.Fn ck_epoch_barrier 3 .
+This function will also dispatch all callbacks that were previously
+scheduled via
+.Fn ck_epoch_call 3
+on the epoch associated with
+.Fa record .
+.Sh EXAMPLE
+.Bd -literal -offset indent
+
+#include <ck_epoch.h>
+#include <ck_stack.h>
+#include <stdlib.h>
+
+/*
+ * epoch was previously initialized with ck_epoch_init.
+ * stack was previously initialized with ck_stack_init.
+ */
+ck_epoch_t *epoch;
+ck_stack_t *stack;
+
+void
+function(void)
+{
+ ck_epoch_record_t *record;
+ ck_stack_entry_t *s;
+
+ record = malloc(sizeof *record);
+	ck_epoch_register(epoch, record);
+
+ /*
+ * We are using an epoch section here to guarantee no
+ * nodes in the stack are deleted while we are dereferencing
+ * them. This is needed here because there are multiple writers.
+ * If only one thread were popping from this stack, then there
+ * would be no need for ck_epoch_begin/ck_epoch_end.
+ */
+	ck_epoch_begin(record, NULL);
+
+ /* Logically delete an object. */
+ s = ck_stack_pop_upmc(stack);
+
+	ck_epoch_end(record, NULL);
+
+ /*
+ * Wait until no threads could possibly have a reference to the
+ * object we just popped (assume all threads are simply executing
+ * ck_stack_pop_upmc).
+ */
+ ck_epoch_barrier(record);
+
+ /* It is now safe to physically delete the object. */
+ free(s);
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+This function has no return value.
+.Sh ERRORS
+Behavior is undefined if the object pointed to by
+.Fa record
+is not a valid epoch record or has not been previously registered via
+.Fn ck_epoch_register 3 .
+.Sh SEE ALSO
+.Xr ck_epoch_init 3 ,
+.Xr ck_epoch_register 3 ,
+.Xr ck_epoch_unregister 3 ,
+.Xr ck_epoch_recycle 3 ,
+.Xr ck_epoch_poll 3 ,
+.Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
+.Xr ck_epoch_call 3 ,
+.Xr ck_epoch_begin 3 ,
+.Xr ck_epoch_end 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_epoch_begin b/doc/ck_epoch_begin
new file mode 100644
index 0000000..a44ecf8
--- /dev/null
+++ b/doc/ck_epoch_begin
@@ -0,0 +1,73 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 2, 2012
+.Dt CK_EPOCH_BEGIN 3
+.Sh NAME
+.Nm ck_epoch_begin
+.Nd begin epoch-protected segment of execution
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_epoch.h
+.Ft void
+.Fn ck_epoch_begin "ck_epoch_record_t *record" "ck_epoch_section_t *section"
+.Sh DESCRIPTION
+The
+.Fn ck_epoch_begin 3
+function will mark the beginning of an epoch-protected code section.
+An epoch-protected code section is delimited by a call to the
+.Fn ck_epoch_end 3
+function. Though recursion is allowed for epoch-protected sections,
+recursive calls will be associated with the
+.Fn ck_epoch_begin 3
+that is at the top of the call stack. If a section is passed, then
+recursion on a record will cause the epoch to be refreshed on entry
+of every protected section.
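+.Sh EXAMPLE
+The following sketch delimits a read-side section; it assumes that
+.Fa record
+was previously registered via
+.Fn ck_epoch_register 3 .
+.Bd -literal -offset indent
+#include <ck_epoch.h>
+
+void
+reader(ck_epoch_record_t *record)
+{
+	ck_epoch_section_t section;
+
+	/*
+	 * Objects logically deleted by other threads are guaranteed
+	 * not to be physically reclaimed while this section is active.
+	 * Passing a section object causes recursive sections on the
+	 * same record to refresh the observed epoch; NULL may be
+	 * passed instead if that behavior is not required.
+	 */
+	ck_epoch_begin(record, &section);
+
+	/* Read-side critical section goes here. */
+
+	ck_epoch_end(record, &section);
+	return;
+}
+.Ed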
+.Sh RETURN VALUES
+This function has no return value.
+.Sh ERRORS
+The epoch object associated with
+.Fa record
+must have been previously initialized via
+.Fn ck_epoch_init 3 .
+The object pointed to by
+.Fa record
+must have been previously registered via
+.Fn ck_epoch_register 3 .
+.Sh SEE ALSO
+.Xr ck_epoch_init 3 ,
+.Xr ck_epoch_register 3 ,
+.Xr ck_epoch_unregister 3 ,
+.Xr ck_epoch_recycle 3 ,
+.Xr ck_epoch_poll 3 ,
+.Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
+.Xr ck_epoch_barrier 3 ,
+.Xr ck_epoch_call 3 ,
+.Xr ck_epoch_end 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_epoch_call b/doc/ck_epoch_call
new file mode 100644
index 0000000..7390642
--- /dev/null
+++ b/doc/ck_epoch_call
@@ -0,0 +1,136 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 2, 2012
+.Dt CK_EPOCH_CALL 3
+.Sh NAME
+.Nm ck_epoch_call
+.Nd defer function execution until a grace period
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_epoch.h
+typedef struct ck_epoch_entry ck_epoch_entry_t;
+.br
+typedef void ck_epoch_cb_t(ck_epoch_entry_t *);
+.Ft void
+.Fn ck_epoch_call "ck_epoch_record_t *record" "ck_epoch_entry_t *entry" "ck_epoch_cb_t *function"
+.Sh DESCRIPTION
+The
+.Fn ck_epoch_call 3
+function will defer the execution of the function pointed to by
+.Fa function
+until a grace period has been detected in the epoch associated with
+.Fa record .
+The function will be provided
+the pointer specified by
+.Fa entry .
+The function will execute at some time in the future via calls to
+.Fn ck_epoch_reclaim 3 ,
+.Fn ck_epoch_barrier 3
+or
+.Fn ck_epoch_poll 3 .
+.Sh EXAMPLE
+.Bd -literal -offset indent
+
+#include <ck_epoch.h>
+#include <ck_stack.h>
+#include <stdlib.h>
+
+/*
+ * epoch was previously initialized with ck_epoch_init.
+ */
+ck_epoch_t *epoch;
+
+struct object {
+ int value;
+ ck_epoch_entry_t epoch_entry;
+};
+static struct object *global;
+
+CK_EPOCH_CONTAINER(struct object, epoch_entry, object_container)
+
+void
+destroy_object(ck_epoch_entry_t *e)
+{
+ struct object *o = object_container(e);
+
+ free(o);
+ return;
+}
+
+void
+function(void)
+{
+ ck_epoch_record_t *record;
+ struct object *n;
+
+ record = malloc(sizeof *record);
+	ck_epoch_register(epoch, record);
+
+ n = malloc(sizeof *n);
+ if (n == NULL)
+ return;
+
+ n->value = 1;
+
+ /*
+ * We are using an epoch section here because there are multiple
+ * writers. It is also an option to use other forms of blocking
+ * write-side synchronization such as mutexes.
+ */
+	ck_epoch_begin(record, NULL);
+	n = ck_pr_fas_ptr(&global, n);
+	ck_epoch_end(record, NULL);
+
+ /* Defer destruction of previous object. */
+ ck_epoch_call(record, &n->epoch_entry, destroy_object);
+
+ /* Poll epoch sub-system in non-blocking manner. */
+ ck_epoch_poll(record);
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+This function has no return value.
+.Sh ERRORS
+The object pointed to by
+.Fa record
+must have been previously registered via
+.Fn ck_epoch_register 3 .
+.Sh SEE ALSO
+.Xr ck_epoch_init 3 ,
+.Xr ck_epoch_register 3 ,
+.Xr ck_epoch_unregister 3 ,
+.Xr ck_epoch_recycle 3 ,
+.Xr ck_epoch_poll 3 ,
+.Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
+.Xr ck_epoch_barrier 3 ,
+.Xr ck_epoch_begin 3 ,
+.Xr ck_epoch_end 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_epoch_end b/doc/ck_epoch_end
new file mode 100644
index 0000000..a36afbd
--- /dev/null
+++ b/doc/ck_epoch_end
@@ -0,0 +1,64 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 2, 2012
+.Dt CK_EPOCH_END 3
+.Sh NAME
+.Nm ck_epoch_end
+.Nd end epoch-protected segment of execution
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_epoch.h
+.Ft void
+.Fn ck_epoch_end "ck_epoch_record_t *record" "ck_epoch_section_t *section"
+.Sh DESCRIPTION
+The
+.Fn ck_epoch_end 3
+function will mark the end of an epoch-protected code section.
+.Fa section
+must point to a section object initialized previously with
+.Fn ck_epoch_begin 3 .
+.Sh RETURN VALUES
+This function has no return value.
+.Sh ERRORS
+The object pointed to by
+.Fa record
+must have been previously registered via
+.Fn ck_epoch_register 3 .
+.Sh SEE ALSO
+.Xr ck_epoch_init 3 ,
+.Xr ck_epoch_register 3 ,
+.Xr ck_epoch_unregister 3 ,
+.Xr ck_epoch_recycle 3 ,
+.Xr ck_epoch_poll 3 ,
+.Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
+.Xr ck_epoch_barrier 3 ,
+.Xr ck_epoch_call 3 ,
+.Xr ck_epoch_begin 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_epoch_init b/doc/ck_epoch_init
new file mode 100644
index 0000000..51a3e2a
--- /dev/null
+++ b/doc/ck_epoch_init
@@ -0,0 +1,69 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 2, 2012
+.Dt CK_EPOCH_INIT 3
+.Sh NAME
+.Nm ck_epoch_init
+.Nd initialize epoch reclamation object
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_epoch.h
+.Ft void
+.Fn ck_epoch_init "ck_epoch_t *epoch"
+.Sh DESCRIPTION
+The
+.Fn ck_epoch_init
+function initializes the epoch object pointed to by the
+.Fa epoch
+pointer.
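+.Sh EXAMPLE
+A minimal sketch of initializing a global epoch object:
+.Bd -literal -offset indent
+#include <ck_epoch.h>
+
+static ck_epoch_t epoch;
+
+void
+setup(void)
+{
+
+	/*
+	 * Initialize the global epoch object once, before any records
+	 * are registered against it with ck_epoch_register.
+	 */
+	ck_epoch_init(&epoch);
+	return;
+}
+.Ed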
+.Sh RETURN VALUES
+This function has no return value.
+.Sh ERRORS
+.Bl -tag -width Er
+.Pp
+The behavior of
+.Fn ck_epoch_init
+is undefined if
+.Fa epoch
+is not a pointer to a
+.Tn ck_epoch_t
+object.
+.El
+.Sh SEE ALSO
+.Xr ck_epoch_register 3 ,
+.Xr ck_epoch_unregister 3 ,
+.Xr ck_epoch_recycle 3 ,
+.Xr ck_epoch_poll 3 ,
+.Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
+.Xr ck_epoch_barrier 3 ,
+.Xr ck_epoch_call 3 ,
+.Xr ck_epoch_begin 3 ,
+.Xr ck_epoch_end 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_epoch_poll b/doc/ck_epoch_poll
new file mode 100644
index 0000000..68c4a4e
--- /dev/null
+++ b/doc/ck_epoch_poll
@@ -0,0 +1,71 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 2, 2012
+.Dt CK_EPOCH_POLL 3
+.Sh NAME
+.Nm ck_epoch_poll
+.Nd non-blocking poll of epoch object for dispatch cycles
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_epoch.h
+.Ft bool
+.Fn ck_epoch_poll "ck_epoch_record_t *record"
+.Sh DESCRIPTION
+The
+.Fn ck_epoch_poll 3
+function will attempt to dispatch any functions that were previously deferred via
+.Fn ck_epoch_call 3
+on the epoch associated with
+.Fa record ,
+if it is deemed safe to do so. This function is meant to be used in cases where
+the epoch reclamation cost must be amortized over time in a manner that does
+not affect caller progress.
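+.Sh EXAMPLE
+A short sketch of amortizing reclamation cost; it assumes that
+.Fa record
+was previously registered via
+.Fn ck_epoch_register 3
+and that callbacks were deferred with
+.Fn ck_epoch_call 3 .
+.Bd -literal -offset indent
+#include <ck_epoch.h>
+#include <stdbool.h>
+
+bool
+maintenance(ck_epoch_record_t *record)
+{
+
+	/*
+	 * Attempt to dispatch deferred callbacks without blocking.
+	 * A false return value means that not every thread has
+	 * observed the latest epoch generation yet; it is not an
+	 * error.
+	 */
+	return ck_epoch_poll(record);
+}
+.Ed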
+.Sh RETURN VALUES
+This function will return true if at least one function was dispatched.
+This function will return false if it has determined that not all threads have
+observed the latest generation of epoch-protected objects. Neither value
+indicates an error.
+.Sh ERRORS
+Behavior is undefined if the object pointed to by
+.Fa record
+has not been previously registered via
+.Fn ck_epoch_register 3 .
+.Sh SEE ALSO
+.Xr ck_epoch_init 3 ,
+.Xr ck_epoch_register 3 ,
+.Xr ck_epoch_unregister 3 ,
+.Xr ck_epoch_recycle 3 ,
+.Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
+.Xr ck_epoch_barrier 3 ,
+.Xr ck_epoch_call 3 ,
+.Xr ck_epoch_begin 3 ,
+.Xr ck_epoch_end 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_epoch_reclaim b/doc/ck_epoch_reclaim
new file mode 100644
index 0000000..ffe3bac
--- /dev/null
+++ b/doc/ck_epoch_reclaim
@@ -0,0 +1,92 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd May 2, 2013
+.Dt CK_EPOCH_RECLAIM 3
+.Sh NAME
+.Nm ck_epoch_reclaim
+.Nd immediately execute all deferred callbacks
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_epoch.h
+.Ft void
+.Fn ck_epoch_reclaim "ck_epoch_record_t *record"
+.Sh DESCRIPTION
+The
+.Fn ck_epoch_reclaim 3
+function will unconditionally execute all callbacks
+that have been deferred with
+.Fn ck_epoch_call 3 .
+.Sh EXAMPLE
+.Bd -literal -offset indent
+
+#include <ck_epoch.h>
+#include <ck_stack.h>
+#include <stdlib.h>
+
+/*
+ * epoch was previously initialized with ck_epoch_init.
+ */
+ck_epoch_t *epoch;
+
+void
+function(void)
+{
+ ck_epoch_record_t *record;
+
+	/* record is assumed to have been registered via ck_epoch_register. */
+	logically_delete(object);
+	ck_epoch_call(record, &object->epoch_entry, destructor);
+
+ /*
+ * Wait until no threads could possibly have a reference to the
+ * object we just deleted.
+ */
+	ck_epoch_synchronize(record);
+
+ /*
+ * Execute all deferred callbacks.
+ */
+ ck_epoch_reclaim(record);
+
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_epoch_init 3 ,
+.Xr ck_epoch_register 3 ,
+.Xr ck_epoch_unregister 3 ,
+.Xr ck_epoch_recycle 3 ,
+.Xr ck_epoch_poll 3 ,
+.Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_barrier 3 ,
+.Xr ck_epoch_call 3 ,
+.Xr ck_epoch_begin 3 ,
+.Xr ck_epoch_end 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_epoch_recycle b/doc/ck_epoch_recycle
new file mode 100644
index 0000000..530079c
--- /dev/null
+++ b/doc/ck_epoch_recycle
@@ -0,0 +1,102 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 2, 2012
+.Dt CK_EPOCH_RECYCLE 3
+.Sh NAME
+.Nm ck_epoch_recycle
+.Nd return an epoch record that may be used by the caller
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_epoch.h
+.Ft ck_epoch_record_t *
+.Fn ck_epoch_recycle "ck_epoch_t *epoch"
+.Sh DESCRIPTION
+The
+.Fn ck_epoch_recycle 3
+function attempts to return an unused epoch record object for use by
+the caller. These epoch records were associated with previous calls
+to the
+.Fn ck_epoch_unregister 3
+function.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <ck_epoch.h>
+#include <stdlib.h>
+
+/*
+ * epoch was previously initialized with ck_epoch_init.
+ */
+ck_epoch_t *epoch;
+
+void
+function(void)
+{
+ ck_epoch_record_t *record;
+
+	record = ck_epoch_recycle(epoch);
+ if (record == NULL) {
+ record = malloc(sizeof *record);
+ if (record == NULL)
+ return;
+
+		ck_epoch_register(epoch, record);
+ }
+
+ /*
+ * After we are done, we will unregister the record so it
+ * can be used by other new participants in the epoch system
+ * provided by the object pointed to by "epoch".
+ */
+	ck_epoch_unregister(record);
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+This function returns a pointer to a
+.Dv ck_epoch_record_t
+object. If no unused record was found to be associated with the
+object pointed to by
+.Fa epoch ,
+then the function will return NULL.
+.Sh ERRORS
+Behavior is undefined if the object pointed to by
+.Fa epoch
+is not a valid epoch object.
+.Sh SEE ALSO
+.Xr ck_epoch_init 3 ,
+.Xr ck_epoch_register 3 ,
+.Xr ck_epoch_unregister 3 ,
+.Xr ck_epoch_poll 3 ,
+.Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
+.Xr ck_epoch_barrier 3 ,
+.Xr ck_epoch_call 3 ,
+.Xr ck_epoch_begin 3 ,
+.Xr ck_epoch_end 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_epoch_register b/doc/ck_epoch_register
new file mode 100644
index 0000000..85ea461
--- /dev/null
+++ b/doc/ck_epoch_register
@@ -0,0 +1,67 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 2, 2012
+.Dt CK_EPOCH_REGISTER 3
+.Sh NAME
+.Nm ck_epoch_register
+.Nd register a thread for epoch reclamation
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_epoch.h
+.Ft void
+.Fn ck_epoch_register "ck_epoch_t *epoch" "ck_epoch_record_t *record"
+.Sh DESCRIPTION
+The
+.Fn ck_epoch_register 3
+function associates a record object specified by the
+.Fa record
+pointer with the epoch object pointed to by
+.Fa epoch .
+Any thread or processor that will require safe memory reclamation
+guarantees must register a unique record object. After registration, the
+object pointed to by the
+.Fa record
+argument will have its lifetime managed by the underlying epoch sub-system.
+The record object must not be destroyed after it is associated with a
+.Fn ck_epoch_register 3
+call.
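+.Sh EXAMPLE
+A minimal sketch of per-thread registration; the epoch object is assumed to
+have been previously initialized via
+.Fn ck_epoch_init 3 .
+.Bd -literal -offset indent
+#include <ck_epoch.h>
+#include <stdlib.h>
+
+static ck_epoch_t epoch;
+
+ck_epoch_record_t *
+register_thread(void)
+{
+	ck_epoch_record_t *record;
+
+	record = malloc(sizeof *record);
+	if (record == NULL)
+		return NULL;
+
+	/*
+	 * Associate this thread's record with the global epoch
+	 * object. The record must not be destroyed afterwards.
+	 */
+	ck_epoch_register(&epoch, record);
+	return record;
+}
+.Ed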
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_epoch_init 3 ,
+.Xr ck_epoch_unregister 3 ,
+.Xr ck_epoch_recycle 3 ,
+.Xr ck_epoch_poll 3 ,
+.Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
+.Xr ck_epoch_barrier 3 ,
+.Xr ck_epoch_call 3 ,
+.Xr ck_epoch_begin 3 ,
+.Xr ck_epoch_end 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_epoch_synchronize b/doc/ck_epoch_synchronize
new file mode 100644
index 0000000..6c9a698
--- /dev/null
+++ b/doc/ck_epoch_synchronize
@@ -0,0 +1,119 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 2, 2012
+.Dt CK_EPOCH_SYNCHRONIZE 3
+.Sh NAME
+.Nm ck_epoch_synchronize
+.Nd block until a grace period has been detected
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_epoch.h
+.Ft void
+.Fn ck_epoch_synchronize "ck_epoch_record_t *record"
+.Sh DESCRIPTION
+The
+.Fn ck_epoch_synchronize 3
+function will block the caller until a grace period has been
+detected, according to the semantics of epoch reclamation.
+Any objects requiring safe memory reclamation which are logically
+deleted are safe for physical deletion following a call to
+.Fn ck_epoch_synchronize 3 .
+If you require that all callbacks be dispatched, then it is suggested
+that you use
+.Fn ck_epoch_barrier 3
+instead or follow a call of
+.Fn ck_epoch_synchronize 3
+with
+.Fn ck_epoch_reclaim 3 .
+.Sh EXAMPLE
+.Bd -literal -offset indent
+
+#include <ck_epoch.h>
+#include <ck_stack.h>
+#include <stdlib.h>
+
+/*
+ * epoch was previously initialized with ck_epoch_init.
+ * stack was previously initialized with ck_stack_init.
+ */
+ck_epoch_t *epoch;
+ck_stack_t *stack;
+
+void
+function(void)
+{
+ ck_epoch_record_t *record;
+ ck_stack_entry_t *s;
+
+ record = malloc(sizeof *record);
+	ck_epoch_register(epoch, record);
+
+ /*
+ * We are using an epoch section here to guarantee no
+ * nodes in the stack are deleted while we are dereferencing
+ * them. This is needed here because there are multiple writers.
+ * If only one thread were popping from this stack, then there
+ * would be no need for ck_epoch_begin/ck_epoch_end.
+ */
+	ck_epoch_begin(record, NULL);
+
+ /* Logically delete an object. */
+ s = ck_stack_pop_upmc(stack);
+
+	ck_epoch_end(record, NULL);
+
+ /*
+ * Wait until no threads could possibly have a reference to the
+ * object we just popped (assume all threads are simply executing
+ * ck_stack_pop_upmc).
+ */
+ ck_epoch_synchronize(record);
+
+ /* It is now safe to physically delete the object. */
+ free(s);
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+This function has no return value.
+.Sh ERRORS
+The object pointed to by
+.Fa record
+must have been previously registered via
+.Fn ck_epoch_register 3 .
+.Sh SEE ALSO
+.Xr ck_epoch_init 3 ,
+.Xr ck_epoch_register 3 ,
+.Xr ck_epoch_unregister 3 ,
+.Xr ck_epoch_recycle 3 ,
+.Xr ck_epoch_poll 3 ,
+.Xr ck_epoch_reclaim 3 ,
+.Xr ck_epoch_barrier 3 ,
+.Xr ck_epoch_call 3 ,
+.Xr ck_epoch_begin 3 ,
+.Xr ck_epoch_end 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_epoch_unregister b/doc/ck_epoch_unregister
new file mode 100644
index 0000000..3be537f
--- /dev/null
+++ b/doc/ck_epoch_unregister
@@ -0,0 +1,65 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 2, 2012
+.Dt CK_EPOCH_UNREGISTER 3
+.Sh NAME
+.Nm ck_epoch_unregister
+.Nd unregister a thread for epoch reclamation
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_epoch.h
+.Ft void
+.Fn ck_epoch_unregister "ck_epoch_record_t *record"
+.Sh DESCRIPTION
+The
+.Fn ck_epoch_unregister 3
+function allows the record pointed to by the
+.Fa record
+pointer to be used as a return value by the
+.Fn ck_epoch_recycle 3
+function. This record can now be used by another thread
+of execution. Behavior is undefined if the object pointed to by
+.Fa record
+is modified in any way, even after a call is made to the
+.Fn ck_epoch_unregister 3
+function.
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_epoch_init 3 ,
+.Xr ck_epoch_register 3 ,
+.Xr ck_epoch_recycle 3 ,
+.Xr ck_epoch_poll 3 ,
+.Xr ck_epoch_synchronize 3 ,
+.Xr ck_epoch_reclaim 3 ,
+.Xr ck_epoch_barrier 3 ,
+.Xr ck_epoch_call 3 ,
+.Xr ck_epoch_begin 3 ,
+.Xr ck_epoch_end 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_apply b/doc/ck_hs_apply
new file mode 100644
index 0000000..5664f73
--- /dev/null
+++ b/doc/ck_hs_apply
@@ -0,0 +1,86 @@
+.\"
+.\" Copyright 2014 Samy Al Bahra.
+.\" Copyright 2014 Backtrace I/O, Inc.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 1, 2014
+.Dt CK_HS_APPLY 3
+.Sh NAME
+.Nm ck_hs_apply
+.Nd apply a function to hash set value
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft void *
+.Fn ck_hs_apply_fn_t "void *key" "void *closure"
+.Ft bool
+.Fn ck_hs_apply "ck_hs_t *hs" "unsigned long hash" "const void *key" "ck_hs_apply_fn_t *function" "void *argument"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_apply 3
+function will look up the hash set slot associated with
+.Fa key
+and pass it to the function pointed to by
+.Fa function
+for further action. This callback may remove or replace
+the value by respectively returning NULL or a pointer to
+another object with an identical key. The first argument
+passed to
+.Fa function
+is a pointer to the object found in the hash set and
+the second argument is the
+.Fa argument
+pointer passed to
+.Fn ck_hs_apply 3 .
+If the pointer returned by
+.Fa function
+is equivalent to the first argument then no modification
+is made to the hash set.
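+.Sh EXAMPLE
+The following sketch replaces a stored object with an updated object sharing
+the same key. The hash set, the
+.Fa hash
+value and the struct entry type are assumptions made for illustration only.
+.Bd -literal -offset indent
+#include <ck_hs.h>
+#include <stdbool.h>
+
+struct entry {
+	const char *key;
+	unsigned long value;
+};
+
+static void *
+replace_entry(void *object, void *closure)
+{
+	struct entry *update = closure;
+
+	(void)object;
+
+	/*
+	 * Returning a pointer to an object with an identical key
+	 * replaces the stored value, returning NULL would remove it,
+	 * and returning the first argument unchanged would leave the
+	 * hash set unmodified.
+	 */
+	return update;
+}
+
+bool
+example(ck_hs_t *hs, unsigned long hash, struct entry *update)
+{
+
+	return ck_hs_apply(hs, hash, update->key, replace_entry, update);
+}
+.Ed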
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_hs_apply 3
+returns true and otherwise returns false on failure.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_count b/doc/ck_hs_count
new file mode 100644
index 0000000..c12d8f7
--- /dev/null
+++ b/doc/ck_hs_count
@@ -0,0 +1,70 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_HS_COUNT 3
+.Sh NAME
+.Nm ck_hs_count
+.Nd returns number of entries in hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft unsigned long
+.Fn ck_hs_count "ck_hs_t *hs"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_count 3
+function returns the number of keys currently
+stored in
+.Fa hs .
+.Sh ERRORS
+Behavior is undefined if
+.Fa hs
+is uninitialized. Behavior is
+undefined if this function is called by a non-writer
+thread.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_destroy b/doc/ck_hs_destroy
new file mode 100644
index 0000000..952502b
--- /dev/null
+++ b/doc/ck_hs_destroy
@@ -0,0 +1,77 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_HS_DESTROY 3
+.Sh NAME
+.Nm ck_hs_destroy
+.Nd destroy hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft void
+.Fn ck_hs_destroy "ck_hs_t *hs"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_destroy 3
+function will request that the underlying allocator, as specified by the
+.Xr ck_hs_init 3
+function, immediately destroy the object pointed to by the
+.Fa hs
+argument.
+The user must guarantee that no threads are accessing the object pointed to
+by
+.Fa hs
+when
+.Fn ck_hs_destroy 3
+is called.
+.Sh RETURN VALUES
+.Fn ck_hs_destroy 3
+has no return value.
+.Sh ERRORS
+This function is guaranteed not to fail.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_fas b/doc/ck_hs_fas
new file mode 100644
index 0000000..69760b5
--- /dev/null
+++ b/doc/ck_hs_fas
@@ -0,0 +1,98 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd June 20, 2013
+.Dt CK_HS_FAS 3
+.Sh NAME
+.Nm ck_hs_fas
+.Nd fetch and store key in hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft bool
+.Fn ck_hs_fas "ck_hs_t *hs" "unsigned long hash" "const void *key" "void **previous"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_fas 3
+function will fetch and store the key specified by the
+.Fa key
+argument in the hash set pointed to by the
+.Fa hs
+argument. The key specified by
+.Fa key
+is expected to have the hash value specified by the
+.Fa hash
+argument (which was previously generated using the
+.Xr CK_HS_HASH 3
+macro).
+.Pp
+If the call to
+.Fn ck_hs_fas 3
+was successful then the key specified by
+.Fa key
+was successfully stored in the hash set pointed to by
+.Fa hs .
+A key with an equivalent value must already exist in the hash set;
+it is replaced by
+.Fa key
+and the previous value is stored into the void pointer
+pointed to by the
+.Fa previous
+argument. If no such key exists in the hash set,
+then the function returns false and the hash set
+is left unchanged. This function
+is guaranteed to be stable with respect to memory usage.
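+.Pp
+The following fragment is an illustrative sketch, not part of the library;
+struct object, hs_hash and object_destroy are hypothetical user-supplied
+names, with the set assumed to be initialized as in
+.Xr ck_hs_init 3 :
+.Bd -literal -offset indent
+static bool
+object_refresh(ck_hs_t *hs, struct object *updated)
+{
+        void *previous;
+        unsigned long h = CK_HS_HASH(hs, hs_hash, updated);
+
+        /* Fails if no entry with an equivalent key is present. */
+        if (ck_hs_fas(hs, h, updated, &previous) == false)
+                return false;
+
+        /* previous points to the object that was displaced. */
+        object_destroy(previous);
+        return true;
+}
+.Ed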
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_hs_fas 3
+returns true on success and false on failure.
+.Sh ERRORS
+Behavior is undefined if
+.Fa key
+or
+.Fa hs
+are uninitialized.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_gc b/doc/ck_hs_gc
new file mode 100644
index 0000000..85abba2
--- /dev/null
+++ b/doc/ck_hs_gc
@@ -0,0 +1,88 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd December 17, 2013
+.Dt CK_HS_GC 3
+.Sh NAME
+.Nm ck_hs_gc
+.Nd perform maintenance on a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft bool
+.Fn ck_hs_gc "ck_hs_t *hs" "unsigned long cycles" "unsigned long seed"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_gc 3
+function will perform various maintenance routines on the hash set
+pointed to by
+.Fa hs ,
+including defragmentation of probe sequences with respect to tombstones
+and in the case that the delete workload hint has been passed, recalculation
+of probe sequence bounds. The
+.Fa cycles
+argument is used to indicate how many hash set entries should be subject
+to attempted maintenance. If
+.Fa cycles
+is 0, then maintenance is performed on the complete hash set. The
+.Fa seed
+argument determines the start location of the maintenance process. If
+.Fa cycles
+is non-zero, it is recommended that
+.Fa seed
+be some random value. If the delete hint has been passed, the function
+will require an additional 12% of memory (with respect to existing
+memory usage of the set), until operation completion.
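+.Pp
+The following fragment is an illustrative sketch, not part of the library:
+an incremental maintenance pass run from the writer thread. The cycle
+count of 128 is an arbitrary choice; the random seed spreads successive
+passes across different regions of the set.
+.Bd -literal -offset indent
+#include <stdbool.h>
+#include <stdlib.h>
+#include <ck_hs.h>
+
+static bool
+hs_maintain(ck_hs_t *hs)
+{
+
+        /*
+         * Touch roughly 128 slots per pass, starting at a random slot.
+         * A false return only means this pass could not allocate memory.
+         */
+        return ck_hs_gc(hs, 128, (unsigned long)rand());
+}
+.Ed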
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_hs_gc 3
+returns true and otherwise returns false on failure due to memory allocation
+failure.
+.Sh ERRORS
+This function will only return false if there are internal memory allocation
+failures.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_get b/doc/ck_hs_get
new file mode 100644
index 0000000..9c1600d
--- /dev/null
+++ b/doc/ck_hs_get
@@ -0,0 +1,88 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_HS_GET 3
+.Sh NAME
+.Nm ck_hs_get
+.Nd load a key from a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft void *
+.Fn ck_hs_get "ck_hs_t *hs" "unsigned long hash" "const void *key"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_get 3
+function will return a pointer to a key in the hash set
+.Fa hs
+that is of equivalent value to the object pointed to by
+.Fa key .
+The key specified by
+.Fa key
+is expected to have the hash value specified by the
+.Fa hash
+argument (which was previously generated using the
+.Xr CK_HS_HASH 3
+macro).
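+.Pp
+The following fragment is an illustrative sketch, not part of the library;
+it assumes a set of NUL-terminated strings whose hash callback is the
+user-supplied hs_hash from the sketch in
+.Xr ck_hs_init 3 :
+.Bd -literal -offset indent
+static const char *
+lookup(ck_hs_t *hs, const char *needle)
+{
+        unsigned long h = CK_HS_HASH(hs, hs_hash, needle);
+
+        /* Returns the stored key, or NULL if needle is not a member. */
+        return ck_hs_get(hs, h, needle);
+}
+.Ed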
+.Sh RETURN VALUES
+If the provided key is a member of
+.Fa hs
+then a pointer to the key as stored in
+.Fa hs
+is returned. If the key was not found in
+.Fa hs
+then a value of
+.Dv NULL
+is returned.
+.Sh ERRORS
+Behavior is undefined if
+.Fa key
+or
+.Fa hs
+are uninitialized.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_grow b/doc/ck_hs_grow
new file mode 100644
index 0000000..ed35cd0
--- /dev/null
+++ b/doc/ck_hs_grow
@@ -0,0 +1,81 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_HS_GROW 3
+.Sh NAME
+.Nm ck_hs_grow
+.Nd enlarge hash set capacity
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft bool
+.Fn ck_hs_grow "ck_hs_t *hs" "unsigned long capacity"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_grow 3
+function will resize the hash set in order to be
+able to store at least the number of entries specified by
+.Fa capacity
+at a load factor of one. The default hash set load factor
+is 0.5. If you wish to minimize the likelihood of memory allocations
+for a hash set meant to store n entries, then specify a
+.Fa capacity
+of 2n. The default behavior of ck_hs is to round
+.Fa capacity
+to the next power of two if it is not already a power of two.
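+.Pp
+For example, the following illustrative fragment (not part of the
+library) presizes a set for n insertions while staying at or below the
+default 0.5 load factor, so that subsequent
+.Xr ck_hs_put 3
+calls are unlikely to allocate:
+.Bd -literal -offset indent
+static bool
+hs_presize(ck_hs_t *hs, unsigned long n)
+{
+
+        /* A capacity of 2n keeps the load factor at or below 0.5. */
+        return ck_hs_grow(hs, n * 2);
+}
+.Ed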
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_hs_grow 3
+returns true on success and false on failure.
+.Sh ERRORS
+Behavior is undefined if
+.Fa hs
+is uninitialized. This function will only
+return false if there are internal memory allocation
+failures.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_init b/doc/ck_hs_init
new file mode 100644
index 0000000..cfcbf63
--- /dev/null
+++ b/doc/ck_hs_init
@@ -0,0 +1,169 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_HS_INIT 3
+.Sh NAME
+.Nm ck_hs_init
+.Nd initialize a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft typedef unsigned long
+.Fn ck_hs_hash_cb_t "const void *key" "unsigned long seed"
+.Ft typedef bool
+.Fn ck_hs_compare_cb_t "const void *c1" "const void *c2"
+.Ft bool
+.Fn ck_hs_init "ck_hs_t *hs" "unsigned int mode" "ck_hs_hash_cb_t *hash_function" "ck_hs_compare_cb_t *compare" "struct ck_malloc *allocator" "unsigned long capacity" "unsigned long seed"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_init
+function initializes the hash set pointed to by the
+.Fa hs
+pointer.
+.Pp
+The argument
+.Fa mode
+specifies the type of key-value pairs to be stored in the
+hash set as well as the expected concurrent access model.
+The value of
+.Fa mode
+consists of a bitfield of one of the following:
+.Bl -tag -width indent
+.It CK_HS_MODE_OBJECT
+The hash set is meant to store pointers to objects. This provides
+a hint that only CK_MD_VMA_BITS bits are necessary to encode the key
+argument. Any unused pointer bits are leveraged for internal
+optimizations.
+.It CK_HS_MODE_DIRECT
+The hash set is meant to directly store key values, meaning that all
+bits of the key are used to encode values.
+.El
+.Pp
+The concurrent access model is specified by:
+.Bl -tag -width indent
+.It CK_HS_MODE_SPMC
+The hash set should allow for concurrent readers in the
+presence of a single writer.
+.It CK_HS_MODE_MPMC
+The hash set should allow for concurrent readers in the
+presence of concurrent writers. This is currently unsupported.
+.El
+.Pp
+The developer is free to specify additional workload hints.
+These hints are one of:
+.Bl -tag -width indent
+.It CK_HS_MODE_DELETE
+The hash set is expected to have a delete-heavy workload.
+At the cost of approximately 13% increased memory usage,
+allow for stronger per-slot probe bounds to combat the
+effects of tombstone accumulation.
+.El
+.Pp
+The argument
+.Fa hash_function
+is a mandatory pointer to a user-specified hash function.
+A user-specified hash function takes two arguments. The
+.Fa key
+argument is a pointer to a key. The
+.Fa seed
+argument is the initial seed associated with the hash set.
+This initial seed is specified by the user in
+.Xr ck_hs_init 3 .
+.Pp
+The
+.Fa compare
+argument is an optional pointer to a user-specified
+key comparison function. If NULL is specified in this
+argument, then pointer equality will be used to determine
+key equality. A user-specified comparison function takes
+two arguments representing pointers to the objects being
+compared for equality. It is expected to return true
+if the keys are of equal value and false otherwise.
+.Pp
+The
+.Fa allocator
+argument is a pointer to a structure containing
+.Fa malloc
+and
+.Fa free
+function pointers which respectively define the memory allocation and
+destruction functions to be used by the hash set being initialized.
+.Pp
+The argument
+.Fa capacity
+represents the initial number of keys the hash
+set is expected to contain. This argument is simply a hint
+and the underlying implementation is free to allocate more
+or less memory than necessary to contain the number of entries
+.Fa capacity
+specifies.
+.Pp
+The argument
+.Fa seed
+specifies the initial seed used by the underlying hash function.
+The user is free to choose any value.
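+.Pp
+The following example is an illustrative sketch rather than part of the
+library: a single-writer set of NUL-terminated strings backed by the
+standard C allocator. The helper names (hs_malloc, hs_free, hs_hash,
+hs_compare) and the simple string hash are arbitrary choices; any hash
+function returning an unsigned long may be substituted.
+.Bd -literal -offset indent
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ck_hs.h>
+
+static void *
+hs_malloc(size_t r)
+{
+
+        return malloc(r);
+}
+
+static void
+hs_free(void *p, size_t b, bool r)
+{
+
+        (void)b;
+        (void)r;
+        free(p);
+        return;
+}
+
+static struct ck_malloc allocator = {
+        .malloc = hs_malloc,
+        .free = hs_free
+};
+
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+        const unsigned char *p = object;
+        unsigned long h = seed;
+
+        /* A simple multiplicative string hash; illustrative only. */
+        for (; *p != 0; p++)
+                h = h * 131 + *p;
+
+        return h;
+}
+
+static bool
+hs_compare(const void *a, const void *b)
+{
+
+        return strcmp(a, b) == 0;
+}
+
+static bool
+set_create(ck_hs_t *hs)
+{
+
+        /* Pointer keys, single writer, many concurrent readers. */
+        return ck_hs_init(hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC,
+            hs_hash, hs_compare, &allocator, 64, 42);
+}
+.Ed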
+.Sh RETURN VALUES
+Upon successful completion
+.Fn ck_hs_init
+returns a value of
+.Dv true
+and otherwise returns a value of
+.Dv false
+to indicate an error.
+.Sh ERRORS
+The behavior of
+.Fn ck_hs_init
+is undefined if
+.Fa hs
+is not a pointer to a
+.Tn ck_hs_t
+object.
+.Sh SEE ALSO
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_iterator_init b/doc/ck_hs_iterator_init
new file mode 100644
index 0000000..d2c25cc
--- /dev/null
+++ b/doc/ck_hs_iterator_init
@@ -0,0 +1,78 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_HS_ITERATOR_INIT 3
+.Sh NAME
+.Nm ck_hs_iterator_init
+.Nd initialize hash set iterator
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Pp
+.Dv ck_hs_iterator_t iterator = CK_HS_ITERATOR_INITIALIZER
+.Pp
+.Ft void
+.Fn ck_hs_iterator_init "ck_hs_iterator_t *iterator"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_iterator_init 3
+function will initialize the object pointed to
+by the
+.Fa iterator
+argument. Alternatively, an iterator may be statically
+initialized by assigning it to the CK_HS_ITERATOR_INITIALIZER value.
+.Pp
+An iterator is used to iterate through hash set entries with the
+.Xr ck_hs_next 3
+function.
+.Sh RETURN VALUES
+.Fn ck_hs_iterator_init 3
+has no return value.
+.Sh ERRORS
+This function will not fail.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_move b/doc/ck_hs_move
new file mode 100644
index 0000000..1d30195
--- /dev/null
+++ b/doc/ck_hs_move
@@ -0,0 +1,90 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd July 18, 2013
+.Dt CK_HS_MOVE 3
+.Sh NAME
+.Nm ck_hs_move
+.Nd move ownership from one hash set to another
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft bool
+.Fn ck_hs_move "ck_hs_t *destination" "ck_hs_t *source" "ck_hs_hash_cb_t *hash_cb" "ck_hs_compare_cb_t *compare_cb" "struct ck_malloc *m"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_move 3
+function will initialize
+.Fa destination
+from
+.Fa source .
+The hash function is set to
+.Fa hash_cb ,
+comparison function to
+.Fa compare_cb
+and the allocator callbacks to
+.Fa m .
+Further modifications to
+.Fa source
+will result in undefined behavior. Concurrent
+.Xr ck_hs_get 3
+and
+.Xr ck_hs_fas 3
+operations to
+.Fa source
+are legal until the next write operation to
+.Fa destination .
+.Pp
+This operation moves ownership from one hash set object
+to another and re-assigns callback functions to developer-specified
+values. This allows for dynamic configuration of allocation
+callbacks and is necessary for use-cases involving executable code
+which may be unmapped underneath the hash set.
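+.Pp
+The following fragment is an illustrative sketch, not part of the library;
+hs_hash, hs_compare and allocator refer to the user-supplied helpers from
+the sketch in
+.Xr ck_hs_init 3 :
+.Bd -literal -offset indent
+static bool
+adopt(ck_hs_t *destination, ck_hs_t *source)
+{
+
+        /* After this call, only destination may be written to. */
+        return ck_hs_move(destination, source, hs_hash, hs_compare,
+            &allocator);
+}
+.Ed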
+.Sh RETURN VALUES
+Upon successful completion
+.Fn ck_hs_move 3
+returns true on success and false to indicate an error.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_next b/doc/ck_hs_next
new file mode 100644
index 0000000..67e083e
--- /dev/null
+++ b/doc/ck_hs_next
@@ -0,0 +1,92 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_HS_NEXT 3
+.Sh NAME
+.Nm ck_hs_next
+.Nd iterate to next entry in hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft bool
+.Fn ck_hs_next "ck_hs_t *hs" "ck_hs_iterator_t *iterator" "void **entry"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_next 3
+function will increment the iterator object pointed to by
+.Fa iterator
+to point to the next non-empty hash set entry. If
+.Fn ck_hs_next 3
+returns true then the pointer pointed to by
+.Fa entry
+is initialized to the current hash set key pointed to by the
+.Fa iterator
+object.
+.Pp
+It is expected that
+.Fa iterator
+has been initialized using the
+.Xr ck_hs_iterator_init 3
+function or statically initialized using CK_HS_ITERATOR_INITIALIZER.
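+.Pp
+The following fragment is an illustrative sketch, not part of the library;
+it assumes the set stores NUL-terminated strings and that no writer runs
+concurrently with the iteration:
+.Bd -literal -offset indent
+#include <stdio.h>
+#include <ck_hs.h>
+
+static void
+dump(ck_hs_t *hs)
+{
+        ck_hs_iterator_t it = CK_HS_ITERATOR_INITIALIZER;
+        void *key;
+
+        while (ck_hs_next(hs, &it, &key) == true)
+                puts(key);
+
+        return;
+}
+.Ed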
+.Sh RETURN VALUES
+If
+.Fn ck_hs_next 3
+returns true then the object pointed to by
+.Fa entry
+points to a valid hash set key. If
+.Fn ck_hs_next 3
+returns false then the value of the object pointed to by
+.Fa entry
+is undefined.
+.Sh ERRORS
+Behavior is undefined if
+.Fa iterator
+or
+.Fa hs
+are uninitialized.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_put b/doc/ck_hs_put
new file mode 100644
index 0000000..8f8f55f
--- /dev/null
+++ b/doc/ck_hs_put
@@ -0,0 +1,98 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_HS_PUT 3
+.Sh NAME
+.Nm ck_hs_put
+.Nd store unique key into a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft bool
+.Fn ck_hs_put "ck_hs_t *hs" "unsigned long hash" "const void *key"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_put 3
+function will store the key specified by the
+.Fa key
+argument in the hash set pointed to by the
+.Fa hs
+argument. The key specified by
+.Fa key
+is expected to have the hash value specified by the
+.Fa hash
+argument (which was previously generated using the
+.Xr CK_HS_HASH 3
+macro).
+.Pp
+If the call to
+.Fn ck_hs_put 3
+was successful then the key specified by
+.Fa key
+was successfully stored in the hash set pointed to by
+.Fa hs .
+The function will fail if a key with an
+equivalent value to
+.Fa key
+is already present in the hash set. For replacement
+semantics, please see the
+.Xr ck_hs_set 3
+function.
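+.Pp
+The following fragment is an illustrative sketch, not part of the library;
+hs_hash is the user-supplied hash callback from the sketch in
+.Xr ck_hs_init 3 :
+.Bd -literal -offset indent
+static bool
+intern(ck_hs_t *hs, const char *s)
+{
+        unsigned long h = CK_HS_HASH(hs, hs_hash, s);
+
+        /* Fails if s is already a member or if allocation fails. */
+        return ck_hs_put(hs, h, s);
+}
+.Ed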
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_hs_put 3
+returns true on success and false on failure.
+.Sh ERRORS
+Behavior is undefined if
+.Fa key
+or
+.Fa hs
+are uninitialized. The function will also
+return false if the hash set could not be enlarged
+to accommodate key insertion.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_put_unique b/doc/ck_hs_put_unique
new file mode 100644
index 0000000..f60c543
--- /dev/null
+++ b/doc/ck_hs_put_unique
@@ -0,0 +1,98 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd December 7, 2013
+.Dt CK_HS_PUT_UNIQUE 3
+.Sh NAME
+.Nm ck_hs_put_unique
+.Nd unconditionally store unique key into a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft bool
+.Fn ck_hs_put_unique "ck_hs_t *hs" "unsigned long hash" "const void *key"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_put_unique 3
+function will store the key specified by the
+.Fa key
+argument in the hash set pointed to by the
+.Fa hs
+argument. The key specified by
+.Fa key
+is expected to have the hash value specified by the
+.Fa hash
+argument (which was previously generated using the
+.Xr CK_HS_HASH 3
+macro).
+.Pp
+If the call to
+.Fn ck_hs_put_unique 3
+was successful then the key specified by
+.Fa key
+was successfully stored in the hash set pointed to by
+.Fa hs .
+The function will cause undefined behavior if a key with an
+equivalent value is already present in the hash set. For replacement
+semantics, please see the
+.Xr ck_hs_set 3
+function.
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_hs_put_unique 3
+returns true on success and false on failure.
+.Sh ERRORS
+Behavior is undefined if
+.Fa key
+or
+.Fa hs
+are uninitialized. The function will also
+return false if the hash set could not be enlarged
+to accommodate key insertion. The function will
+result in undefined behavior if called for an
+already inserted key value.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_rebuild b/doc/ck_hs_rebuild
new file mode 100644
index 0000000..a49bb28
--- /dev/null
+++ b/doc/ck_hs_rebuild
@@ -0,0 +1,76 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd December 7, 2013
+.Dt CK_HS_REBUILD 3
+.Sh NAME
+.Nm ck_hs_rebuild
+.Nd rebuild a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft bool
+.Fn ck_hs_rebuild "ck_hs_t *hs"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_rebuild 3
+function will regenerate the hash set pointed to by
+.Fa hs .
+This has the side-effect of pruning the performance degradation
+caused by delete-heavy workloads. The regenerated hash
+set should have shorter probe sequences on average. This
+operation will require a significant amount of memory
+and is free to allocate a duplicate hash set during the
+rebuild process.
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_hs_rebuild 3
+returns true on success and false on failure.
+.Sh ERRORS
+This function will only return false if there are internal memory allocation
+failures.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_remove b/doc/ck_hs_remove
new file mode 100644
index 0000000..10ccfb6
--- /dev/null
+++ b/doc/ck_hs_remove
@@ -0,0 +1,92 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_HS_REMOVE 3
+.Sh NAME
+.Nm ck_hs_remove
+.Nd remove key from a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft void *
+.Fn ck_hs_remove "ck_hs_t *hs" "unsigned long hash" "const void *key"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_remove 3
+function will attempt to remove the key specified by the
+.Fa key
+argument in the hash set pointed to by the
+.Fa hs
+argument. The key specified by
+.Fa key
+is expected to have the hash value specified by the
+.Fa hash
+argument (which was previously generated using the
+.Xr CK_HS_HASH 3
+macro).
+.Pp
+If the call to
+.Fn ck_hs_remove 3
+was successful then the key contained in the hash
+set is returned. If the key was not a member of the hash
+set then
+.Dv NULL
+is returned.
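+.Pp
+The following fragment is an illustrative sketch, not part of the library;
+hs_hash is the user-supplied hash callback from the sketch in
+.Xr ck_hs_init 3 ,
+and the keys are assumed to have been allocated with malloc:
+.Bd -literal -offset indent
+#include <stdbool.h>
+#include <stdlib.h>
+#include <ck_hs.h>
+
+static bool
+discard(ck_hs_t *hs, const char *s)
+{
+        unsigned long h = CK_HS_HASH(hs, hs_hash, s);
+        void *previous;
+
+        previous = ck_hs_remove(hs, h, s);
+        if (previous == NULL)
+                return false;
+
+        /* Safe only once no reader may still dereference previous. */
+        free(previous);
+        return true;
+}
+.Ed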
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_hs_remove 3
+returns a pointer to a key and otherwise returns
+.Dv NULL
+on failure.
+.Sh ERRORS
+Behavior is undefined if
+.Fa key
+or
+.Fa hs
+are uninitialized.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_reset b/doc/ck_hs_reset
new file mode 100644
index 0000000..e6ce72e
--- /dev/null
+++ b/doc/ck_hs_reset
@@ -0,0 +1,77 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_HS_RESET 3
+.Sh NAME
+.Nm ck_hs_reset
+.Nd remove all keys from a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft bool
+.Fn ck_hs_reset "ck_hs_t *hs"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_reset 3
+function will remove all keys stored in the hash
+set pointed to by the
+.Fa hs
+argument.
+.Sh RETURN VALUES
+If successful,
+.Fn ck_hs_reset 3
+will return true; otherwise, it will return false. This
+function will only fail if a replacement hash set could not be
+allocated internally.
+.Sh ERRORS
+Behavior is undefined if
+.Fa hs
+is uninitialized. Behavior is
+undefined if this function is called by a non-writer
+thread.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_reset_size b/doc/ck_hs_reset_size
new file mode 100644
index 0000000..801c063
--- /dev/null
+++ b/doc/ck_hs_reset_size
@@ -0,0 +1,80 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd May 5, 2013
+.Dt CK_HS_RESET_SIZE 3
+.Sh NAME
+.Nm ck_hs_reset_size
+.Nd remove all keys from a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft bool
+.Fn ck_hs_reset_size "ck_hs_t *hs" "unsigned long size"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_reset_size 3
+function will remove all keys stored in the hash
+set pointed to by the
+.Fa hs
+argument and create a new generation of the hash set that
+is preallocated for
+.Fa size
+entries.
+.Sh RETURN VALUES
+If successful,
+.Fn ck_hs_reset_size 3
+will return true; otherwise, it will return false. This
+function will only fail if a replacement hash set could not be
+allocated internally.
+.Sh ERRORS
+Behavior is undefined if
+.Fa hs
+is uninitialized. Behavior is
+undefined if this function is called by a non-writer
+thread.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_set b/doc/ck_hs_set
new file mode 100644
index 0000000..e9ba9f1
--- /dev/null
+++ b/doc/ck_hs_set
@@ -0,0 +1,102 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_HS_SET 3
+.Sh NAME
+.Nm ck_hs_set
+.Nd store key into a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft bool
+.Fn ck_hs_set "ck_hs_t *hs" "unsigned long hash" "const void *key" "void **previous"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_set 3
+function will store the key specified by the
+.Fa key
+argument in the hash set pointed to by the
+.Fa hs
+argument. The key specified by
+.Fa key
+is expected to have the hash value specified by the
+.Fa hash
+argument (which was previously generated using the
+.Xr CK_HS_HASH 3
+macro).
+.Pp
+If the call to
+.Fn ck_hs_set 3
+was successful then the key specified by
+.Fa key
+was successfully stored in the hash set pointed to by
+.Fa hs .
+If the key already exists in the hash set, then it is
+replaced by
+.Fa key
+and the previous value is stored into the void pointer
+pointed to by the
+.Fa previous
+argument. If the value stored into
+.Fa previous
+is
+.Dv NULL ,
+then
+.Fa key
+did not replace an existing entry in the hash set.
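+.Pp
+The following fragment is an illustrative sketch, not part of the library;
+struct object, hs_hash and object_destroy are hypothetical user-supplied
+names, with the set assumed to be initialized as in
+.Xr ck_hs_init 3 :
+.Bd -literal -offset indent
+static bool
+upsert(ck_hs_t *hs, struct object *o)
+{
+        void *previous;
+        unsigned long h = CK_HS_HASH(hs, hs_hash, o);
+
+        if (ck_hs_set(hs, h, o, &previous) == false)
+                return false;
+
+        /* Non-NULL means o displaced an existing, equivalent entry. */
+        if (previous != NULL)
+                object_destroy(previous);
+
+        return true;
+}
+.Ed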
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_hs_set 3
+returns true on success and false on failure.
+.Sh ERRORS
+Behavior is undefined if
+.Fa key
+or
+.Fa hs
+are uninitialized. The function will also
+return false if the hash set could not be enlarged
+to accommodate key insertion.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3 ,
+.Xr ck_hs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_hs_stat b/doc/ck_hs_stat
new file mode 100644
index 0000000..796a894
--- /dev/null
+++ b/doc/ck_hs_stat
@@ -0,0 +1,81 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_HS_STAT 3
+.Sh NAME
+.Nm ck_hs_stat
+.Nd get hash set status
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_hs.h
+.Ft void
+.Fn ck_hs_stat "ck_hs_t *hs" "struct ck_hs_stat *st"
+.Sh DESCRIPTION
+The
+.Fn ck_hs_stat 3
+function will store various hash set statistics in
+the object pointed to by
+.Fa st .
+The ck_hs_stat structure is defined as follows:
+.Bd -literal -offset indent
+struct ck_hs_stat {
+ unsigned long tombstones; /* Current number of tombstones in hash set. */
+ unsigned long n_entries; /* Current number of keys in hash set. */
+ unsigned int probe_maximum; /* Longest read-side probe sequence. */
+};
+.Ed
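+.Pp
+The following fragment is an illustrative sketch, not part of the library,
+showing how a writer thread might inspect probe-sequence health:
+.Bd -literal -offset indent
+static unsigned int
+longest_probe(ck_hs_t *hs)
+{
+        struct ck_hs_stat st;
+
+        /* Only the writer thread may call ck_hs_stat(). */
+        ck_hs_stat(hs, &st);
+        return st.probe_maximum;
+}
+.Ed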
+.Sh RETURN VALUES
+.Fn ck_hs_stat 3
+has no return value.
+.Sh ERRORS
+Behavior is undefined if
+.Fa hs
+is uninitialized. Behavior is
+undefined if this function is called by a non-writer
+thread.
+.Sh SEE ALSO
+.Xr ck_hs_init 3 ,
+.Xr ck_hs_move 3 ,
+.Xr ck_hs_destroy 3 ,
+.Xr CK_HS_HASH 3 ,
+.Xr ck_hs_iterator_init 3 ,
+.Xr ck_hs_next 3 ,
+.Xr ck_hs_get 3 ,
+.Xr ck_hs_put 3 ,
+.Xr ck_hs_put_unique 3 ,
+.Xr ck_hs_set 3 ,
+.Xr ck_hs_fas 3 ,
+.Xr ck_hs_remove 3 ,
+.Xr ck_hs_grow 3 ,
+.Xr ck_hs_gc 3 ,
+.Xr ck_hs_rebuild 3 ,
+.Xr ck_hs_count 3 ,
+.Xr ck_hs_reset 3 ,
+.Xr ck_hs_reset_size 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_count b/doc/ck_ht_count
new file mode 100644
index 0000000..ba10835
--- /dev/null
+++ b/doc/ck_ht_count
@@ -0,0 +1,77 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 29, 2012
+.Dt CK_HT_COUNT 3
+.Sh NAME
+.Nm ck_ht_count
+.Nd return the number of key-value pairs in a hash table
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft uint64_t
+.Fn ck_ht_count "ck_ht_t *ht"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_count
+function will return the number of entries in the hash table
+pointed to by the
+.Fa ht
+argument. The function may only be called in the absence of
+concurrent write operations.
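+.Pp
+A minimal usage sketch with an illustrative helper, assuming the table was
+previously initialized with
+.Xr ck_ht_init 3
+and that writers are quiescent:
+.Bd -literal -offset indent
+#include <ck_ht.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+/* Report the current number of key-value pairs in the table. */
+static void
+ht_report(ck_ht_t *ht)
+{
+	uint64_t n = ck_ht_count(ht);
+
+	printf("%" PRIu64 " key-value pairs\en", n);
+	return;
+}
+.Ed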
+.Sh ERRORS
+Behavior is undefined if
+.Fa ht
+has not been initialized.
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_destroy b/doc/ck_ht_destroy
new file mode 100644
index 0000000..95e4acb
--- /dev/null
+++ b/doc/ck_ht_destroy
@@ -0,0 +1,87 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 29, 2012
+.Dt CK_HT_DESTROY 3
+.Sh NAME
+.Nm ck_ht_destroy
+.Nd immediately destroy a hash table
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft void
+.Fn ck_ht_destroy "ck_ht_t *ht"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_destroy
+function will request that the underlying allocator, as specified by the
+.Xr ck_ht_init 3
+function, immediately destroy the object pointed to by the
+.Fa ht
+argument.
+.Pp
+The user must guarantee that no threads are accessing the object pointed to
+by
+.Fa ht
+when
+.Fn ck_ht_destroy
+is called.
+.Sh RETURN VALUES
+.Fn ck_ht_destroy
+has no return value.
+.Sh ERRORS
+.Bl -tag -width Er
+This function is guaranteed not to fail.
+.El
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_entry_empty b/doc/ck_ht_entry_empty
new file mode 100644
index 0000000..9233344
--- /dev/null
+++ b/doc/ck_ht_entry_empty
@@ -0,0 +1,90 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 29, 2012
+.Dt CK_HT_ENTRY_EMPTY 3
+.Sh NAME
+.Nm ck_ht_entry_empty
+.Nd determine whether entry contains a key-value pair
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft bool
+.Fn ck_ht_entry_empty "ck_ht_entry_t *entry"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_entry_empty
+function will return
+.Dv false
+if
+.Fa entry
+points to a valid key-value pair. If
+.Fa entry
+does not point to a valid key-value pair, it
+returns
+.Dv true .
+It is expected that the object pointed to by
+.Fa entry
+was initialized by a preceding call to the
+.Xr ck_ht_entry_set 3
+family of functions, the
+.Xr ck_ht_get_spmc 3
+function or the
+.Xr ck_ht_set_spmc 3
+function.
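+.Pp
+The following sketch uses
+.Fn ck_ht_entry_empty
+in an illustrative helper to test the result of a lookup of a NUL-terminated
+key; it assumes a table created in CK_HT_MODE_BYTESTRING mode with
+.Xr ck_ht_init 3 :
+.Bd -literal -offset indent
+#include <ck_ht.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Return true if key is present in the byte-string table ht. */
+static bool
+ht_contains(ck_ht_t *ht, const char *key)
+{
+	ck_ht_entry_t entry;
+	ck_ht_hash_t h;
+	uint16_t l = (uint16_t)strlen(key);
+
+	ck_ht_hash(&h, ht, key, l);
+	ck_ht_entry_key_set(&entry, key, l);
+
+	/* On success the entry is populated and therefore non-empty. */
+	if (ck_ht_get_spmc(ht, h, &entry) == false)
+		return false;
+
+	return ck_ht_entry_empty(&entry) == false;
+}
+.Ed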
+.Sh ERRORS
+Behavior is undefined if
+.Fa entry
+has not been initialized.
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_entry_key b/doc/ck_ht_entry_key
new file mode 100644
index 0000000..5003a1a
--- /dev/null
+++ b/doc/ck_ht_entry_key
@@ -0,0 +1,88 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 30, 2012
+.Dt CK_HT_ENTRY_KEY 3
+.Sh NAME
+.Nm ck_ht_entry_key
+.Nd return pointer to key as specified in hash table entry
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft void *
+.Fn ck_ht_entry_key "ck_ht_entry_t *entry"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_entry_key
+function will return the key pointer as specified in the
+object pointed to by the
+.Fa entry
+argument.
+.Pp
+It is expected that the entry is
+associated with a hash table initialized with
+.Dv CK_HT_MODE_BYTESTRING
+(see
+.Xr ck_ht_init 3
+for more information).
+.Sh RETURN VALUES
+.Fn ck_ht_entry_key
+returns
+.Dv NULL
+if the entry is empty.
+.Sh ERRORS
+Behavior is undefined if
+.Fa entry
+has not been initialized.
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_entry_key_direct b/doc/ck_ht_entry_key_direct
new file mode 100644
index 0000000..e0a75a2
--- /dev/null
+++ b/doc/ck_ht_entry_key_direct
@@ -0,0 +1,91 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 30, 2012
+.Dt CK_HT_ENTRY_KEY_DIRECT 3
+.Sh NAME
+.Nm ck_ht_entry_key_direct
+.Nd return key value as specified in hash table entry
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft uintptr_t
+.Fn ck_ht_entry_key_direct "ck_ht_entry_t *entry"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_entry_key_direct
+function will return the key value as specified in the
+object pointed to by the
+.Fa entry
+argument.
+.Pp
+It is expected that the entry is
+associated with a hash table initialized with
+.Dv CK_HT_MODE_DIRECT
+(see
+.Xr ck_ht_init 3
+for more information).
+.Sh RETURN VALUES
+.Fn ck_ht_entry_key_direct
+returns
+.Dv 0
+if the entry is empty. Otherwise, it returns the
+key value stored in the object pointed to by the
+.Fa entry
+argument.
+.Sh ERRORS
+Behavior is undefined if
+.Fa entry
+has not been initialized.
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_entry_key_length b/doc/ck_ht_entry_key_length
new file mode 100644
index 0000000..6ac3ded
--- /dev/null
+++ b/doc/ck_ht_entry_key_length
@@ -0,0 +1,88 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 30, 2012
+.Dt CK_HT_ENTRY_KEY_LENGTH 3
+.Sh NAME
+.Nm ck_ht_entry_key_length
+.Nd return the length of the key as specified in the hash table entry
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft uint16_t
+.Fn ck_ht_entry_key_length "ck_ht_entry_t *entry"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_entry_key_length
+function will return the length of the key associated with the
+object pointed to by the
+.Fa entry
+argument.
+.Pp
+It is expected that the entry is
+associated with a hash table initialized with
+.Dv CK_HT_MODE_BYTESTRING
+(see
+.Xr ck_ht_init 3
+for more information).
+.Sh RETURN VALUES
+.Fn ck_ht_entry_key_length
+returns
+.Dv 0
+if the entry is empty.
+.Sh ERRORS
+Behavior is undefined if
+.Fa entry
+has not been initialized.
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_entry_key_set b/doc/ck_ht_entry_key_set
new file mode 100644
index 0000000..03e53bb
--- /dev/null
+++ b/doc/ck_ht_entry_key_set
@@ -0,0 +1,93 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 30, 2012
+.Dt CK_HT_ENTRY_KEY_SET 3
+.Sh NAME
+.Nm ck_ht_entry_key_set
+.Nd initialize pointer to key in hash table entry
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft void
+.Fn ck_ht_entry_key_set "ck_ht_entry_t *entry" "const void *key" "uint16_t key_length"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_entry_key_set
+function will initialize the object pointed to by
+.Fa entry
+with a key pointed to by the
+.Fa key
+argument. The length of the key is specified by
+.Fa key_length .
+The maximum value of
+.Fa key_length
+is defined by the
+.Dv CK_HT_KEY_LENGTH
+macro.
+This function is typically used to initialize an
+entry for
+.Xr ck_ht_get_spmc 3
+and
+.Xr ck_ht_remove_spmc 3
+operations. It is expected that the entry will
+be associated with a hash table initialized with
+.Dv CK_HT_MODE_BYTESTRING
+(see
+.Xr ck_ht_init 3
+for more information).
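+.Pp
+The sketch below initializes an entry for a deletion by key using an
+illustrative helper; the table is assumed to be in CK_HT_MODE_BYTESTRING
+mode and the
+.Xr ck_ht_remove_spmc 3
+prototype is assumed to mirror that of
+.Xr ck_ht_get_spmc 3 :
+.Bd -literal -offset indent
+#include <ck_ht.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Remove a NUL-terminated key from the byte-string table ht. */
+static bool
+ht_delete(ck_ht_t *ht, const char *key)
+{
+	ck_ht_entry_t entry;
+	ck_ht_hash_t h;
+	uint16_t l = (uint16_t)strlen(key);
+
+	ck_ht_hash(&h, ht, key, l);
+	ck_ht_entry_key_set(&entry, key, l);
+
+	/* Assumed prototype: ck_ht_remove_spmc(ht, h, &entry). */
+	return ck_ht_remove_spmc(ht, h, &entry);
+}
+.Ed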
+.Sh RETURN VALUES
+.Fn ck_ht_entry_key_set
+has no return value.
+.Sh ERRORS
+This function will never fail.
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_entry_key_set_direct b/doc/ck_ht_entry_key_set_direct
new file mode 100644
index 0000000..1cd2d6c
--- /dev/null
+++ b/doc/ck_ht_entry_key_set_direct
@@ -0,0 +1,88 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 30, 2012
+.Dt CK_HT_ENTRY_KEY_SET_DIRECT 3
+.Sh NAME
+.Nm ck_ht_entry_key_set_direct
+.Nd initialize key value in hash table entry
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft void
+.Fn ck_ht_entry_key_set_direct "ck_ht_entry_t *entry" "uintptr_t key"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_entry_key_set_direct
+function will initialize the object pointed to by
+.Fa entry
+with the key value specified in the
+.Fa key
+argument. This function is typically used to initialize an
+entry for
+.Xr ck_ht_get_spmc 3
+and
+.Xr ck_ht_remove_spmc 3
+operations. It is expected that the entry will
+be associated with a hash table initialized with
+.Dv CK_HT_MODE_DIRECT
+(see
+.Xr ck_ht_init 3
+for more information).
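+.Pp
+A direct-mode lookup sketch with an illustrative helper; the table is
+assumed to have been initialized in CK_HT_MODE_DIRECT mode with
+.Xr ck_ht_init 3 :
+.Bd -literal -offset indent
+#include <ck_ht.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+/* Fetch the value associated with an integral key, if present. */
+static bool
+ht_lookup_direct(ck_ht_t *ht, uintptr_t key, uintptr_t *value)
+{
+	ck_ht_entry_t entry;
+	ck_ht_hash_t h;
+
+	ck_ht_hash_direct(&h, ht, key);
+	ck_ht_entry_key_set_direct(&entry, key);
+	if (ck_ht_get_spmc(ht, h, &entry) == false)
+		return false;
+
+	*value = ck_ht_entry_value_direct(&entry);
+	return true;
+}
+.Ed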
+.Sh RETURN VALUES
+.Fn ck_ht_entry_key_set_direct
+has no return value.
+.Sh ERRORS
+This function will never fail.
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_entry_set b/doc/ck_ht_entry_set
new file mode 100644
index 0000000..b017430
--- /dev/null
+++ b/doc/ck_ht_entry_set
@@ -0,0 +1,95 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 30, 2012
+.Dt CK_HT_ENTRY_SET 3
+.Sh NAME
+.Nm ck_ht_entry_set
+.Nd initialize a key-value pair
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft void
+.Fn ck_ht_entry_set "ck_ht_entry_t *entry" "ck_ht_hash_t h" "const void *key" "uint16_t key_length" "const void *value"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_entry_set
+function will initialize the object pointed to by
+.Fa entry
+with a key pointed to by the
+.Fa key
+argument and a value pointed to by the
+.Fa value
+argument. The length of the key is specified by
+.Fa key_length .
+The maximum value of
+.Fa key_length
+is defined by the
+.Dv CK_HT_KEY_LENGTH
+macro.
+This function is typically used to initialize an
+entry for
+.Xr ck_ht_set_spmc 3
+and
+.Xr ck_ht_put_spmc 3
+operations. It is expected that the entry will
+be associated with a hash table initialized with
+.Dv CK_HT_MODE_BYTESTRING
+(see
+.Xr ck_ht_init 3
+for more information).
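+.Pp
+The sketch below builds an entry and stores it through an illustrative
+helper; the table is assumed to be in CK_HT_MODE_BYTESTRING mode and the
+.Xr ck_ht_put_spmc 3
+prototype is assumed to take the table, the hash and the entry:
+.Bd -literal -offset indent
+#include <ck_ht.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Insert a NUL-terminated key and its associated value pointer. */
+static bool
+ht_insert(ck_ht_t *ht, const char *key, void *value)
+{
+	ck_ht_entry_t entry;
+	ck_ht_hash_t h;
+	uint16_t l = (uint16_t)strlen(key);
+
+	ck_ht_hash(&h, ht, key, l);
+	ck_ht_entry_set(&entry, h, key, l, value);
+
+	/* Assumed prototype: ck_ht_put_spmc(ht, h, &entry). */
+	return ck_ht_put_spmc(ht, h, &entry);
+}
+.Ed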
+.Sh RETURN VALUES
+.Fn ck_ht_entry_set
+has no return value.
+.Sh ERRORS
+This function will never fail.
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_entry_set_direct b/doc/ck_ht_entry_set_direct
new file mode 100644
index 0000000..9c9bf08
--- /dev/null
+++ b/doc/ck_ht_entry_set_direct
@@ -0,0 +1,94 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 30, 2012
+.Dt CK_HT_ENTRY_SET_DIRECT 3
+.Sh NAME
+.Nm ck_ht_entry_set_direct
+.Nd initialize a key-value pair
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft void
+.Fn ck_ht_entry_set_direct "ck_ht_entry_t *entry" "ck_ht_hash_t h" "uintptr_t key" "uintptr_t value"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_entry_set_direct
+function will initialize the object pointed to by
+.Fa entry
+with the hash value specified by the
+.Fa h
+argument, the key value specified in the
+.Fa key
+argument and the value specified by the
+.Fa value
+argument.
+.Pp
+This function is typically used to initialize an
+entry for
+.Xr ck_ht_set_spmc 3
+and
+.Xr ck_ht_put_spmc 3
+operations. It is expected that the entry will
+be associated with a hash table initialized with
+.Dv CK_HT_MODE_DIRECT
+(see
+.Xr ck_ht_init 3
+for more information).
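+.Pp
+A direct-mode insertion sketch with an illustrative helper; the table is
+assumed to have been initialized in CK_HT_MODE_DIRECT mode and the
+.Xr ck_ht_put_spmc 3
+prototype is assumed to take the table, the hash and the entry:
+.Bd -literal -offset indent
+#include <ck_ht.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+/* Store an integral key-value pair; 0 and UINTPTR_MAX are reserved. */
+static bool
+ht_store_direct(ck_ht_t *ht, uintptr_t key, uintptr_t value)
+{
+	ck_ht_entry_t entry;
+	ck_ht_hash_t h;
+
+	ck_ht_hash_direct(&h, ht, key);
+	ck_ht_entry_set_direct(&entry, h, key, value);
+	return ck_ht_put_spmc(ht, h, &entry);
+}
+.Ed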
+.Sh RETURN VALUES
+.Fn ck_ht_entry_set_direct
+has no return value.
+.Sh ERRORS
+This function will never fail.
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_entry_value b/doc/ck_ht_entry_value
new file mode 100644
index 0000000..2e712e3
--- /dev/null
+++ b/doc/ck_ht_entry_value
@@ -0,0 +1,88 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 30, 2012
+.Dt CK_HT_ENTRY_VALUE 3
+.Sh NAME
+.Nm ck_ht_entry_value
+.Nd return pointer to value as specified in hash table entry
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft void *
+.Fn ck_ht_entry_value "ck_ht_entry_t *entry"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_entry_value
+function will return the value pointer as specified in the
+object pointed to by the
+.Fa entry
+argument.
+.Pp
+It is expected that the entry is
+associated with a hash table initialized with
+.Dv CK_HT_MODE_BYTESTRING
+(see
+.Xr ck_ht_init 3
+for more information).
+.Sh RETURN VALUES
+The
+.Fn ck_ht_entry_value
+function returns the value pointer stored in the object pointed to by the
+.Fa entry
+argument.
+.Sh ERRORS
+Behavior is undefined if
+.Fa entry
+has not been initialized or if the key is empty.
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_entry_value_direct b/doc/ck_ht_entry_value_direct
new file mode 100644
index 0000000..1a7c28e
--- /dev/null
+++ b/doc/ck_ht_entry_value_direct
@@ -0,0 +1,89 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 30, 2012
+.Dt CK_HT_ENTRY_VALUE_DIRECT 3
+.Sh NAME
+.Nm ck_ht_entry_value_direct
+.Nd return value as specified in hash table entry
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft uintptr_t
+.Fn ck_ht_entry_value_direct "ck_ht_entry_t *entry"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_entry_value_direct
+function will return the value of the key-value pair as specified in the
+object pointed to by the
+.Fa entry
+argument.
+.Pp
+It is expected that the entry is
+associated with a hash table initialized with
+.Dv CK_HT_MODE_DIRECT
+(see
+.Xr ck_ht_init 3
+for more information).
+.Sh RETURN VALUES
+The
+.Fn ck_ht_entry_value_direct
+function returns the value stored in the object pointed to by the
+.Fa entry
+argument.
+.Sh ERRORS
+Behavior is undefined if
+.Fa entry
+has not been initialized or if the key is empty.
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_gc b/doc/ck_ht_gc
new file mode 100644
index 0000000..8bbad56
--- /dev/null
+++ b/doc/ck_ht_gc
@@ -0,0 +1,96 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd December 18, 2013
+.Dt CK_HT_GC 3
+.Sh NAME
+.Nm ck_ht_gc
+.Nd perform maintenance on a hash table
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft bool
+.Fn ck_ht_gc "ck_ht_t *ht" "unsigned long cycles" "unsigned long seed"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_gc
+function will perform various maintenance routines on the hash table
+pointed to by
+.Fa ht ,
+including defragmentation of probe sequences with respect to tombstones
+and, if the delete workload hint has been passed, recalculation
+of probe sequence bounds. The
+.Fa cycles
+argument is used to indicate how many hash table entries should be subject
+to attempted maintenance.
+If
+.Fa cycles
+is 0, then maintenance is performed on the complete hash table. The
+.Fa seed
+argument determines the start location of the maintenance process. If
+.Fa cycles
+is non-zero, it is recommended that
+.Fa seed
+is some random value. If the delete workload hint has been passed, the
+function will temporarily require approximately 12% additional memory
+(relative to the existing memory usage of the hash table) until the
+operation completes.
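+.Pp
+A minimal sketch of incremental maintenance from the writer thread, using
+an illustrative helper; the cycle count of 128 and the use of
+.Xr rand 3
+for the seed are arbitrary choices:
+.Bd -literal -offset indent
+#include <ck_ht.h>
+#include <stdbool.h>
+#include <stdlib.h>
+
+/* Attempt maintenance on up to 128 slots starting at a random point.
+ * Passing cycles = 0 instead would process the complete table. */
+static bool
+ht_maintain(ck_ht_t *ht)
+{
+
+	return ck_ht_gc(ht, 128, (unsigned long)rand());
+}
+.Ed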
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_ht_gc
+returns
+.Dv true
+and otherwise returns
+.Dv false
+due to a memory allocation failure.
+.Sh ERRORS
+This function will only return false if there are internal memory allocation
+failures.
+.Sh SEE ALSO
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_get_spmc b/doc/ck_ht_get_spmc
new file mode 100644
index 0000000..91b9534
--- /dev/null
+++ b/doc/ck_ht_get_spmc
@@ -0,0 +1,177 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 29, 2012
+.Dt CK_HT_GET_SPMC 3
+.Sh NAME
+.Nm ck_ht_get_spmc
+.Nd load a key-value pair from a hash table
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft bool
+.Fn ck_ht_get_spmc "ck_ht_t *ht" "ck_ht_hash_t h" "ck_ht_entry_t *entry"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_get_spmc
+function will return the value associated with the key specified in the
+.Fa entry
+argument in the hash table pointed to by the
+.Fa ht
+argument. The key specified in
+.Fa entry
+is expected to have the hash value specified by the
+.Fa h
+argument.
+.Pp
+If
+.Fa ht
+was created with CK_HT_MODE_BYTESTRING then
+.Fa entry
+must have been initialized with the
+.Xr ck_ht_entry_key_set 3
+or
+.Xr ck_ht_entry_set 3
+functions. If
+.Fa ht
+was created with CK_HT_MODE_DIRECT then
+.Fa entry
+must have been initialized with the
+.Xr ck_ht_entry_key_set_direct 3
+or
+.Xr ck_ht_entry_set_direct 3
+functions.
+.Pp
+It is expected that
+.Fa h
+was initialized with
+.Xr ck_ht_hash 3
+if
+.Fa ht
+was created with CK_HT_MODE_BYTESTRING. If
+.Fa ht
+was initialized with CK_HT_MODE_DIRECT then it is
+expected that
+.Fa h
+was initialized with the
+.Xr ck_ht_hash_direct 3
+function.
+.Pp
+If the call to
+.Fn ck_ht_get_spmc
+is successful, then
+.Fa entry
+will contain the key-value pair as found in the hash table
+pointed to by the
+.Fa ht
+argument. The call will fail if the key specified in
+.Fa entry
+does not exist in the hash table.
+.Pp
+If
+.Fa ht
+was initialized with CK_HT_MODE_BYTESTRING then
+the key/value pair in
+.Fa entry
+may be extracted using the
+.Xr ck_ht_entry_key 3
+and
+.Xr ck_ht_entry_value 3
+functions. The length of the key may be extracted
+using the
+.Xr ck_ht_entry_key_length 3
+function.
+.Pp
+If
+.Fa ht
+was initialized with CK_HT_MODE_DIRECT then the
+key/value pair in
+.Fa entry
+may be extracted using the
+.Xr ck_ht_entry_key_direct 3
+and
+.Xr ck_ht_entry_value_direct 3
+functions.
+.Pp
+This function is safe to call in the presence of a concurrent writer.
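+.Pp
+The following sketch, using an illustrative helper, looks up a
+NUL-terminated key in a table created in CK_HT_MODE_BYTESTRING mode and
+returns the associated value pointer, or NULL if the key is absent:
+.Bd -literal -offset indent
+#include <ck_ht.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+/* Safe to call from readers concurrently with a single writer. */
+static void *
+ht_lookup(ck_ht_t *ht, const char *key)
+{
+	ck_ht_entry_t entry;
+	ck_ht_hash_t h;
+	uint16_t l = (uint16_t)strlen(key);
+
+	ck_ht_hash(&h, ht, key, l);
+	ck_ht_entry_key_set(&entry, key, l);
+	if (ck_ht_get_spmc(ht, h, &entry) == false)
+		return NULL;
+
+	return ck_ht_entry_value(&entry);
+}
+.Ed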
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_ht_get_spmc
+returns
+.Dv true
+and
+.Fa entry
+will contain the key-value pair as found
+in the hash table.
+Otherwise, the function returns
+.Dv false .
+.Sh ERRORS
+.Bl -tag -width Er
+Behavior is undefined if
+.Fa entry
+or
+.Fa ht
+are uninitialized. The function will return
+.Dv false
+if the key as specified in
+.Fa entry
+was not found in the hash table.
+.El
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_grow_spmc b/doc/ck_ht_grow_spmc
new file mode 100644
index 0000000..70e6055
--- /dev/null
+++ b/doc/ck_ht_grow_spmc
@@ -0,0 +1,98 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 29, 2012
+.Dt CK_HT_GROW_SPMC 3
+.Sh NAME
+.Nm ck_ht_grow_spmc
+.Nd resize a hash table if necessary
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft bool
+.Fn ck_ht_grow_spmc "ck_ht_t *ht" "uint64_t capacity"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_grow_spmc
+function will resize the hash table so that it can store at least
+the number of entries specified by
+.Fa capacity
+at a load factor of one. The default hash table load factor is
+0.5, so to minimize the likelihood of memory allocations
+for a hash table meant to store n entries, specify a capacity
+of 2n. The default behavior of ck_ht is to round
+.Fa capacity
+up to the next power of two if it is not already a power
+of two.
+.Pp
+This function is safe to call in the presence of concurrent
+.Xr ck_ht_get_spmc 3
+operations.
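+.Pp
+A minimal sketch, using an illustrative helper, that pre-sizes a table
+expected to hold n entries with a capacity of 2n as suggested above:
+.Bd -literal -offset indent
+#include <ck_ht.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+/* Reserve room for n entries to reduce later allocations. */
+static bool
+ht_reserve(ck_ht_t *ht, uint64_t n)
+{
+
+	return ck_ht_grow_spmc(ht, n * 2);
+}
+.Ed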
+.Sh RETURN VALUES
+Upon successful completion
+.Fn ck_ht_grow_spmc
+returns
+.Dv true
+and otherwise returns
+.Dv false .
+.Sh ERRORS
+.Bl -tag -width Er
+Behavior is undefined if
+.Fa ht
+is uninitialized. The function will only return
+.Dv false
+if there are internal memory allocation failures.
+.El
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_hash b/doc/ck_ht_hash
new file mode 100644
index 0000000..0ac5db9
--- /dev/null
+++ b/doc/ck_ht_hash
@@ -0,0 +1,90 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 29, 2012
+.Dt CK_HT_HASH 3
+.Sh NAME
+.Nm ck_ht_hash
+.Nd generate a hash value for a hash table
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft void
+.Fn ck_ht_hash "ck_ht_hash_t *h" "ck_ht_t *ht" "const void *key" "uint16_t key_length"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_hash
+function will generate a hash value in the object pointed to by the
+.Fa h
+argument. The hash value is valid for use in the hash table pointed to by the
+.Fa ht
+argument for the key (of bytestring type) specified by the
+.Fa key
+argument. The length of the key is specified by the
+.Fa key_length
+argument.
+.Sh RETURN VALUES
+.Fn ck_ht_hash
+has no return value.
+.Sh ERRORS
+.Bl -tag -width Er
+Behavior is undefined if
+.Fa key
+is
+.Dv NULL
+or if
+.Fa ht
+is uninitialized.
+.El
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_hash_direct b/doc/ck_ht_hash_direct
new file mode 100644
index 0000000..564099c
--- /dev/null
+++ b/doc/ck_ht_hash_direct
@@ -0,0 +1,90 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 29, 2012
+.Dt CK_HT_HASH_DIRECT 3
+.Sh NAME
+.Nm ck_ht_hash_direct
+.Nd generate a hash value for a hash table
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft void
+.Fn ck_ht_hash_direct "ck_ht_hash_t *h" "ck_ht_t *ht" "uintptr_t key"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_hash_direct
+function will generate a hash value in the object pointed to by the
+.Fa h
+argument. The hash value is valid for use in the hash table pointed to by the
+.Fa ht
+argument for the key (of direct type) specified by the
+.Fa key
+argument.
+.Sh RETURN VALUES
+.Fn ck_ht_hash_direct
+has no return value.
+.Sh ERRORS
+.Bl -tag -width Er
+Behavior is undefined if
+.Fa key
+is a
+.Dv 0
+or
+.Dv UINTPTR_MAX
+value or if
+.Fa ht
+is uninitialized.
+.El
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_init b/doc/ck_ht_init
new file mode 100644
index 0000000..757a39a
--- /dev/null
+++ b/doc/ck_ht_init
@@ -0,0 +1,188 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 28, 2012
+.Dt CK_HT_INIT 3
+.Sh NAME
+.Nm ck_ht_init
+.Nd initialize a hash table
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft typedef void
+.Fn ck_ht_hash_cb_t "ck_ht_hash_t *h" "const void *key" "size_t key_length" "uint64_t seed"
+.Ft bool
+.Fn ck_ht_init "ck_ht_t *ht" "enum ck_ht_mode mode" "ck_ht_hash_cb_t *hash_function" "struct ck_malloc *allocator" "uint64_t capacity" "uint64_t seed"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_init
+function initializes the hash table pointed to by the
+.Fa ht
+pointer.
+.Pp
+The argument
+.Fa mode
+specifies the type of key-value pairs to be stored in the
+hash table. The value of
+.Fa mode
+may be one of:
+.Bl -tag -width indent
+.It CK_HT_MODE_BYTESTRING
+The hash table is meant to store key-value pointers where
+the key is a region of memory that is up to 65536 bytes long.
+This pointer will be dereferenced during hash table operations
+for key comparison. Entries of this hash table are expected
+to be interacted with using the
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+and
+.Xr ck_ht_entry_set 3
+functions. Attempting a hash table operation with a key of value
+NULL or (void *)UINTPTR_MAX will result in undefined behavior.
+.It CK_HT_MODE_DIRECT
+The hash table is meant to store key-value pointers where
+the key is a fixed-width field compatible with the
+.Tn uintptr_t
+type. The key will be directly compared with other keys for
+equality. Entries of this hash table are expected to be interacted
+with using the
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3
+and
+.Xr ck_ht_entry_set_direct 3
+functions. Attempting a hash table operation with a key value of 0 or
+UINTPTR_MAX will result in undefined behavior.
+.El
+.Pp
+In addition to this, the user may bitwise OR the mode flag with
+CK_HT_WORKLOAD_DELETE to indicate that the hash table will
+have to handle a delete-heavy workload, in which case stronger
+bounds on latency can be provided at the cost of approximately
+13% higher memory usage.
+The argument
+.Fa hash_function
+is a pointer to a user-specified hash function. This argument is
+optional; if
+.Dv NULL
+is specified, then the default hash function implementation,
+.Xr ck_ht_hash 3 ,
+will be used.
+A user-specified hash function takes four arguments. The
+.Fa h
+argument is a pointer to a hash value object. The hash function
+is expected to update the
+.Fa value
+object of type
+.Fa uint64_t
+contained within the object pointed to by
+.Fa h .
+The
+.Fa key
+argument is a pointer to a key, the
+.Fa key_length
+argument is the length of the key and the
+.Fa seed
+argument is the initial seed associated with the hash table.
+This initial seed is specified by the user in
+.Xr ck_ht_init 3 .
+.Pp
+The
+.Fa allocator
+argument is a pointer to a structure containing
+.Fa malloc
+and
+.Fa free
+function pointers which respectively define the memory allocation and
+destruction functions to be used by the hash table being initialized.
+.Pp
+The argument
+.Fa capacity
+represents the initial number of key-value pairs the hash
+table is expected to contain. This argument is simply a hint
+and the underlying implementation is free to allocate more
+or less memory than necessary to contain the number of entries
+.Fa capacity
+specifies.
+.Pp
+The argument
+.Fa seed
+specifies the initial seed used by the underlying hash function.
+The user is free to choose any value.
+.Pp
+The hash table is safe to access by multiple readers in the presence
+of one concurrent writer. Behavior is undefined in the presence of
+concurrent writers.
+.Sh RETURN VALUES
+Upon successful completion
+.Fn ck_ht_init
+returns a value of
+.Dv true
+and otherwise returns a value of
+.Dv false
+to indicate an error.
+.Sh ERRORS
+.Bl -tag -width Er
+.Pp
+The behavior of
+.Fn ck_ht_init
+is undefined if
+.Fa ht
+is not a pointer to a
+.Tn ck_ht_t
+object.
+.El
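+.Sh EXAMPLE
+The following is a minimal sketch of initializing a byte string hash
+table backed by the C library allocator and the default hash function.
+The wrapper and variable names (ht_malloc, ht_free, my_allocator,
+table_setup) and the capacity and seed values are illustrative rather
+than part of the interface.
+.Bd -literal -offset indent
+#include <ck_ht.h>
+#include <stdbool.h>
+#include <stdlib.h>
+
+/* Hypothetical adapters from malloc/free to struct ck_malloc. */
+static void *
+ht_malloc(size_t r)
+{
+
+	return malloc(r);
+}
+
+static void
+ht_free(void *p, size_t b, bool r)
+{
+
+	(void)b;
+	(void)r;
+	free(p);
+	return;
+}
+
+static struct ck_malloc my_allocator = {
+	.malloc = ht_malloc,
+	.free = ht_free
+};
+
+static ck_ht_t ht;
+
+static void
+table_setup(void)
+{
+
+	/*
+	 * NULL selects the default hash function, 8 is the initial
+	 * capacity hint and 6602834 is an arbitrary seed.
+	 */
+	if (ck_ht_init(&ht, CK_HT_MODE_BYTESTRING, NULL,
+	    &my_allocator, 8, 6602834) == false)
+		abort();
+
+	return;
+}
+.Ed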
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_iterator_init b/doc/ck_ht_iterator_init
new file mode 100644
index 0000000..14f10c6
--- /dev/null
+++ b/doc/ck_ht_iterator_init
@@ -0,0 +1,88 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 30, 2012
+.Dt CK_HT_ITERATOR_INIT 3
+.Sh NAME
+.Nm ck_ht_iterator_init
+.Nd initialize hash table iterator
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Pp
+.Dv ck_ht_iterator_t iterator = CK_HT_ITERATOR_INITIALIZER
+.Pp
+.Ft void
+.Fn ck_ht_iterator_init "ck_ht_iterator_t *iterator"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_iterator_init
+function will initialize the object pointed to by
+the
+.Fa iterator
+argument. Alternatively, an iterator may be statically initialized
+by assigning it the
+.Dv CK_HT_ITERATOR_INITIALIZER
+value.
+.Pp
+An iterator is used to iterate through hash table entries
+with the
+.Xr ck_ht_next 3
+function.
+.Sh RETURN VALUES
+The
+.Fn ck_ht_iterator_init
+function does not return a value.
+.Sh ERRORS
+This function will not fail.
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_next b/doc/ck_ht_next
new file mode 100644
index 0000000..d0365a1
--- /dev/null
+++ b/doc/ck_ht_next
@@ -0,0 +1,107 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 30, 2012
+.Dt CK_HT_NEXT 3
+.Sh NAME
+.Nm ck_ht_next
+.Nd iterate to next entry in hash table
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft bool
+.Fn ck_ht_next "ck_ht_t *ht" "ck_ht_iterator_t *iterator" "ck_ht_entry_t **entry"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_next
+function will increment the iterator object pointed to by
+.Fa iterator
+to point to the next non-empty hash table entry. If
+.Fn ck_ht_next
+returns
+.Dv true
+then the pointer pointed to by
+.Fa entry
+is initialized to the current hash table entry pointed to by
+the
+.Fa iterator
+object.
+.Pp
+It is expected that
+.Fa iterator
+has been initialized using the
+.Xr ck_ht_iterator_init 3
+function or statically initialized using
+.Dv CK_HT_ITERATOR_INITIALIZER.
+.Sh RETURN VALUES
+If
+.Fn ck_ht_next
+returns
+.Dv true
+then the object pointed to by
+.Fa entry
+points to a valid hash table entry. If
+.Fn ck_ht_next
+returns
+.Dv false
+then the value of the object pointed to by
+.Fa entry
+is undefined.
+.Sh ERRORS
+Behavior is undefined if
+.Fa iterator
+or
+.Fa ht
+are uninitialized.
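+.Sh EXAMPLE
+A minimal iteration sketch, assuming
+.Fa ht
+is an initialized hash table created with CK_HT_MODE_BYTESTRING and
+that iteration does not race with a concurrent writer:
+.Bd -literal -offset indent
+ck_ht_iterator_t iterator = CK_HT_ITERATOR_INITIALIZER;
+ck_ht_entry_t *cursor;
+
+while (ck_ht_next(&ht, &iterator, &cursor) == true) {
+	void *key = ck_ht_entry_key(cursor);
+	uint16_t key_length = ck_ht_entry_key_length(cursor);
+	void *value = ck_ht_entry_value(cursor);
+
+	/* Process key, key_length and value here. */
+}
+.Ed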
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_put_spmc b/doc/ck_ht_put_spmc
new file mode 100644
index 0000000..f5a6389
--- /dev/null
+++ b/doc/ck_ht_put_spmc
@@ -0,0 +1,146 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 29, 2012
+.Dt CK_HT_PUT_SPMC 3
+.Sh NAME
+.Nm ck_ht_put_spmc
+.Nd store unique key-value pair into hash table
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft bool
+.Fn ck_ht_put_spmc "ck_ht_t *ht" "ck_ht_hash_t h" "ck_ht_entry_t *entry"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_put_spmc
+function will store the key-value pair specified in the
+.Fa entry
+argument in the hash table pointed to by the
+.Fa ht
+argument. The key specified in
+.Fa entry
+is expected to have the hash value specified by the
+.Fa h
+argument.
+.Pp
+If
+.Fa ht
+was created with CK_HT_MODE_BYTESTRING then
+.Fa entry
+must have been initialized with the
+.Xr ck_ht_entry_set 3
+function. If
+.Fa ht
+was created with CK_HT_MODE_DIRECT then
+.Fa entry
+must have been initialized with the
+.Xr ck_ht_entry_set_direct 3
+function.
+.Pp
+It is expected that
+.Fa h
+was initialized with
+.Xr ck_ht_hash 3
+if
+.Fa ht
+was created with CK_HT_MODE_BYTESTRING. If
+.Fa ht
+was initialized with CK_HT_MODE_DIRECT then it is
+expected that
+.Fa h
+was initialized with the
+.Xr ck_ht_hash_direct 3
+function.
+.Pp
+If the call to
+.Fn ck_ht_put_spmc
+is successful then the key-value pair in
+.Fa entry
+has been stored in the hash table pointed
+to by
+.Fa ht .
+The call will fail if the key specified in
+.Fa entry
+already exists within the hash table. Replacement semantics
+are provided by the
+.Xr ck_ht_set_spmc 3
+function.
+.Pp
+This function is safe to call in the presence of concurrent
+.Xr ck_ht_get_spmc 3
+operations.
+.Sh RETURN VALUES
+Upon successful completion
+.Fn ck_ht_put_spmc
+returns
+.Dv true
+and otherwise returns
+.Dv false
+on failure.
+.Sh ERRORS
+.Bl -tag -width Er
+Behavior is undefined if
+.Fa entry
+or
+.Fa ht
+are uninitialized. The function will return
+.Dv false
+if the hash table needed to be grown but the grow
+operation failed, or if the key specified
+in
+.Fa entry
+was already present in the hash table.
+.El
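+.Sh EXAMPLE
+A sketch of inserting a byte string key, assuming
+.Fa ht
+was initialized with CK_HT_MODE_BYTESTRING and that the illustrative
+pointer object refers to the value to be stored:
+.Bd -literal -offset indent
+ck_ht_entry_t entry;
+ck_ht_hash_t h;
+const char *key = "example_key";
+size_t key_length = strlen(key);
+
+/* Hash the key, populate the entry and attempt the insertion. */
+ck_ht_hash(&h, &ht, key, key_length);
+ck_ht_entry_set(&entry, h, key, key_length, object);
+if (ck_ht_put_spmc(&ht, h, &entry) == false) {
+	/* The key was already present or the table failed to grow. */
+}
+.Ed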
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_remove_spmc b/doc/ck_ht_remove_spmc
new file mode 100644
index 0000000..a263866
--- /dev/null
+++ b/doc/ck_ht_remove_spmc
@@ -0,0 +1,117 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 29, 2012
+.Dt CK_HT_REMOVE_SPMC 3
+.Sh NAME
+.Nm ck_ht_remove_spmc
+.Nd remove a key-value pair from a hash table
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft bool
+.Fn ck_ht_remove_spmc "ck_ht_t *ht" "ck_ht_hash_t h" "ck_ht_entry_t *entry"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_remove_spmc
+function will remove the key-value pair associated with the
+key specified by the
+.Fa entry
+argument.
+.Pp
+If
+.Fa ht
+was created with CK_HT_MODE_BYTESTRING then
+.Fa entry
+must have been initialized with the
+.Xr ck_ht_entry_key_set 3
+or
+.Xr ck_ht_entry_set 3
+functions. If
+.Fa ht
+was created with CK_HT_MODE_DIRECT then
+.Fa entry
+must have been initialized with the
+.Xr ck_ht_entry_key_set_direct 3
+or
+.Xr ck_ht_entry_set_direct 3
+functions.
+.Pp
+It is expected that
+.Fa h
+was initialized with
+.Xr ck_ht_hash 3
+if
+.Fa ht
+was created with CK_HT_MODE_BYTESTRING. If
+.Fa ht
+was initialized with CK_HT_MODE_DIRECT then it is
+expected that
+.Fa h
+was initialized with the
+.Xr ck_ht_hash_direct 3
+function.
+.Sh RETURN VALUES
+If successful,
+.Fa entry
+will contain the key-value pair that was found in the hash table
+and
+.Fn ck_ht_remove_spmc
+will return
+.Dv true.
+If the entry could not be found then
+.Fn ck_ht_remove_spmc
+will return
+.Dv false.
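+.Sh EXAMPLE
+A removal sketch, assuming
+.Fa ht
+was initialized with CK_HT_MODE_BYTESTRING and that the illustrative
+variables key and key_length describe the key to remove:
+.Bd -literal -offset indent
+ck_ht_entry_t entry;
+ck_ht_hash_t h;
+
+ck_ht_hash(&h, &ht, key, key_length);
+ck_ht_entry_key_set(&entry, key, key_length);
+if (ck_ht_remove_spmc(&ht, h, &entry) == true) {
+	/*
+	 * The removed pair is available through the entry object,
+	 * for example via ck_ht_entry_value(&entry).
+	 */
+}
+.Ed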
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_reset_size_spmc b/doc/ck_ht_reset_size_spmc
new file mode 100644
index 0000000..4308380
--- /dev/null
+++ b/doc/ck_ht_reset_size_spmc
@@ -0,0 +1,84 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd May 5, 2013
+.Dt CK_HT_RESET_SIZE_SPMC 3
+.Sh NAME
+.Nm ck_ht_reset_size_spmc
+.Nd remove all entries from a hash table and reset size
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft bool
+.Fn ck_ht_reset_size_spmc "ck_ht_t *ht" "uint64_t capacity"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_reset_size_spmc
+function will remove all key-value pairs stored in the hash
+table pointed to by the
+.Fa ht
+argument and create a new generation of the hash table that
+is preallocated for
+.Fa capacity
+entries.
+.Sh RETURN VALUES
+If successful,
+.Fn ck_ht_reset_size_spmc
+will return
+.Dv true
+and will otherwise return
+.Dv false.
+This function will only fail if a replacement hash table
+could not be allocated internally.
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_reset_spmc b/doc/ck_ht_reset_spmc
new file mode 100644
index 0000000..dc2e601
--- /dev/null
+++ b/doc/ck_ht_reset_spmc
@@ -0,0 +1,81 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 29, 2012
+.Dt CK_HT_RESET_SPMC 3
+.Sh NAME
+.Nm ck_ht_reset_spmc
+.Nd remove all entries from a hash table
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft bool
+.Fn ck_ht_reset_spmc "ck_ht_t *ht"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_reset_spmc
+function will remove all key-value pairs stored in the hash
+table pointed to by the
+.Fa ht
+argument.
+.Sh RETURN VALUES
+If successful,
+.Fn ck_ht_reset_spmc
+will return
+.Dv true
+and will otherwise return
+.Dv false.
+This function will only fail if a replacement hash table
+could not be allocated internally.
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_set_spmc b/doc/ck_ht_set_spmc
new file mode 100644
index 0000000..e0fe1ae
--- /dev/null
+++ b/doc/ck_ht_set_spmc
@@ -0,0 +1,140 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd March 29, 2012
+.Dt CK_HT_SET_SPMC 3
+.Sh NAME
+.Nm ck_ht_set_spmc
+.Nd store key-value pair into hash table
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft bool
+.Fn ck_ht_set_spmc "ck_ht_t *ht" "ck_ht_hash_t h" "ck_ht_entry_t *entry"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_set_spmc
+function will store the key-value pair specified in the
+.Fa entry
+argument in the hash table pointed to by the
+.Fa ht
+argument. The key specified in
+.Fa entry
+is expected to have the hash value specified by the
+.Fa h
+argument.
+.Pp
+If
+.Fa ht
+was created with CK_HT_MODE_BYTESTRING then
+.Fa entry
+must have been initialized with the
+.Xr ck_ht_entry_set 3
+function. If
+.Fa ht
+was created with CK_HT_MODE_DIRECT then
+.Fa entry
+must have been initialized with the
+.Xr ck_ht_entry_set_direct 3
+function.
+.Pp
+It is expected that
+.Fa h
+was initialized with
+.Xr ck_ht_hash 3
+if
+.Fa ht
+was created with CK_HT_MODE_BYTESTRING. If
+.Fa ht
+was initialized with CK_HT_MODE_DIRECT then it is
+expected that
+.Fa h
+was initialized with the
+.Xr ck_ht_hash_direct 3
+function.
+.Pp
+If the call to
+.Fn ck_ht_set_spmc
+is successful then
+.Fa entry
+will contain the key-value pair that was previously associated
+with the key originally specified in the
+.Fa entry
+argument. If the operation was unsuccessful then
+.Fa entry
+is unmodified.
+.Pp
+This function is safe to call in the presence of concurrent
+.Xr ck_ht_get_spmc 3
+operations.
+.Sh RETURN VALUES
+Upon successful completion
+.Fn ck_ht_set_spmc
+returns
+.Dv true
+and otherwise returns
+.Dv false
+on failure.
+.Sh ERRORS
+.Bl -tag -width Er
+Behavior is undefined if
+.Fa entry
+or
+.Fa ht
+are uninitialized. The function will return
+.Dv false
+if the hash table needed to be grown but the grow
+operation failed.
+.El
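+.Sh EXAMPLE
+A replacement sketch, assuming
+.Fa ht
+was initialized with CK_HT_MODE_BYTESTRING and that the illustrative
+pointer new_object refers to the value to associate with the key:
+.Bd -literal -offset indent
+ck_ht_entry_t entry;
+ck_ht_hash_t h;
+const char *key = "example_key";
+size_t key_length = strlen(key);
+
+ck_ht_hash(&h, &ht, key, key_length);
+ck_ht_entry_set(&entry, h, key, key_length, new_object);
+if (ck_ht_set_spmc(&ht, h, &entry) == true) {
+	/*
+	 * On success, entry holds the key-value pair previously
+	 * associated with the key, if any.
+	 */
+}
+.Ed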
+.Sh SEE ALSO
+.Xr ck_ht_stat 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ht_stat b/doc/ck_ht_stat
new file mode 100644
index 0000000..6d2f1cd
--- /dev/null
+++ b/doc/ck_ht_stat
@@ -0,0 +1,85 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_HT_STAT 3
+.Sh NAME
+.Nm ck_ht_stat
+.Nd get hash table status
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ht.h
+.Ft void
+.Fn ck_ht_stat "ck_ht_t *ht" "struct ck_ht_stat *st"
+.Sh DESCRIPTION
+The
+.Fn ck_ht_stat
+function will store various hash table statistics in the object
+pointed to by
+.Fa st .
+The ck_ht_stat structure is defined as follows:
+.Bd -literal -offset indent
+struct ck_ht_stat {
+	uint64_t probe_maximum; /* Longest read-side probe sequence. */
+	uint64_t n_entries;     /* Current number of keys in hash table. */
+};
+.Ed
+.Sh RETURN VALUES
+.Fn ck_ht_stat
+has no return value.
+.Sh ERRORS
+Behavior is undefined if
+.Fa ht
+has not been initialized.
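+.Sh EXAMPLE
+A short usage sketch, assuming
+.Fa ht
+is an initialized hash table:
+.Bd -literal -offset indent
+struct ck_ht_stat st;
+uint64_t entries;
+
+ck_ht_stat(&ht, &st);
+entries = st.n_entries;	/* Current number of keys. */
+/* st.probe_maximum is the longest read-side probe sequence observed. */
+.Ed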
+.Sh SEE ALSO
+.Xr ck_ht_count 3 ,
+.Xr ck_ht_init 3 ,
+.Xr ck_ht_destroy 3 ,
+.Xr ck_ht_hash 3 ,
+.Xr ck_ht_hash_direct 3 ,
+.Xr ck_ht_set_spmc 3 ,
+.Xr ck_ht_put_spmc 3 ,
+.Xr ck_ht_gc 3 ,
+.Xr ck_ht_get_spmc 3 ,
+.Xr ck_ht_grow_spmc 3 ,
+.Xr ck_ht_remove_spmc 3 ,
+.Xr ck_ht_reset_spmc 3 ,
+.Xr ck_ht_reset_size_spmc 3 ,
+.Xr ck_ht_entry_empty 3 ,
+.Xr ck_ht_entry_key_set 3 ,
+.Xr ck_ht_entry_key_set_direct 3 ,
+.Xr ck_ht_entry_key 3 ,
+.Xr ck_ht_entry_key_length 3 ,
+.Xr ck_ht_entry_value 3 ,
+.Xr ck_ht_entry_set 3 ,
+.Xr ck_ht_entry_set_direct 3 ,
+.Xr ck_ht_entry_key_direct 3 ,
+.Xr ck_ht_entry_value_direct 3 ,
+.Xr ck_ht_iterator_init 3 ,
+.Xr ck_ht_next 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pflock b/doc/ck_pflock
new file mode 100644
index 0000000..6fea701
--- /dev/null
+++ b/doc/ck_pflock
@@ -0,0 +1,95 @@
+.\"
+.\" Copyright 2014 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 22, 2014
+.Dt ck_pflock 3
+.Sh NAME
+.Nm ck_pflock_init ,
+.Nm ck_pflock_write_lock ,
+.Nm ck_pflock_write_unlock ,
+.Nm ck_pflock_read_lock ,
+.Nm ck_pflock_read_unlock
+.Nd centralized phase-fair reader-writer locks
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pflock.h
+.Pp
+.Dv ck_pflock_t lock = CK_PFLOCK_INITIALIZER;
+.Pp
+.Ft void
+.Fn ck_pflock_init "ck_pflock_t *lock"
+.Ft void
+.Fn ck_pflock_write_lock "ck_pflock_t *lock"
+.Ft void
+.Fn ck_pflock_write_unlock "ck_pflock_t *lock"
+.Ft void
+.Fn ck_pflock_read_lock "ck_pflock_t *lock"
+.Ft void
+.Fn ck_pflock_read_unlock "ck_pflock_t *lock"
+.Sh DESCRIPTION
+This is a centralized phase-fair reader-writer lock. It
+requires little space overhead and has a low-latency
+fast path.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <ck_pflock.h>
+
+static ck_pflock_t lock = CK_PFLOCK_INITIALIZER;
+
+static void
+reader(void)
+{
+
+ for (;;) {
+ ck_pflock_read_lock(&lock);
+ /* Read-side critical section. */
+ ck_pflock_read_unlock(&lock);
+ }
+
+ return;
+}
+
+static void
+writer(void)
+{
+
+ for (;;) {
+ ck_pflock_write_lock(&lock);
+ /* Write-side critical section. */
+ ck_pflock_write_unlock(&lock);
+ }
+
+ return;
+}
+.Ed
+.Sh SEE ALSO
+.Xr ck_brlock 3 ,
+.Xr ck_rwlock 3 ,
+.Xr ck_tflock 3 ,
+.Xr ck_swlock 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr b/doc/ck_pr
new file mode 100644
index 0000000..67c726f
--- /dev/null
+++ b/doc/ck_pr
@@ -0,0 +1,71 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 7, 2013
+.Dt ck_pr 3
+.Sh NAME
+.Nm ck_pr
+.Nd concurrency primitives interface
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Sh DESCRIPTION
+ck_pr.h provides an interface to volatile atomic instructions,
+memory barriers and busy-wait facilities as provided by the
+underlying processor. The presence of an atomic operation
+is detected by the presence of a corresponding CK_F_PR macro.
+For example, the availability of
+.Xr ck_pr_add_16 3
+would be determined by the presence of CK_F_PR_ADD_16.
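+.Sh EXAMPLE
+A feature-detection sketch; the function name bump and the fallback
+branch are illustrative:
+.Bd -literal -offset indent
+#include <ck_pr.h>
+#include <stdint.h>
+
+#if defined(CK_F_PR_ADD_16)
+/* ck_pr_add_16 is available on this target. */
+static void
+bump(uint16_t *counter)
+{
+
+	ck_pr_add_16(counter, 1);
+	return;
+}
+#else
+/* Fall back to a different synchronization strategy here. */
+#endif
+.Ed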
+.Sh SEE ALSO
+.Xr ck_pr_stall 3 ,
+.Xr ck_pr_fence_acquire 3 ,
+.Xr ck_pr_fence_release 3 ,
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_barrier 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_add b/doc/ck_pr_add
new file mode 100644
index 0000000..b4d394a
--- /dev/null
+++ b/doc/ck_pr_add
@@ -0,0 +1,93 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 11, 2013
+.Dt ck_pr_add 3
+.Sh NAME
+.Nm ck_pr_add_ptr ,
+.Nm ck_pr_add_double ,
+.Nm ck_pr_add_char ,
+.Nm ck_pr_add_uint ,
+.Nm ck_pr_add_int ,
+.Nm ck_pr_add_64 ,
+.Nm ck_pr_add_32 ,
+.Nm ck_pr_add_16 ,
+.Nm ck_pr_add_8
+.Nd atomic addition operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_add_ptr "void *target" "uintptr_t delta"
+.Ft void
+.Fn ck_pr_add_double "double *target" "double delta"
+.Ft void
+.Fn ck_pr_add_char "char *target" "char delta"
+.Ft void
+.Fn ck_pr_add_uint "unsigned int *target" "unsigned int delta"
+.Ft void
+.Fn ck_pr_add_int "int *target" "int delta"
+.Ft void
+.Fn ck_pr_add_64 "uint64_t *target" "uint64_t delta"
+.Ft void
+.Fn ck_pr_add_32 "uint32_t *target" "uint32_t delta"
+.Ft void
+.Fn ck_pr_add_16 "uint16_t *target" "uint16_t delta"
+.Ft void
+.Fn ck_pr_add_8 "uint8_t *target" "uint8_t delta"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_add 3
+family of functions atomically add the value specified by
+.Fa delta
+to the value pointed to by
+.Fa target .
+.Sh RETURN VALUES
+This family of functions does not have a return value.
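+.Sh EXAMPLE
+A sketch of a shared statistics counter; the variable and function
+names are illustrative:
+.Bd -literal -offset indent
+#include <ck_pr.h>
+
+static unsigned int bytes_received;
+
+void
+account(unsigned int length)
+{
+
+	/* Atomically add length to the shared counter. */
+	ck_pr_add_uint(&bytes_received, length);
+	return;
+}
+.Ed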
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_and b/doc/ck_pr_and
new file mode 100644
index 0000000..56ce5af
--- /dev/null
+++ b/doc/ck_pr_and
@@ -0,0 +1,93 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 11, 2013
+.Dt ck_pr_and 3
+.Sh NAME
+.Nm ck_pr_and_ptr ,
+.Nm ck_pr_and_char ,
+.Nm ck_pr_and_uint ,
+.Nm ck_pr_and_int ,
+.Nm ck_pr_and_64 ,
+.Nm ck_pr_and_32 ,
+.Nm ck_pr_and_16 ,
+.Nm ck_pr_and_8
+.Nd atomic bitwise-and operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_and_ptr "void *target" "uintptr_t delta"
+.Ft void
+.Fn ck_pr_and_char "char *target" "char delta"
+.Ft void
+.Fn ck_pr_and_uint "unsigned int *target" "unsigned int delta"
+.Ft void
+.Fn ck_pr_and_int "int *target" "int delta"
+.Ft void
+.Fn ck_pr_and_64 "uint64_t *target" "uint64_t delta"
+.Ft void
+.Fn ck_pr_and_32 "uint32_t *target" "uint32_t delta"
+.Ft void
+.Fn ck_pr_and_16 "uint16_t *target" "uint16_t delta"
+.Ft void
+.Fn ck_pr_and_8 "uint8_t *target" "uint8_t delta"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_and 3
+family of functions atomically compute and store the
+result of a bitwise-and of the value pointed to by
+.Fa target
+and
+.Fa delta
+into the value pointed to by
+.Fa target .
+.Sh RETURN VALUES
+This family of functions does not have a return value.
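+.Sh EXAMPLE
+A sketch that atomically clears flag bits in a shared word; the flag
+layout and names are illustrative:
+.Bd -literal -offset indent
+#include <ck_pr.h>
+#include <stdint.h>
+
+#define FLAG_DIRTY	0x1U
+#define FLAG_LOCKED	0x2U
+
+static uint32_t flags;
+
+void
+clear_dirty_and_locked(void)
+{
+
+	/* Atomically retain every bit except FLAG_DIRTY and FLAG_LOCKED. */
+	ck_pr_and_32(&flags, ~(FLAG_DIRTY | FLAG_LOCKED));
+	return;
+}
+.Ed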
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_barrier b/doc/ck_pr_barrier
new file mode 100644
index 0000000..3886729
--- /dev/null
+++ b/doc/ck_pr_barrier
@@ -0,0 +1,66 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 7, 2013
+.Dt ck_pr_barrier 3
+.Sh NAME
+.Nm ck_pr_barrier
+.Nd compiler optimization barrier
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_barrier void
+.Sh DESCRIPTION
+The
+.Fn ck_pr_barrier 3
+function is used to disable code movement optimizations
+across the invocation of the function.
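+.Sh EXAMPLE
+A sketch of the intended use as a compiler-only barrier; note that it
+does not emit a processor memory fence:
+.Bd -literal -offset indent
+#include <ck_pr.h>
+
+static int buffer[16];
+static int buffer_ready;
+
+void
+publish(void)
+{
+
+	buffer[0] = 42;
+
+	/*
+	 * Prevent the compiler from sinking the buffer store below the
+	 * flag store. A store fence (ck_pr_fence_store) would additionally
+	 * be required for ordering on weakly ordered processors.
+	 */
+	ck_pr_barrier();
+	buffer_ready = 1;
+	return;
+}
+.Ed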
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_btc b/doc/ck_pr_btc
new file mode 100644
index 0000000..5956221
--- /dev/null
+++ b/doc/ck_pr_btc
@@ -0,0 +1,90 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 11, 2013
+.Dt ck_pr_btc 3
+.Sh NAME
+.Nm ck_pr_btc_ptr ,
+.Nm ck_pr_btc_uint ,
+.Nm ck_pr_btc_int ,
+.Nm ck_pr_btc_64 ,
+.Nm ck_pr_btc_32 ,
+.Nm ck_pr_btc_16
+.Nd atomic bit test-and-complement operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft bool
+.Fn ck_pr_btc_ptr "void *target" "unsigned int bit_index"
+.Ft bool
+.Fn ck_pr_btc_uint "unsigned int *target" "unsigned int bit_index"
+.Ft bool
+.Fn ck_pr_btc_int "int *target" "unsigned int bit_index"
+.Ft bool
+.Fn ck_pr_btc_64 "uint64_t *target" "unsigned int bit_index"
+.Ft bool
+.Fn ck_pr_btc_32 "uint32_t *target" "unsigned int bit_index"
+.Ft bool
+.Fn ck_pr_btc_16 "uint16_t *target" "unsigned int bit_index"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_btc 3
+family of functions atomically fetch the value
+of the bit in
+.Fa target
+at index
+.Fa bit_index
+and set that bit to its complement.
+.Sh RETURN VALUES
+This family of functions returns the original value of
+the bit at offset
+.Fa bit_index
+that is in the value pointed to by
+.Fa target .
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_btr 3 ,
+.Xr ck_pr_cas 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_btr b/doc/ck_pr_btr
new file mode 100644
index 0000000..d5e03fd
--- /dev/null
+++ b/doc/ck_pr_btr
@@ -0,0 +1,90 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 11, 2013
+.Dt ck_pr_btr 3
+.Sh NAME
+.Nm ck_pr_btr_ptr ,
+.Nm ck_pr_btr_uint ,
+.Nm ck_pr_btr_int ,
+.Nm ck_pr_btr_64 ,
+.Nm ck_pr_btr_32 ,
+.Nm ck_pr_btr_16
+.Nd atomic bit test-and-reset operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft bool
+.Fn ck_pr_btr_ptr "void *target" "unsigned int bit_index"
+.Ft bool
+.Fn ck_pr_btr_uint "unsigned int *target" "unsigned int bit_index"
+.Ft bool
+.Fn ck_pr_btr_int "int *target" "unsigned int bit_index"
+.Ft bool
+.Fn ck_pr_btr_64 "uint64_t *target" "unsigned int bit_index"
+.Ft bool
+.Fn ck_pr_btr_32 "uint32_t *target" "unsigned int bit_index"
+.Ft bool
+.Fn ck_pr_btr_16 "uint16_t *target" "unsigned int bit_index"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_btr 3
+family of functions atomically fetch the value
+of the bit in
+.Fa target
+at index
+.Fa bit_index
+and set that bit to 0.
+.Sh RETURN VALUES
+This family of functions returns the original value of
+the bit at offset
+.Fa bit_index
+that is in the value pointed to by
+.Fa target .
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_cas 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_bts b/doc/ck_pr_bts
new file mode 100644
index 0000000..955855d
--- /dev/null
+++ b/doc/ck_pr_bts
@@ -0,0 +1,90 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 11, 2013
+.Dt ck_pr_bts 3
+.Sh NAME
+.Nm ck_pr_bts_ptr ,
+.Nm ck_pr_bts_uint ,
+.Nm ck_pr_bts_int ,
+.Nm ck_pr_bts_64 ,
+.Nm ck_pr_bts_32 ,
+.Nm ck_pr_bts_16
+.Nd atomic bit test-and-set operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft bool
+.Fn ck_pr_bts_ptr "void *target" "unsigned int bit_index"
+.Ft bool
+.Fn ck_pr_bts_uint "unsigned int *target" "unsigned int bit_index"
+.Ft bool
+.Fn ck_pr_bts_int "int *target" "unsigned int bit_index"
+.Ft bool
+.Fn ck_pr_bts_64 "uint64_t *target" "unsigned int bit_index"
+.Ft bool
+.Fn ck_pr_bts_32 "uint32_t *target" "unsigned int bit_index"
+.Ft bool
+.Fn ck_pr_bts_16 "uint16_t *target" "unsigned int bit_index"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_bts 3
+family of functions atomically fetch the value
+of the bit in
+.Fa target
+at index
+.Fa bit_index
+and set that bit to 1.
+.Sh RETURN VALUES
+This family of functions returns the original value of
+the bit at offset
+.Fa bit_index
+that is in the value pointed to by
+.Fa target .
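+.Sh EXAMPLE
+A sketch that claims a slot in a shared bitmap; the bitmap layout and
+function name are illustrative:
+.Bd -literal -offset indent
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+static uint32_t slot_bitmap;
+
+/* Returns true if the calling thread claimed slot slot_index. */
+bool
+claim_slot(unsigned int slot_index)
+{
+
+	/* The return value is the previous state of the bit. */
+	return ck_pr_bts_32(&slot_bitmap, slot_index) == false;
+}
+.Ed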
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_btr 3 ,
+.Xr ck_pr_cas 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_cas b/doc/ck_pr_cas
new file mode 100644
index 0000000..9d1d39b
--- /dev/null
+++ b/doc/ck_pr_cas
@@ -0,0 +1,147 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 11, 2013
+.Dt ck_pr_cas 3
+.Sh NAME
+.Nm ck_pr_cas_ptr ,
+.Nm ck_pr_cas_ptr_value ,
+.Nm ck_pr_cas_ptr_2 ,
+.Nm ck_pr_cas_ptr_2_value ,
+.Nm ck_pr_cas_double ,
+.Nm ck_pr_cas_double_value ,
+.Nm ck_pr_cas_char ,
+.Nm ck_pr_cas_char_value ,
+.Nm ck_pr_cas_uint ,
+.Nm ck_pr_cas_uint_value ,
+.Nm ck_pr_cas_int ,
+.Nm ck_pr_cas_int_value ,
+.Nm ck_pr_cas_64_2 ,
+.Nm ck_pr_cas_64_2_value ,
+.Nm ck_pr_cas_64 ,
+.Nm ck_pr_cas_64_value ,
+.Nm ck_pr_cas_32 ,
+.Nm ck_pr_cas_32_value ,
+.Nm ck_pr_cas_16 ,
+.Nm ck_pr_cas_16_value ,
+.Nm ck_pr_cas_8 ,
+.Nm ck_pr_cas_8_value
+.Nd atomic compare-and-swap operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft bool
+.Fn ck_pr_cas_ptr "void *target" "void *old_value" "void *new_value"
+.Ft bool
+.Fn ck_pr_cas_ptr_value "void *target" "void *old_value" "void *new_value" "void *original_value"
+.Ft bool
+.Fn ck_pr_cas_ptr_2 "void *target" "void *old_value" "void *new_value"
+.Ft bool
+.Fn ck_pr_cas_ptr_2_value "void *target" "void *old_value" "void *new_value" "void *original_value"
+.Ft bool
+.Fn ck_pr_cas_double "double *target" "double old_value" "double new_value"
+.Ft bool
+.Fn ck_pr_cas_double_value "double *target" "double old_value" "double new_value" "double *original_value"
+.Ft bool
+.Fn ck_pr_cas_char "char *target" "char old_value" "char new_value"
+.Ft bool
+.Fn ck_pr_cas_char_value "char *target" "char old_value" "char new_value" "char *original_value"
+.Ft bool
+.Fn ck_pr_cas_uint "unsigned int *target" "unsigned int old_value" "unsigned int new_value"
+.Ft bool
+.Fn ck_pr_cas_uint_value "unsigned int *target" "unsigned int old_value" "unsigned int new_value" "unsigned int *original_value"
+.Ft bool
+.Fn ck_pr_cas_int "int *target" "int old_value" "int new_value"
+.Ft bool
+.Fn ck_pr_cas_int_value "int *target" "int old_value" "int new_value" "int *original_value"
+.Ft bool
+.Fn ck_pr_cas_64_2 "uint64_t target[static 2]" "uint64_t old_value[static 2]" "uint64_t new_value[static 2]"
+.Ft bool
+.Fn ck_pr_cas_64_2_value "uint64_t target[static 2]" "uint64_t old_value[static 2]" "uint64_t new_value[static 2]" "uint64_t original_value[static 2]"
+.Ft bool
+.Fn ck_pr_cas_64 "uint64_t *target" "uint64_t old_value" "uint64_t new_value"
+.Ft bool
+.Fn ck_pr_cas_64_value "uint64_t *target" "uint64_t old_value" "uint64_t new_value" "uint64_t *original_value"
+.Ft bool
+.Fn ck_pr_cas_32 "uint32_t *target" "uint32_t old_value" "uint32_t new_value"
+.Ft bool
+.Fn ck_pr_cas_32_value "uint32_t *target" "uint32_t old_value" "uint32_t new_value" "uint32_t *original_value"
+.Ft bool
+.Fn ck_pr_cas_16 "uint16_t *target" "uint16_t old_value" "uint16_t new_value"
+.Ft bool
+.Fn ck_pr_cas_16_value "uint16_t *target" "uint16_t old_value" "uint16_t new_value" "uint16_t *original_value"
+.Ft bool
+.Fn ck_pr_cas_8 "uint8_t *target" "uint8_t old_value" "uint8_t new_value"
+.Ft bool
+.Fn ck_pr_cas_8_value "uint8_t *target" "uint8_t old_value" "uint8_t new_value" "uint8_t *original_value"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_cas 3
+family of functions atomically compare the value in
+.Fa target
+for equality with
+.Fa old_value
+and if so, replace the value pointed to by
+.Fa target
+with the value specified by
+.Fa new_value .
+If the value in
+.Fa target
+was not equal to the value specified by
+.Fa old_value
+then no modifications occur to the value in
+.Fa target .
+The *_value forms of these functions unconditionally update the value
+pointed to by
+.Fa original_value
+with the value observed in
+.Fa target .
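+.Sh EXAMPLE
+The following is a minimal sketch of a compare-and-swap retry loop that
+atomically raises a shared maximum; it relies only on the
+ck_pr_cas_int_value and ck_pr_load_int interfaces, and the function
+and variable names are illustrative.
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+#include <stdbool.h>
+
+static int shared_max = 0;
+
+/* Atomically raise shared_max to at least sample. */
+void
+update_max(int sample)
+{
+	int snapshot = ck_pr_load_int(&shared_max);
+
+	while (snapshot < sample) {
+		/*
+		 * snapshot is unconditionally refreshed with the
+		 * value observed in shared_max; on failure the
+		 * loop re-evaluates against it.
+		 */
+		if (ck_pr_cas_int_value(&shared_max, snapshot,
+		    sample, &snapshot) == true)
+			break;
+	}
+
+	return;
+}
+.Ed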
+.Sh RETURN VALUES
+These functions return true if the value in
+.Fa target
+was modified as a result of the operation. Otherwise, they
+return false.
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_dec b/doc/ck_pr_dec
new file mode 100644
index 0000000..f3d34dd
--- /dev/null
+++ b/doc/ck_pr_dec
@@ -0,0 +1,124 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 7, 2013
+.Dt ck_pr_dec 3
+.Sh NAME
+.Nm ck_pr_dec_ptr ,
+.Nm ck_pr_dec_ptr_zero ,
+.Nm ck_pr_dec_double ,
+.Nm ck_pr_dec_double_zero ,
+.Nm ck_pr_dec_char ,
+.Nm ck_pr_dec_char_zero ,
+.Nm ck_pr_dec_uint ,
+.Nm ck_pr_dec_uint_zero ,
+.Nm ck_pr_dec_int ,
+.Nm ck_pr_dec_int_zero ,
+.Nm ck_pr_dec_64 ,
+.Nm ck_pr_dec_64_zero ,
+.Nm ck_pr_dec_32 ,
+.Nm ck_pr_dec_32_zero ,
+.Nm ck_pr_dec_16 ,
+.Nm ck_pr_dec_16_zero ,
+.Nm ck_pr_dec_8 ,
+.Nm ck_pr_dec_8_zero
+.Nd atomic decrement operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_dec_ptr "void *target"
+.Ft void
+.Fn ck_pr_dec_ptr_zero "void *target" "bool *z"
+.Ft void
+.Fn ck_pr_dec_double "double *target"
+.Ft void
+.Fn ck_pr_dec_double_zero "double *target" "bool *z"
+.Ft void
+.Fn ck_pr_dec_char "char *target"
+.Ft void
+.Fn ck_pr_dec_char_zero "char *target" "bool *z"
+.Ft void
+.Fn ck_pr_dec_uint "unsigned int *target"
+.Ft void
+.Fn ck_pr_dec_uint_zero "unsigned int *target" "bool *z"
+.Ft void
+.Fn ck_pr_dec_int "int *target"
+.Ft void
+.Fn ck_pr_dec_int_zero "int *target" "bool *z"
+.Ft void
+.Fn ck_pr_dec_64 "uint64_t *target"
+.Ft void
+.Fn ck_pr_dec_64_zero "uint64_t *target" "bool *z"
+.Ft void
+.Fn ck_pr_dec_32 "uint32_t *target"
+.Ft void
+.Fn ck_pr_dec_32_zero "uint32_t *target" "bool *z"
+.Ft void
+.Fn ck_pr_dec_16 "uint16_t *target"
+.Ft void
+.Fn ck_pr_dec_16_zero "uint16_t *target" "bool *z"
+.Ft void
+.Fn ck_pr_dec_8 "uint8_t *target"
+.Ft void
+.Fn ck_pr_dec_8_zero "uint8_t *target" "bool *z"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_dec 3
+family of functions atomically decrement the value pointed to
+by
+.Fa target .
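+.Sh EXAMPLE
+The following is a minimal reference-counting sketch using
+ck_pr_dec_int_zero; the function and variable names are illustrative.
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+#include <stdbool.h>
+
+static int refcnt = 1;
+
+/* Drops a reference and returns true if it was the last one. */
+bool
+release(void)
+{
+	bool zero;
+
+	ck_pr_dec_int_zero(&refcnt, &zero);
+	return zero;
+}
+.Ed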
+.Sh RETURN VALUES
+The ck_pr_dec_zero family of functions sets the value pointed to by
+.Fa z
+to true if the result of the decrement operation was 0, and to
+false otherwise.
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_faa b/doc/ck_pr_faa
new file mode 100644
index 0000000..fbeff01
--- /dev/null
+++ b/doc/ck_pr_faa
@@ -0,0 +1,99 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 7, 2013
+.Dt ck_pr_faa 3
+.Sh NAME
+.Nm ck_pr_faa_ptr ,
+.Nm ck_pr_faa_double ,
+.Nm ck_pr_faa_char ,
+.Nm ck_pr_faa_uint ,
+.Nm ck_pr_faa_int ,
+.Nm ck_pr_faa_64 ,
+.Nm ck_pr_faa_32 ,
+.Nm ck_pr_faa_16 ,
+.Nm ck_pr_faa_8
+.Nd atomic fetch-and-add operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft uintptr_t
+.Fn ck_pr_faa_ptr "void *target" "uintptr_t delta"
+.Ft double
+.Fn ck_pr_faa_double "double *target" "double delta"
+.Ft char
+.Fn ck_pr_faa_char "char *target" "char delta"
+.Ft unsigned int
+.Fn ck_pr_faa_uint "unsigned int *target" "unsigned int delta"
+.Ft int
+.Fn ck_pr_faa_int "int *target" "int delta"
+.Ft uint64_t
+.Fn ck_pr_faa_64 "uint64_t *target" "uint64_t delta"
+.Ft uint32_t
+.Fn ck_pr_faa_32 "uint32_t *target" "uint32_t delta"
+.Ft uint16_t
+.Fn ck_pr_faa_16 "uint16_t *target" "uint16_t delta"
+.Ft uint8_t
+.Fn ck_pr_faa_8 "uint8_t *target" "uint8_t delta"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_faa 3
+family of functions atomically fetch the value pointed to
+by
+.Fa target
+and add the value specified by
+.Fa delta
+to the value pointed to by
+.Fa target .
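+.Sh EXAMPLE
+The following is a minimal sketch that uses ck_pr_faa_uint to hand out
+monotonically increasing ticket numbers; the names are illustrative.
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static unsigned int ticket = 0;
+
+/* Returns the caller's unique ticket number. */
+unsigned int
+take_ticket(void)
+{
+
+	return ck_pr_faa_uint(&ticket, 1);
+}
+.Ed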
+.Sh RETURN VALUES
+These functions return the value pointed to by
+.Fa target
+as it was at the time of invocation, before the
+addition was applied.
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_fas b/doc/ck_pr_fas
new file mode 100644
index 0000000..037b104
--- /dev/null
+++ b/doc/ck_pr_fas
@@ -0,0 +1,100 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 7, 2013
+.Dt ck_pr_fas 3
+.Sh NAME
+.Nm ck_pr_fas_ptr ,
+.Nm ck_pr_fas_double ,
+.Nm ck_pr_fas_char ,
+.Nm ck_pr_fas_uint ,
+.Nm ck_pr_fas_int ,
+.Nm ck_pr_fas_64 ,
+.Nm ck_pr_fas_32 ,
+.Nm ck_pr_fas_16 ,
+.Nm ck_pr_fas_8
+.Nd atomic swap operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void *
+.Fn ck_pr_fas_ptr "void *target" "void *new_value"
+.Ft double
+.Fn ck_pr_fas_double "double *target" "double new_value"
+.Ft char
+.Fn ck_pr_fas_char "char *target" "char new_value"
+.Ft unsigned int
+.Fn ck_pr_fas_uint "unsigned int *target" "unsigned int new_value"
+.Ft int
+.Fn ck_pr_fas_int "int *target" "int new_value"
+.Ft uint64_t
+.Fn ck_pr_fas_64 "uint64_t *target" "uint64_t new_value"
+.Ft uint32_t
+.Fn ck_pr_fas_32 "uint32_t *target" "uint32_t new_value"
+.Ft uint16_t
+.Fn ck_pr_fas_16 "uint16_t *target" "uint16_t new_value"
+.Ft uint8_t
+.Fn ck_pr_fas_8 "uint8_t *target" "uint8_t new_value"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_fas 3
+family of functions atomically fetch the value pointed to
+by
+.Fa target
+and replace the value pointed to by
+.Fa target
+with the value specified by
+.Fa new_value .
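+.Sh EXAMPLE
+The following is a minimal test-and-set sketch built on ck_pr_fas_int;
+the names are illustrative.
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static int flag = 0;
+
+/* Returns the previous value of flag after setting it to 1. */
+int
+test_and_set(void)
+{
+
+	return ck_pr_fas_int(&flag, 1);
+}
+.Ed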
+.Sh RETURN VALUES
+These functions return the value pointed to by
+.Fa target
+as it was at the time of invocation, before it was
+atomically replaced with
+.Fa new_value .
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_fence_acquire b/doc/ck_pr_fence_acquire
new file mode 100644
index 0000000..2d6b997
--- /dev/null
+++ b/doc/ck_pr_fence_acquire
@@ -0,0 +1,72 @@
+.\"
+.\" Copyright 2014 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd January 2, 2014
+.Dt CK_PR_FENCE_ACQUIRE 3
+.Sh NAME
+.Nm ck_pr_fence_acquire
+.Nd enforce acquire semantics
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_fence_acquire void
+.Sh DESCRIPTION
+This function enforces the partial ordering of any loads prior
+to invocation with respect to any following stores, loads and
+atomic operations. It is typically used to implement critical
+sections.
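+.Sh EXAMPLE
+The following is a minimal sketch of the consumer side of a flag-based
+handoff; it assumes a producer that publishes message and then sets
+ready behind a release fence, and the names are illustrative.
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static int ready;
+static int message;
+
+void
+consumer(void)
+{
+	int snapshot;
+
+	while (ck_pr_load_int(&ready) == 0)
+		ck_pr_stall();
+
+	/*
+	 * Guarantee that the load of ready completes before
+	 * the following load of message.
+	 */
+	ck_pr_fence_acquire();
+	snapshot = ck_pr_load_int(&message);
+
+	return;
+}
+.Ed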
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_pr_stall 3 ,
+.Xr ck_pr_fence_atomic 3 ,
+.Xr ck_pr_fence_atomic_store 3 ,
+.Xr ck_pr_fence_atomic_load 3 ,
+.Xr ck_pr_fence_release 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_barrier 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_fence_atomic b/doc/ck_pr_fence_atomic
new file mode 100644
index 0000000..0680328
--- /dev/null
+++ b/doc/ck_pr_fence_atomic
@@ -0,0 +1,111 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd May 16, 2013
+.Dt CK_PR_FENCE_ATOMIC 3
+.Sh NAME
+.Nm ck_pr_fence_atomic
+.Nd enforce partial ordering of atomic read-modify-write operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_fence_atomic void
+.Ft void
+.Fn ck_pr_fence_strict_atomic void
+.Sh DESCRIPTION
+The
+.Fn ck_pr_fence_atomic
+function enforces the ordering of any
+atomic read-modify-write operations relative to
+the invocation of the function. This function
+always serves as an implicit compiler barrier. On
+architectures implementing CK_MD_TSO, this operation
+only serves as a compiler barrier and no fences
+are emitted. On architectures implementing
+CK_MD_PSO and CK_MD_RMO, a store fence is
+emitted. To force the unconditional emission of
+a fence, use
+.Fn ck_pr_fence_strict_atomic .
+.Sh EXAMPLE
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static int a = 0;
+static int b = 0;
+static int c = 0;
+
+void
+function(void)
+{
+
+ ck_pr_fas_int(&a, 1);
+
+ /*
+ * Guarantee that the update to a is completed
+ * with respect to the updates of b and c.
+ */
+ ck_pr_fence_atomic();
+ ck_pr_fas_int(&b, 2);
+ ck_pr_fas_int(&c, 2);
+
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_pr_stall 3 ,
+.Xr ck_pr_fence_atomic_store 3 ,
+.Xr ck_pr_fence_atomic_load 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_atomic 3 ,
+.Xr ck_pr_fence_load_store 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_barrier 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_fence_atomic_load b/doc/ck_pr_fence_atomic_load
new file mode 100644
index 0000000..77675ce
--- /dev/null
+++ b/doc/ck_pr_fence_atomic_load
@@ -0,0 +1,108 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd May 16, 2013
+.Dt CK_PR_FENCE_ATOMIC_LOAD 3
+.Sh NAME
+.Nm ck_pr_fence_atomic_load
+.Nd enforce ordering of atomic read-modify-write operations to load operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_fence_atomic_load void
+.Ft void
+.Fn ck_pr_fence_strict_atomic_load void
+.Sh DESCRIPTION
+The
+.Fn ck_pr_fence_atomic_load
+function enforces the ordering of any
+atomic read-modify-write operations relative to
+any load operations following the function invocation. This function
+always serves as an implicit compiler barrier. On
+architectures implementing CK_MD_TSO, this operation
+only serves as a compiler barrier and no fences
+are emitted. To force the unconditional emission of
+a fence, use
+.Fn ck_pr_fence_strict_atomic_load .
+.Sh EXAMPLE
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static int a = 0;
+static int b = 0;
+
+void
+function(void)
+{
+ int c;
+
+ ck_pr_fas_int(&a, 1);
+
+ /*
+ * Guarantee that the update to a is completed
+	 * with respect to the load from b.
+ */
+ ck_pr_fence_atomic_load();
+ c = ck_pr_load_int(&b);
+
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_pr_stall 3 ,
+.Xr ck_pr_fence_atomic 3 ,
+.Xr ck_pr_fence_atomic_store 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_atomic 3 ,
+.Xr ck_pr_fence_load_store 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_barrier 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_fence_atomic_store b/doc/ck_pr_fence_atomic_store
new file mode 100644
index 0000000..fd02122
--- /dev/null
+++ b/doc/ck_pr_fence_atomic_store
@@ -0,0 +1,109 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd May 16, 2013
+.Dt CK_PR_FENCE_ATOMIC_STORE 3
+.Sh NAME
+.Nm ck_pr_fence_atomic_store
+.Nd enforce ordering of atomic read-modify-write operations to store operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_fence_atomic_store void
+.Ft void
+.Fn ck_pr_fence_strict_atomic_store void
+.Sh DESCRIPTION
+The
+.Fn ck_pr_fence_atomic_store
+function enforces the ordering of any
+atomic read-modify-write operations relative to
+any store operations following the function invocation. This function
+always serves as an implicit compiler barrier. On
+architectures implementing CK_MD_TSO, this operation
+only serves as a compiler barrier and no fences
+are emitted. To force the unconditional emission of
+a fence, use
+.Fn ck_pr_fence_strict_atomic_store .
+.Sh EXAMPLE
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static int a = 0;
+static int b = 0;
+
+void
+function(void)
+{
+
+	ck_pr_fas_int(&a, 1);
+
+	/*
+	 * Guarantee that the update to a is completed
+	 * with respect to the store to b.
+	 */
+	ck_pr_fence_atomic_store();
+	ck_pr_store_int(&b, 2);
+
+	return;
+}
+.Ed
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_pr_stall 3 ,
+.Xr ck_pr_fence_atomic 3 ,
+.Xr ck_pr_fence_atomic_load 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_atomic 3 ,
+.Xr ck_pr_fence_load_store 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_barrier 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_fence_load b/doc/ck_pr_fence_load
new file mode 100644
index 0000000..b6e778d
--- /dev/null
+++ b/doc/ck_pr_fence_load
@@ -0,0 +1,113 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 7, 2013
+.Dt ck_pr_fence_load 3
+.Sh NAME
+.Nm ck_pr_fence_load
+.Nd enforce partial ordering of load operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_fence_load void
+.Ft void
+.Fn ck_pr_fence_strict_load void
+.Sh DESCRIPTION
+This function enforces the ordering of any memory load
+and
+.Fn ck_pr_load 3
+operations relative to the invocation of the function. Any
+store operations that were committed on remote processors
+and received by the calling processor before the invocation of
+.Fn ck_pr_fence_load
+are made visible only after a call to
+.Fn ck_pr_fence_load .
+This function always serves as an implicit compiler barrier.
+On architectures with CK_MD_TSO or CK_MD_PSO specified (total store ordering
+and partial store ordering respectively), this operation only serves
+as a compiler barrier and no fence instructions will be emitted. To
+force the unconditional emission of a load fence, use
+.Fn ck_pr_fence_strict_load .
+Architectures implementing CK_MD_RMO always emit a load fence.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static unsigned int a;
+static unsigned int b;
+
+void
+function(void)
+{
+ unsigned int snapshot_a, snapshot_b;
+
+ snapshot_a = ck_pr_load_uint(&a);
+
+ /*
+ * Guarantee that the load from "a" completes
+ * before the load from "b".
+ */
+ ck_pr_fence_load();
+ snapshot_b = ck_pr_load_uint(&b);
+
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_pr_stall 3 ,
+.Xr ck_pr_fence_atomic 3 ,
+.Xr ck_pr_fence_atomic_store 3 ,
+.Xr ck_pr_fence_atomic_load 3 ,
+.Xr ck_pr_fence_load_atomic 3 ,
+.Xr ck_pr_fence_load_store 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_barrier 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_fence_load_atomic b/doc/ck_pr_fence_load_atomic
new file mode 100644
index 0000000..c935491
--- /dev/null
+++ b/doc/ck_pr_fence_load_atomic
@@ -0,0 +1,113 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd May 18, 2013
+.Dt CK_PR_FENCE_LOAD_ATOMIC 3
+.Sh NAME
+.Nm ck_pr_fence_load_atomic
+.Nd enforce ordering of load operations to atomic read-modify-write operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_fence_load_atomic void
+.Ft void
+.Fn ck_pr_fence_strict_load_atomic void
+.Sh DESCRIPTION
+This function enforces the ordering of any memory load
+and
+.Fn ck_pr_load 3
+operations with respect to atomic read-modify-write operations relative to
+the invocation of the function. Any store operations that
+were committed on remote processors
+and received by the calling processor before the invocation of
+.Fn ck_pr_fence_load_atomic
+are made visible only after a call to
+the ck_pr_fence_load family of functions.
+This function always serves as an implicit compiler barrier.
+On architectures with CK_MD_TSO or CK_MD_PSO specified (total store ordering
+and partial store ordering respectively), this operation only serves
+as a compiler barrier and no fence instructions will be emitted. To
+force the unconditional emission of a load fence, use
+.Fn ck_pr_fence_strict_load_atomic .
+Architectures implementing CK_MD_RMO always emit a fence.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static unsigned int a;
+static unsigned int b;
+
+void
+function(void)
+{
+ unsigned int snapshot_a, snapshot_b;
+
+ snapshot_a = ck_pr_load_uint(&a);
+
+ /*
+ * Guarantee that the load from "a" completes
+ * before the update to "b".
+ */
+ ck_pr_fence_load_atomic();
+ ck_pr_fas_uint(&b, 1);
+
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_pr_stall 3 ,
+.Xr ck_pr_fence_atomic 3 ,
+.Xr ck_pr_fence_atomic_store 3 ,
+.Xr ck_pr_fence_atomic_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_load_store 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_barrier 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_fence_load_depends b/doc/ck_pr_fence_load_depends
new file mode 100644
index 0000000..0c0ecfa
--- /dev/null
+++ b/doc/ck_pr_fence_load_depends
@@ -0,0 +1,75 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 7, 2013
+.Dt ck_pr_fence_load_depends 3
+.Sh NAME
+.Nm ck_pr_fence_load_depends
+.Nd data dependency barrier
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_fence_load_depends void
+.Sh DESCRIPTION
+The
+.Fn ck_pr_fence_load_depends 3
+function emits any fences necessary for the correct ordering of pure data-dependent loads.
+It currently serves only as a compiler barrier on all of Concurrency Kit's supported platforms.
+Unless you are on an architecture that re-orders data-dependent loads (such as the defunct Alpha),
+this function is unnecessary.
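+.Sh EXAMPLE
+The following is a minimal sketch of the intended use case: dereferencing
+a pointer that another thread published with ck_pr_store_ptr. The
+structure and names are illustrative.
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+struct entry {
+	int value;
+};
+
+static struct entry *shared;
+
+int
+reader(void)
+{
+	struct entry *snapshot;
+
+	/* Assumes shared has already been published by a writer. */
+	snapshot = ck_pr_load_ptr(&shared);
+
+	/*
+	 * Order the dependent load of snapshot->value after the
+	 * load of the pointer itself on architectures that
+	 * re-order data-dependent loads.
+	 */
+	ck_pr_fence_load_depends();
+	return snapshot->value;
+}
+.Ed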
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_pr_stall 3 ,
+.Xr ck_pr_fence_atomic 3 ,
+.Xr ck_pr_fence_atomic_store 3 ,
+.Xr ck_pr_fence_atomic_load 3 ,
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_atomic 3 ,
+.Xr ck_pr_fence_load_store 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_barrier 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_fence_load_store b/doc/ck_pr_fence_load_store
new file mode 100644
index 0000000..4abce99
--- /dev/null
+++ b/doc/ck_pr_fence_load_store
@@ -0,0 +1,113 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd May 18, 2013
+.Dt CK_PR_FENCE_LOAD_STORE 3
+.Sh NAME
+.Nm ck_pr_fence_load_store
+.Nd enforce ordering of load operations to store operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_fence_load_store void
+.Ft void
+.Fn ck_pr_fence_strict_load_store void
+.Sh DESCRIPTION
+This function enforces the ordering of any memory load
+and
+.Fn ck_pr_load 3
+operations with respect to store operations relative to
+the invocation of the function. Any store operations that
+were committed on remote processors
+and received by the calling processor before the invocation of
+.Fn ck_pr_fence_load_store
+are made visible only after a call to
+the ck_pr_fence_load family of functions.
+This function always serves as an implicit compiler barrier.
+On architectures with CK_MD_TSO or CK_MD_PSO specified (total store ordering
+and partial store ordering respectively), this operation only serves
+as a compiler barrier and no fence instructions will be emitted. To
+force the unconditional emission of a load fence, use
+.Fn ck_pr_fence_strict_load_store .
+Architectures implementing CK_MD_RMO always emit a fence.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static unsigned int a;
+static unsigned int b;
+
+void
+function(void)
+{
+ unsigned int snapshot_a;
+
+ snapshot_a = ck_pr_load_uint(&a);
+
+ /*
+ * Guarantee that the load from "a" completes
+ * before the store to "b".
+ */
+ ck_pr_fence_load_store();
+ ck_pr_store_uint(&b, 1);
+
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_pr_stall 3 ,
+.Xr ck_pr_fence_atomic 3 ,
+.Xr ck_pr_fence_atomic_store 3 ,
+.Xr ck_pr_fence_atomic_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_load_atomic 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_barrier 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_fence_memory b/doc/ck_pr_fence_memory
new file mode 100644
index 0000000..0dfc81b
--- /dev/null
+++ b/doc/ck_pr_fence_memory
@@ -0,0 +1,113 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 7, 2013
+.Dt ck_pr_fence_memory 3
+.Sh NAME
+.Nm ck_pr_fence_memory
+.Nd enforce partial ordering of all memory operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_fence_memory
+.Ft void
+.Fn ck_pr_fence_strict_memory
+.Sh DESCRIPTION
+The
+.Fn ck_pr_fence_memory 3
+function enforces the ordering of any memory operations
+with respect to the invocation of the function. This function
+always serves as an implicit compiler barrier.
+Architectures implementing CK_MD_TSO do not emit
+a barrier, but compiler barrier semantics remain.
+Architectures implementing CK_MD_PSO and CK_MD_RMO always emit
+instructions which provide the specified ordering
+guarantees. To force the unconditional emission of a memory
+fence, use
+.Fn ck_pr_fence_strict_memory .
+.Sh EXAMPLE
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static int a = 0;
+static int b;
+static int c;
+static int d;
+
+void
+function(void)
+{
+ int snapshot_a;
+
+ ck_pr_store_int(&b, 1);
+ snapshot_a = ck_pr_load_int(&a);
+
+ /*
+ * Make sure previous memory operations are
+ * ordered with respect to memory operations
+ * following the ck_pr_fence_memory.
+ */
+ ck_pr_fence_memory();
+
+ ck_pr_store_int(&d, 3);
+ ck_pr_store_int(&c, 2);
+
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_pr_stall 3 ,
+.Xr ck_pr_fence_atomic 3 ,
+.Xr ck_pr_fence_atomic_store 3 ,
+.Xr ck_pr_fence_atomic_load 3 ,
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_barrier 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_fence_release b/doc/ck_pr_fence_release
new file mode 100644
index 0000000..214917c
--- /dev/null
+++ b/doc/ck_pr_fence_release
@@ -0,0 +1,71 @@
+.\"
+.\" Copyright 2014 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd January 2, 2014
+.Dt CK_PR_FENCE_RELEASE 3
+.Sh NAME
+.Nm ck_pr_fence_release
+.Nd enforce release semantics
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_fence_release void
+.Sh DESCRIPTION
+This function enforces the partial ordering of any loads and stores
+prior to invocation with respect to any following stores.
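+.Sh EXAMPLE
+The following is a minimal sketch of the producer side of a flag-based
+handoff, pairing with an acquire fence on the consumer side; the names
+are illustrative.
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static int ready;
+static int message;
+
+void
+producer(void)
+{
+
+	ck_pr_store_int(&message, 42);
+
+	/*
+	 * Guarantee that the store to message is visible before
+	 * the following store to ready.
+	 */
+	ck_pr_fence_release();
+	ck_pr_store_int(&ready, 1);
+
+	return;
+}
+.Ed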
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_pr_stall 3 ,
+.Xr ck_pr_fence_atomic 3 ,
+.Xr ck_pr_fence_atomic_store 3 ,
+.Xr ck_pr_fence_atomic_load 3 ,
+.Xr ck_pr_fence_acquire 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_barrier 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_fence_store b/doc/ck_pr_fence_store
new file mode 100644
index 0000000..d94e9f1
--- /dev/null
+++ b/doc/ck_pr_fence_store
@@ -0,0 +1,112 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 7, 2013
+.Dt ck_pr_fence_store 3
+.Sh NAME
+.Nm ck_pr_fence_store
+.Nd enforce partial ordering of store operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_fence_store void
+.Ft void
+.Fn ck_pr_fence_strict_store void
+.Sh DESCRIPTION
+The
+.Fn ck_pr_fence_store
+function enforces the ordering of any memory store,
+.Fn ck_pr_store
+and atomic read-modify-write operations relative to
+the invocation of the function. This function
+always serves as an implicit compiler barrier. On
+architectures implementing CK_MD_TSO, this operation
+only serves as a compiler barrier and no fences
+are emitted. On architectures implementing
+CK_MD_PSO and CK_MD_RMO, a store fence is
+emitted. To force the unconditional emission of
+a store fence, use
+.Fn ck_pr_fence_strict_store .
+.Sh EXAMPLE
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static int a = 0;
+static int b = 0;
+static int c = 0;
+
+void
+function(void)
+{
+
+ ck_pr_store_int(&a, 1);
+
+ /*
+ * Guarantee that the store to a is completed
+ * with respect to the stores of b and c.
+ */
+ ck_pr_fence_store();
+ ck_pr_store_int(&b, 2);
+ ck_pr_store_int(&c, 2);
+
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_pr_stall 3 ,
+.Xr ck_pr_fence_atomic 3 ,
+.Xr ck_pr_fence_atomic_store 3 ,
+.Xr ck_pr_fence_atomic_load 3 ,
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_atomic 3 ,
+.Xr ck_pr_fence_load_store 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_barrier 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_fence_store_atomic b/doc/ck_pr_fence_store_atomic
new file mode 100644
index 0000000..309c804
--- /dev/null
+++ b/doc/ck_pr_fence_store_atomic
@@ -0,0 +1,108 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd May 18, 2013
+.Dt CK_PR_FENCE_STORE_ATOMIC 3
+.Sh NAME
+.Nm ck_pr_fence_store_atomic
+.Nd enforce ordering of store operations to atomic read-modify-write operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_fence_store_atomic void
+.Ft void
+.Fn ck_pr_fence_strict_store_atomic void
+.Sh DESCRIPTION
+The
+.Fn ck_pr_fence_store_atomic
+function enforces the ordering of any memory store,
+.Fn ck_pr_store
+and atomic read-modify-write operations to atomic read-modify-write
+operations relative to the invocation of the function. This function
+always serves as an implicit compiler barrier.
+This function will emit a fence for PSO and RMO
+targets. To force the unconditional emission of a fence, use the
+.Fn ck_pr_fence_strict_store_atomic
+function.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static int a = 0;
+static int b = 0;
+
+void
+function(void)
+{
+
+ ck_pr_store_int(&a, 1);
+
+ /*
+ * Guarantee that the store to a is completed
+ * with respect to the update of b.
+ */
+ ck_pr_fence_store_atomic();
+ ck_pr_add_int(&b, 2);
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_pr_stall 3 ,
+.Xr ck_pr_fence_atomic 3 ,
+.Xr ck_pr_fence_atomic_store 3 ,
+.Xr ck_pr_fence_atomic_load 3 ,
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_atomic 3 ,
+.Xr ck_pr_fence_load_store 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_store_load 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_barrier 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_fence_store_load b/doc/ck_pr_fence_store_load
new file mode 100644
index 0000000..b595739
--- /dev/null
+++ b/doc/ck_pr_fence_store_load
@@ -0,0 +1,107 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd May 18, 2013
+.Dt CK_PR_FENCE_STORE_LOAD 3
+.Sh NAME
+.Nm ck_pr_fence_store_load
+.Nd enforce ordering of store operations to load operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_fence_store_load void
+.Ft void
+.Fn ck_pr_fence_strict_store_load void
+.Sh DESCRIPTION
+The
+.Fn ck_pr_fence_store_load
+function enforces the ordering of any memory store,
+.Fn ck_pr_store
+and atomic read-modify-write operations to load
+operations relative to the invocation of the function. This function
+always serves as an implicit compiler barrier.
+A fence will currently always be emitted for this
+operation, including for TSO memory model targets.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static int a = 0;
+static int b = 0;
+
+void
+function(void)
+{
+	int snapshot_b;
+
+	ck_pr_store_int(&a, 1);
+
+	/*
+	 * Guarantee that the store to a is completed
+	 * with respect to the load from b.
+	 */
+	ck_pr_fence_store_load();
+	snapshot_b = ck_pr_load_int(&b);
+	return;
+}
+.Ed
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_pr_stall 3 ,
+.Xr ck_pr_fence_atomic 3 ,
+.Xr ck_pr_fence_atomic_store 3 ,
+.Xr ck_pr_fence_atomic_load 3 ,
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_atomic 3 ,
+.Xr ck_pr_fence_load_store 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_store_atomic 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_barrier 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_inc b/doc/ck_pr_inc
new file mode 100644
index 0000000..72a3e70
--- /dev/null
+++ b/doc/ck_pr_inc
@@ -0,0 +1,124 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 7, 2013
+.Dt ck_pr_inc 3
+.Sh NAME
+.Nm ck_pr_inc_ptr ,
+.Nm ck_pr_inc_ptr_zero ,
+.Nm ck_pr_inc_double ,
+.Nm ck_pr_inc_double_zero ,
+.Nm ck_pr_inc_char ,
+.Nm ck_pr_inc_char_zero ,
+.Nm ck_pr_inc_uint ,
+.Nm ck_pr_inc_uint_zero ,
+.Nm ck_pr_inc_int ,
+.Nm ck_pr_inc_int_zero ,
+.Nm ck_pr_inc_64 ,
+.Nm ck_pr_inc_64_zero ,
+.Nm ck_pr_inc_32 ,
+.Nm ck_pr_inc_32_zero ,
+.Nm ck_pr_inc_16 ,
+.Nm ck_pr_inc_16_zero ,
+.Nm ck_pr_inc_8 ,
+.Nm ck_pr_inc_8_zero
+.Nd atomic increment operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_inc_ptr "void *target"
+.Ft void
+.Fn ck_pr_inc_ptr_zero "void *target" "bool *z"
+.Ft void
+.Fn ck_pr_inc_double "double *target"
+.Ft void
+.Fn ck_pr_inc_double_zero "double *target" "bool *z"
+.Ft void
+.Fn ck_pr_inc_char "char *target"
+.Ft void
+.Fn ck_pr_inc_char_zero "char *target" "bool *z"
+.Ft void
+.Fn ck_pr_inc_uint "unsigned int *target"
+.Ft void
+.Fn ck_pr_inc_uint_zero "unsigned int *target" "bool *z"
+.Ft void
+.Fn ck_pr_inc_int "int *target"
+.Ft void
+.Fn ck_pr_inc_int_zero "int *target" "bool *z"
+.Ft void
+.Fn ck_pr_inc_64 "uint64_t *target"
+.Ft void
+.Fn ck_pr_inc_64_zero "uint64_t *target" "bool *z"
+.Ft void
+.Fn ck_pr_inc_32 "uint32_t *target"
+.Ft void
+.Fn ck_pr_inc_32_zero "uint32_t *target" "bool *z"
+.Ft void
+.Fn ck_pr_inc_16 "uint16_t *target"
+.Ft void
+.Fn ck_pr_inc_16_zero "uint16_t *target" "bool *z"
+.Ft void
+.Fn ck_pr_inc_8 "uint8_t *target"
+.Ft void
+.Fn ck_pr_inc_8_zero "uint8_t *target" "bool *z"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_inc 3
+family of functions atomically increments the value pointed to
+by
+.Fa target .
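+.Sh EXAMPLE
+The following is a minimal sketch of incrementing a shared counter;
+the counter and the handling of the wrap-around to zero are
+illustrative only.
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+#include <stdbool.h>
+
+static unsigned int counter = 0;
+
+void
+function(void)
+{
+        bool z;
+
+        /* Atomically increment the counter. */
+        ck_pr_inc_uint(&counter);
+
+        /* Increment again and observe whether the result is 0. */
+        ck_pr_inc_uint_zero(&counter, &z);
+        if (z == true) {
+                /* The counter wrapped around to 0. */
+        }
+
+        return;
+}
+.Ed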
+.Sh RETURN VALUES
+The ck_pr_inc_zero family of functions set the value pointed to by
+.Fa z
+to true if the result of the increment operation was 0, and to false
+otherwise.
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_load b/doc/ck_pr_load
new file mode 100644
index 0000000..ed615d3
--- /dev/null
+++ b/doc/ck_pr_load
@@ -0,0 +1,96 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 15, 2013
+.Dt ck_pr_load 3
+.Sh NAME
+.Nm ck_pr_load_ptr ,
+.Nm ck_pr_load_double ,
+.Nm ck_pr_load_uint ,
+.Nm ck_pr_load_int ,
+.Nm ck_pr_load_char ,
+.Nm ck_pr_load_64 ,
+.Nm ck_pr_load_32 ,
+.Nm ck_pr_load_16 ,
+.Nm ck_pr_load_8
+.Nd atomic volatile load operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void *
+.Fn ck_pr_load_ptr "const void *target"
+.Ft double
+.Fn ck_pr_load_double "const double *target"
+.Ft unsigned int
+.Fn ck_pr_load_uint "const unsigned int *target"
+.Ft int
+.Fn ck_pr_load_int "const int *target"
+.Ft char
+.Fn ck_pr_load_char "const char *target"
+.Ft uint64_t
+.Fn ck_pr_load_64 "const uint64_t *target"
+.Ft uint32_t
+.Fn ck_pr_load_32 "const uint32_t *target"
+.Ft uint16_t
+.Fn ck_pr_load_16 "const uint16_t *target"
+.Ft uint8_t
+.Fn ck_pr_load_8 "const uint8_t *target"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_load 3
+family of functions atomically loads the value
+pointed to by
+.Fa target
+and returns it. This family of functions always
+serves as an implicit compiler barrier and is not
+susceptible to re-ordering by the compiler.
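+.Sh EXAMPLE
+A minimal sketch follows; it assumes another thread publishes updates
+to the shared variable with
+.Xr ck_pr_store 3 .
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static int ready = 0;
+
+int
+function(void)
+{
+        int snapshot;
+
+        /* Atomically read the current value of ready. */
+        snapshot = ck_pr_load_int(&ready);
+        return snapshot;
+}
+.Ed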
+.Sh RETURN VALUES
+This family of functions returns the value contained
+in the location pointed to by the first argument.
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_neg b/doc/ck_pr_neg
new file mode 100644
index 0000000..38f9a0a
--- /dev/null
+++ b/doc/ck_pr_neg
@@ -0,0 +1,122 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 7, 2013
+.Dt ck_pr_neg 3
+.Sh NAME
+.Nm ck_pr_neg_ptr ,
+.Nm ck_pr_neg_ptr_zero ,
+.Nm ck_pr_neg_double ,
+.Nm ck_pr_neg_double_zero ,
+.Nm ck_pr_neg_char ,
+.Nm ck_pr_neg_char_zero ,
+.Nm ck_pr_neg_uint ,
+.Nm ck_pr_neg_uint_zero ,
+.Nm ck_pr_neg_int ,
+.Nm ck_pr_neg_int_zero ,
+.Nm ck_pr_neg_64 ,
+.Nm ck_pr_neg_64_zero ,
+.Nm ck_pr_neg_32 ,
+.Nm ck_pr_neg_32_zero ,
+.Nm ck_pr_neg_16 ,
+.Nm ck_pr_neg_16_zero ,
+.Nm ck_pr_neg_8 ,
+.Nm ck_pr_neg_8_zero
+.Nd atomic negation operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_neg_ptr "void *target"
+.Ft void
+.Fn ck_pr_neg_ptr_zero "void *target" "bool *z"
+.Ft void
+.Fn ck_pr_neg_double "double *target"
+.Ft void
+.Fn ck_pr_neg_double_zero "double *target" "bool *z"
+.Ft void
+.Fn ck_pr_neg_char "char *target"
+.Ft void
+.Fn ck_pr_neg_char_zero "char *target" "bool *z"
+.Ft void
+.Fn ck_pr_neg_uint "unsigned int *target"
+.Ft void
+.Fn ck_pr_neg_uint_zero "unsigned int *target" "bool *z"
+.Ft void
+.Fn ck_pr_neg_int "int *target"
+.Ft void
+.Fn ck_pr_neg_int_zero "int *target" "bool *z"
+.Ft void
+.Fn ck_pr_neg_64 "uint64_t *target"
+.Ft void
+.Fn ck_pr_neg_64_zero "uint64_t *target" "bool *z"
+.Ft void
+.Fn ck_pr_neg_32 "uint32_t *target"
+.Ft void
+.Fn ck_pr_neg_32_zero "uint32_t *target" "bool *z"
+.Ft void
+.Fn ck_pr_neg_16 "uint16_t *target"
+.Ft void
+.Fn ck_pr_neg_16_zero "uint16_t *target" "bool *z"
+.Ft void
+.Fn ck_pr_neg_8 "uint8_t *target"
+.Ft void
+.Fn ck_pr_neg_8_zero "uint8_t *target" "bool *z"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_neg 3
+family of functions atomically negates the value pointed to
+by
+.Fa target .
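+.Sh EXAMPLE
+A minimal sketch follows; the shared variable and its use are
+illustrative only.
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+#include <stdbool.h>
+
+static int delta = 1;
+
+void
+function(void)
+{
+        bool z;
+
+        /* Atomically replace delta with -delta. */
+        ck_pr_neg_int(&delta);
+
+        /* Negate again and observe whether the result is 0. */
+        ck_pr_neg_int_zero(&delta, &z);
+        return;
+}
+.Ed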
+.Sh RETURN VALUES
+The ck_pr_neg_zero family of functions set the value pointed to by
+.Fa z
+to true if the result of the negation operation was 0, and to false
+otherwise.
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_not b/doc/ck_pr_not
new file mode 100644
index 0000000..b0a38b2
--- /dev/null
+++ b/doc/ck_pr_not
@@ -0,0 +1,92 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 7, 2013
+.Dt ck_pr_not 3
+.Sh NAME
+.Nm ck_pr_not_ptr ,
+.Nm ck_pr_not_double ,
+.Nm ck_pr_not_char ,
+.Nm ck_pr_not_uint ,
+.Nm ck_pr_not_int ,
+.Nm ck_pr_not_64 ,
+.Nm ck_pr_not_32 ,
+.Nm ck_pr_not_16 ,
+.Nm ck_pr_not_8
+.Nd atomic complement operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_not_ptr "void *target"
+.Ft void
+.Fn ck_pr_not_double "double *target"
+.Ft void
+.Fn ck_pr_not_char "char *target"
+.Ft void
+.Fn ck_pr_not_uint "unsigned int *target"
+.Ft void
+.Fn ck_pr_not_int "int *target"
+.Ft void
+.Fn ck_pr_not_64 "uint64_t *target"
+.Ft void
+.Fn ck_pr_not_32 "uint32_t *target"
+.Ft void
+.Fn ck_pr_not_16 "uint16_t *target"
+.Ft void
+.Fn ck_pr_not_8 "uint8_t *target"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_not 3
+family of functions atomically complements the value pointed to
+by
+.Fa target .
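+.Sh EXAMPLE
+A minimal sketch follows; the shared mask is illustrative only.
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+#include <stdint.h>
+
+static uint32_t mask = 0x0000FFFF;
+
+void
+function(void)
+{
+
+        /* Atomically replace mask with its bitwise complement. */
+        ck_pr_not_32(&mask);
+        return;
+}
+.Ed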
+.Sh RETURN VALUES
+These functions have no return value.
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_or b/doc/ck_pr_or
new file mode 100644
index 0000000..2a68330
--- /dev/null
+++ b/doc/ck_pr_or
@@ -0,0 +1,93 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 11, 2013
+.Dt ck_pr_or 3
+.Sh NAME
+.Nm ck_pr_or_ptr ,
+.Nm ck_pr_or_char ,
+.Nm ck_pr_or_uint ,
+.Nm ck_pr_or_int ,
+.Nm ck_pr_or_64 ,
+.Nm ck_pr_or_32 ,
+.Nm ck_pr_or_16 ,
+.Nm ck_pr_or_8
+.Nd atomic bitwise-or operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_or_ptr "void *target" "uintptr_t delta"
+.Ft void
+.Fn ck_pr_or_char "char *target" "char delta"
+.Ft void
+.Fn ck_pr_or_uint "unsigned int *target" "unsigned int delta"
+.Ft void
+.Fn ck_pr_or_int "int *target" "int delta"
+.Ft void
+.Fn ck_pr_or_64 "uint64_t *target" "uint64_t delta"
+.Ft void
+.Fn ck_pr_or_32 "uint32_t *target" "uint32_t delta"
+.Ft void
+.Fn ck_pr_or_16 "uint16_t *target" "uint16_t delta"
+.Ft void
+.Fn ck_pr_or_8 "uint8_t *target" "uint8_t delta"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_or 3
+family of functions atomically computes and stores the
+result of a bitwise-or of the value pointed to by
+.Fa target
+and
+.Fa delta
+into the value pointed to by
+.Fa target .
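+.Sh EXAMPLE
+A minimal sketch of atomically setting a flag bit in a shared word
+follows; the flag value is illustrative only.
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+#include <stdint.h>
+
+#define FLAG_READY 0x1U
+
+static uint32_t flags = 0;
+
+void
+function(void)
+{
+
+        /* Atomically compute flags |= FLAG_READY. */
+        ck_pr_or_32(&flags, FLAG_READY);
+        return;
+}
+.Ed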
+.Sh RETURN VALUES
+This family of functions does not have a return value.
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_rtm b/doc/ck_pr_rtm
new file mode 100644
index 0000000..53c31b6
--- /dev/null
+++ b/doc/ck_pr_rtm
@@ -0,0 +1,112 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd December 17, 2013
+.Dt ck_pr_rtm 3
+.Sh NAME
+.Nm ck_pr_rtm_begin ,
+.Nm ck_pr_rtm_end ,
+.Nm ck_pr_rtm_abort ,
+.Nm ck_pr_rtm_test
+.Nd restricted transactional memory
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft unsigned int
+.Fn ck_pr_rtm_begin "void"
+.Ft void
+.Fn ck_pr_rtm_end "void"
+.Ft void
+.Fn ck_pr_rtm_abort "const unsigned int status"
+.Ft bool
+.Fn ck_pr_rtm_test "void"
+.Sh DESCRIPTION
+This family of functions implements support for restricted
+transactional memory, if available on the underlying platform.
+Currently, support is only provided for Intel Haswell and
+newer x86 microarchitectures that have the TSX-NI feature.
+.Pp
+The
+.Fn ck_pr_rtm_begin
+function returns CK_PR_RTM_STARTED if a transaction was successfully
+started. In case of an abort, either internal (through a call to
+ck_pr_rtm_abort) or external, program flow returns to the point at
+which the function was called, except that the return value consists
+of a bitmap with one or more of the following bits set:
+.Bl -tag -width indent
+.It CK_PR_RTM_EXPLICIT
+Set if the transaction was explicitly aborted through
+.Fn ck_pr_rtm_abort .
+.It CK_PR_RTM_RETRY
+Set if the transaction failed but can still succeed if
+retried.
+.It CK_PR_RTM_CONFLICT
+Set if the transaction failed due to a conflict on one of the memory
+addresses in the working set of the transaction.
+.It CK_PR_RTM_CAPACITY
+Set if the architecture-defined transaction size limit was exceeded.
+.It CK_PR_RTM_DEBUG
+Set if a hardware breakpoint was triggered.
+.It CK_PR_RTM_NESTED
+Set if a nested transaction failed.
+.El
+.Pp
+The user is also able to specify a one-byte abort status
+by calling
+.Fn ck_pr_rtm_abort .
+This status byte can be extracted by calling the
+.Fn CK_PR_RTM_CODE
+function with the return value of
+.Fn ck_pr_rtm_begin
+as an argument. The return value of
+.Fn CK_PR_RTM_CODE
+will be the value of this status byte.
+For additional information, please see the Intel instruction
+set manuals.
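+.Sh EXAMPLE
+The sketch below illustrates the begin/end protocol on targets that
+provide RTM support; the shared counter and the use of a plain atomic
+update as the abort fallback are illustrative only.
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static int counter = 0;
+
+void
+function(void)
+{
+        unsigned int status;
+
+        status = ck_pr_rtm_begin();
+        if (status == CK_PR_RTM_STARTED) {
+                /* Transactional path. */
+                counter = counter + 1;
+                ck_pr_rtm_end();
+                return;
+        }
+
+        /* The transaction aborted: fall back to an atomic update. */
+        ck_pr_add_int(&counter, 1);
+        return;
+}
+.Ed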
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_stall b/doc/ck_pr_stall
new file mode 100644
index 0000000..bc46647
--- /dev/null
+++ b/doc/ck_pr_stall
@@ -0,0 +1,86 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 7, 2013
+.Dt ck_pr_stall 3
+.Sh NAME
+.Nm ck_pr_stall
+.Nd busy-wait primitive
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_stall void
+.Sh DESCRIPTION
+The
+.Fn ck_pr_stall 3
+function should be used inside retry paths of busy-wait loops.
+It not only serves as a compiler barrier, but on some architectures
+it emits cycle-saving instructions.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static int ready = 0;
+
+void
+function(void)
+{
+
+ /* Busy-wait until ready is non-zero. */
+ while (ck_pr_load_int(&ready) == 0)
+ ck_pr_stall();
+
+ return;
+}
+.Ed
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_barrier 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_store b/doc/ck_pr_store
new file mode 100644
index 0000000..462cf7b
--- /dev/null
+++ b/doc/ck_pr_store
@@ -0,0 +1,96 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 15, 2013
+.Dt ck_pr_store 3
+.Sh NAME
+.Nm ck_pr_store_ptr ,
+.Nm ck_pr_store_double ,
+.Nm ck_pr_store_uint ,
+.Nm ck_pr_store_int ,
+.Nm ck_pr_store_char ,
+.Nm ck_pr_store_64 ,
+.Nm ck_pr_store_32 ,
+.Nm ck_pr_store_16 ,
+.Nm ck_pr_store_8
+.Nd atomic volatile store operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_store_ptr "void *target" "void *value"
+.Ft void
+.Fn ck_pr_store_double "double *target" "double value"
+.Ft void
+.Fn ck_pr_store_uint "unsigned int *target" "unsigned int value"
+.Ft void
+.Fn ck_pr_store_int "int *target" "int value"
+.Ft void
+.Fn ck_pr_store_char "char *target" "char value"
+.Ft void
+.Fn ck_pr_store_64 "uint64_t *target" "uint64_t value"
+.Ft void
+.Fn ck_pr_store_32 "uint32_t *target" "uint32_t value"
+.Ft void
+.Fn ck_pr_store_16 "uint16_t *target" "uint16_t value"
+.Ft void
+.Fn ck_pr_store_8 "uint8_t *target" "uint8_t value"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_store 3
+family of functions atomically stores the value specified
+by
+.Fa value
+into the location pointed to by
+.Fa target .
+This family of functions always serves as an implicit compiler
+barrier and is not susceptible to compiler re-ordering.
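+.Sh EXAMPLE
+A minimal sketch follows; it assumes readers observe the shared
+variable with
+.Xr ck_pr_load 3 .
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static int ready = 0;
+
+void
+function(void)
+{
+
+        /* Atomically publish the new value of ready. */
+        ck_pr_store_int(&ready, 1);
+        return;
+}
+.Ed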
+.Sh RETURN VALUES
+This family of functions has no return value.
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_sub b/doc/ck_pr_sub
new file mode 100644
index 0000000..5eee170
--- /dev/null
+++ b/doc/ck_pr_sub
@@ -0,0 +1,93 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 11, 2013
+.Dt ck_pr_sub 3
+.Sh NAME
+.Nm ck_pr_sub_ptr ,
+.Nm ck_pr_sub_double ,
+.Nm ck_pr_sub_char ,
+.Nm ck_pr_sub_uint ,
+.Nm ck_pr_sub_int ,
+.Nm ck_pr_sub_64 ,
+.Nm ck_pr_sub_32 ,
+.Nm ck_pr_sub_16 ,
+.Nm ck_pr_sub_8
+.Nd atomic subtraction operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_sub_ptr "void *target" "uintptr_t delta"
+.Ft void
+.Fn ck_pr_sub_double "double *target" "double delta"
+.Ft void
+.Fn ck_pr_sub_char "char *target" "char delta"
+.Ft void
+.Fn ck_pr_sub_uint "unsigned int *target" "unsigned int delta"
+.Ft void
+.Fn ck_pr_sub_int "int *target" "int delta"
+.Ft void
+.Fn ck_pr_sub_64 "uint64_t *target" "uint64_t delta"
+.Ft void
+.Fn ck_pr_sub_32 "uint32_t *target" "uint32_t delta"
+.Ft void
+.Fn ck_pr_sub_16 "uint16_t *target" "uint16_t delta"
+.Ft void
+.Fn ck_pr_sub_8 "uint8_t *target" "uint8_t delta"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_sub 3
+family of functions atomically subtracts the value specified by
+.Fa delta
+from the value pointed to by
+.Fa target .
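+.Sh EXAMPLE
+A minimal sketch follows; the shared counter and the amount
+subtracted are illustrative only.
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+
+static unsigned int available = 8;
+
+void
+function(void)
+{
+
+        /* Atomically compute available -= 2. */
+        ck_pr_sub_uint(&available, 2);
+        return;
+}
+.Ed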
+.Sh RETURN VALUES
+This family of functions does not have a return value.
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_xor 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_pr_xor b/doc/ck_pr_xor
new file mode 100644
index 0000000..509f60d
--- /dev/null
+++ b/doc/ck_pr_xor
@@ -0,0 +1,93 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 11, 2013
+.Dt ck_pr_xor 3
+.Sh NAME
+.Nm ck_pr_xor_ptr ,
+.Nm ck_pr_xor_char ,
+.Nm ck_pr_xor_uint ,
+.Nm ck_pr_xor_int ,
+.Nm ck_pr_xor_64 ,
+.Nm ck_pr_xor_32 ,
+.Nm ck_pr_xor_16 ,
+.Nm ck_pr_xor_8
+.Nd atomic bitwise-xor operations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_pr.h
+.Ft void
+.Fn ck_pr_xor_ptr "void *target" "uintptr_t delta"
+.Ft void
+.Fn ck_pr_xor_char "char *target" "char delta"
+.Ft void
+.Fn ck_pr_xor_uint "unsigned int *target" "unsigned int delta"
+.Ft void
+.Fn ck_pr_xor_int "int *target" "int delta"
+.Ft void
+.Fn ck_pr_xor_64 "uint64_t *target" "uint64_t delta"
+.Ft void
+.Fn ck_pr_xor_32 "uint32_t *target" "uint32_t delta"
+.Ft void
+.Fn ck_pr_xor_16 "uint16_t *target" "uint16_t delta"
+.Ft void
+.Fn ck_pr_xor_8 "uint8_t *target" "uint8_t delta"
+.Sh DESCRIPTION
+The
+.Fn ck_pr_xor 3
+family of functions atomically computes and stores the
+result of a bitwise-xor of the value pointed to by
+.Fa target
+and
+.Fa delta
+into the value pointed to by
+.Fa target .
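+.Sh EXAMPLE
+A minimal sketch of toggling a flag bit in a shared word follows; the
+flag value is illustrative only.
+.Bd -literal -offset indent
+
+#include <ck_pr.h>
+#include <stdint.h>
+
+#define FLAG_ACTIVE 0x1U
+
+static uint32_t flags = 0;
+
+void
+function(void)
+{
+
+        /* Atomically compute flags ^= FLAG_ACTIVE. */
+        ck_pr_xor_32(&flags, FLAG_ACTIVE);
+        return;
+}
+.Ed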
+.Sh RETURN VALUES
+This family of functions does not have a return value.
+.Sh SEE ALSO
+.Xr ck_pr_fence_load 3 ,
+.Xr ck_pr_fence_load_depends 3 ,
+.Xr ck_pr_fence_store 3 ,
+.Xr ck_pr_fence_memory 3 ,
+.Xr ck_pr_load 3 ,
+.Xr ck_pr_store 3 ,
+.Xr ck_pr_fas 3 ,
+.Xr ck_pr_faa 3 ,
+.Xr ck_pr_inc 3 ,
+.Xr ck_pr_dec 3 ,
+.Xr ck_pr_neg 3 ,
+.Xr ck_pr_not 3 ,
+.Xr ck_pr_add 3 ,
+.Xr ck_pr_sub 3 ,
+.Xr ck_pr_or 3 ,
+.Xr ck_pr_and 3 ,
+.Xr ck_pr_cas 3 ,
+.Xr ck_pr_btc 3 ,
+.Xr ck_pr_bts 3 ,
+.Xr ck_pr_btr 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_queue b/doc/ck_queue
new file mode 100644
index 0000000..a27ec15
--- /dev/null
+++ b/doc/ck_queue
@@ -0,0 +1,147 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd July 28, 2013
+.Dt ck_queue 3
+.Sh NAME
+.Nm CK_LIST_EMPTY ,
+.Nm CK_LIST_ENTRY ,
+.Nm CK_LIST_FIRST ,
+.Nm CK_LIST_FOREACH ,
+.Nm CK_LIST_FOREACH_SAFE ,
+.Nm CK_LIST_HEAD ,
+.Nm CK_LIST_HEAD_INITIALIZER ,
+.Nm CK_LIST_INIT ,
+.Nm CK_LIST_INSERT_AFTER ,
+.Nm CK_LIST_INSERT_BEFORE ,
+.Nm CK_LIST_INSERT_HEAD ,
+.Nm CK_LIST_MOVE ,
+.Nm CK_LIST_NEXT ,
+.Nm CK_LIST_REMOVE ,
+.Nm CK_LIST_SWAP ,
+.Nm CK_SLIST_EMPTY ,
+.Nm CK_SLIST_ENTRY ,
+.Nm CK_SLIST_FIRST ,
+.Nm CK_SLIST_FOREACH ,
+.Nm CK_SLIST_FOREACH_PREVPTR ,
+.Nm CK_SLIST_FOREACH_SAFE ,
+.Nm CK_SLIST_HEAD ,
+.Nm CK_SLIST_HEAD_INITIALIZER ,
+.Nm CK_SLIST_INIT ,
+.Nm CK_SLIST_INSERT_AFTER ,
+.Nm CK_SLIST_INSERT_HEAD ,
+.Nm CK_SLIST_MOVE ,
+.Nm CK_SLIST_NEXT ,
+.Nm CK_SLIST_REMOVE ,
+.Nm CK_SLIST_REMOVE_AFTER ,
+.Nm CK_SLIST_REMOVE_HEAD ,
+.Nm CK_SLIST_SWAP ,
+.Nm CK_STAILQ_CONCAT ,
+.Nm CK_STAILQ_EMPTY ,
+.Nm CK_STAILQ_ENTRY ,
+.Nm CK_STAILQ_FIRST ,
+.Nm CK_STAILQ_FOREACH ,
+.Nm CK_STAILQ_FOREACH_SAFE ,
+.Nm CK_STAILQ_HEAD ,
+.Nm CK_STAILQ_HEAD_INITIALIZER ,
+.Nm CK_STAILQ_INIT ,
+.Nm CK_STAILQ_INSERT_AFTER ,
+.Nm CK_STAILQ_INSERT_HEAD ,
+.Nm CK_STAILQ_INSERT_TAIL ,
+.Nm CK_STAILQ_MOVE ,
+.Nm CK_STAILQ_NEXT ,
+.Nm CK_STAILQ_REMOVE ,
+.Nm CK_STAILQ_REMOVE_AFTER ,
+.Nm CK_STAILQ_REMOVE_HEAD ,
+.Nm CK_STAILQ_SWAP
+.Nd multi-reader single-writer lists, singly-linked lists and singly-linked tail queues
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_queue.h
+.Fn CK_LIST_EMPTY
+.Fn CK_LIST_ENTRY
+.Fn CK_LIST_FIRST
+.Fn CK_LIST_FOREACH
+.Fn CK_LIST_FOREACH_SAFE
+.Fn CK_LIST_HEAD
+.Fn CK_LIST_HEAD_INITIALIZER
+.Fn CK_LIST_INIT
+.Fn CK_LIST_INSERT_AFTER
+.Fn CK_LIST_INSERT_BEFORE
+.Fn CK_LIST_INSERT_HEAD
+.Fn CK_LIST_MOVE
+.Fn CK_LIST_NEXT
+.Fn CK_LIST_REMOVE
+.Fn CK_LIST_SWAP
+.Fn CK_SLIST_EMPTY
+.Fn CK_SLIST_ENTRY
+.Fn CK_SLIST_FIRST
+.Fn CK_SLIST_FOREACH
+.Fn CK_SLIST_FOREACH_PREVPTR
+.Fn CK_SLIST_FOREACH_SAFE
+.Fn CK_SLIST_HEAD
+.Fn CK_SLIST_HEAD_INITIALIZER
+.Fn CK_SLIST_INIT
+.Fn CK_SLIST_INSERT_AFTER
+.Fn CK_SLIST_INSERT_HEAD
+.Fn CK_SLIST_MOVE
+.Fn CK_SLIST_NEXT
+.Fn CK_SLIST_REMOVE
+.Fn CK_SLIST_REMOVE_AFTER
+.Fn CK_SLIST_REMOVE_HEAD
+.Fn CK_SLIST_SWAP
+.Fn CK_STAILQ_CONCAT
+.Fn CK_STAILQ_EMPTY
+.Fn CK_STAILQ_ENTRY
+.Fn CK_STAILQ_FIRST
+.Fn CK_STAILQ_FOREACH
+.Fn CK_STAILQ_FOREACH_SAFE
+.Fn CK_STAILQ_HEAD
+.Fn CK_STAILQ_HEAD_INITIALIZER
+.Fn CK_STAILQ_INIT
+.Fn CK_STAILQ_INSERT_AFTER
+.Fn CK_STAILQ_INSERT_HEAD
+.Fn CK_STAILQ_INSERT_TAIL
+.Fn CK_STAILQ_MOVE
+.Fn CK_STAILQ_NEXT
+.Fn CK_STAILQ_REMOVE
+.Fn CK_STAILQ_REMOVE_AFTER
+.Fn CK_STAILQ_REMOVE_HEAD
+.Fn CK_STAILQ_SWAP
+.Sh DESCRIPTION
+See your system's manual page for
+.Xr queue 3
+for additional information. ck_queue is a queue.h-compatible
+implementation of multi-reader single-writer queues. It allows
+for safe concurrent iteration, peeking and read-side access
+in the presence of a single concurrent writer without the use
+of locks. In many cases, adopting ck_queue simply requires
+prefixing all queue operations with CK_.
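+.Sh EXAMPLE
+The sketch below uses the CK_SLIST macros with a single writer
+inserting entries while readers iterate concurrently; the entry type
+and its field names are illustrative only.
+.Bd -literal -offset indent
+
+#include <ck_queue.h>
+#include <stdlib.h>
+
+struct entry {
+        int value;
+        CK_SLIST_ENTRY(entry) list_entry;
+};
+
+static CK_SLIST_HEAD(entry_list, entry) head =
+    CK_SLIST_HEAD_INITIALIZER(head);
+
+/* Only a single writer may execute this at any given time. */
+void
+writer_insert(int value)
+{
+        struct entry *e;
+
+        e = malloc(sizeof *e);
+        if (e == NULL)
+                return;
+
+        e->value = value;
+        CK_SLIST_INSERT_HEAD(&head, e, list_entry);
+        return;
+}
+
+/* Readers may execute this concurrently with the writer. */
+int
+reader_sum(void)
+{
+        struct entry *e;
+        int sum = 0;
+
+        CK_SLIST_FOREACH(e, &head, list_entry)
+                sum += e->value;
+
+        return sum;
+}
+.Ed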
+.Sh SEE ALSO
+.Xr queue 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_apply b/doc/ck_rhs_apply
new file mode 100644
index 0000000..80b1da7
--- /dev/null
+++ b/doc/ck_rhs_apply
@@ -0,0 +1,86 @@
+.\"
+.\" Copyright 2014 Samy Al Bahra.
+.\" Copyright 2014 Backtrace I/O, Inc.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 1, 2014
+.Dt CK_RHS_APPLY 3
+.Sh NAME
+.Nm ck_rhs_apply
+.Nd apply a function to hash set value
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft void *
+.Fn ck_rhs_apply_fn_t "void *key" "void *closure"
+.Ft bool
+.Fn ck_rhs_apply "ck_rhs_t *hs" "unsigned long hash" "const void *key" "ck_rhs_apply_fn_t *function" "void *argument"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_apply 3
+function will look up the hash set slot associated with
+.Fa key
+and pass it to the function pointed to by
+.Fa function
+for further action. This callback may remove or replace
+the value by respectively returning NULL or a pointer to
+another object with an identical key. The first argument
+passed to
+.Fa function
+is a pointer to the object found in the hash set and
+the second argument is the
+.Fa argument
+pointer passed to
+.Fn ck_rhs_apply 3 .
+If the pointer returned by
+.Fa function
+is equivalent to the first argument then no modification
+is made to the hash set.
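+.Sh EXAMPLE
+The sketch below uses the callback to remove a matching entry; the
+set hs is assumed to have been initialized with
+.Xr ck_rhs_init 3 ,
+and hs_hash names the hash callback it was given. Both names are
+illustrative only.
+.Bd -literal -offset indent
+
+#include <ck_rhs.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+/* Assumed to be defined and initialized elsewhere. */
+extern ck_rhs_t hs;
+extern unsigned long hs_hash(const void *, unsigned long);
+
+static void *
+remove_entry(void *key, void *closure)
+{
+
+        (void)key;
+        (void)closure;
+
+        /* Returning NULL requests removal of the entry. */
+        return NULL;
+}
+
+bool
+remove_key(const void *key)
+{
+        unsigned long h = CK_RHS_HASH(&hs, hs_hash, key);
+
+        return ck_rhs_apply(&hs, h, key, remove_entry, NULL);
+}
+.Ed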
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_rhs_apply 3
+returns true and otherwise returns false on failure.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_count b/doc/ck_rhs_count
new file mode 100644
index 0000000..3a42b12
--- /dev/null
+++ b/doc/ck_rhs_count
@@ -0,0 +1,70 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_RHS_COUNT 3
+.Sh NAME
+.Nm ck_rhs_count
+.Nd returns number of entries in hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft unsigned long
+.Fn ck_rhs_count "ck_rhs_t *hs"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_count 3
+function returns the number of keys currently
+stored in
+.Fa hs .
+.Sh ERRORS
+Behavior is undefined if
+.Fa hs
+is uninitialized. Behavior is
+undefined if this function is called by a non-writer
+thread.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_destroy b/doc/ck_rhs_destroy
new file mode 100644
index 0000000..68de27e
--- /dev/null
+++ b/doc/ck_rhs_destroy
@@ -0,0 +1,77 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_RHS_DESTROY 3
+.Sh NAME
+.Nm ck_rhs_destroy
+.Nd destroy hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft void
+.Fn ck_rhs_destroy "ck_rhs_t *hs"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_destroy 3
+function will request that the underlying allocator, as specified by the
+.Xr ck_rhs_init 3
+function, immediately destroy the object pointed to by the
+.Fa hs
+argument.
+The user must guarantee that no threads are accessing the object pointed to
+by
+.Fa hs
+when
+.Fn ck_rhs_destroy 3
+is called.
+.Sh RETURN VALUES
+.Fn ck_rhs_destroy 3
+has no return value.
+.Sh ERRORS
+This function is guaranteed not to fail.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_fas b/doc/ck_rhs_fas
new file mode 100644
index 0000000..453c40b
--- /dev/null
+++ b/doc/ck_rhs_fas
@@ -0,0 +1,98 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd June 20, 2013
+.Dt CK_RHS_FAS 3
+.Sh NAME
+.Nm ck_rhs_fas
+.Nd fetch and store key in hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft bool
+.Fn ck_rhs_fas "ck_rhs_t *hs" "unsigned long hash" "const void *key" "void **previous"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_fas 3
+function will fetch and store the key specified by the
+.Fa key
+argument in the hash set pointed to by the
+.Fa hs
+argument. The key specified by
+.Fa key
+is expected to have the hash value specified by the
+.Fa hash
+argument (which was previously generated using the
+.Xr CK_RHS_HASH 3
+macro).
+.Pp
+If the call to
+.Fn ck_rhs_fas 3
+was successful then the key specified by
+.Fa key
+was successfully stored in the hash set pointed to by
+.Fa hs .
+A key of equal value must already exist in the hash set; it is
+replaced by
+.Fa key
+and the previous value is stored into the void pointer
+pointed to by the
+.Fa previous
+argument. If no such key exists in the hash set
+then the function will return false and the hash set
+is unchanged. This function
+is guaranteed to be stable with respect to memory usage.
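+.Sh EXAMPLE
+A sketch of replacing an existing key follows; the set hs is assumed
+to have been initialized with
+.Xr ck_rhs_init 3 ,
+and hs_hash names the hash callback it was given. Both names are
+illustrative only.
+.Bd -literal -offset indent
+
+#include <ck_rhs.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+/* Assumed to be defined and initialized elsewhere. */
+extern ck_rhs_t hs;
+extern unsigned long hs_hash(const void *, unsigned long);
+
+void *
+replace(void *new_object)
+{
+        unsigned long h = CK_RHS_HASH(&hs, hs_hash, new_object);
+        void *previous;
+
+        /* Fails if no key of equal value already exists in hs. */
+        if (ck_rhs_fas(&hs, h, new_object, &previous) == false)
+                return NULL;
+
+        return previous;
+}
+.Ed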
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_rhs_fas 3
+returns true and otherwise returns false on failure.
+.Sh ERRORS
+Behavior is undefined if
+.Fa key
+or
+.Fa hs
+are uninitialized.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_gc b/doc/ck_rhs_gc
new file mode 100644
index 0000000..0ad5324
--- /dev/null
+++ b/doc/ck_rhs_gc
@@ -0,0 +1,73 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd December 17, 2013
+.Dt CK_RHS_GC 3
+.Sh NAME
+.Nm ck_rhs_gc
+.Nd perform maintenance on a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft bool
+.Fn ck_rhs_gc "ck_rhs_t *hs"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_gc 3
+function will perform various maintenance routines on the hash set
+pointed to by
+.Fa hs ,
+including recalculating the maximum number of probes.
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_rhs_gc 3
+returns true and otherwise returns false on memory allocation failure.
+.Sh ERRORS
+This function will only return false if there are internal memory allocation
+failures.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_get b/doc/ck_rhs_get
new file mode 100644
index 0000000..51c6e2f
--- /dev/null
+++ b/doc/ck_rhs_get
@@ -0,0 +1,88 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_RHS_GET 3
+.Sh NAME
+.Nm ck_rhs_get
+.Nd load a key from a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft void *
+.Fn ck_rhs_get "ck_rhs_t *hs" "unsigned long hash" "const void *key"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_get 3
+function will return a pointer to a key in the hash set
+.Fa hs
+that is of equivalent value to the object pointed to by
+.Fa key .
+The key specified by
+.Fa key
+is expected to have the hash value specified by the
+.Fa hash
+argument (which is to have been previously generated using the
+.Xr CK_RHS_HASH 3
+macro).
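+.Sh EXAMPLE
+A look-up sketch follows; the set hs is assumed to have been
+initialized with
+.Xr ck_rhs_init 3 ,
+and hs_hash names the hash callback it was given. Both names are
+illustrative only.
+.Bd -literal -offset indent
+
+#include <ck_rhs.h>
+#include <stddef.h>
+
+/* Assumed to be defined and initialized elsewhere. */
+extern ck_rhs_t hs;
+extern unsigned long hs_hash(const void *, unsigned long);
+
+void *
+lookup(const void *key)
+{
+        unsigned long h = CK_RHS_HASH(&hs, hs_hash, key);
+
+        /* Returns NULL if key is not a member of hs. */
+        return ck_rhs_get(&hs, h, key);
+}
+.Ed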
+.Sh RETURN VALUES
+If the provided key is a member of
+.Fa hs
+then a pointer to the key as stored in
+.Fa hs
+is returned. If the key was not found in
+.Fa hs
+then a value of
+.Dv NULL
+is returned.
+.Sh ERRORS
+Behavior is undefined if
+.Fa key
+or
+.Fa hs
+are uninitialized.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_grow b/doc/ck_rhs_grow
new file mode 100644
index 0000000..f1cac26
--- /dev/null
+++ b/doc/ck_rhs_grow
@@ -0,0 +1,81 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_RHS_GROW 3
+.Sh NAME
+.Nm ck_rhs_grow
+.Nd enlarge hash set capacity
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft bool
+.Fn ck_rhs_grow "ck_rhs_t *hs" "unsigned long capacity"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_grow 3
+function will resize the hash set in order to be
+able to store at least the number of entries specified by
+.Fa capacity
+at a load factor of one. The default hash set load factor
+is 0.5. If you wish to minimize the likelihood of memory allocations
+for a hash set meant to store n entries, then specify a
+.Fa capacity
+of 2n. The default behavior of ck_rhs is to round
+.Fa capacity
+to the next power of two if it is not already a power of two.
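+.Sh EXAMPLE
+A minimal pre-sizing sketch; the set and the function name are
+illustrative placeholders.
+.Bd -literal -offset indent
+#include <ck_rhs.h>
+#include <stdbool.h>
+
+/* Previously initialized with ck_rhs_init (see ck_rhs_init(3)). */
+ck_rhs_t hs;
+
+bool
+hs_reserve(unsigned long n)
+{
+
+	/*
+	 * Request room for 2n entries so that inserting n keys at the
+	 * default 0.5 load factor is unlikely to trigger reallocation.
+	 */
+	return ck_rhs_grow(&hs, 2 * n);
+}
+.Ed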
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_rhs_grow 3
+returns true and otherwise returns false on failure.
+.Sh ERRORS
+Behavior is undefined if
+.Fa hs
+is uninitialized. This function will only
+return false if there are internal memory allocation
+failures.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_init b/doc/ck_rhs_init
new file mode 100644
index 0000000..17c5097
--- /dev/null
+++ b/doc/ck_rhs_init
@@ -0,0 +1,166 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_RHS_INIT 3
+.Sh NAME
+.Nm ck_rhs_init
+.Nd initialize a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft typedef unsigned long
+.Fn ck_rhs_hash_cb_t "const void *key" "unsigned long seed"
+.Ft typedef bool
+.Fn ck_rhs_compare_cb_t "const void *c1" "const void *c2"
+.Ft bool
+.Fn ck_rhs_init "ck_rhs_t *hs" "unsigned int mode" "ck_rhs_hash_cb_t *hash_function" "ck_rhs_compare_cb_t *compare" "struct ck_malloc *allocator" "unsigned long capacity" "unsigned long seed"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_init
+function initializes the hash set pointed to by the
+.Fa hs
+pointer.
+.Pp
+The argument
+.Fa mode
+specifies the type of key-value pairs to be stored in the
+hash set as well as the expected concurrent access model.
+The value of
+.Fa mode
+consists of a bitfield of one of the following:
+.Bl -tag -width indent
+.It CK_RHS_MODE_OBJECT
+The hash set is meant to store pointers to objects. This provides
+a hint that only CK_MD_VMA_BITS bits are necessary to encode the key
+argument. Any unused pointer bits are leveraged for internal
+optimizations.
+.It CK_RHS_MODE_DIRECT
+The hash set is meant to directly store key values; all bits
+of the key are used to encode values.
+.It CK_RHS_MODE_READ_MOSTLY
+Optimize read operations over put/delete.
+.El
+.Pp
+The concurrent access model is specified by:
+.Bl -tag -width indent
+.It CK_RHS_MODE_SPMC
+The hash set should allow for concurrent readers in the
+presence of a single writer.
+.It CK_RHS_MODE_MPMC
+The hash set should allow for concurrent readers in the
+presence of concurrent writers. This is currently unsupported.
+.El
+.Pp
+The developer is free to specify additional workload hints.
+.Pp
+The argument
+.Fa hash_function
+is a mandatory pointer to a user-specified hash function.
+A user-specified hash function takes two arguments. The
+.Fa key
+argument is a pointer to a key. The
+.Fa seed
+argument is the initial seed associated with the hash set.
+This initial seed is specified by the user in
+.Xr ck_rhs_init 3 .
+.Pp
+The
+.Fa compare
+argument is an optional pointer to a user-specified
+key comparison function. If NULL is specified in this
+argument, then pointer equality will be used to determine
+key equality. A user-specified comparison function takes
+two arguments representing pointers to the objects being
+compared for equality. It is expected to return true
+if the keys are of equal value and false otherwise.
+.Pp
+The
+.Fa allocator
+argument is a pointer to a structure containing
+.Fa malloc
+and
+.Fa free
+function pointers which respectively define the memory allocation and
+destruction functions to be used by the hash set being initialized.
+.Pp
+The argument
+.Fa capacity
+represents the initial number of keys the hash
+set is expected to contain. This argument is simply a hint
+and the underlying implementation is free to allocate more
+or less memory than necessary to contain the number of entries
+.Fa capacity
+specifies.
+.Pp
+The argument
+.Fa seed
+specifies the initial seed used by the underlying hash function.
+The user is free to choose any value.
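+.Sh EXAMPLE
+The following is a self-contained initialization sketch. The helper
+names are placeholders, only the malloc and free members of
+struct ck_malloc are populated, and the callback signatures shown for
+them are assumptions to be verified against the installed ck_malloc.h.
+.Bd -literal -offset indent
+#include <ck_rhs.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* Thin wrappers around the C library allocator. */
+static void *
+hs_malloc(size_t r)
+{
+
+	return malloc(r);
+}
+
+static void
+hs_free(void *p, size_t b, bool r)
+{
+
+	(void)b;
+	(void)r;
+	free(p);
+	return;
+}
+
+static struct ck_malloc my_allocator = {
+	.malloc = hs_malloc,
+	.free = hs_free
+};
+
+/* Hash and comparison callbacks for NUL-terminated string keys. */
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+	const char *c = object;
+	unsigned long h = seed;
+
+	while (*c != 0)
+		h = h * 131 + (unsigned char)*c++;
+
+	return h;
+}
+
+static bool
+hs_compare(const void *previous, const void *compare)
+{
+
+	return strcmp(previous, compare) == 0;
+}
+
+ck_rhs_t hs;
+
+/* Initialize a single-writer, concurrent-reader set of string keys. */
+bool
+hs_setup(void)
+{
+
+	return ck_rhs_init(&hs, CK_RHS_MODE_OBJECT | CK_RHS_MODE_SPMC,
+	    hs_hash, hs_compare, &my_allocator, 32, 0x5eed);
+}
+.Ed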
+.Sh RETURN VALUES
+Upon successful completion
+.Fn ck_rhs_init
+returns a value of
+.Dv true
+and otherwise returns a value of
+.Dv false
+to indicate an error.
+.Sh ERRORS
+The behavior of
+.Fn ck_rhs_init
+is undefined if
+.Fa hs
+is not a pointer to a
+.Tn ck_rhs_t
+object.
+.Sh SEE ALSO
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_iterator_init b/doc/ck_rhs_iterator_init
new file mode 100644
index 0000000..4cfd083
--- /dev/null
+++ b/doc/ck_rhs_iterator_init
@@ -0,0 +1,78 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_RHS_ITERATOR_INIT 3
+.Sh NAME
+.Nm ck_rhs_iterator_init
+.Nd initialize hash set iterator
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Pp
+.Dv ck_rhs_iterator_t iterator = CK_RHS_ITERATOR_INITIALIZER
+.Pp
+.Ft void
+.Fn ck_rhs_iterator_init "ck_rhs_iterator_t *iterator"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_iterator_init 3
+function will initialize the object pointed to
+by the
+.Fa iterator
+argument. Alternatively, an iterator may be statically
+initialized with the CK_RHS_ITERATOR_INITIALIZER value.
+.Pp
+An iterator is used to iterate through hash set entries with the
+.Xr ck_rhs_next 3
+function.
+.Sh RETURN VALUES
+.Fn ck_rhs_iterator_init 3
+has no return value.
+.Sh ERRORS
+This function will not fail.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_move b/doc/ck_rhs_move
new file mode 100644
index 0000000..45e38e7
--- /dev/null
+++ b/doc/ck_rhs_move
@@ -0,0 +1,90 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd July 18, 2013
+.Dt CK_RHS_MOVE 3
+.Sh NAME
+.Nm ck_rhs_move
+.Nd move one hash set to another
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft bool
+.Fn ck_rhs_move "ck_rhs_t *destination" "ck_rhs_t *source" "ck_rhs_hash_cb_t *hash_cb" "ck_rhs_compare_cb_t *compare_cb" "struct ck_malloc *m"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_move 3
+function will initialize
+.Fa destination
+from
+.Fa source .
+The hash function is set to
+.Fa hash_cb ,
+comparison function to
+.Fa compare_cb
+and the allocator callbacks to
+.Fa m .
+Further modifications to
+.Fa source
+will result in undefined behavior. Concurrent
+.Xr ck_rhs_get 3
+and
+.Xr ck_rhs_fas 3
+operations to
+.Fa source
+are legal until the next write operation to
+.Fa destination .
+.Pp
+This operation moves ownership from one hash set object
+to another and re-assigns callback functions to developer-specified
+values. This allows for dynamic configuration of allocation
+callbacks and is necessary for use-cases involving executable code
+which may be unmapped underneath the hash set.
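+.Sh EXAMPLE
+A brief ownership-transfer sketch; both sets, the callbacks and the
+allocator are assumed to have been prepared as in
+.Xr ck_rhs_init 3 .
+.Bd -literal -offset indent
+#include <ck_rhs.h>
+#include <stdbool.h>
+
+ck_rhs_t source_set;	/* Previously initialized and populated. */
+ck_rhs_t destination_set;
+
+unsigned long hs_hash(const void *, unsigned long);
+bool hs_compare(const void *, const void *);
+struct ck_malloc my_allocator;
+
+bool
+hs_adopt(void)
+{
+
+	/* destination_set takes over the entries of source_set. */
+	return ck_rhs_move(&destination_set, &source_set,
+	    hs_hash, hs_compare, &my_allocator);
+}
+.Ed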
+.Sh RETURN VALUES
+Upon successful completion
+.Fn ck_rhs_move 3
+returns true and otherwise returns false to indicate an error.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_next b/doc/ck_rhs_next
new file mode 100644
index 0000000..c90a7d6
--- /dev/null
+++ b/doc/ck_rhs_next
@@ -0,0 +1,92 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_RHS_NEXT 3
+.Sh NAME
+.Nm ck_rhs_next
+.Nd iterate to next entry in hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft bool
+.Fn ck_rhs_next "ck_rhs_t *hs" "ck_rhs_iterator_t *iterator" "void **entry"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_next 3
+function will increment the iterator object pointed to by
+.Fa iterator
+to point to the next non-empty hash set entry. If
+.Fn ck_rhs_next 3
+returns true then the pointer pointed to by
+.Fa entry
+is initialized to the current hash set key pointed to by the
+.Fa iterator
+object.
+.Pp
+It is expected that
+.Fa iterator
+has been initialized using the
+.Xr ck_rhs_iterator_init 3
+function or statically initialized using CK_RHS_ITERATOR_INITIALIZER.
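+.Sh EXAMPLE
+A traversal sketch; keys are assumed to be NUL-terminated strings and
+iteration is assumed to be serialized with respect to writers.
+.Bd -literal -offset indent
+#include <ck_rhs.h>
+#include <stdio.h>
+
+/* Previously initialized with ck_rhs_init (see ck_rhs_init(3)). */
+ck_rhs_t hs;
+
+void
+hs_dump(void)
+{
+	ck_rhs_iterator_t it = CK_RHS_ITERATOR_INITIALIZER;
+	void *entry;
+
+	/* Visit every key currently stored in the set. */
+	while (ck_rhs_next(&hs, &it, &entry) == true)
+		puts((const char *)entry);
+
+	return;
+}
+.Ed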
+.Sh RETURN VALUES
+If
+.Fn ck_rhs_next 3
+returns true then the object pointed to by
+.Fa entry
+points to a valid hash set key. If
+.Fn ck_rhs_next 3
+returns false then the value of the object pointed to by
+.Fa entry
+is undefined.
+.Sh ERRORS
+Behavior is undefined if
+.Fa iterator
+or
+.Fa hs
+are uninitialized.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_put b/doc/ck_rhs_put
new file mode 100644
index 0000000..8df9b65
--- /dev/null
+++ b/doc/ck_rhs_put
@@ -0,0 +1,98 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_RHS_PUT 3
+.Sh NAME
+.Nm ck_rhs_put
+.Nd store unique key into a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft bool
+.Fn ck_rhs_put "ck_rhs_t *hs" "unsigned long hash" "const void *key"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_put 3
+function will store the key specified by the
+.Fa key
+argument in the hash set pointed to by the
+.Fa hs
+argument. The key specified by
+.Fa key
+is expected to have the hash value specified by the
+.Fa hash
+argument (which was previously generated using the
+.Xr CK_RHS_HASH 3
+macro).
+.Pp
+If the call to
+.Fn ck_rhs_put 3
+was successful then the key specified by
+.Fa key
+was successfully stored in the hash set pointed to by
+.Fa hs .
+The function will fail if a key with an
+equivalent value to
+.Fa key
+is already present in the hash set. For replacement
+semantics, please see the
+.Xr ck_rhs_set 3
+function.
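+.Sh EXAMPLE
+A minimal insertion sketch; the set and the hash callback follow the
+.Xr ck_rhs_init 3
+example, and the argument order of the
+.Xr CK_RHS_HASH 3
+macro is an assumption.
+.Bd -literal -offset indent
+#include <ck_rhs.h>
+#include <stdbool.h>
+
+/* Previously initialized with ck_rhs_init (see ck_rhs_init(3)). */
+ck_rhs_t hs;
+unsigned long hs_hash(const void *, unsigned long);
+
+bool
+hs_insert(const char *key)
+{
+	unsigned long h = CK_RHS_HASH(&hs, hs_hash, key);
+
+	/* Fails if the key is already present or memory is exhausted. */
+	return ck_rhs_put(&hs, h, key);
+}
+.Ed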
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_rhs_put 3
+returns true and otherwise returns false on failure.
+.Sh ERRORS
+Behavior is undefined if
+.Fa key
+or
+.Fa hs
+are uninitialized. The function will also
+return false if the hash set could not be enlarged
+to accommodate key insertion.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_put_unique b/doc/ck_rhs_put_unique
new file mode 100644
index 0000000..4f941ab
--- /dev/null
+++ b/doc/ck_rhs_put_unique
@@ -0,0 +1,98 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd December 7, 2013
+.Dt CK_RHS_PUT_UNIQUE 3
+.Sh NAME
+.Nm ck_rhs_put_unique
+.Nd unconditionally store unique key into a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft bool
+.Fn ck_rhs_put_unique "ck_rhs_t *hs" "unsigned long hash" "const void *key"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_put_unique 3
+function will store the key specified by the
+.Fa key
+argument in the hash set pointed to by the
+.Fa hs
+argument. The key specified by
+.Fa key
+is expected to have the hash value specified by the
+.Fa hash
+argument (which was previously generated using the
+.Xr CK_RHS_HASH 3
+macro).
+.Pp
+If the call to
+.Fn ck_rhs_put_unique 3
+was successful then the key specified by
+.Fa key
+was successfully stored in the hash set pointed to by
+.Fa hs .
+The function will cause undefined behavior if a key with an
+equivalent value is already present in the hash set. For replacement
+semantics, please see the
+.Xr ck_rhs_set 3
+function.
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_rhs_put_unique 3
+returns true and otherwise returns false on failure.
+.Sh ERRORS
+Behavior is undefined if
+.Fa key
+or
+.Fa hs
+are uninitialized. The function will also
+return false if the hash set could not be enlarged
+to accommodate key insertion. The function will
+result in undefined behavior if called for an
+already inserted key value.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_rebuild b/doc/ck_rhs_rebuild
new file mode 100644
index 0000000..8ab9b50
--- /dev/null
+++ b/doc/ck_rhs_rebuild
@@ -0,0 +1,76 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd December 7, 2013
+.Dt CK_RHS_REBUILD 3
+.Sh NAME
+.Nm ck_rhs_rebuild
+.Nd rebuild a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft bool
+.Fn ck_rhs_rebuild "ck_rhs_t *hs"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_rebuild 3
+function will regenerate the hash set pointed to by
+.Fa hs .
+This has the side-effect of pruning the performance degradation
+accumulated by delete-heavy workloads. The regenerated hash
+set should have shorter probe sequences on average. This
+operation will require a significant amount of memory
+and is free to allocate a duplicate hash set in the
+rebuild process.
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_rhs_rebuild 3
+returns true and otherwise returns false on failure.
+.Sh ERRORS
+This function will only return false if there are internal memory allocation
+failures.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_remove b/doc/ck_rhs_remove
new file mode 100644
index 0000000..c83bf38
--- /dev/null
+++ b/doc/ck_rhs_remove
@@ -0,0 +1,92 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_RHS_REMOVE 3
+.Sh NAME
+.Nm ck_rhs_remove
+.Nd remove key from a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft void *
+.Fn ck_rhs_remove "ck_rhs_t *hs" "unsigned long hash" "const void *key"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_remove 3
+function will attempt to remove the key specified by the
+.Fa key
+argument in the hash set pointed to by the
+.Fa hs
+argument. The key specified by
+.Fa key
+is expected to have the hash value specified by the
+.Fa hash
+argument (which was previously generated using the
+.Xr CK_RHS_HASH 3
+macro).
+.Pp
+If the call to
+.Fn ck_rhs_remove 3
+was successful then the key contained in the hash
+set is returned. If the key was not a member of the hash
+set then
+.Dv NULL
+is returned.
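+.Sh EXAMPLE
+A removal sketch; the set and the hash callback follow the
+.Xr ck_rhs_init 3
+example, and the argument order of the
+.Xr CK_RHS_HASH 3
+macro is an assumption.
+.Bd -literal -offset indent
+#include <ck_rhs.h>
+
+/* Previously initialized with ck_rhs_init (see ck_rhs_init(3)). */
+ck_rhs_t hs;
+unsigned long hs_hash(const void *, unsigned long);
+
+void *
+hs_delete(const char *key)
+{
+	unsigned long h = CK_RHS_HASH(&hs, hs_hash, key);
+
+	/* Returns the stored pointer so the caller may release it. */
+	return ck_rhs_remove(&hs, h, key);
+}
+.Ed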
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_rhs_remove 3
+returns a pointer to a key and otherwise returns
+.Dv NULL
+on failure.
+.Sh ERRORS
+Behavior is undefined if
+.Fa key
+or
+.Fa hs
+are uninitialized.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_reset b/doc/ck_rhs_reset
new file mode 100644
index 0000000..a750d85
--- /dev/null
+++ b/doc/ck_rhs_reset
@@ -0,0 +1,77 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_RHS_RESET 3
+.Sh NAME
+.Nm ck_rhs_reset
+.Nd remove all keys from a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft bool
+.Fn ck_rhs_reset "ck_rhs_t *hs"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_reset 3
+function will remove all keys stored in the hash
+set pointed to by the
+.Fa hs
+argument.
+.Sh RETURN VALUES
+If successful,
+.Fn ck_rhs_reset 3
+will return true and will otherwise return false on failure. This
+function will only fail if a replacement hash set could not be
+allocated internally.
+.Sh ERRORS
+Behavior is undefined if
+.Fa hs
+is uninitialized. Behavior is
+undefined if this function is called by a non-writer
+thread.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_reset_size b/doc/ck_rhs_reset_size
new file mode 100644
index 0000000..6e9913e
--- /dev/null
+++ b/doc/ck_rhs_reset_size
@@ -0,0 +1,80 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd May 5, 2013
+.Dt CK_RHS_RESET_SIZE 3
+.Sh NAME
+.Nm ck_rhs_reset_size
+.Nd remove all keys from a hash set and resize it
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft bool
+.Fn ck_rhs_reset_size "ck_rhs_t *hs" "unsigned long size"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_reset_size 3
+function will remove all keys stored in the hash
+set pointed to by the
+.Fa hs
+argument and create a new generation of the hash set that
+is preallocated for
+.Fa size
+entries.
+.Sh RETURN VALUES
+If successful,
+.Fn ck_rhs_reset_size 3
+will return true and will otherwise return false on failure. This
+function will only fail if a replacement hash set could not be
+allocated internally.
+.Sh ERRORS
+Behavior is undefined if
+.Fa hs
+is uninitialized. Behavior is
+undefined if this function is called by a non-writer
+thread.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_set b/doc/ck_rhs_set
new file mode 100644
index 0000000..6f3e280
--- /dev/null
+++ b/doc/ck_rhs_set
@@ -0,0 +1,102 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_RHS_SET 3
+.Sh NAME
+.Nm ck_rhs_set
+.Nd store key into a hash set
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft bool
+.Fn ck_rhs_set "ck_rhs_t *hs" "unsigned long hash" "const void *key" "void **previous"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_set 3
+function will store the key specified by the
+.Fa key
+argument in the hash set pointed to by the
+.Fa hs
+argument. The key specified by
+.Fa key
+is expected to have the hash value specified by the
+.Fa hash
+argument (which was previously generated using the
+.Xr CK_RHS_HASH 3
+macro).
+.Pp
+If the call to
+.Fn ck_rhs_set 3
+was successful then the key specified by
+.Fa key
+was successfully stored in the hash set pointed to by
+.Fa hs .
+If the key already exists in the hash set, then it is
+replaced by
+.Fa key
+and the previous value is stored into the void pointer
+pointed to by the
+.Fa previous
+argument. If the value stored into
+.Fa previous
+is
+.Dv NULL
+then
+.Fa key
+was not a replacement for an existing entry in the hash set.
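+.Sh EXAMPLE
+An insert-or-replace sketch; the set and the hash callback follow the
+.Xr ck_rhs_init 3
+example, the argument order of the
+.Xr CK_RHS_HASH 3
+macro is an assumption, and release_key is a hypothetical routine
+that disposes of a displaced key.
+.Bd -literal -offset indent
+#include <ck_rhs.h>
+#include <stdbool.h>
+
+/* Previously initialized with ck_rhs_init (see ck_rhs_init(3)). */
+ck_rhs_t hs;
+unsigned long hs_hash(const void *, unsigned long);
+void release_key(void *);
+
+bool
+hs_upsert(const char *key)
+{
+	unsigned long h = CK_RHS_HASH(&hs, hs_hash, key);
+	void *previous;
+
+	if (ck_rhs_set(&hs, h, key, &previous) == false)
+		return false;
+
+	/* previous is NULL for a fresh insert, else the displaced key. */
+	if (previous != NULL)
+		release_key(previous);
+
+	return true;
+}
+.Ed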
+.Sh RETURN VALUES
+Upon successful completion,
+.Fn ck_rhs_set 3
+returns true and otherwise returns false on failure.
+.Sh ERRORS
+Behavior is undefined if
+.Fa key
+or
+.Fa hs
+are uninitialized. The function will also
+return false if the hash set could not be enlarged
+to accommodate key insertion.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3 ,
+.Xr ck_rhs_stat 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_set_load_factor b/doc/ck_rhs_set_load_factor
new file mode 100644
index 0000000..4ecb847
--- /dev/null
+++ b/doc/ck_rhs_set_load_factor
@@ -0,0 +1,72 @@
+.\"
+.\" Copyright 2015 Olivier Houchard.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd May 16, 2015
+.Dt CK_RHS_SET_LOAD_FACTOR 3
+.Sh NAME
+.Nm ck_rhs_set_load_factor
+.Nd change the hash set load factor
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft bool
+.Fn ck_rhs_set_load_factor "ck_rhs_t *hs" "unsigned int load_factor"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_set_load_factor 3
+function will change the load factor of the hash set. The hash set
+is grown once it becomes more than
+.Fa load_factor
+percent full.
+.Sh RETURN VALUES
+.Fn ck_rhs_set_load_factor 3
+returns true on success, and false if the load factor is invalid (0 or
+greater than 100) or if a required grow operation failed.
+.Sh ERRORS
+Behavior is undefined if
+.Fa hs
+is uninitialized. Behavior is
+undefined if this function is called by a non-writer
+thread.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rhs_stat b/doc/ck_rhs_stat
new file mode 100644
index 0000000..df45672
--- /dev/null
+++ b/doc/ck_rhs_stat
@@ -0,0 +1,80 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd September 17, 2012
+.Dt CK_RHS_STAT 3
+.Sh NAME
+.Nm ck_rhs_stat
+.Nd get hash set status
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rhs.h
+.Ft void
+.Fn ck_rhs_stat "ck_rhs_t *hs" "struct ck_rhs_stat *st"
+.Sh DESCRIPTION
+The
+.Fn ck_rhs_stat 3
+function will store various hash set statistics in
+the object pointed to by
+.Fa st .
+The ck_rhs_stat structure is defined as follows:
+.Bd -literal -offset indent
+struct ck_rhs_stat {
+ unsigned long n_entries; /* Current number of keys in hash set. */
+ unsigned int probe_maximum; /* Longest read-side probe sequence. */
+};
+.Ed
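+.Sh EXAMPLE
+A reporting sketch using the fields described above; the set name is
+illustrative.
+.Bd -literal -offset indent
+#include <ck_rhs.h>
+#include <stdio.h>
+
+/* Previously initialized with ck_rhs_init (see ck_rhs_init(3)). */
+ck_rhs_t hs;
+
+void
+hs_report(void)
+{
+	struct ck_rhs_stat st;
+
+	ck_rhs_stat(&hs, &st);
+	printf("%lu entries, longest probe sequence %u\en",
+	    st.n_entries, st.probe_maximum);
+	return;
+}
+.Ed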
+.Sh RETURN VALUES
+.Fn ck_rhs_stat 3
+has no return value.
+.Sh ERRORS
+Behavior is undefined if
+.Fa hs
+is uninitialized. Behavior is
+undefined if this function is called by a non-writer
+thread.
+.Sh SEE ALSO
+.Xr ck_rhs_init 3 ,
+.Xr ck_rhs_move 3 ,
+.Xr ck_rhs_destroy 3 ,
+.Xr CK_RHS_HASH 3 ,
+.Xr ck_rhs_iterator_init 3 ,
+.Xr ck_rhs_next 3 ,
+.Xr ck_rhs_get 3 ,
+.Xr ck_rhs_put 3 ,
+.Xr ck_rhs_put_unique 3 ,
+.Xr ck_rhs_set 3 ,
+.Xr ck_rhs_fas 3 ,
+.Xr ck_rhs_remove 3 ,
+.Xr ck_rhs_grow 3 ,
+.Xr ck_rhs_gc 3 ,
+.Xr ck_rhs_rebuild 3 ,
+.Xr ck_rhs_count 3 ,
+.Xr ck_rhs_reset 3 ,
+.Xr ck_rhs_reset_size 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ring_capacity b/doc/ck_ring_capacity
new file mode 100644
index 0000000..645b54b
--- /dev/null
+++ b/doc/ck_ring_capacity
@@ -0,0 +1,58 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 20, 2013
+.Dt CK_RING_CAPACITY 3
+.Sh NAME
+.Nm ck_ring_capacity
+.Nd return number of pointer slots in bounded FIFO
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ring.h
+.Ft unsigned int
+.Fn ck_ring_capacity "ck_ring_t *ring"
+.Sh DESCRIPTION
+The
+.Fn ck_ring_capacity 3
+function returns the number of pointers that can be
+held in the buffer pointed to by
+.Fa ring .
+Note that a ring can only hold
+.Fn ck_ring_capacity 3
+minus one entries at a time.
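+.Sh EXAMPLE
+A fullness-check sketch; the ring is assumed to have been initialized
+with
+.Xr ck_ring_init 3 .
+.Bd -literal -offset indent
+#include <ck_ring.h>
+#include <stdbool.h>
+
+/* This ring was previously initialized with ck_ring_init. */
+ck_ring_t ring;
+
+bool
+ring_is_full(void)
+{
+
+	/* A ring holds at most capacity minus one entries at a time. */
+	return ck_ring_size(&ring) == ck_ring_capacity(&ring) - 1;
+}
+.Ed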
+.Sh SEE ALSO
+.Xr ck_ring_init 3 ,
+.Xr ck_ring_enqueue_spmc 3 ,
+.Xr ck_ring_dequeue_spmc 3 ,
+.Xr ck_ring_trydequeue_spmc 3 ,
+.Xr ck_ring_enqueue_spmc_size 3 ,
+.Xr ck_ring_dequeue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc_size 3 ,
+.Xr ck_ring_size 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ring_dequeue_spmc b/doc/ck_ring_dequeue_spmc
new file mode 100644
index 0000000..7fd7d9b
--- /dev/null
+++ b/doc/ck_ring_dequeue_spmc
@@ -0,0 +1,117 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 20, 2013
+.Dt CK_RING_DEQUEUE_SPMC 3
+.Sh NAME
+.Nm ck_ring_dequeue_spmc
+.Nd dequeue pointer from bounded FIFO
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ring.h
+.Ft bool
+.Fn ck_ring_dequeue_spmc "ck_ring_t *ring" "ck_ring_buffer_t *buffer" "void *result"
+.Sh DESCRIPTION
+The
+.Fn ck_ring_dequeue_spmc 3
+function dequeues a pointer from the bounded buffer
+pointed to by
+.Fa ring
+in FIFO fashion. The pointer is stored in the pointer
+pointed to by
+.Fa result .
+The buffer pointed to by
+.Fa buffer
+must be unique to
+.Fa ring .
+The decoupling of the ring from the buffer serves
+to address use-cases involving multiple address spaces
+and DMA, among others.
+If you are on non-POSIX platforms or wish for strict
+compliance with C, then it is recommended to pass a
+pointer of type void ** for
+.Fa result .
+This function is safe to call without locking for UINT_MAX
+concurrent invocations of
+.Fn ck_ring_dequeue_spmc 3
+or
+.Fn ck_ring_trydequeue_spmc 3
+and up to one concurrent
+.Fn ck_ring_enqueue_spmc 3
+or
+.Fn ck_ring_enqueue_spmc_size 3
+invocation. This function provides lock-free progress
+guarantees.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <ck_ring.h>
+
+/* This ring was previously initialized with ck_ring_init. */
+ck_ring_t ring;
+
+/* The ring was initialized for 1023 elements. */
+ck_ring_buffer_t buffer[1024];
+
+void
+dequeue(void)
+{
+ void *result;
+
+ /* Dequeue from ring until it is empty. */
+	while (ck_ring_dequeue_spmc(&ring, buffer, &result) == true) {
+ /*
+		 * result contains the oldest pointer in ring
+ * since the dequeue operation returned true.
+ */
+ operation(result);
+ }
+
+ /* An empty ring was encountered, leave. */
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+The function returns true if the buffer was non-empty.
+The result of the dequeue operation is stored in the
+value pointed to by
+.Fa result .
+The function will return false if the buffer was empty
+and the value in
+.Fa result
+will be undefined.
+.Sh SEE ALSO
+.Xr ck_ring_init 3 ,
+.Xr ck_ring_trydequeue_spmc 3 ,
+.Xr ck_ring_enqueue_spmc 3 ,
+.Xr ck_ring_enqueue_spmc_size 3 ,
+.Xr ck_ring_dequeue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc_size 3 ,
+.Xr ck_ring_capacity 3 ,
+.Xr ck_ring_size 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ring_dequeue_spsc b/doc/ck_ring_dequeue_spsc
new file mode 100644
index 0000000..069dc7f
--- /dev/null
+++ b/doc/ck_ring_dequeue_spsc
@@ -0,0 +1,115 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 20, 2013
+.Dt CK_RING_DEQUEUE_SPSC 3
+.Sh NAME
+.Nm ck_ring_dequeue_spsc
+.Nd dequeue pointer from bounded FIFO
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ring.h
+.Ft bool
+.Fn ck_ring_dequeue_spsc "ck_ring_t *ring" "ck_ring_buffer_t *buffer" "void *result"
+.Sh DESCRIPTION
+The
+.Fn ck_ring_dequeue_spsc 3
+function dequeues a pointer from the bounded buffer
+pointed to by
+.Fa ring
+in FIFO fashion. The pointer is stored in the pointer
+pointed to by
+.Fa result .
+The buffer pointed to by
+.Fa buffer
+must be unique to
+.Fa ring
+and point to an array of ck_ring_buffer_t of sufficient
+length (according to the power-of-2 elements in the buffer).
+The decoupling of the ring from the buffer serves
+to address use-cases involving multiple address spaces
+and DMA, among others.
+If you are on non-POSIX platforms or wish for strict
+compliance with C, then it is recommended to pass a
+pointer of type void ** for
+.Fa result .
+This function is safe to call without locking for one
+concurrent invocation of
+.Fn ck_ring_dequeue_spsc 3
+and up to one concurrent
+.Fn ck_ring_enqueue_spsc 3
+invocation. This function provides wait-free progress
+guarantees.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <ck_ring.h>
+
+/* This ring was previously initialized with ck_ring_init. */
+ck_ring_t ring;
+
+/* The ring was initialized for 1023 elements. */
+ck_ring_buffer_t buffer[1024];
+
+void
+dequeue(void)
+{
+ void *result;
+
+ /* Dequeue from ring until it is empty. */
+	while (ck_ring_dequeue_spsc(&ring, buffer, &result) == true) {
+ /*
+		 * result contains the oldest pointer in ring
+ * since the dequeue operation returned true.
+ */
+ operation(result);
+ }
+
+ /* An empty ring was encountered, leave. */
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+The function returns true if the buffer was non-empty.
+The result of the dequeue operation is stored in the
+value pointed to by
+.Fa result .
+The function will return false if the buffer was empty
+and the value in
+.Fa result
+will be undefined.
+.Sh SEE ALSO
+.Xr ck_ring_init 3 ,
+.Xr ck_ring_trydequeue_spmc 3 ,
+.Xr ck_ring_enqueue_spmc 3 ,
+.Xr ck_ring_enqueue_spmc_size 3 ,
+.Xr ck_ring_dequeue_spmc 3 ,
+.Xr ck_ring_enqueue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc_size 3 ,
+.Xr ck_ring_capacity 3 ,
+.Xr ck_ring_size 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ring_enqueue_spmc b/doc/ck_ring_enqueue_spmc
new file mode 100644
index 0000000..ba99199
--- /dev/null
+++ b/doc/ck_ring_enqueue_spmc
@@ -0,0 +1,115 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 20, 2013
+.Dt CK_RING_ENQUEUE_SPMC 3
+.Sh NAME
+.Nm ck_ring_enqueue_spmc
+.Nd enqueue pointer into bounded FIFO
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ring.h
+.Ft bool
+.Fn ck_ring_enqueue_spmc "ck_ring_t *ring" "ck_ring_buffer_t *buffer" "void *entry"
+.Sh DESCRIPTION
+The
+.Fn ck_ring_enqueue_spmc 3
+function enqueues the pointer
+.Fa entry
+into the bounded buffer pointed to by
+.Fa ring
+in FIFO fashion.
+The buffer pointed to by
+.Fa buffer
+must be unique to
+.Fa ring
+and point to an array of ck_ring_buffer_t whose length is at least
+the power-of-2 number of elements the ring was initialized for.
+The decoupling of the ring from the buffer serves
+to address use-cases involving multiple address spaces
+and DMA, among others.
+If you are on non-POSIX platforms or wish for strict
+compliance with C, then it is recommended to pass a
+pointer of type void ** for
+.Fa entry .
+This function is safe to call without locking for UINT_MAX
+concurrent invocations of
+.Fn ck_ring_dequeue_spmc 3
+or
+.Fn ck_ring_trydequeue_spmc 3 .
+This function provides wait-free progress
+guarantees for one active invocation.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <ck_ring.h>
+
+/* This ring was previously initialized with ck_ring_init. */
+ck_ring_t ring;
+
+/* The ring was initialized for 1023 elements. */
+ck_ring_buffer_t buffer[1024];
+
+void
+enqueue(void)
+{
+ void *entry = some_object;
+
+ /* Attempt to enqueue pointer to some_object into buffer. */
+	if (ck_ring_enqueue_spmc(&ring, buffer, entry) == false) {
+ /*
+ * The buffer was full and the enqueue operation
+ * has failed.
+ */
+ return;
+ }
+
+ /* Enqueue operation completed successfully. */
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+The function returns true if the value of
+.Fa entry
+was successfully enqueued into
+.Fa ring .
+The function will return false if the value of
+.Fa entry
+could not be enqueued, which only occurs if
+.Fa ring
+was full.
+.Sh SEE ALSO
+.Xr ck_ring_init 3 ,
+.Xr ck_ring_dequeue_spmc 3 ,
+.Xr ck_ring_trydequeue_spmc 3 ,
+.Xr ck_ring_enqueue_spmc_size 3 ,
+.Xr ck_ring_dequeue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc_size 3 ,
+.Xr ck_ring_capacity 3 ,
+.Xr ck_ring_size 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ring_enqueue_spmc_size b/doc/ck_ring_enqueue_spmc_size
new file mode 100644
index 0000000..eb30cab
--- /dev/null
+++ b/doc/ck_ring_enqueue_spmc_size
@@ -0,0 +1,127 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 20, 2013
+.Dt CK_RING_ENQUEUE_SPMC_SIZE 3
+.Sh NAME
+.Nm ck_ring_enqueue_spmc_size
+.Nd enqueue pointer into bounded FIFO and return size of buffer
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ring.h
+.Ft bool
+.Fn ck_ring_enqueue_spmc_size "ck_ring_t *ring" "ck_ring_buffer_t *buffer" "void *entry" "unsigned int *length"
+.Sh DESCRIPTION
+The
+.Fn ck_ring_enqueue_spmc_size 3
+function enqueues the pointer
+.Fa entry
+into the bounded buffer pointed to by
+.Fa ring
+in FIFO fashion.
+The buffer pointed to by
+.Fa buffer
+must be unique to
+.Fa ring
+and point to an array of ck_ring_buffer_t whose length is at least
+the power-of-2 number of elements the ring was initialized for.
+The decoupling of the ring from the buffer serves
+to address use-cases involving multiple address spaces
+and DMA, among others.
+If you are on non-POSIX platforms or wish for strict
+compliance with C, then it is recommended to pass a
+pointer of type void ** for
+.Fa entry .
+This function is safe to call without locking for UINT_MAX
+concurrent invocations of
+.Fn ck_ring_dequeue_spmc 3
+or
+.Fn ck_ring_trydequeue_spmc 3 .
+This function provides wait-free progress
+guarantees for one active invocation.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <ck_ring.h>
+
+/* This ring was previously initialized with ck_ring_init. */
+ck_ring_t ring;
+
+/* The ring was initialized for 1023 elements. */
+ck_ring_buffer_t buffer[1024];
+
+void
+enqueue(void)
+{
+ void *entry = some_object;
+ unsigned int length;
+
+ /* Attempt to enqueue pointer to some_object into buffer. */
+	if (ck_ring_enqueue_spmc_size(&ring, buffer, entry, &length) == false) {
+ /*
+ * The buffer was full and the enqueue operation
+ * has failed.
+ */
+ return;
+ }
+
+ /*
+ * If entry was the 101st or greater pointer in the buffer,
+ * do something.
+ */
+ if (length > 100) {
+		do_something();
+ }
+
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+The function returns true if the value of
+.Fa entry
+was successfully enqueued into
+.Fa ring .
+The function will return false if the value of
+.Fa entry
+could not be enqueued, which only occurs if
+.Fa ring
+was full. The number of entries in the buffer
+with respect to the point in time that
+.Fa entry
+is enqueued is stored in the integer pointed to by
+.Fa length .
+.Sh SEE ALSO
+.Xr ck_ring_init 3 ,
+.Xr ck_ring_dequeue_spmc 3 ,
+.Xr ck_ring_trydequeue_spmc 3 ,
+.Xr ck_ring_enqueue_spmc 3 ,
+.Xr ck_ring_dequeue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc_size 3 ,
+.Xr ck_ring_capacity 3 ,
+.Xr ck_ring_size 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ring_enqueue_spsc b/doc/ck_ring_enqueue_spsc
new file mode 100644
index 0000000..2493059
--- /dev/null
+++ b/doc/ck_ring_enqueue_spsc
@@ -0,0 +1,113 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 20, 2013
+.Dt CK_RING_ENQUEUE_SPSC 3
+.Sh NAME
+.Nm ck_ring_enqueue_spsc
+.Nd enqueue pointer into bounded FIFO
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ring.h
+.Ft bool
+.Fn ck_ring_enqueue_spsc "ck_ring_t *ring" "ck_ring_buffer_t *buffer" "void *entry"
+.Sh DESCRIPTION
+The
+.Fn ck_ring_enqueue_spsc 3
+function enqueues the pointer
+.Fa entry
+into the bounded buffer pointed to by
+.Fa ring
+in FIFO fashion.
+The buffer pointed to by
+.Fa buffer
+must be unique to
+.Fa ring
+and point to an array of ck_ring_buffer_t whose length is at least
+the power-of-2 number of elements the ring was initialized for.
+The decoupling of the ring from the buffer serves
+to address use-cases involving multiple address spaces
+and DMA, among others.
+If you are on non-POSIX platforms or wish for strict
+compliance with C, then it is recommended to pass a
+pointer of type void ** for
+.Fa entry .
+This function is safe to call without locking for up to
+one concurrent invocation of
+.Fn ck_ring_dequeue_spsc 3 .
+This function provides wait-free progress
+guarantees.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <ck_ring.h>
+
+/* This ring was previously initialized with ck_ring_init. */
+ck_ring_t ring;
+
+/* The ring was initialized for 1023 elements. */
+ck_ring_buffer_t buffer[1024];
+
+void
+enqueue(void)
+{
+ void *entry = some_object;
+
+ /* Attempt to enqueue pointer to some_object into buffer. */
+	if (ck_ring_enqueue_spsc(&ring, buffer, entry) == false) {
+ /*
+ * The buffer was full and the enqueue operation
+ * has failed.
+ */
+ return;
+ }
+
+ /* Enqueue operation completed successfully. */
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+The function returns true if the value of
+.Fa entry
+was successfully enqueued into
+.Fa ring .
+The function will return false if the value of
+.Fa entry
+could not be enqueued, which only occurs if
+.Fa ring
+was full.
+.Sh SEE ALSO
+.Xr ck_ring_init 3 ,
+.Xr ck_ring_dequeue_spmc 3 ,
+.Xr ck_ring_trydequeue_spmc 3 ,
+.Xr ck_ring_enqueue_spmc 3 ,
+.Xr ck_ring_enqueue_spmc_size 3 ,
+.Xr ck_ring_dequeue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc_size 3 ,
+.Xr ck_ring_capacity 3 ,
+.Xr ck_ring_size 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ring_enqueue_spsc_size b/doc/ck_ring_enqueue_spsc_size
new file mode 100644
index 0000000..7048ea1
--- /dev/null
+++ b/doc/ck_ring_enqueue_spsc_size
@@ -0,0 +1,128 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 20, 2013
+.Dt CK_RING_ENQUEUE_SPSC_SIZE 3
+.Sh NAME
+.Nm ck_ring_enqueue_spsc_size
+.Nd enqueue pointer into bounded FIFO and return size of buffer
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ring.h
+.Ft bool
+.Fn ck_ring_enqueue_spsc_size "ck_ring_t *ring" "ck_ring_buffer_t *buffer" "void *entry" "unsigned int *size"
+.Sh DESCRIPTION
+The
+.Fn ck_ring_enqueue_spsc_size 3
+function enqueues the pointer
+.Fa entry
+into the bounded buffer pointed to by
+.Fa ring
+in FIFO fashion.
+The buffer pointed to by
+.Fa buffer
+must be unique to
+.Fa ring
+and point to an array of ck_ring_buffer_t whose length is at least
+the power-of-2 number of elements the ring was initialized for.
+The decoupling of the ring from the buffer serves
+to address use-cases involving multiple address spaces
+and DMA, among others.
+If you are on non-POSIX platforms or wish for strict
+compliance with C, then it is recommended to pass a
+pointer of type void ** for
+.Fa entry .
+This function is safe to call without locking for up to
+one concurrent invocation of
+.Fn ck_ring_dequeue_spsc 3 .
+This function provides wait-free progress
+guarantees.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <ck_ring.h>
+
+/* This ring was previously initialized with ck_ring_init. */
+ck_ring_t ring;
+
+/* The ring was initialized for 1023 elements. */
+ck_ring_buffer_t buffer[1024];
+
+void
+enqueue(void)
+{
+ void *entry = some_object;
+ unsigned int length;
+
+ /* Attempt to enqueue pointer to some_object into buffer. */
+	if (ck_ring_enqueue_spsc_size(&ring, buffer, entry, &length) == false) {
+ /*
+ * The buffer was full and the enqueue operation
+ * has failed.
+ */
+ return;
+ }
+
+ /*
+	 * If entry was the 101st or greater pointer in the buffer,
+	 * do something.
+ */
+ if (length > 100) {
+		do_something();
+ }
+
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+The function returns true if the value of
+.Fa entry
+was successfully enqueued into
+.Fa ring .
+The number of items in
+.Fa ring
+with respect to the linearization point (the
+point in time that
+.Fa entry
+was enqueued) is stored in the integer pointed to by
+.Fa size .
+The function will return false if the value of
+.Fa entry
+could not be enqueued, which only occurs if
+.Fa ring
+was full.
+.Sh SEE ALSO
+.Xr ck_ring_init 3 ,
+.Xr ck_ring_dequeue_spmc 3 ,
+.Xr ck_ring_trydequeue_spmc 3 ,
+.Xr ck_ring_enqueue_spmc 3 ,
+.Xr ck_ring_enqueue_spmc_size 3 ,
+.Xr ck_ring_dequeue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc 3 ,
+.Xr ck_ring_capacity 3 ,
+.Xr ck_ring_size 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ring_init b/doc/ck_ring_init
new file mode 100644
index 0000000..914d1bb
--- /dev/null
+++ b/doc/ck_ring_init
@@ -0,0 +1,62 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 20, 2013
+.Dt CK_RING_INIT 3
+.Sh NAME
+.Nm ck_ring_init
+.Nd initialize bounded FIFO
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ring.h
+.Ft void
+.Fn ck_ring_init "ck_ring_t *ring" "unsigned int size"
+.Sh DESCRIPTION
+The
+.Fn ck_ring_init
+function initializes a bounded FIFO buffer pointed to by
+.Fa ring
+for the storage of up to
+.Fa size
+pointers.
+The
+.Fa size
+argument must be a power-of-two greater than or equal to 4.
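+.Sh EXAMPLE
+The following is a minimal sketch of run-time initialization; the
+function name setup is illustrative, and a 1024-slot buffer is used
+here to match the examples in the related enqueue and dequeue pages
+(which treat such a ring as holding 1023 elements).
+.Bd -literal -offset indent
+#include <ck_ring.h>
+#include <stdlib.h>
+
+static ck_ring_t ring;
+static ck_ring_buffer_t *buffer;
+
+void
+setup(void)
+{
+
+	/* Allocate a power-of-2 sized buffer to back the ring. */
+	buffer = malloc(sizeof(ck_ring_buffer_t) * 1024);
+	if (buffer == NULL)
+		abort();
+
+	/* Initialize the ring to index into the 1024-slot buffer. */
+	ck_ring_init(&ring, 1024);
+	return;
+}
+.Ed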
+.Sh RETURN VALUES
+This function has no return value.
+.Sh SEE ALSO
+.Xr ck_ring_dequeue_spmc 3 ,
+.Xr ck_ring_trydequeue_spmc 3 ,
+.Xr ck_ring_enqueue_spmc 3 ,
+.Xr ck_ring_enqueue_spmc_size 3 ,
+.Xr ck_ring_dequeue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc_size 3 ,
+.Xr ck_ring_capacity 3 ,
+.Xr ck_ring_size 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ring_size b/doc/ck_ring_size
new file mode 100644
index 0000000..7ec69f4
--- /dev/null
+++ b/doc/ck_ring_size
@@ -0,0 +1,55 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 20, 2013
+.Dt CK_RING_SIZE 3
+.Sh NAME
+.Nm ck_ring_size
+.Nd return number of pointers enqueued in bounded FIFO
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ring.h
+.Ft unsigned int
+.Fn ck_ring_size "ck_ring_t *ring"
+.Sh DESCRIPTION
+The
+.Fn ck_ring_size 3
+function returns the number of pointers currently
+enqueued in the buffer pointed to by
+.Fa ring .
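+.Sh EXAMPLE
+The following is a minimal sketch of one possible use: a producer
+waiting for consumers to drain a previously initialized ring. The
+function name drain_wait is illustrative, and the returned count is
+only a snapshot while other threads are operating on the ring.
+.Bd -literal -offset indent
+#include <ck_pr.h>
+#include <ck_ring.h>
+
+/* This ring was previously initialized with ck_ring_init. */
+static ck_ring_t ring;
+
+void
+drain_wait(void)
+{
+
+	/* Spin until no pointers remain enqueued in the ring. */
+	while (ck_ring_size(&ring) > 0)
+		ck_pr_stall();
+
+	return;
+}
+.Ed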
+.Sh SEE ALSO
+.Xr ck_ring_init 3 ,
+.Xr ck_ring_enqueue_spmc 3 ,
+.Xr ck_ring_dequeue_spmc 3 ,
+.Xr ck_ring_trydequeue_spmc 3 ,
+.Xr ck_ring_enqueue_spmc_size 3 ,
+.Xr ck_ring_dequeue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc_size 3 ,
+.Xr ck_ring_capacity 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_ring_trydequeue_spmc b/doc/ck_ring_trydequeue_spmc
new file mode 100644
index 0000000..16f83ee
--- /dev/null
+++ b/doc/ck_ring_trydequeue_spmc
@@ -0,0 +1,126 @@
+.\"
+.\" Copyright 2012-2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 20, 2013
+.Dt CK_RING_TRYDEQUEUE_SPMC 3
+.Sh NAME
+.Nm ck_ring_trydequeue_spmc
+.Nd dequeue from bounded FIFO and allow for spurious failure
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_ring.h
+.Ft bool
+.Fn ck_ring_trydequeue_spmc "ck_ring_t *ring" "ck_ring_buffer_t *buffer" "void *result"
+.Sh DESCRIPTION
+The
+.Fn ck_ring_trydequeue_spmc 3
+function attempts to dequeue a pointer from the bounded buffer
+pointed to by
+.Fa ring
+in FIFO fashion. The pointer is stored in the pointer
+pointed to by
+.Fa result .
+The buffer pointed to by
+.Fa buffer
+must be unique to
+.Fa ring
+and point to an array of ck_ring_buffer_t whose length is at least
+the power-of-2 number of elements the ring was initialized for.
+The decoupling of the ring from the buffer serves
+to address use-cases involving multiple address spaces
+and DMA, among others.
+If you are on non-POSIX platforms or wish for strict
+compliance with C, then it is recommended to pass a
+pointer of type void ** for
+.Fa result .
+This function is safe to call without locking for UINT_MAX
+concurrent
+.Fn ck_ring_dequeue_spmc 3
+or
+.Fn ck_ring_trydequeue_spmc 3
+invocations and up to one concurrent
+.Fn ck_ring_enqueue_spmc 3
+or
+.Fn ck_ring_enqueue_spmc_size 3
+invocation. This operation will always complete
+in a bounded number of steps. It is
+possible for the function to return false even
+if
+.Fa ring
+is non-empty.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <ck_ring.h>
+
+/* This ring was previously initialized with ck_ring_init. */
+ck_ring_t ring;
+
+/* The ring was initialized for 1023 elements. */
+ck_ring_buffer_t buffer[1024];
+
+void
+dequeue(void)
+{
+ void *result;
+
+ /* Dequeue from ring until contention is actively observed. */
+	while (ck_ring_trydequeue_spmc(&ring, buffer, &result) == true) {
+		/*
+		 * result contains the oldest pointer in the ring
+		 * since the dequeue operation returned true.
+		 */
+ operation(result);
+ }
+
+ /* An empty ring was encountered, leave. */
+ return;
+}
+.Ed
+.Sh RETURN VALUES
+The function returns true if the dequeue operation
+completed successfully in a bounded number of steps.
+The result of the dequeue operation is stored in the
+value pointed to by
+.Fa result .
+Otherwise, the function will return false if the buffer was empty
+or if the operation could not be completed in a bounded
+number of steps. If the function returns false, then the contents
+of
+.Fa result
+are undefined.
+.Sh SEE ALSO
+.Xr ck_ring_init 3 ,
+.Xr ck_ring_dequeue_spmc 3 ,
+.Xr ck_ring_enqueue_spmc 3 ,
+.Xr ck_ring_enqueue_spmc_size 3 ,
+.Xr ck_ring_dequeue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc 3 ,
+.Xr ck_ring_enqueue_spsc_size 3 ,
+.Xr ck_ring_capacity 3 ,
+.Xr ck_ring_size 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rwcohort b/doc/ck_rwcohort
new file mode 100644
index 0000000..ba2b5f9
--- /dev/null
+++ b/doc/ck_rwcohort
@@ -0,0 +1,203 @@
+.\"
+.\" Copyright 2013 Brendon Scheinman.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 23, 2013.
+.Dt ck_rwcohort 3
+.Sh NAME
+.Nm ck_rwcohort
+.Nd generalized interface for reader-writer locks using cohort locks
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rwcohort.h
+In each of the following macros, "STRATEGY" should be replaced with either "NEUTRAL", "RP", or "WP"
+depending on which locking strategy the user prefers. RP and WP represent reader preference and
+writer preference, respectively, while NEUTRAL represents a strategy neutral to reads vs. writes.
+.Fn CK_RWCOHORT_STRATEGY_PROTOTYPE "COHORT_NAME cohort_name"
+.Fn CK_RWCOHORT_STRATEGY_NAME "COHORT_NAME cohort_name"
+.Fn CK_RWCOHORT_STRATEGY_INSTANCE "COHORT_NAME cohort_name"
+.Fn CK_RWCOHORT_STRATEGY_INIT "COHORT_NAME cohort_name" "RWCOHORT lock" "unsigned int wait_limit"
+Note: the wait_limit argument should be omitted for locks using the NEUTRAL strategy.
+.Fn CK_RWCOHORT_STRATEGY_READ_LOCK "COHORT_NAME cohort_name" "RWCOHORT lock" "COHORT cohort" \
+"void *global_context" "void *local_context"
+.Fn CK_RWCOHORT_STRATEGY_READ_UNLOCK "COHORT_NAME cohort_name" "RWCOHORT lock" "COHORT cohort" \
+"void *global_context" "void *local_context"
+.Fn CK_RWCOHORT_STRATEGY_WRITE_LOCK "COHORT_NAME cohort_name" "RWCOHORT lock" "COHORT cohort" \
+"void *global_context" "void *local_context"
+.Fn CK_RWCOHORT_STRATEGY_WRITE_UNLOCK "COHORT_NAME cohort_name" "RWCOHORT lock" "COHORT cohort" \
+"void *global_context" "void *local_context"
+.Pp
+Arguments of type RWCOHORT must be pointers to structs defined using the
+.Xr CK_RWCOHORT_STRATEGY_PROTOTYPE 3
+macro with the same strategy and cohort name as the current call.
+.Pp
+Arguments of type COHORT must be pointers to structs defined using the
+.Xr CK_COHORT_PROTOTYPE 3
+macro.
+.Sh DESCRIPTION
+ck_rwcohort.h provides an interface for defining reader-writer locks
+that use cohort locks internally to increase performance on NUMA
+architectures. See
+.Xr ck_cohort 3
+for more information about cohort locks.
+.Pp
+Before using a reader-writer cohort lock, the user must define a cohort type using
+either the
+.Xr CK_COHORT_PROTOTYPE 3
+or the
+.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3
+macros, and define a reader-writer lock type using the
+.Xr CK_RWCOHORT_PROTOTYPE 3
+macro.
+.Pp
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <stdlib.h>
+#include <unistd.h> /* sleep() */
+#include <pthread.h>
+
+#include <ck_pr.h>
+#include <ck_cohort.h>
+#include <ck_rwcohort.h>
+#include <ck_spinlock.h>
+
+/* Create cohort methods with the signatures required by CK_COHORT_PROTOTYPE. */
+
+static void
+ck_spinlock_lock_with_context(ck_spinlock_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_lock(lock);
+ return;
+}
+
+static void
+ck_spinlock_unlock_with_context(ck_spinlock_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_unlock(lock);
+ return;
+}
+
+static bool
+ck_spinlock_locked_with_context(ck_spinlock_t *lock, void *context)
+{
+ (void)context;
+ return ck_spinlock_locked(lock);
+}
+
+/*
+ * define a cohort type named "test_cohort" that will use
+ * the above methods for both its global and local locks
+ */
+CK_COHORT_PROTOTYPE(test_cohort,
+ ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context, ck_spinlock_locked_with_context,
+ ck_spinlock_lock_with_context, ck_spinlock_unlock_with_context, ck_spinlock_locked_with_context)
+
+/* define a reader-writer type using the same cohort type */
+CK_RWCOHORT_WP_PROTOTYPE(test_cohort)
+
+static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER;
+static CK_COHORT_INSTANCE(test_cohort) *cohorts;
+static CK_RWCOHORT_WP_INSTANCE(test_cohort) rw_cohort = CK_RWCOHORT_WP_INITIALIZER;
+static unsigned int ready;
+
+static void *
+function(void *context)
+{
+ CK_COHORT_INSTANCE(test_cohort) *cohort = context;
+
+ while (ck_pr_load_uint(&ready) == 0);
+
+ while (ck_pr_load_uint(&ready) > 0) {
+ /*
+ * acquire the cohort lock before performing critical section.
+ * note that we pass NULL for both the global and local context
+ * arguments because neither the lock nor unlock functions
+ * will use them.
+ */
+ CK_COHORT_LOCK(test_cohort, cohort, NULL, NULL);
+
+ /* perform critical section */
+
+ /* relinquish cohort lock */
+ CK_COHORT_UNLOCK(test_cohort, cohort, NULL, NULL);
+ }
+
+ return NULL;
+}
+
+int
+main(void)
+{
+ unsigned int nthr = 4;
+ unsigned int n_cohorts = 2;
+ unsigned int i;
+
+ /* allocate 2 cohorts of the defined type */
+	cohorts = calloc(n_cohorts, sizeof(CK_COHORT_INSTANCE(test_cohort)));
+
+ /* create local locks to use with each cohort */
+ ck_spinlock_t *local_locks =
+ calloc(n_cohorts, sizeof(ck_spinlock_t));
+
+ pthread_t *threads =
+ calloc(nthr, sizeof(pthread_t));
+
+ /* initialize each of the cohorts before using them */
+ for (i = 0 ; i < n_cohorts ; ++i) {
+ CK_COHORT_INIT(test_cohort, cohorts + i, &global_lock, local_locks + i,
+ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
+ }
+
+ /* start each thread and assign cohorts equally */
+ for (i = 0 ; i < nthr ; ++i) {
+ pthread_create(threads + i, NULL, function, cohorts + (i % n_cohorts));
+ }
+
+ ck_pr_store_uint(&ready, 1);
+ sleep(10);
+ ck_pr_store_uint(&ready, 0);
+
+ for (i = 0 ; i < nthr ; ++i) {
+ pthread_join(threads[i], NULL);
+ }
+
+ return 0;
+}
+.Ed
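+.Pp
+The example above only exercises the underlying cohort lock. The
+following sketch, which reuses the rw_cohort and ready definitions
+from the previous example, shows how the writer-preference macros
+listed in the SYNOPSIS could be applied; the function names
+reader_thread and writer_thread are illustrative only.
+.Bd -literal -offset indent
+static void *
+reader_thread(void *context)
+{
+	CK_COHORT_INSTANCE(test_cohort) *cohort = context;
+
+	while (ck_pr_load_uint(&ready) == 0);
+
+	while (ck_pr_load_uint(&ready) > 0) {
+		/* Acquire the read lock through this thread's cohort. */
+		CK_RWCOHORT_WP_READ_LOCK(test_cohort, &rw_cohort, cohort,
+		    NULL, NULL);
+
+		/* Read-side critical section. */
+
+		CK_RWCOHORT_WP_READ_UNLOCK(test_cohort, &rw_cohort, cohort,
+		    NULL, NULL);
+	}
+
+	return NULL;
+}
+
+static void *
+writer_thread(void *context)
+{
+	CK_COHORT_INSTANCE(test_cohort) *cohort = context;
+
+	while (ck_pr_load_uint(&ready) == 0);
+
+	while (ck_pr_load_uint(&ready) > 0) {
+		/* Acquire the write lock through this thread's cohort. */
+		CK_RWCOHORT_WP_WRITE_LOCK(test_cohort, &rw_cohort, cohort,
+		    NULL, NULL);
+
+		/* Write-side critical section. */
+
+		CK_RWCOHORT_WP_WRITE_UNLOCK(test_cohort, &rw_cohort, cohort,
+		    NULL, NULL);
+	}
+
+	return NULL;
+}
+.Ed
+.Pp
+Threads running these functions would be created and assigned cohorts
+in the same manner as in the previous example.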
+.Sh SEE ALSO
+.Xr CK_COHORT_PROTOTYPE 3 ,
+.Xr CK_COHORT_TRYLOCK_PROTOTYPE 3 ,
+.Xr CK_COHORT_INSTANCE 3 ,
+.Xr CK_COHORT_INITIALIZER 3 ,
+.Xr CK_COHORT_INIT 3 ,
+.Xr CK_COHORT_LOCK 3 ,
+.Xr CK_COHORT_UNLOCK 3 ,
+.Xr CK_COHORT_LOCKED 3 ,
+.Xr CK_COHORT_TRYLOCK 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_rwlock b/doc/ck_rwlock
new file mode 100644
index 0000000..60a18ab
--- /dev/null
+++ b/doc/ck_rwlock
@@ -0,0 +1,143 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd July 26, 2013.
+.Dt ck_rwlock 3
+.Sh NAME
+.Nm ck_rwlock_init ,
+.Nm ck_rwlock_write_lock ,
+.Nm ck_rwlock_write_unlock ,
+.Nm ck_rwlock_write_trylock ,
+.Nm ck_rwlock_write_downgrade ,
+.Nm ck_rwlock_locked_writer ,
+.Nm ck_rwlock_read_lock ,
+.Nm ck_rwlock_read_trylock ,
+.Nm ck_rwlock_read_unlock ,
+.Nm ck_rwlock_locked_reader ,
+.Nm ck_rwlock_recursive_write_lock ,
+.Nm ck_rwlock_recursive_write_trylock ,
+.Nm ck_rwlock_recursive_write_unlock ,
+.Nm ck_rwlock_recursive_read_lock ,
+.Nm ck_rwlock_recursive_read_trylock ,
+.Nm ck_rwlock_recursive_read_unlock
+.Nd centralized write-biased reader-writer locks
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_rwlock.h
+.Pp
+.Dv ck_rwlock_t lock = CK_RWLOCK_INITIALIZER;
+.Pp
+.Ft void
+.Fn ck_rwlock_init "ck_rwlock_t *lock"
+.Ft void
+.Fn ck_rwlock_write_lock "ck_rwlock_t *lock"
+.Ft void
+.Fn ck_rwlock_write_unlock "ck_rwlock_t *lock"
+.Ft bool
+.Fn ck_rwlock_write_trylock "ck_rwlock_t *lock"
+.Ft bool
+.Fn ck_rwlock_write_downgrade "ck_rwlock_t *lock"
+.Ft bool
+.Fn ck_rwlock_locked_writer "ck_rwlock_t *lock"
+.Ft void
+.Fn ck_rwlock_read_lock "ck_rwlock_t *lock"
+.Ft bool
+.Fn ck_rwlock_read_trylock "ck_rwlock_t *lock"
+.Ft void
+.Fn ck_rwlock_read_unlock "ck_rwlock_t *lock"
+.Ft bool
+.Fn ck_rwlock_locked_reader "ck_rwlock_t *lock"
+.Pp
+.Dv ck_rwlock_recursive_t lock = CK_RWLOCK_RECURSIVE_INITIALIZER;
+.Pp
+.Ft void
+.Fn ck_rwlock_recursive_write_lock "ck_rwlock_recursive_t *lock" "unsigned int tid"
+.Ft bool
+.Fn ck_rwlock_recursive_write_trylock "ck_rwlock_recursive_t *lock" "unsigned int tid"
+.Ft void
+.Fn ck_rwlock_recursive_write_unlock "ck_rwlock_recursive_t *lock"
+.Ft void
+.Fn ck_rwlock_recursive_read_lock "ck_rwlock_recursive_t *lock"
+.Ft bool
+.Fn ck_rwlock_recursive_read_trylock "ck_rwlock_recursive_t *lock"
+.Ft void
+.Fn ck_rwlock_recursive_read_unlock "ck_rwlock_recursive_t *lock"
+.Sh DESCRIPTION
+This is a centralized write-biased reader-writer lock. It
+requires very little space overhead and has a low latency
+fast path. Write-side recursion requires usage of ck_rwlock_recursive.
+Read-side recursion is disallowed. The
+.Fn ck_rwlock_write_downgrade
+function degrades the caller's write-side acquisition to a read-side
+acquisition without forfeiting the current critical section.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <ck_rwlock.h>
+
+static ck_rwlock_t lock = CK_RWLOCK_INITIALIZER;
+
+static void
+reader(void)
+{
+
+ for (;;) {
+ ck_rwlock_read_lock(&lock);
+ /* Read-side critical section. */
+ ck_rwlock_read_unlock(&lock);
+
+ if (ck_rwlock_read_trylock(&lock) == true) {
+ /* Read-side critical section. */
+ ck_rwlock_read_unlock(&lock);
+ }
+ }
+
+ return;
+}
+
+static void
+writer(void)
+{
+
+ for (;;) {
+ ck_rwlock_write_lock(&lock);
+ /* Write-side critical section. */
+ ck_rwlock_write_unlock(&lock);
+
+		if (ck_rwlock_write_trylock(&lock) == true) {
+ /* Write-side critical section. */
+ ck_rwlock_write_unlock(&lock);
+ }
+ }
+
+ return;
+}
+.Ed
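+.Pp
+The following is a minimal sketch of the downgrade operation described
+above; it assumes the downgraded acquisition is released with
+.Fn ck_rwlock_read_unlock .
+.Bd -literal -offset indent
+static void
+update_then_read(void)
+{
+
+	ck_rwlock_write_lock(&lock);
+	/* Write-side critical section. */
+
+	/*
+	 * Degrade the write acquisition to a read acquisition without
+	 * leaving the critical section.
+	 */
+	ck_rwlock_write_downgrade(&lock);
+
+	/* Read-side critical section. */
+	ck_rwlock_read_unlock(&lock);
+	return;
+}
+.Ed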
+.Sh SEE ALSO
+.Xr ck_brlock 3 ,
+.Xr ck_elide 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_sequence b/doc/ck_sequence
new file mode 100644
index 0000000..faa1631
--- /dev/null
+++ b/doc/ck_sequence
@@ -0,0 +1,144 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd July 26, 2013.
+.Dt ck_sequence 3
+.Sh NAME
+.Nm ck_sequence_init ,
+.Nm ck_sequence_read_begin ,
+.Nm ck_sequence_read_retry ,
+.Nm ck_sequence_write_begin ,
+.Nm ck_sequence_write_end
+.Nd sequence locks
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_sequence.h
+.Pp
+.Dv ck_sequence_t seqlock = CK_SEQUENCE_INITIALIZER;
+.Pp
+.Ft void
+.Fn ck_sequence_init "ck_sequence_t *sq"
+.Ft unsigned int
+.Fn ck_sequence_read_begin "const ck_sequence_t *sq"
+.Ft bool
+.Fn ck_sequence_read_retry "const ck_sequence_t *sq" "unsigned int version"
+.Ft void
+.Fn ck_sequence_write_begin "ck_sequence_t *sq"
+.Ft void
+.Fn ck_sequence_write_end "ck_sequence_t *sq"
+.Sh DESCRIPTION
+It is recommended to use ck_sequence when a small amount of data that cannot be
+accessed atomically has to be synchronized with readers in a fashion that does
+not block any writer. Readers are able to execute their read-side critical
+sections without any atomic operations. A ck_sequence_t must be initialized
+before use. It may be initialized using either a static initializer
+(CK_SEQUENCE_INITIALIZER) or using
+.Fn ck_sequence_init .
+Before readers attempt to
+read data that may be concurrently modified they must first save the return
+value of
+.Fn ck_sequence_read_begin .
+While copying, or after it has finished copying, the data associated
+with a ck_sequence_t, a reader must pass the earlier return value of
+.Fn ck_sequence_read_begin
+to
+.Fn ck_sequence_read_retry .
+If
+.Fn ck_sequence_read_retry
+returns true then the copy of data may be inconsistent and the read process
+must be retried. Writers must rely on their own synchronization primitives.
+Once a writer has entered its respective critical section, it must call
+.Fn ck_sequence_write_begin
+to signal intent to update the data protected
+by the ck_sequence_t. Before the writer leaves its critical section it must
+execute
+.Fn ck_sequence_write_end
+to indicate that the updates have left respective objects in a consistent state.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <ck_sequence.h>
+#include <stdlib.h>
+
+static struct example {
+ int a;
+ int b;
+ int c;
+} global;
+
+static ck_sequence_t seqlock = CK_SEQUENCE_INITIALIZER;
+
+void
+reader(void)
+{
+ struct example copy;
+ unsigned int version;
+
+ /*
+ * Attempt a read of the data structure. If the structure
+ * has been modified between ck_sequence_read_begin and
+ * ck_sequence_read_retry then attempt another read since
+ * the data may be in an inconsistent state.
+ */
+ do {
+ version = ck_sequence_read_begin(&seqlock);
+ copy = global;
+ } while (ck_sequence_read_retry(&seqlock, version));
+
+ /*
+	 * The above may also be expressed using CK_SEQUENCE_READ.
+	 * It is generally recommended to use ck_sequence_read_retry
+	 * directly only if you wish to detect a conflicting write at
+	 * some higher granularity.
+ */
+ CK_SEQUENCE_READ(&seqlock, &version) {
+ copy = global;
+ }
+
+ return;
+}
+
+void
+writer(void)
+{
+
+ for (;;) {
+ ck_sequence_write_begin(&seqlock);
+ global.a = rand();
+ global.b = global.a + global.b;
+ global.c = global.b + global.c;
+ ck_sequence_write_end(&seqlock);
+ }
+
+ return;
+}
+.Ed
+.Sh SEE ALSO
+.Xr ck_brlock 3 ,
+.Xr ck_bytelock 3 ,
+.Xr ck_rwlock 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_spinlock b/doc/ck_spinlock
new file mode 100644
index 0000000..564d185
--- /dev/null
+++ b/doc/ck_spinlock
@@ -0,0 +1,259 @@
+.\"
+.\" Copyright 2013 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd July 26, 2013.
+.Dt ck_spinlock 3
+.Sh NAME
+.Nm ck_spinlock_init ,
+.Nm ck_spinlock_lock ,
+.Nm ck_spinlock_unlock ,
+.Nm ck_spinlock_locked ,
+.Nm ck_spinlock_trylock ,
+.Nm ck_spinlock_anderson_init ,
+.Nm ck_spinlock_anderson_locked ,
+.Nm ck_spinlock_anderson_lock ,
+.Nm ck_spinlock_anderson_unlock ,
+.Nm ck_spinlock_cas_init ,
+.Nm ck_spinlock_cas_locked ,
+.Nm ck_spinlock_cas_lock ,
+.Nm ck_spinlock_cas_lock_eb ,
+.Nm ck_spinlock_cas_trylock ,
+.Nm ck_spinlock_cas_unlock ,
+.Nm ck_spinlock_clh_init ,
+.Nm ck_spinlock_clh_locked ,
+.Nm ck_spinlock_clh_lock ,
+.Nm ck_spinlock_clh_unlock ,
+.Nm ck_spinlock_dec_init ,
+.Nm ck_spinlock_dec_locked ,
+.Nm ck_spinlock_dec_lock ,
+.Nm ck_spinlock_dec_lock_eb ,
+.Nm ck_spinlock_dec_trylock ,
+.Nm ck_spinlock_dec_unlock ,
+.Nm ck_spinlock_fas_init ,
+.Nm ck_spinlock_fas_lock ,
+.Nm ck_spinlock_fas_lock_eb ,
+.Nm ck_spinlock_fas_locked ,
+.Nm ck_spinlock_fas_trylock ,
+.Nm ck_spinlock_fas_unlock ,
+.Nm ck_spinlock_hclh_init ,
+.Nm ck_spinlock_hclh_locked ,
+.Nm ck_spinlock_hclh_lock ,
+.Nm ck_spinlock_hclh_unlock ,
+.Nm ck_spinlock_mcs_init ,
+.Nm ck_spinlock_mcs_locked ,
+.Nm ck_spinlock_mcs_lock ,
+.Nm ck_spinlock_mcs_trylock ,
+.Nm ck_spinlock_mcs_unlock ,
+.Nm ck_spinlock_ticket_init ,
+.Nm ck_spinlock_ticket_locked ,
+.Nm ck_spinlock_ticket_lock ,
+.Nm ck_spinlock_ticket_lock_pb ,
+.Nm ck_spinlock_ticket_trylock ,
+.Nm ck_spinlock_ticket_unlock
+.Nd spinlock implementations
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_spinlock.h
+.Pp
+.Dv ck_spinlock_t spinlock = CK_SPINLOCK_INITIALIZER;
+.Ft void
+.Fn ck_spinlock_init "ck_spinlock_t *lock"
+.Ft void
+.Fn ck_spinlock_lock "ck_spinlock_t *lock"
+.Ft void
+.Fn ck_spinlock_unlock "ck_spinlock_t *lock"
+.Ft bool
+.Fn ck_spinlock_locked "ck_spinlock_t *lock"
+.Ft bool
+.Fn ck_spinlock_trylock "ck_spinlock_t *lock"
+.Ft void
+.Fn ck_spinlock_anderson_init "ck_spinlock_anderson_t *lock" "ck_spinlock_anderson_thread_t *slots" "unsigned int count"
+.Ft bool
+.Fn ck_spinlock_anderson_locked "ck_spinlock_anderson_t *lock"
+.Ft void
+.Fn ck_spinlock_anderson_lock "ck_spinlock_anderson_t *lock" "ck_spinlock_anderson_thread_t **slot"
+.Ft void
+.Fn ck_spinlock_anderson_unlock "ck_spinlock_anderson_t *lock" "ck_spinlock_anderson_thread_t *slot"
+.Pp
+.Dv ck_spinlock_cas_t spinlock = CK_SPINLOCK_CAS_INITIALIZER;
+.Ft void
+.Fn ck_spinlock_cas_init "ck_spinlock_cas_t *lock"
+.Ft bool
+.Fn ck_spinlock_cas_locked "ck_spinlock_cas_t *lock"
+.Ft void
+.Fn ck_spinlock_cas_lock "ck_spinlock_cas_t *lock"
+.Ft void
+.Fn ck_spinlock_cas_lock_eb "ck_spinlock_cas_t *lock"
+.Ft bool
+.Fn ck_spinlock_cas_trylock "ck_spinlock_cas_t *lock"
+.Ft void
+.Fn ck_spinlock_cas_unlock "ck_spinlock_cas_t *lock"
+.Ft void
+.Fn ck_spinlock_clh_init "ck_spinlock_clh_t **lock" "ck_spinlock_clh_t *unowned"
+.Ft bool
+.Fn ck_spinlock_clh_locked "ck_spinlock_clh_t **lock"
+.Ft void
+.Fn ck_spinlock_clh_lock "ck_spinlock_clh_t **lock" "ck_spinlock_clh_t *node"
+.Ft void
+.Fn ck_spinlock_clh_unlock "ck_spinlock_clh_t **node"
+.Pp
+.Dv ck_spinlock_dec_t spinlock = CK_SPINLOCK_DEC_INITIALIZER;
+.Ft void
+.Fn ck_spinlock_dec_init "ck_spinlock_dec_t *lock"
+.Ft bool
+.Fn ck_spinlock_dec_locked "ck_spinlock_dec_t *lock"
+.Ft void
+.Fn ck_spinlock_dec_lock "ck_spinlock_dec_t *lock"
+.Ft void
+.Fn ck_spinlock_dec_lock_eb "ck_spinlock_dec_t *lock"
+.Ft bool
+.Fn ck_spinlock_dec_trylock "ck_spinlock_dec_t *lock"
+.Ft void
+.Fn ck_spinlock_dec_unlock "ck_spinlock_dec_t *lock"
+.Pp
+.Dv ck_spinlock_fas_t spinlock = CK_SPINLOCK_FAS_INITIALIZER;
+.Ft void
+.Fn ck_spinlock_fas_init "ck_spinlock_fas_t *lock"
+.Ft void
+.Fn ck_spinlock_fas_lock "ck_spinlock_fas_t *lock"
+.Ft void
+.Fn ck_spinlock_fas_lock_eb "ck_spinlock_fas_t *lock"
+.Ft bool
+.Fn ck_spinlock_fas_locked "ck_spinlock_fas_t *lock"
+.Ft bool
+.Fn ck_spinlock_fas_trylock "ck_spinlock_fas_t *lock"
+.Ft void
+.Fn ck_spinlock_fas_unlock "ck_spinlock_fas_t *lock"
+.Pp
+.Ft void
+.Fn ck_spinlock_hclh_init "ck_spinlock_hclh_t **lock" "ck_spinlock_hclh_t *unowned"
+.Ft bool
+.Fn ck_spinlock_hclh_locked "ck_spinlock_hclh_t **lock"
+.Ft void
+.Fn ck_spinlock_hclh_lock "ck_spinlock_hclh_t **lock" "ck_spinlock_hclh_t *node"
+.Ft void
+.Fn ck_spinlock_hclh_unlock "ck_spinlock_hclh_t **node"
+.Pp
+.Dv ck_spinlock_mcs_t spinlock = CK_SPINLOCK_MCS_INITIALIZER;
+.Ft void
+.Fn ck_spinlock_mcs_init "ck_spinlock_mcs_t **lock"
+.Ft bool
+.Fn ck_spinlock_mcs_locked "ck_spinlock_mcs_t **lock"
+.Ft void
+.Fn ck_spinlock_mcs_lock "ck_spinlock_mcs_t **lock" "ck_spinlock_mcs_t *node"
+.Ft bool
+.Fn ck_spinlock_mcs_trylock "ck_spinlock_mcs_t **lock" "ck_spinlock_mcs_t *node"
+.Ft void
+.Fn ck_spinlock_mcs_unlock "ck_spinlock_mcs_t **lock" "ck_spinlock_mcs_t *node"
+.Pp
+.Dv ck_spinlock_ticket_t spinlock = CK_SPINLOCK_TICKET_INITIALIZER;
+.Ft void
+.Fn ck_spinlock_ticket_init "ck_spinlock_ticket_t *lock"
+.Ft bool
+.Fn ck_spinlock_ticket_locked "ck_spinlock_ticket_t *lock"
+.Ft void
+.Fn ck_spinlock_ticket_lock "ck_spinlock_ticket_t *lock"
+.Ft void
+.Fn ck_spinlock_ticket_lock_pb "ck_spinlock_ticket_t *lock" "unsigned int period"
+.Ft bool
+.Fn ck_spinlock_ticket_trylock "ck_spinlock_ticket_t *lock"
+.Ft void
+.Fn ck_spinlock_ticket_unlock "ck_spinlock_ticket_t *lock"
+.Sh DESCRIPTION
+A family of busy-wait spinlock implementations. The ck_spinlock_t implementation is simply
+a wrapper around the fetch-and-swap (ck_spinlock_fas_t) implementation. The table below
+provides a summary of the current implementations.
+.Bd -literal
+| Namespace | Algorithm | Type | Restrictions | Fair |
+\'----------------------|-----------------------------|---------------|-------------------------|--------'
+ ck_spinlock_anderson Anderson Array Fixed number of threads Yes
+ ck_spinlock_cas Compare-and-Swap Centralized None No
+ ck_spinlock_clh Craig, Landin and Hagersten Queue Lifetime requirements Yes
+ ck_spinlock_dec Decrement (Linux kernel) Centralized UINT_MAX concurrency No
+ ck_spinlock_fas Fetch-and-store Centralized None No
+ ck_spinlock_hclh Hierarchical CLH Queue Lifetime requirements Yes *
+ ck_spinlock_mcs Mellor-Crummey and Scott Queue None Yes
+ ck_spinlock_ticket Ticket Centralized None Yes
+.Ed
+.Pp
+* Hierarchical CLH only offers weak fairness for threads across cluster
+nodes.
+.Pp
+If contention is low and there is no hard requirement for starvation-freedom
+then a centralized greedy (unfair) spinlock is recommended. If contention is
+high and there is no requirement for starvation-freedom then a centralized
+greedy spinlock is recommended to be used with an exponential backoff
+mechanism. If contention is generally low and there is a hard requirement for
+starvation-freedom then the ticket lock is recommended. If contention is high
+and there is a hard requirement for starvation-freedom then the Craig,
+Landin and Hagersten queue spinlock is recommended unless stack allocation is
+necessary or the NUMA factor is high, in which case the Mellor-Crummey and
+Scott spinlock is recommended. If you cannot afford the O(n) space usage of
+array or queue spinlocks but still require fairness under high contention then
+the ticket lock with proportional back-off is recommended.
+If the NUMA factor is high but a greedy lock is preferred, then please see
+.Xr ck_cohort 3 .
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <ck_spinlock.h>
+#include <stdbool.h>
+
+/*
+ * Alternatively, the mutex may be initialized at run-time with
+ * ck_spinlock_init(&mutex).
+ */
+ck_spinlock_t mutex = CK_SPINLOCK_INITIALIZER;
+
+void
+example(void)
+{
+
+ ck_spinlock_lock(&mutex);
+ /*
+ * Critical section.
+ */
+ ck_spinlock_unlock(&mutex);
+
+ ck_spinlock_lock_eb(&mutex);
+ /*
+ * Critical section.
+ */
+ ck_spinlock_unlock(&mutex);
+
+ if (ck_spinlock_trylock(&mutex) == true) {
+ /*
+ * Critical section.
+ */
+ ck_spinlock_unlock(&mutex);
+ }
+}
+.Ed
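+.Pp
+As a complement to the guidance above, the following is a hedged
+sketch of the ticket lock with proportional back-off; the back-off
+period of 10 is an arbitrary illustration value.
+.Bd -literal -offset indent
+#include <ck_spinlock.h>
+#include <stdbool.h>
+
+static ck_spinlock_ticket_t ticket = CK_SPINLOCK_TICKET_INITIALIZER;
+
+void
+ticket_example(void)
+{
+
+	/*
+	 * Acquire the ticket lock, applying proportional back-off
+	 * while waiting for this thread's turn.
+	 */
+	ck_spinlock_ticket_lock_pb(&ticket, 10);
+	/*
+	 * Critical section.
+	 */
+	ck_spinlock_ticket_unlock(&ticket);
+
+	if (ck_spinlock_ticket_trylock(&ticket) == true) {
+		/*
+		 * Critical section.
+		 */
+		ck_spinlock_ticket_unlock(&ticket);
+	}
+}
+.Ed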
+.Sh SEE ALSO
+.Xr ck_cohort 3 ,
+.Xr ck_elide 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_swlock b/doc/ck_swlock
new file mode 100644
index 0000000..e101334
--- /dev/null
+++ b/doc/ck_swlock
@@ -0,0 +1,138 @@
+.\"
+.\" Copyright 2014 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 22, 2014.
+.Dt ck_swlock 3
+.Sh NAME
+.Nm ck_swlock_init ,
+.Nm ck_swlock_write_latch ,
+.Nm ck_swlock_write_unlatch ,
+.Nm ck_swlock_write_lock ,
+.Nm ck_swlock_write_unlock ,
+.Nm ck_swlock_write_trylock ,
+.Nm ck_swlock_write_downgrade ,
+.Nm ck_swlock_locked_writer ,
+.Nm ck_swlock_read_lock ,
+.Nm ck_swlock_read_trylock ,
+.Nm ck_swlock_read_unlock ,
+.Nm ck_swlock_locked_reader
+.Nd centralized copy-safe write-biased single-writer read-write locks
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_swlock.h
+.Pp
+.Dv ck_swlock_t lock = CK_SWLOCK_INITIALIZER;
+.Pp
+.Ft void
+.Fn ck_swlock_init "ck_swlock_t *lock"
+.Ft void
+.Fn ck_swlock_write_lock "ck_swlock_t *lock"
+.Ft void
+.Fn ck_swlock_write_unlock "ck_swlock_t *lock"
+.Ft void
+.Fn ck_swlock_write_latch "ck_swlock_t *lock"
+.Ft void
+.Fn ck_swlock_write_unlatch "ck_swlock_t *lock"
+.Ft bool
+.Fn ck_swlock_write_trylock "ck_swlock_t *lock"
+.Ft bool
+.Fn ck_swlock_write_downgrade "ck_swlock_t *lock"
+.Ft bool
+.Fn ck_swlock_locked_writer "ck_swlock_t *lock"
+.Ft void
+.Fn ck_swlock_read_lock "ck_swlock_t *lock"
+.Ft bool
+.Fn ck_swlock_read_trylock "ck_swlock_t *lock"
+.Ft void
+.Fn ck_swlock_read_unlock "ck_swlock_t *lock"
+.Ft bool
+.Fn ck_swlock_locked_reader "ck_swlock_t *lock"
+.Sh DESCRIPTION
+This is a centralized write-biased single-writer reader-writer lock. It
+requires half the space that ck_rwlock does and has a low latency
+fast path. The lock supports latch and unlatch operations that
+allow it to be used in a copy-safe manner (reader bits may be
+overwritten safely).
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <ck_swlock.h>
+
+static ck_swlock_t lock = CK_SWLOCK_INITIALIZER;
+
+static void
+reader(void)
+{
+
+ for (;;) {
+ ck_swlock_read_lock(&lock);
+ /* Read-side critical section. */
+ ck_swlock_read_unlock(&lock);
+
+ if (ck_swlock_read_trylock(&lock) == true) {
+ /* Read-side critical section. */
+ ck_swlock_read_unlock(&lock);
+ }
+ }
+
+ return;
+}
+
+static void
+writer(void)
+{
+ ck_swlock_t contrived;
+
+ for (;;) {
+ ck_swlock_write_lock(&lock);
+ /* Write-side critical section. */
+ ck_swlock_write_unlock(&lock);
+
+ if (ck_swlock_write_trylock(&lock) == true) {
+ /* Write-side critical section. */
+ ck_swlock_write_unlock(&lock);
+ }
+
+ ck_swlock_write_latch(&lock);
+ /* Write-side critical section. */
+
+ /* This is safe to do with-in a latch. */
+ contrived = lock;
+ lock = contrived;
+ ck_swlock_write_unlatch(&lock);
+ }
+
+ return;
+}
+.Ed
+.Sh SEE ALSO
+.Xr ck_brlock 3 ,
+.Xr ck_elide 3 ,
+.Xr ck_pflock 3 ,
+.Xr ck_rwlock 3 ,
+.Xr ck_tflock 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/ck_tflock b/doc/ck_tflock
new file mode 100644
index 0000000..629dbd4
--- /dev/null
+++ b/doc/ck_tflock
@@ -0,0 +1,95 @@
+.\"
+.\" Copyright 2014 Samy Al Bahra.
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\"
+.Dd April 22, 2014.
+.Dt ck_tflock 3
+.Sh NAME
+.Nm ck_tflock_ticket_init ,
+.Nm ck_tflock_ticket_write_lock ,
+.Nm ck_tflock_ticket_write_unlock ,
+.Nm ck_tflock_ticket_read_lock ,
+.Nm ck_tflock_ticket_read_unlock
+.Nd centralized task-fair reader-writer locks
+.Sh LIBRARY
+Concurrency Kit (libck, \-lck)
+.Sh SYNOPSIS
+.In ck_tflock.h
+.Pp
+.Dv ck_tflock_ticket_t lock = CK_TFLOCK_TICKET_INITIALIZER;
+.Pp
+.Ft void
+.Fn ck_tflock_ticket_init "ck_tflock_ticket_t *lock"
+.Ft void
+.Fn ck_tflock_ticket_write_lock "ck_tflock_ticket_t *lock"
+.Ft void
+.Fn ck_tflock_ticket_write_unlock "ck_tflock_ticket_t *lock"
+.Ft void
+.Fn ck_tflock_ticket_read_lock "ck_tflock_ticket_t *lock"
+.Ft void
+.Fn ck_tflock_ticket_read_unlock "ck_tflock_ticket_t *lock"
+.Sh DESCRIPTION
+This is a centralized task-fair reader-writer lock. It
+requires little space overhead and has a low latency
+fast path.
+.Sh EXAMPLE
+.Bd -literal -offset indent
+#include <ck_tflock.h>
+
+static ck_tflock_ticket_t lock = CK_TFLOCK_TICKET_INITIALIZER;
+
+static void
+reader(void)
+{
+
+ for (;;) {
+ ck_tflock_ticket_read_lock(&lock);
+ /* Read-side critical section. */
+ ck_tflock_ticket_read_unlock(&lock);
+ }
+
+ return;
+}
+
+static void
+writer(void)
+{
+
+ for (;;) {
+ ck_tflock_ticket_write_lock(&lock);
+ /* Write-side critical section. */
+ ck_tflock_ticket_write_unlock(&lock);
+ }
+
+ return;
+}
+.Ed
+.Sh SEE ALSO
+.Xr ck_brlock 3 ,
+.Xr ck_rwlock 3 ,
+.Xr ck_pflock 3 ,
+.Xr ck_swlock 3
+.Pp
+Additional information available at http://concurrencykit.org/
diff --git a/doc/refcheck.pl b/doc/refcheck.pl
new file mode 100755
index 0000000..1ed3a65
--- /dev/null
+++ b/doc/refcheck.pl
@@ -0,0 +1,27 @@
+#!/usr/bin/perl
+
+use warnings;
+use strict;
+
+my @files = @ARGV;
+
+my $h;
+
+foreach my $file (@files) {
+ $h->{$file} = 1;
+}
+
+foreach my $file (@files) {
+ open(my $fh, "<", $file) or die "cannot open < $file: $!";
+ while (<$fh>) {
+ chomp;
+ if ($_ =~ /\.Xr ((ck|CK)_[a-zA-Z_]+) ([0-9])/) {
+ my $name = $1;
+ my $section = $3;
+ if (!$h->{$name}) {
+ print STDERR "$file: ref to missing ${name}($section)\n";
+ }
+ }
+ }
+ close($fh) or die("cannot close $file: $!");
+}
diff --git a/include/ck_array.h b/include/ck_array.h
new file mode 100644
index 0000000..9cb97b2
--- /dev/null
+++ b/include/ck_array.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra
+ * Copyright 2013-2014 AppNexus, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_ARRAY_H
+#define CK_ARRAY_H
+
+#include <ck_cc.h>
+#include <ck_malloc.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+#include <ck_stddef.h>
+
+struct _ck_array {
+ unsigned int n_committed;
+ unsigned int length;
+ void *values[];
+};
+
+struct ck_array {
+ struct ck_malloc *allocator;
+ struct _ck_array *active;
+ unsigned int n_entries;
+ struct _ck_array *transaction;
+};
+typedef struct ck_array ck_array_t;
+
+struct ck_array_iterator {
+ struct _ck_array *snapshot;
+};
+typedef struct ck_array_iterator ck_array_iterator_t;
+
+#define CK_ARRAY_MODE_SPMC 0U
+#define CK_ARRAY_MODE_MPMC (void) /* Unsupported. */
+
+bool ck_array_init(ck_array_t *, unsigned int, struct ck_malloc *, unsigned int);
+bool ck_array_commit(ck_array_t *);
+bool ck_array_put(ck_array_t *, void *);
+int ck_array_put_unique(ck_array_t *, void *);
+bool ck_array_remove(ck_array_t *, void *);
+void ck_array_deinit(ck_array_t *, bool);
+
+CK_CC_INLINE static unsigned int
+ck_array_length(struct ck_array *array)
+{
+ struct _ck_array *a = ck_pr_load_ptr(&array->active);
+
+ ck_pr_fence_load();
+ return ck_pr_load_uint(&a->n_committed);
+}
+
+CK_CC_INLINE static void *
+ck_array_buffer(struct ck_array *array, unsigned int *length)
+{
+ struct _ck_array *a = ck_pr_load_ptr(&array->active);
+
+ ck_pr_fence_load();
+ *length = ck_pr_load_uint(&a->n_committed);
+ return a->values;
+}
+
+CK_CC_INLINE static bool
+ck_array_initialized(struct ck_array *array)
+{
+
+ return ck_pr_load_ptr(&array->active) != NULL;
+}
+
+#define CK_ARRAY_FOREACH(a, i, b) \
+ (i)->snapshot = ck_pr_load_ptr(&(a)->active); \
+ ck_pr_fence_load(); \
+ for (unsigned int _ck_i = 0; \
+ _ck_i < (a)->active->n_committed && \
+ ((*b) = (a)->active->values[_ck_i], 1); \
+ _ck_i++)
+
+#endif /* CK_ARRAY_H */
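
A minimal usage sketch for the interface above (illustrative, not from the
upstream sources): a single writer stages and commits entries while readers
iterate over the committed snapshot. The struct ck_malloc callback signatures
follow include/ck_malloc.h, which lies outside this hunk, so treat them as an
assumption.

#include <ck_array.h>
#include <stdlib.h>

static void *
my_malloc(size_t size)
{

	return malloc(size);
}

static void *
my_realloc(void *p, size_t old_size, size_t new_size, bool defer)
{

	(void)old_size;
	(void)defer;
	return realloc(p, new_size);
}

static void
my_free(void *p, size_t size, bool defer)
{

	(void)size;
	(void)defer;
	free(p);
	return;
}

static struct ck_malloc allocator = {
	.malloc = my_malloc,
	.realloc = my_realloc,
	.free = my_free
};

static ck_array_t array;

static bool
array_setup(void)
{

	return ck_array_init(&array, CK_ARRAY_MODE_SPMC, &allocator, 8);
}

/* Single writer: stage a pointer and publish it to readers. */
static void
publish(void *value)
{

	ck_array_put(&array, value);
	ck_array_commit(&array);	/* Error handling omitted. */
	return;
}

/* Any reader: iterate over the most recently committed snapshot. */
static void
scan(void)
{
	ck_array_iterator_t iterator;
	void *entry;

	CK_ARRAY_FOREACH(&array, &iterator, &entry) {
		(void)entry;	/* Use entry. */
	}

	return;
}
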
diff --git a/include/ck_backoff.h b/include/ck_backoff.h
new file mode 100644
index 0000000..82a4f21
--- /dev/null
+++ b/include/ck_backoff.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_BACKOFF_H
+#define CK_BACKOFF_H
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+
+#ifndef CK_BACKOFF_CEILING
+#define CK_BACKOFF_CEILING ((1 << 20) - 1)
+#endif
+
+#define CK_BACKOFF_INITIALIZER (1 << 9)
+
+typedef unsigned int ck_backoff_t;
+
+/*
+ * This is an exponential back-off implementation.
+ */
+CK_CC_INLINE static void
+ck_backoff_eb(unsigned int *c)
+{
+ unsigned int i, ceiling;
+
+ ceiling = *c;
+ for (i = 0; i < ceiling; i++)
+ ck_pr_barrier();
+
+ *c = ceiling <<= ceiling < CK_BACKOFF_CEILING;
+ return;
+}
+
+#endif /* CK_BACKOFF_H */
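
A minimal sketch of how the helper above is typically used: the caller keeps a
ck_backoff_t and invokes ck_backoff_eb on each failed attempt, so the bounded
busy-wait grows geometrically up to CK_BACKOFF_CEILING. The resource_available
flag is a stand-in for whatever condition is being polled.

#include <ck_backoff.h>

static void
wait_for_resource(int *resource_available)
{
	ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;

	/* Spin, doubling the bounded wait interval after each failed check. */
	while (ck_pr_load_int(resource_available) == 0)
		ck_backoff_eb(&backoff);

	return;
}
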
diff --git a/include/ck_barrier.h b/include/ck_barrier.h
new file mode 100644
index 0000000..d4c12ca
--- /dev/null
+++ b/include/ck_barrier.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_BARRIER_H
+#define CK_BARRIER_H
+
+#include <ck_spinlock.h>
+
+struct ck_barrier_centralized {
+ unsigned int value;
+ unsigned int sense;
+};
+typedef struct ck_barrier_centralized ck_barrier_centralized_t;
+
+struct ck_barrier_centralized_state {
+ unsigned int sense;
+};
+typedef struct ck_barrier_centralized_state ck_barrier_centralized_state_t;
+
+#define CK_BARRIER_CENTRALIZED_INITIALIZER {0, 0}
+#define CK_BARRIER_CENTRALIZED_STATE_INITIALIZER {0}
+
+void ck_barrier_centralized(ck_barrier_centralized_t *,
+ ck_barrier_centralized_state_t *, unsigned int);
+
+struct ck_barrier_combining_group {
+ unsigned int k;
+ unsigned int count;
+ unsigned int sense;
+ struct ck_barrier_combining_group *parent;
+ struct ck_barrier_combining_group *left;
+ struct ck_barrier_combining_group *right;
+ struct ck_barrier_combining_group *next;
+} CK_CC_CACHELINE;
+typedef struct ck_barrier_combining_group ck_barrier_combining_group_t;
+
+struct ck_barrier_combining_state {
+ unsigned int sense;
+};
+typedef struct ck_barrier_combining_state ck_barrier_combining_state_t;
+
+#define CK_BARRIER_COMBINING_STATE_INITIALIZER {~0}
+
+struct ck_barrier_combining {
+ struct ck_barrier_combining_group *root;
+ ck_spinlock_fas_t mutex;
+};
+typedef struct ck_barrier_combining ck_barrier_combining_t;
+
+void ck_barrier_combining_init(ck_barrier_combining_t *, ck_barrier_combining_group_t *);
+
+void ck_barrier_combining_group_init(ck_barrier_combining_t *,
+ ck_barrier_combining_group_t *, unsigned int);
+
+void ck_barrier_combining(ck_barrier_combining_t *,
+ ck_barrier_combining_group_t *,
+ ck_barrier_combining_state_t *);
+
+struct ck_barrier_dissemination_flag {
+ unsigned int tflag;
+ unsigned int *pflag;
+};
+typedef struct ck_barrier_dissemination_flag ck_barrier_dissemination_flag_t;
+
+struct ck_barrier_dissemination {
+ unsigned int nthr;
+ unsigned int size;
+ unsigned int tid;
+ struct ck_barrier_dissemination_flag *flags[2];
+};
+typedef struct ck_barrier_dissemination ck_barrier_dissemination_t;
+
+struct ck_barrier_dissemination_state {
+ int parity;
+ unsigned int sense;
+ unsigned int tid;
+};
+typedef struct ck_barrier_dissemination_state ck_barrier_dissemination_state_t;
+
+void ck_barrier_dissemination_init(ck_barrier_dissemination_t *,
+ ck_barrier_dissemination_flag_t **, unsigned int);
+
+void ck_barrier_dissemination_subscribe(ck_barrier_dissemination_t *,
+ ck_barrier_dissemination_state_t *);
+
+unsigned int ck_barrier_dissemination_size(unsigned int);
+
+void ck_barrier_dissemination(ck_barrier_dissemination_t *,
+ ck_barrier_dissemination_state_t *);
+
+struct ck_barrier_tournament_round {
+ int role;
+ unsigned int *opponent;
+ unsigned int flag;
+};
+typedef struct ck_barrier_tournament_round ck_barrier_tournament_round_t;
+
+struct ck_barrier_tournament {
+ unsigned int tid;
+ unsigned int size;
+ struct ck_barrier_tournament_round **rounds;
+};
+typedef struct ck_barrier_tournament ck_barrier_tournament_t;
+
+struct ck_barrier_tournament_state {
+ unsigned int sense;
+ unsigned int vpid;
+};
+typedef struct ck_barrier_tournament_state ck_barrier_tournament_state_t;
+
+void ck_barrier_tournament_subscribe(ck_barrier_tournament_t *,
+ ck_barrier_tournament_state_t *);
+void ck_barrier_tournament_init(ck_barrier_tournament_t *,
+ ck_barrier_tournament_round_t **,
+ unsigned int);
+unsigned int ck_barrier_tournament_size(unsigned int);
+void ck_barrier_tournament(ck_barrier_tournament_t *, ck_barrier_tournament_state_t *);
+
+struct ck_barrier_mcs {
+ unsigned int tid;
+ unsigned int *children[2];
+ unsigned int childnotready[4];
+ unsigned int dummy;
+ unsigned int havechild[4];
+ unsigned int *parent;
+ unsigned int parentsense;
+};
+typedef struct ck_barrier_mcs ck_barrier_mcs_t;
+
+struct ck_barrier_mcs_state {
+ unsigned int sense;
+ unsigned int vpid;
+};
+typedef struct ck_barrier_mcs_state ck_barrier_mcs_state_t;
+
+void ck_barrier_mcs_init(ck_barrier_mcs_t *, unsigned int);
+void ck_barrier_mcs_subscribe(ck_barrier_mcs_t *, ck_barrier_mcs_state_t *);
+void ck_barrier_mcs(ck_barrier_mcs_t *, ck_barrier_mcs_state_t *);
+
+#endif /* CK_BARRIER_H */
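
A minimal sketch of the centralized barrier above, assuming a fixed set of
eight worker threads; the other flavors (combining, dissemination, tournament,
MCS) follow a similar pattern of global initialization plus per-thread
subscribe/state objects.

#include <ck_barrier.h>

static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
static unsigned int n_threads = 8;	/* Placeholder thread count. */

static void
worker(void)
{
	ck_barrier_centralized_state_t state =
	    CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;

	for (;;) {
		/* Phase of work... */
		ck_barrier_centralized(&barrier, &state, n_threads);
		/* Every participating thread has finished the phase here. */
	}
}
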
diff --git a/include/ck_bitmap.h b/include/ck_bitmap.h
new file mode 100644
index 0000000..624e953
--- /dev/null
+++ b/include/ck_bitmap.h
@@ -0,0 +1,515 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * Copyright 2012-2014 AppNexus, Inc.
+ * Copyright 2014 Paul Khuong.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_BITMAP_H
+#define CK_BITMAP_H
+
+#include <ck_cc.h>
+#include <ck_limits.h>
+#include <ck_pr.h>
+#include <ck_stdint.h>
+#include <ck_stdbool.h>
+#include <ck_stddef.h>
+#include <ck_string.h>
+
+#if !defined(CK_F_PR_LOAD_UINT) || !defined(CK_F_PR_STORE_UINT) || \
+ !defined(CK_F_PR_AND_UINT) || !defined(CK_F_PR_OR_UINT) || \
+ !defined(CK_F_CC_CTZ)
+#error "ck_bitmap is not supported on your platform."
+#endif
+
+#define CK_BITMAP_BLOCK (sizeof(unsigned int) * CHAR_BIT)
+#define CK_BITMAP_OFFSET(i) ((i) % CK_BITMAP_BLOCK)
+#define CK_BITMAP_BIT(i) (1U << CK_BITMAP_OFFSET(i))
+#define CK_BITMAP_PTR(x, i) ((x) + ((i) / CK_BITMAP_BLOCK))
+#define CK_BITMAP_BLOCKS(n) (((n) + CK_BITMAP_BLOCK - 1) / CK_BITMAP_BLOCK)
+
+#define CK_BITMAP_INSTANCE(n_entries) \
+ union { \
+ struct { \
+ unsigned int n_bits; \
+ unsigned int map[CK_BITMAP_BLOCKS(n_entries)]; \
+ } content; \
+ struct ck_bitmap bitmap; \
+ }
+
+#define CK_BITMAP_ITERATOR_INIT(a, b) \
+ ck_bitmap_iterator_init((a), &(b)->bitmap)
+
+#define CK_BITMAP_INIT(a, b, c) \
+ ck_bitmap_init(&(a)->bitmap, (b), (c))
+
+#define CK_BITMAP_NEXT(a, b, c) \
+ ck_bitmap_next(&(a)->bitmap, (b), (c))
+
+#define CK_BITMAP_SET(a, b) \
+ ck_bitmap_set(&(a)->bitmap, (b))
+
+#define CK_BITMAP_BTS(a, b) \
+ ck_bitmap_bts(&(a)->bitmap, (b))
+
+#define CK_BITMAP_RESET(a, b) \
+ ck_bitmap_reset(&(a)->bitmap, (b))
+
+#define CK_BITMAP_TEST(a, b) \
+ ck_bitmap_test(&(a)->bitmap, (b))
+
+#define CK_BITMAP_UNION(a, b) \
+ ck_bitmap_union(&(a)->bitmap, &(b)->bitmap)
+
+#define CK_BITMAP_INTERSECTION(a, b) \
+ ck_bitmap_intersection(&(a)->bitmap, &(b)->bitmap)
+
+#define CK_BITMAP_INTERSECTION_NEGATE(a, b) \
+ ck_bitmap_intersection_negate(&(a)->bitmap, &(b)->bitmap)
+
+#define CK_BITMAP_CLEAR(a) \
+ ck_bitmap_clear(&(a)->bitmap)
+
+#define CK_BITMAP_EMPTY(a, b) \
+ ck_bitmap_empty(&(a)->bitmap, b)
+
+#define CK_BITMAP_FULL(a, b) \
+ ck_bitmap_full(&(a)->bitmap, b)
+
+#define CK_BITMAP_COUNT(a, b) \
+ ck_bitmap_count(&(a)->bitmap, b)
+
+#define CK_BITMAP_COUNT_INTERSECT(a, b, c) \
+ ck_bitmap_count_intersect(&(a)->bitmap, b, c)
+
+#define CK_BITMAP_BITS(a) \
+ ck_bitmap_bits(&(a)->bitmap)
+
+#define CK_BITMAP_BUFFER(a) \
+ ck_bitmap_buffer(&(a)->bitmap)
+
+#define CK_BITMAP(a) \
+ (&(a)->bitmap)
+
+struct ck_bitmap {
+ unsigned int n_bits;
+ unsigned int map[];
+};
+typedef struct ck_bitmap ck_bitmap_t;
+
+struct ck_bitmap_iterator {
+ unsigned int cache;
+ unsigned int n_block;
+ unsigned int n_limit;
+};
+typedef struct ck_bitmap_iterator ck_bitmap_iterator_t;
+
+CK_CC_INLINE static unsigned int
+ck_bitmap_base(unsigned int n_bits)
+{
+
+ return CK_BITMAP_BLOCKS(n_bits) * sizeof(unsigned int);
+}
+
+/*
+ * Returns the required number of bytes for a ck_bitmap_t object supporting the
+ * specified number of bits.
+ */
+CK_CC_INLINE static unsigned int
+ck_bitmap_size(unsigned int n_bits)
+{
+
+ return ck_bitmap_base(n_bits) + sizeof(struct ck_bitmap);
+}
+
+/*
+ * Returns total number of bits in specified bitmap.
+ */
+CK_CC_INLINE static unsigned int
+ck_bitmap_bits(const struct ck_bitmap *bitmap)
+{
+
+ return bitmap->n_bits;
+}
+
+/*
+ * Returns a pointer to the bit buffer associated
+ * with the specified bitmap.
+ */
+CK_CC_INLINE static void *
+ck_bitmap_buffer(struct ck_bitmap *bitmap)
+{
+
+ return bitmap->map;
+}
+
+/*
+ * Sets the bit at the offset specified in the second argument.
+ */
+CK_CC_INLINE static void
+ck_bitmap_set(struct ck_bitmap *bitmap, unsigned int n)
+{
+
+ ck_pr_or_uint(CK_BITMAP_PTR(bitmap->map, n), CK_BITMAP_BIT(n));
+ return;
+}
+
+/*
+ * Performs a test-and-set operation at the offset specified in the
+ * second argument.
+ * Returns true if the bit at the specified offset was already set,
+ * false otherwise.
+ */
+CK_CC_INLINE static bool
+ck_bitmap_bts(struct ck_bitmap *bitmap, unsigned int n)
+{
+
+ return ck_pr_bts_uint(CK_BITMAP_PTR(bitmap->map, n),
+ CK_BITMAP_OFFSET(n));
+}
+
+/*
+ * Resets the bit at the offset specified in the second argument.
+ */
+CK_CC_INLINE static void
+ck_bitmap_reset(struct ck_bitmap *bitmap, unsigned int n)
+{
+
+ ck_pr_and_uint(CK_BITMAP_PTR(bitmap->map, n), ~CK_BITMAP_BIT(n));
+ return;
+}
+
+/*
+ * Determines whether the bit at offset specified in the
+ * second argument is set.
+ */
+CK_CC_INLINE static bool
+ck_bitmap_test(const struct ck_bitmap *bitmap, unsigned int n)
+{
+ unsigned int block;
+
+ block = ck_pr_load_uint(CK_BITMAP_PTR(bitmap->map, n));
+ return block & CK_BITMAP_BIT(n);
+}
+
+/*
+ * Combines bits from second bitmap into the first bitmap. This is not a
+ * linearized operation with respect to the complete bitmap.
+ */
+CK_CC_INLINE static void
+ck_bitmap_union(struct ck_bitmap *dst, const struct ck_bitmap *src)
+{
+ unsigned int n;
+ unsigned int n_buckets = dst->n_bits;
+
+ if (src->n_bits < dst->n_bits)
+ n_buckets = src->n_bits;
+
+ n_buckets = CK_BITMAP_BLOCKS(n_buckets);
+ for (n = 0; n < n_buckets; n++) {
+ ck_pr_or_uint(&dst->map[n],
+ ck_pr_load_uint(&src->map[n]));
+ }
+
+ return;
+}
+
+/*
+ * Intersects bits from second bitmap into the first bitmap. This is
+ * not a linearized operation with respect to the complete bitmap.
+ * Any trailing bit in dst is cleared.
+ */
+CK_CC_INLINE static void
+ck_bitmap_intersection(struct ck_bitmap *dst, const struct ck_bitmap *src)
+{
+ unsigned int n;
+ unsigned int n_buckets = dst->n_bits;
+ unsigned int n_intersect = n_buckets;
+
+ if (src->n_bits < n_intersect)
+ n_intersect = src->n_bits;
+
+ n_buckets = CK_BITMAP_BLOCKS(n_buckets);
+ n_intersect = CK_BITMAP_BLOCKS(n_intersect);
+ for (n = 0; n < n_intersect; n++) {
+ ck_pr_and_uint(&dst->map[n],
+ ck_pr_load_uint(&src->map[n]));
+ }
+
+ for (; n < n_buckets; n++)
+ ck_pr_store_uint(&dst->map[n], 0);
+
+ return;
+}
+
+/*
+ * Intersects the complement of bits from second bitmap into the first
+ * bitmap. This is not a linearized operation with respect to the
+ * complete bitmap. Any trailing bit in dst is left as is.
+ */
+CK_CC_INLINE static void
+ck_bitmap_intersection_negate(struct ck_bitmap *dst,
+ const struct ck_bitmap *src)
+{
+ unsigned int n;
+ unsigned int n_intersect = dst->n_bits;
+
+ if (src->n_bits < n_intersect)
+ n_intersect = src->n_bits;
+
+ n_intersect = CK_BITMAP_BLOCKS(n_intersect);
+ for (n = 0; n < n_intersect; n++) {
+ ck_pr_and_uint(&dst->map[n],
+ (~ck_pr_load_uint(&src->map[n])));
+ }
+
+ return;
+}
+
+/*
+ * Resets all bits in the provided bitmap. This is not a linearized
+ * operation in ck_bitmap.
+ */
+CK_CC_INLINE static void
+ck_bitmap_clear(struct ck_bitmap *bitmap)
+{
+ unsigned int i;
+ unsigned int n_buckets = ck_bitmap_base(bitmap->n_bits) /
+ sizeof(unsigned int);
+
+ for (i = 0; i < n_buckets; i++)
+ ck_pr_store_uint(&bitmap->map[i], 0);
+
+ return;
+}
+
+/*
+ * Returns true if the first limit bits in bitmap are cleared. If
+ * limit is greater than the bitmap size, limit is truncated to that
+ * size.
+ */
+CK_CC_INLINE static bool
+ck_bitmap_empty(const ck_bitmap_t *bitmap, unsigned int limit)
+{
+ unsigned int i, words, slop;
+
+ if (limit > bitmap->n_bits)
+ limit = bitmap->n_bits;
+
+ words = limit / CK_BITMAP_BLOCK;
+ slop = limit % CK_BITMAP_BLOCK;
+ for (i = 0; i < words; i++) {
+ if (ck_pr_load_uint(&bitmap->map[i]) != 0) {
+ return false;
+ }
+ }
+
+ if (slop > 0) {
+ unsigned int word;
+
+ word = ck_pr_load_uint(&bitmap->map[i]);
+ if ((word & ((1U << slop) - 1)) != 0)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Returns true if the first limit bits in bitmap are set. If limit
+ * is greater than the bitmap size, limit is truncated to that size.
+ */
+CK_CC_UNUSED static bool
+ck_bitmap_full(const ck_bitmap_t *bitmap, unsigned int limit)
+{
+ unsigned int i, slop, words;
+
+ if (limit > bitmap->n_bits) {
+ limit = bitmap->n_bits;
+ }
+
+ words = limit / CK_BITMAP_BLOCK;
+ slop = limit % CK_BITMAP_BLOCK;
+ for (i = 0; i < words; i++) {
+ if (ck_pr_load_uint(&bitmap->map[i]) != -1U)
+ return false;
+ }
+
+ if (slop > 0) {
+ unsigned int word;
+
+ word = ~ck_pr_load_uint(&bitmap->map[i]);
+ if ((word & ((1U << slop) - 1)) != 0)
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Returns the number of set bits in the bitmap, up to (and excluding)
+ * limit. If limit is greater than the bitmap size, it is truncated
+ * to that size.
+ */
+CK_CC_INLINE static unsigned int
+ck_bitmap_count(const ck_bitmap_t *bitmap, unsigned int limit)
+{
+ unsigned int count, i, slop, words;
+
+ if (limit > bitmap->n_bits)
+ limit = bitmap->n_bits;
+
+ words = limit / CK_BITMAP_BLOCK;
+ slop = limit % CK_BITMAP_BLOCK;
+ for (i = 0, count = 0; i < words; i++)
+ count += ck_cc_popcount(ck_pr_load_uint(&bitmap->map[i]));
+
+ if (slop > 0) {
+ unsigned int word;
+
+ word = ck_pr_load_uint(&bitmap->map[i]);
+ count += ck_cc_popcount(word & ((1U << slop) - 1));
+ }
+ return count;
+}
+
+/*
+ * Returns the number of set bits in the intersection of two bitmaps,
+ * up to (and excluding) limit. If limit is greater than either bitmap
+ * size, it is truncated to the smaller of the two.
+ */
+CK_CC_INLINE static unsigned int
+ck_bitmap_count_intersect(const ck_bitmap_t *x, const ck_bitmap_t *y,
+ unsigned int limit)
+{
+ unsigned int count, i, slop, words;
+
+ if (limit > x->n_bits)
+ limit = x->n_bits;
+
+ if (limit > y->n_bits)
+ limit = y->n_bits;
+
+ words = limit / CK_BITMAP_BLOCK;
+ slop = limit % CK_BITMAP_BLOCK;
+ for (i = 0, count = 0; i < words; i++) {
+ unsigned int xi, yi;
+
+ xi = ck_pr_load_uint(&x->map[i]);
+ yi = ck_pr_load_uint(&y->map[i]);
+ count += ck_cc_popcount(xi & yi);
+ }
+
+ if (slop > 0) {
+ unsigned int word, xi, yi;
+
+ xi = ck_pr_load_uint(&x->map[i]);
+ yi = ck_pr_load_uint(&y->map[i]);
+ word = xi & yi;
+ count += ck_cc_popcount(word & ((1U << slop) - 1));
+ }
+ return count;
+}
+
+/*
+ * Initializes a ck_bitmap pointing to a region of memory with
+ * ck_bitmap_size(n_bits) bytes. Third argument determines whether
+ * default bit value is 1 (true) or 0 (false).
+ */
+CK_CC_INLINE static void
+ck_bitmap_init(struct ck_bitmap *bitmap,
+ unsigned int n_bits,
+ bool set)
+{
+ unsigned int base = ck_bitmap_base(n_bits);
+
+ bitmap->n_bits = n_bits;
+ memset(bitmap->map, -(int)set, base);
+
+ if (set == true) {
+ unsigned int b = n_bits % CK_BITMAP_BLOCK;
+
+ if (b == 0)
+ return;
+
+ *CK_BITMAP_PTR(bitmap->map, n_bits - 1) &= (1U << b) - 1U;
+ }
+
+ return;
+}
+
+/*
+ * Initialize iterator for use with provided bitmap.
+ */
+CK_CC_INLINE static void
+ck_bitmap_iterator_init(struct ck_bitmap_iterator *i,
+ const struct ck_bitmap *bitmap)
+{
+
+ i->n_block = 0;
+ i->n_limit = CK_BITMAP_BLOCKS(bitmap->n_bits);
+ if (i->n_limit > 0) {
+ i->cache = ck_pr_load_uint(&bitmap->map[0]);
+ } else {
+ i->cache = 0;
+ }
+ return;
+}
+
+/*
+ * Iterate to next bit.
+ */
+CK_CC_INLINE static bool
+ck_bitmap_next(const struct ck_bitmap *bitmap,
+ struct ck_bitmap_iterator *i,
+ unsigned int *bit)
+{
+ unsigned int cache = i->cache;
+ unsigned int n_block = i->n_block;
+ unsigned int n_limit = i->n_limit;
+
+ if (cache == 0) {
+ if (n_block >= n_limit)
+ return false;
+
+ for (n_block++; n_block < n_limit; n_block++) {
+ cache = ck_pr_load_uint(&bitmap->map[n_block]);
+ if (cache != 0)
+ goto non_zero;
+ }
+
+ i->cache = 0;
+ i->n_block = n_block;
+ return false;
+ }
+
+non_zero:
+ *bit = CK_BITMAP_BLOCK * n_block + ck_cc_ctz(cache);
+ i->cache = cache & (cache - 1);
+ i->n_block = n_block;
+ return true;
+}
+
+#endif /* CK_BITMAP_H */
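
A minimal sketch of the fixed-size convenience macros above: a 128-bit bitmap
is declared with CK_BITMAP_INSTANCE, a couple of bits are set, and the
iterator walks the bits that are currently set.

#include <ck_bitmap.h>
#include <stdio.h>

static CK_BITMAP_INSTANCE(128) map;

static void
example(void)
{
	ck_bitmap_iterator_t iterator;
	unsigned int bit;

	CK_BITMAP_INIT(&map, 128, false);
	CK_BITMAP_SET(&map, 3);
	CK_BITMAP_SET(&map, 71);

	CK_BITMAP_ITERATOR_INIT(&iterator, &map);
	while (CK_BITMAP_NEXT(&map, &iterator, &bit) == true)
		printf("bit %u is set\n", bit);

	return;
}
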
diff --git a/include/ck_brlock.h b/include/ck_brlock.h
new file mode 100644
index 0000000..d1b4ed1
--- /dev/null
+++ b/include/ck_brlock.h
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_BRLOCK_H
+#define CK_BRLOCK_H
+
+/*
+ * Big reader spinlocks provide cache-local contention-free read
+ * lock acquisition in the absence of writers. This comes at the
+ * cost of O(n) write lock acquisition. They were first implemented
+ * in the Linux kernel by Ingo Molnar and David S. Miller around the
+ * year 2000.
+ *
+ * This implementation is thread-agnostic, which comes at the cost
+ * of larger reader objects due to necessary linkage overhead. In
+ * order to cut down on TLB pressure, it is recommended to allocate
+ * these objects on the same page.
+ */
+
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+#include <ck_stddef.h>
+
+struct ck_brlock_reader {
+ unsigned int n_readers;
+ struct ck_brlock_reader *previous;
+ struct ck_brlock_reader *next;
+};
+typedef struct ck_brlock_reader ck_brlock_reader_t;
+
+#define CK_BRLOCK_READER_INITIALIZER {0}
+
+struct ck_brlock {
+ struct ck_brlock_reader *readers;
+ unsigned int writer;
+};
+typedef struct ck_brlock ck_brlock_t;
+
+#define CK_BRLOCK_INITIALIZER {NULL, false}
+
+CK_CC_INLINE static void
+ck_brlock_init(struct ck_brlock *br)
+{
+
+ br->readers = NULL;
+ br->writer = false;
+ ck_pr_barrier();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_brlock_write_lock(struct ck_brlock *br)
+{
+ struct ck_brlock_reader *cursor;
+
+ /*
+ * As the frequency of write acquisitions should be low,
+ * there is no point to more advanced contention avoidance.
+ */
+ while (ck_pr_fas_uint(&br->writer, true) == true)
+ ck_pr_stall();
+
+ ck_pr_fence_atomic_load();
+
+ /* The reader list is protected under the writer br. */
+ for (cursor = br->readers; cursor != NULL; cursor = cursor->next) {
+ while (ck_pr_load_uint(&cursor->n_readers) != 0)
+ ck_pr_stall();
+ }
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_brlock_write_unlock(struct ck_brlock *br)
+{
+
+ ck_pr_fence_unlock();
+ ck_pr_store_uint(&br->writer, false);
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_brlock_write_trylock(struct ck_brlock *br, unsigned int factor)
+{
+ struct ck_brlock_reader *cursor;
+ unsigned int steps = 0;
+
+ while (ck_pr_fas_uint(&br->writer, true) == true) {
+ if (++steps >= factor)
+ return false;
+
+ ck_pr_stall();
+ }
+
+ /*
+ * We do not require a strict fence here as atomic RMW operations
+ * are serializing.
+ */
+ ck_pr_fence_atomic_load();
+
+ for (cursor = br->readers; cursor != NULL; cursor = cursor->next) {
+ while (ck_pr_load_uint(&cursor->n_readers) != 0) {
+ if (++steps >= factor) {
+ ck_brlock_write_unlock(br);
+ return false;
+ }
+
+ ck_pr_stall();
+ }
+ }
+
+ ck_pr_fence_lock();
+ return true;
+}
+
+CK_CC_INLINE static void
+ck_brlock_read_register(struct ck_brlock *br, struct ck_brlock_reader *reader)
+{
+
+ reader->n_readers = 0;
+ reader->previous = NULL;
+
+ /* Implicit compiler barrier. */
+ ck_brlock_write_lock(br);
+
+ reader->next = ck_pr_load_ptr(&br->readers);
+ if (reader->next != NULL)
+ reader->next->previous = reader;
+ ck_pr_store_ptr(&br->readers, reader);
+
+ ck_brlock_write_unlock(br);
+ return;
+}
+
+CK_CC_INLINE static void
+ck_brlock_read_unregister(struct ck_brlock *br, struct ck_brlock_reader *reader)
+{
+
+ ck_brlock_write_lock(br);
+
+ if (reader->next != NULL)
+ reader->next->previous = reader->previous;
+
+ if (reader->previous != NULL)
+ reader->previous->next = reader->next;
+ else
+ br->readers = reader->next;
+
+ ck_brlock_write_unlock(br);
+ return;
+}
+
+CK_CC_INLINE static void
+ck_brlock_read_lock(struct ck_brlock *br, struct ck_brlock_reader *reader)
+{
+
+ if (reader->n_readers >= 1) {
+ ck_pr_store_uint(&reader->n_readers, reader->n_readers + 1);
+ return;
+ }
+
+ for (;;) {
+ while (ck_pr_load_uint(&br->writer) == true)
+ ck_pr_stall();
+
+#if defined(__x86__) || defined(__x86_64__)
+ ck_pr_fas_uint(&reader->n_readers, 1);
+
+ /*
+ * Serialize reader counter update with respect to load of
+ * writer.
+ */
+ ck_pr_fence_atomic_load();
+#else
+ ck_pr_store_uint(&reader->n_readers, 1);
+
+ /*
+ * Serialize reader counter update with respect to load of
+ * writer.
+ */
+ ck_pr_fence_store_load();
+#endif
+
+ if (ck_pr_load_uint(&br->writer) == false)
+ break;
+
+ ck_pr_store_uint(&reader->n_readers, 0);
+ }
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_brlock_read_trylock(struct ck_brlock *br,
+ struct ck_brlock_reader *reader,
+ unsigned int factor)
+{
+ unsigned int steps = 0;
+
+ if (reader->n_readers >= 1) {
+ ck_pr_store_uint(&reader->n_readers, reader->n_readers + 1);
+ return true;
+ }
+
+ for (;;) {
+ while (ck_pr_load_uint(&br->writer) == true) {
+ if (++steps >= factor)
+ return false;
+
+ ck_pr_stall();
+ }
+
+#if defined(__x86__) || defined(__x86_64__)
+ ck_pr_fas_uint(&reader->n_readers, 1);
+
+ /*
+ * Serialize reader counter update with respect to load of
+ * writer.
+ */
+ ck_pr_fence_atomic_load();
+#else
+ ck_pr_store_uint(&reader->n_readers, 1);
+
+ /*
+ * Serialize reader counter update with respect to load of
+ * writer.
+ */
+ ck_pr_fence_store_load();
+#endif
+
+ if (ck_pr_load_uint(&br->writer) == false)
+ break;
+
+ ck_pr_store_uint(&reader->n_readers, 0);
+
+ if (++steps >= factor)
+ return false;
+ }
+
+ ck_pr_fence_lock();
+ return true;
+}
+
+CK_CC_INLINE static void
+ck_brlock_read_unlock(struct ck_brlock_reader *reader)
+{
+
+ ck_pr_fence_unlock();
+ ck_pr_store_uint(&reader->n_readers, reader->n_readers - 1);
+ return;
+}
+
+#endif /* CK_BRLOCK_H */
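
A minimal usage sketch for the interface above: each reader thread registers a
ck_brlock_reader_t of its own before entering read-side sections, while the
occasional writer operates on the shared lock directly.

#include <ck_brlock.h>

static ck_brlock_t lock = CK_BRLOCK_INITIALIZER;

static void
reader_thread(void)
{
	ck_brlock_reader_t me = CK_BRLOCK_READER_INITIALIZER;

	ck_brlock_read_register(&lock, &me);
	for (;;) {
		ck_brlock_read_lock(&lock, &me);
		/* Read-side critical section. */
		ck_brlock_read_unlock(&me);
	}
	/* ck_brlock_read_unregister(&lock, &me) before thread exit. */
}

static void
writer_thread(void)
{

	for (;;) {
		ck_brlock_write_lock(&lock);
		/* Write-side critical section. */
		ck_brlock_write_unlock(&lock);
	}
}
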
diff --git a/include/ck_bytelock.h b/include/ck_bytelock.h
new file mode 100644
index 0000000..d437316
--- /dev/null
+++ b/include/ck_bytelock.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_BYTELOCK_H
+#define CK_BYTELOCK_H
+
+/*
+ * The implementations here are derived from the work described in:
+ * Dice, D. and Shavit, N. 2010. TLRW: return of the read-write lock.
+ * In Proceedings of the 22nd ACM Symposium on Parallelism in Algorithms
+ * and Architectures (Thira, Santorini, Greece, June 13 - 15, 2010).
+ * SPAA '10. ACM, New York, NY, 284-293.
+ */
+
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+#include <ck_stddef.h>
+#include <ck_limits.h>
+
+struct ck_bytelock {
+ unsigned int owner;
+ unsigned int n_readers;
+ uint8_t readers[CK_MD_CACHELINE - sizeof(unsigned int) * 2] CK_CC_ALIGN(8);
+};
+typedef struct ck_bytelock ck_bytelock_t;
+
+#define CK_BYTELOCK_INITIALIZER { 0, 0, {0} }
+#define CK_BYTELOCK_UNSLOTTED UINT_MAX
+
+CK_CC_INLINE static void
+ck_bytelock_init(struct ck_bytelock *bytelock)
+{
+ unsigned int i;
+
+ bytelock->owner = 0;
+ bytelock->n_readers = 0;
+ for (i = 0; i < sizeof bytelock->readers; i++)
+ bytelock->readers[i] = false;
+
+ ck_pr_barrier();
+ return;
+}
+
+#ifdef CK_F_PR_LOAD_64
+#define CK_BYTELOCK_LENGTH sizeof(uint64_t)
+#define CK_BYTELOCK_LOAD ck_pr_load_64
+#define CK_BYTELOCK_TYPE uint64_t
+#elif defined(CK_F_PR_LOAD_32)
+#define CK_BYTELOCK_LENGTH sizeof(uint32_t)
+#define CK_BYTELOCK_LOAD ck_pr_load_32
+#define CK_BYTELOCK_TYPE uint32_t
+#else
+#error Unsupported platform.
+#endif
+
+CK_CC_INLINE static void
+ck_bytelock_write_lock(struct ck_bytelock *bytelock, unsigned int slot)
+{
+ CK_BYTELOCK_TYPE *readers = (void *)bytelock->readers;
+ unsigned int i;
+
+ /* Announce upcoming writer acquisition. */
+ while (ck_pr_cas_uint(&bytelock->owner, 0, slot) == false)
+ ck_pr_stall();
+
+ /* If we are slotted, we might be upgrading from a read lock. */
+ if (slot <= sizeof bytelock->readers)
+ ck_pr_store_8(&bytelock->readers[slot - 1], false);
+
+ /*
+ * Wait for slotted readers to drain out. This also provides the
+ * lock acquire semantics.
+ */
+ ck_pr_fence_atomic_load();
+
+ for (i = 0; i < sizeof(bytelock->readers) / CK_BYTELOCK_LENGTH; i++) {
+ while (CK_BYTELOCK_LOAD(&readers[i]) != false)
+ ck_pr_stall();
+ }
+
+ /* Wait for unslotted readers to drain out. */
+ while (ck_pr_load_uint(&bytelock->n_readers) != 0)
+ ck_pr_stall();
+
+ ck_pr_fence_lock();
+ return;
+}
+
+#undef CK_BYTELOCK_LENGTH
+#undef CK_BYTELOCK_LOAD
+#undef CK_BYTELOCK_TYPE
+
+CK_CC_INLINE static void
+ck_bytelock_write_unlock(struct ck_bytelock *bytelock)
+{
+
+ ck_pr_fence_unlock();
+ ck_pr_store_uint(&bytelock->owner, 0);
+ return;
+}
+
+CK_CC_INLINE static void
+ck_bytelock_read_lock(struct ck_bytelock *bytelock, unsigned int slot)
+{
+
+ if (ck_pr_load_uint(&bytelock->owner) == slot) {
+ ck_pr_store_8(&bytelock->readers[slot - 1], true);
+ ck_pr_fence_strict_store();
+ ck_pr_store_uint(&bytelock->owner, 0);
+ return;
+ }
+
+ /* Unslotted threads will have to use the readers counter. */
+ if (slot > sizeof bytelock->readers) {
+ for (;;) {
+ ck_pr_inc_uint(&bytelock->n_readers);
+ ck_pr_fence_atomic_load();
+ if (ck_pr_load_uint(&bytelock->owner) == 0)
+ break;
+ ck_pr_dec_uint(&bytelock->n_readers);
+
+ while (ck_pr_load_uint(&bytelock->owner) != 0)
+ ck_pr_stall();
+ }
+
+ ck_pr_fence_lock();
+ return;
+ }
+
+ slot -= 1;
+ for (;;) {
+#ifdef CK_F_PR_FAA_8
+ ck_pr_fas_8(&bytelock->readers[slot], true);
+ ck_pr_fence_atomic_load();
+#else
+ ck_pr_store_8(&bytelock->readers[slot], true);
+ ck_pr_fence_store_load();
+#endif
+
+ /*
+ * If there is no owner at this point, our slot has
+ * already been published and it is guaranteed no
+ * write acquisition will succeed until we drain out.
+ */
+ if (ck_pr_load_uint(&bytelock->owner) == 0)
+ break;
+
+ ck_pr_store_8(&bytelock->readers[slot], false);
+ while (ck_pr_load_uint(&bytelock->owner) != 0)
+ ck_pr_stall();
+ }
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_bytelock_read_unlock(struct ck_bytelock *bytelock, unsigned int slot)
+{
+
+ ck_pr_fence_unlock();
+
+ if (slot > sizeof bytelock->readers)
+ ck_pr_dec_uint(&bytelock->n_readers);
+ else
+ ck_pr_store_8(&bytelock->readers[slot - 1], false);
+
+ return;
+}
+
+#endif /* CK_BYTELOCK_H */
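
A minimal usage sketch for the interface above, assuming slots are assigned
per thread starting at 1; a thread without a slot would pass
CK_BYTELOCK_UNSLOTTED and fall back to the shared reader counter.

#include <ck_bytelock.h>

static ck_bytelock_t lock = CK_BYTELOCK_INITIALIZER;

static void
slotted_reader(unsigned int slot)	/* slot >= 1, assigned per thread. */
{

	ck_bytelock_read_lock(&lock, slot);
	/* Read-side critical section. */
	ck_bytelock_read_unlock(&lock, slot);
	return;
}

static void
slotted_writer(unsigned int slot)
{

	ck_bytelock_write_lock(&lock, slot);
	/* Write-side critical section. */
	ck_bytelock_write_unlock(&lock);
	return;
}
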
diff --git a/include/ck_cc.h b/include/ck_cc.h
new file mode 100644
index 0000000..e17dc7b
--- /dev/null
+++ b/include/ck_cc.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * Copyright 2014 Paul Khuong.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_CC_H
+#define CK_CC_H
+
+#if defined(__GNUC__) || defined(__SUNPRO_C)
+#include "gcc/ck_cc.h"
+#endif
+
+#ifndef CK_CC_RESTRICT
+#define CK_CC_RESTRICT
+#endif
+
+#ifndef CK_CC_INLINE
+#define CK_CC_INLINE inline
+#endif
+
+#ifndef CK_CC_FORCE_INLINE
+#define CK_CC_FORCE_INLINE inline
+#endif
+
+#define CK_CC_DECONST_PTR(X) ((void *)(uintptr_t)(X))
+
+/*
+ * Container function.
+ * This relies on (compiler) implementation-defined behavior.
+ */
+#define CK_CC_CONTAINER(F, T, M, N) \
+ CK_CC_INLINE static T * \
+ N(F *p) \
+ { \
+ F *n = p; \
+ return (T *)(void *)(((char *)n) - ((size_t)&((T *)0)->M)); \
+ }
+
+#define CK_CC_PAD(x) union { char pad[x]; }
+
+#ifndef CK_CC_ALIASED
+#define CK_CC_ALIASED
+#endif
+
+#ifndef CK_CC_UNUSED
+#define CK_CC_UNUSED
+#endif
+
+#ifndef CK_CC_USED
+#define CK_CC_USED
+#endif
+
+#ifndef CK_CC_IMM
+#define CK_CC_IMM
+#endif
+
+#ifndef CK_CC_PACKED
+#define CK_CC_PACKED
+#endif
+
+#ifndef CK_CC_WEAKREF
+#define CK_CC_WEAKREF
+#endif
+
+#ifndef CK_CC_ALIGN
+#define CK_CC_ALIGN(X)
+#endif
+
+#ifndef CK_CC_CACHELINE
+#define CK_CC_CACHELINE
+#endif
+
+#ifndef CK_CC_LIKELY
+#define CK_CC_LIKELY(x) x
+#endif
+
+#ifndef CK_CC_UNLIKELY
+#define CK_CC_UNLIKELY(x) x
+#endif
+
+#ifndef CK_CC_TYPEOF
+#define CK_CC_TYPEOF(X, DEFAULT) (DEFAULT)
+#endif
+
+#ifndef CK_F_CC_FFS
+#define CK_F_CC_FFS
+CK_CC_INLINE static int
+ck_cc_ffs(unsigned int x)
+{
+ unsigned int i;
+
+ if (x == 0)
+ return 0;
+
+ for (i = 1; (x & 1) == 0; i++, x >>= 1);
+
+ return i;
+}
+#endif
+
+#ifndef CK_F_CC_CLZ
+#define CK_F_CC_CLZ
+#include <ck_limits.h>
+
+CK_CC_INLINE static int
+ck_cc_clz(unsigned int x)
+{
+ unsigned int count, i;
+
+ for (count = 0, i = sizeof(unsigned int) * CHAR_BIT; i > 0; count++) {
+ unsigned int bit = 1U << --i;
+
+ if (x & bit)
+ break;
+ }
+
+ return count;
+}
+#endif
+
+#ifndef CK_F_CC_CTZ
+#define CK_F_CC_CTZ
+CK_CC_INLINE static int
+ck_cc_ctz(unsigned int x)
+{
+ unsigned int i;
+
+ if (x == 0)
+ return 0;
+
+ for (i = 0; (x & 1) == 0; i++, x >>= 1);
+
+ return i;
+}
+#endif
+
+#ifndef CK_F_CC_POPCOUNT
+#define CK_F_CC_POPCOUNT
+CK_CC_INLINE static int
+ck_cc_popcount(unsigned int x)
+{
+ unsigned int acc;
+
+ for (acc = 0; x != 0; x >>= 1)
+ acc += x & 1;
+
+ return acc;
+}
+#endif
+
+
+#ifdef __cplusplus
+#define CK_CPP_CAST(type, arg) static_cast<type>(arg)
+#else
+#define CK_CPP_CAST(type, arg) arg
+#endif
+
+#endif /* CK_CC_H */
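
A minimal sketch of CK_CC_CONTAINER: it generates a helper that maps a pointer
to an embedded member back to its enclosing structure, in the spirit of
container_of. ck_stack_entry_t comes from ck_stack.h, which is outside this
hunk.

#include <ck_cc.h>
#include <ck_stack.h>
#include <ck_stddef.h>

struct entry {
	int value;
	ck_stack_entry_t stack_entry;
};

/* Defines: static struct entry *entry_container(ck_stack_entry_t *). */
CK_CC_CONTAINER(ck_stack_entry_t, struct entry, stack_entry, entry_container)

static int
value_of(ck_stack_entry_t *cursor)
{

	/* Recover the enclosing struct entry from its embedded member. */
	return entry_container(cursor)->value;
}
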
diff --git a/include/ck_cohort.h b/include/ck_cohort.h
new file mode 100644
index 0000000..c13cfa0
--- /dev/null
+++ b/include/ck_cohort.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * Copyright 2013 Brendon Scheinman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_COHORT_H
+#define CK_COHORT_H
+
+/*
+ * This is an implementation of lock cohorts as described in:
+ * Dice, D.; Marathe, V.; and Shavit, N. 2012.
+ * Lock Cohorting: A General Technique for Designing NUMA Locks
+ */
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <ck_stddef.h>
+
+enum ck_cohort_state {
+ CK_COHORT_STATE_GLOBAL = 0,
+ CK_COHORT_STATE_LOCAL = 1
+};
+
+#define CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT 10
+
+#define CK_COHORT_NAME(N) ck_cohort_##N
+#define CK_COHORT_INSTANCE(N) struct CK_COHORT_NAME(N)
+#define CK_COHORT_INIT(N, C, GL, LL, P) ck_cohort_##N##_init(C, GL, LL, P)
+#define CK_COHORT_LOCK(N, C, GC, LC) ck_cohort_##N##_lock(C, GC, LC)
+#define CK_COHORT_UNLOCK(N, C, GC, LC) ck_cohort_##N##_unlock(C, GC, LC)
+#define CK_COHORT_TRYLOCK(N, C, GLC, LLC, LUC) ck_cohort_##N##_trylock(C, GLC, LLC, LUC)
+#define CK_COHORT_LOCKED(N, C, GC, LC) ck_cohort_##N##_locked(C, GC, LC)
+
+#define CK_COHORT_PROTOTYPE(N, GL, GU, GI, LL, LU, LI) \
+ CK_COHORT_INSTANCE(N) { \
+ void *global_lock; \
+ void *local_lock; \
+ enum ck_cohort_state release_state; \
+ unsigned int waiting_threads; \
+ unsigned int acquire_count; \
+ unsigned int local_pass_limit; \
+ }; \
+ \
+ CK_CC_INLINE static void \
+ ck_cohort_##N##_init(struct ck_cohort_##N *cohort, \
+ void *global_lock, void *local_lock, unsigned int pass_limit) \
+ { \
+ cohort->global_lock = global_lock; \
+ cohort->local_lock = local_lock; \
+ cohort->release_state = CK_COHORT_STATE_GLOBAL; \
+ cohort->waiting_threads = 0; \
+ cohort->acquire_count = 0; \
+ cohort->local_pass_limit = pass_limit; \
+ ck_pr_barrier(); \
+ return; \
+ } \
+ \
+ CK_CC_INLINE static void \
+ ck_cohort_##N##_lock(CK_COHORT_INSTANCE(N) *cohort, \
+ void *global_context, void *local_context) \
+ { \
+ \
+ ck_pr_inc_uint(&cohort->waiting_threads); \
+ LL(cohort->local_lock, local_context); \
+ ck_pr_dec_uint(&cohort->waiting_threads); \
+ \
+ if (cohort->release_state == CK_COHORT_STATE_GLOBAL) { \
+ GL(cohort->global_lock, global_context); \
+ } \
+ \
+ ++cohort->acquire_count; \
+ return; \
+ } \
+ \
+ CK_CC_INLINE static void \
+ ck_cohort_##N##_unlock(CK_COHORT_INSTANCE(N) *cohort, \
+ void *global_context, void *local_context) \
+ { \
+ \
+ if (ck_pr_load_uint(&cohort->waiting_threads) > 0 \
+ && cohort->acquire_count < cohort->local_pass_limit) { \
+ cohort->release_state = CK_COHORT_STATE_LOCAL; \
+ } else { \
+ GU(cohort->global_lock, global_context); \
+ cohort->release_state = CK_COHORT_STATE_GLOBAL; \
+ cohort->acquire_count = 0; \
+ } \
+ \
+ ck_pr_fence_release(); \
+ LU(cohort->local_lock, local_context); \
+ \
+ return; \
+ } \
+ \
+ CK_CC_INLINE static bool \
+ ck_cohort_##N##_locked(CK_COHORT_INSTANCE(N) *cohort, \
+ void *global_context, void *local_context) \
+ { \
+ return GI(cohort->local_lock, local_context) || \
+ LI(cohort->global_lock, global_context); \
+ }
+
+#define CK_COHORT_TRYLOCK_PROTOTYPE(N, GL, GU, GI, GTL, LL, LU, LI, LTL) \
+ CK_COHORT_PROTOTYPE(N, GL, GU, GI, LL, LU, LI) \
+ CK_CC_INLINE static bool \
+ ck_cohort_##N##_trylock(CK_COHORT_INSTANCE(N) *cohort, \
+ void *global_context, void *local_context, \
+ void *local_unlock_context) \
+ { \
+ \
+ bool trylock_result; \
+ \
+ ck_pr_inc_uint(&cohort->waiting_threads); \
+ trylock_result = LTL(cohort->local_lock, local_context); \
+ ck_pr_dec_uint(&cohort->waiting_threads); \
+ if (trylock_result == false) { \
+ return false; \
+ } \
+ \
+ if (cohort->release_state == CK_COHORT_STATE_GLOBAL && \
+ GTL(cohort->global_lock, global_context) == false) { \
+ LU(cohort->local_lock, local_unlock_context); \
+ return false; \
+ } \
+ \
+ ++cohort->acquire_count; \
+ return true; \
+ }
+
+#define CK_COHORT_INITIALIZER { \
+ .global_lock = NULL, \
+ .local_lock = NULL, \
+ .release_state = CK_COHORT_STATE_GLOBAL, \
+ .waiting_threads = 0, \
+ .acquire_count = 0, \
+ .local_pass_limit = CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT \
+}
+
+#endif /* CK_COHORT_H */
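
A minimal sketch of the cohort macros above, pairing two ck_spinlock_t locks
(one global, one node-local). The ck_spinlock interface and
CK_SPINLOCK_INITIALIZER come from ck_spinlock.h, which is outside this hunk;
the wrappers adapt it to the two-argument shape the cohort macros expect.

#include <ck_cohort.h>
#include <ck_spinlock.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

static void
lock_with_context(void *lock, void *context)
{

	(void)context;
	ck_spinlock_lock(lock);
	return;
}

static void
unlock_with_context(void *lock, void *context)
{

	(void)context;
	ck_spinlock_unlock(lock);
	return;
}

static bool
locked_with_context(void *lock, void *context)
{

	(void)context;
	return ck_spinlock_locked(lock);
}

CK_COHORT_PROTOTYPE(example,
    lock_with_context, unlock_with_context, locked_with_context,
    lock_with_context, unlock_with_context, locked_with_context)

static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER;
static ck_spinlock_t local_lock = CK_SPINLOCK_INITIALIZER;
static CK_COHORT_INSTANCE(example) cohort;

static void
cohort_setup(void)
{

	CK_COHORT_INIT(example, &cohort, &global_lock, &local_lock,
	    CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
	return;
}

static void
with_cohort(void)
{

	CK_COHORT_LOCK(example, &cohort, NULL, NULL);
	/* Critical section protected by the cohort. */
	CK_COHORT_UNLOCK(example, &cohort, NULL, NULL);
	return;
}
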
diff --git a/include/ck_elide.h b/include/ck_elide.h
new file mode 100644
index 0000000..1b90041
--- /dev/null
+++ b/include/ck_elide.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_ELIDE_H
+#define CK_ELIDE_H
+
+/*
+ * As RTM is currently only supported on TSO x86 architectures,
+ * fences have been omitted. They will be necessary for other
+ * non-TSO architectures with TM support.
+ */
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <ck_string.h>
+
+/*
+ * skip_-prefixed counters represent the number of consecutive
+ * elisions to forfeit. retry_-prefixed counters represent the
+ * number of elision retries to attempt before forfeit.
+ *
+ * _busy: Lock was busy
+ * _other: Unknown explicit abort
+ * _conflict: Data conflict in elision section
+ */
+struct ck_elide_config {
+ unsigned short skip_busy;
+ short retry_busy;
+ unsigned short skip_other;
+ short retry_other;
+ unsigned short skip_conflict;
+ short retry_conflict;
+};
+
+#define CK_ELIDE_CONFIG_DEFAULT_INITIALIZER { \
+ .skip_busy = 5, \
+ .retry_busy = 256, \
+ .skip_other = 3, \
+ .retry_other = 3, \
+ .skip_conflict = 2, \
+ .retry_conflict = 5 \
+}
+
+struct ck_elide_stat {
+ unsigned int n_fallback;
+ unsigned int n_elide;
+ unsigned short skip;
+};
+typedef struct ck_elide_stat ck_elide_stat_t;
+
+#define CK_ELIDE_STAT_INITIALIZER { 0, 0, 0 }
+
+CK_CC_INLINE static void
+ck_elide_stat_init(ck_elide_stat_t *st)
+{
+
+ memset(st, 0, sizeof(*st));
+ return;
+}
+
+#ifdef CK_F_PR_RTM
+enum _ck_elide_hint {
+ CK_ELIDE_HINT_RETRY = 0,
+ CK_ELIDE_HINT_SPIN,
+ CK_ELIDE_HINT_STOP
+};
+
+#define CK_ELIDE_LOCK_BUSY 0xFF
+
+static enum _ck_elide_hint
+_ck_elide_fallback(int *retry,
+ struct ck_elide_stat *st,
+ struct ck_elide_config *c,
+ unsigned int status)
+{
+
+ st->n_fallback++;
+ if (*retry > 0)
+ return CK_ELIDE_HINT_RETRY;
+
+ if (st->skip != 0)
+ return CK_ELIDE_HINT_STOP;
+
+ if (status & CK_PR_RTM_EXPLICIT) {
+ if (CK_PR_RTM_CODE(status) == CK_ELIDE_LOCK_BUSY) {
+ st->skip = c->skip_busy;
+ *retry = c->retry_busy;
+ return CK_ELIDE_HINT_SPIN;
+ }
+
+ st->skip = c->skip_other;
+ return CK_ELIDE_HINT_STOP;
+ }
+
+ if ((status & CK_PR_RTM_RETRY) &&
+ (status & CK_PR_RTM_CONFLICT)) {
+ st->skip = c->skip_conflict;
+ *retry = c->retry_conflict;
+ return CK_ELIDE_HINT_RETRY;
+ }
+
+ /*
+	 * Capacity, debug and nesting aborts are likely to be invariant
+	 * conditions for this acquisition, so execute the regular path
+	 * instead. If the retry bit is not set, then take the hint.
+ */
+ st->skip = USHRT_MAX;
+ return CK_ELIDE_HINT_STOP;
+}
+
+/*
+ * Defines an elision implementation according to the following variables:
+ * N - Namespace of elision implementation.
+ * T - Typename of mutex.
+ * L_P - Lock predicate, returns false if resource is available.
+ * L - Function to call if resource is unavailable of transaction aborts.
+ * L - Function to call if resource is unavailable or transaction aborts.
+ * U - Function to call if transaction failed.
+ */
+#define CK_ELIDE_PROTOTYPE(N, T, L_P, L, U_P, U) \
+ CK_CC_INLINE static void \
+ ck_elide_##N##_lock_adaptive(T *lock, \
+ struct ck_elide_stat *st, \
+ struct ck_elide_config *c) \
+ { \
+ enum _ck_elide_hint hint; \
+ int retry; \
+ \
+ if (CK_CC_UNLIKELY(st->skip != 0)) { \
+ st->skip--; \
+ goto acquire; \
+ } \
+ \
+ retry = c->retry_conflict; \
+ do { \
+ unsigned int status = ck_pr_rtm_begin(); \
+ if (status == CK_PR_RTM_STARTED) { \
+ if (L_P(lock) == true) \
+ ck_pr_rtm_abort(CK_ELIDE_LOCK_BUSY); \
+ \
+ return; \
+ } \
+ \
+ hint = _ck_elide_fallback(&retry, st, c, status); \
+ if (hint == CK_ELIDE_HINT_RETRY) \
+ continue; \
+ \
+ if (hint == CK_ELIDE_HINT_SPIN) { \
+ while (--retry != 0) { \
+ if (L_P(lock) == false) \
+ break; \
+ \
+ ck_pr_stall(); \
+ } \
+ \
+ continue; \
+ } \
+ \
+ if (hint == CK_ELIDE_HINT_STOP) \
+ break; \
+ } while (CK_CC_LIKELY(--retry > 0)); \
+ \
+ acquire: \
+ L(lock); \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_elide_##N##_unlock_adaptive(struct ck_elide_stat *st, T *lock) \
+ { \
+ \
+ if (U_P(lock) == false) { \
+ ck_pr_rtm_end(); \
+ st->skip = 0; \
+ st->n_elide++; \
+ } else { \
+ U(lock); \
+ } \
+ \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_elide_##N##_lock(T *lock) \
+ { \
+ \
+ if (ck_pr_rtm_begin() != CK_PR_RTM_STARTED) { \
+ L(lock); \
+ return; \
+ } \
+ \
+ if (L_P(lock) == true) \
+ ck_pr_rtm_abort(CK_ELIDE_LOCK_BUSY); \
+ \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_elide_##N##_unlock(T *lock) \
+ { \
+ \
+ if (U_P(lock) == false) { \
+ ck_pr_rtm_end(); \
+ } else { \
+ U(lock); \
+ } \
+ \
+ return; \
+ }
+
+#define CK_ELIDE_TRYLOCK_PROTOTYPE(N, T, TL_P, TL) \
+ CK_CC_INLINE static bool \
+ ck_elide_##N##_trylock(T *lock) \
+ { \
+ \
+ if (ck_pr_rtm_begin() != CK_PR_RTM_STARTED) \
+ return false; \
+ \
+ if (TL_P(lock) == true) \
+ ck_pr_rtm_abort(CK_ELIDE_LOCK_BUSY); \
+ \
+ return true; \
+ }
+#else
+/*
+ * If RTM is not enabled on the target platform (CK_F_PR_RTM), then these
+ * elision wrappers call directly into the user-specified lock operations.
+ * Unfortunately, the storage cost of both ck_elide_config and ck_elide_stat
+ * is still paid (typically a storage cost that is a function of lock objects
+ * and thread count).
+ */
+#define CK_ELIDE_PROTOTYPE(N, T, L_P, L, U_P, U) \
+ CK_CC_INLINE static void \
+ ck_elide_##N##_lock_adaptive(T *lock, \
+ struct ck_elide_stat *st, \
+ struct ck_elide_config *c) \
+ { \
+ \
+ (void)st; \
+ (void)c; \
+ L(lock); \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_elide_##N##_unlock_adaptive(struct ck_elide_stat *st, \
+ T *lock) \
+ { \
+ \
+ (void)st; \
+ U(lock); \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_elide_##N##_lock(T *lock) \
+ { \
+ \
+ L(lock); \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_elide_##N##_unlock(T *lock) \
+ { \
+ \
+ U(lock); \
+ return; \
+ }
+
+#define CK_ELIDE_TRYLOCK_PROTOTYPE(N, T, TL_P, TL) \
+ CK_CC_INLINE static bool \
+ ck_elide_##N##_trylock(T *lock) \
+ { \
+ \
+ return TL_P(lock); \
+ }
+#endif /* !CK_F_PR_RTM */
+
+/*
+ * Best-effort elision lock operations. First argument is name (N)
+ * associated with implementation and the second is a pointer to
+ * the type specified above (T).
+ *
+ * Unlike the adaptive variant, this interface does not have any retry
+ * semantics. In environments where jitter is low, this may yield a tighter
+ * fast path.
+ */
+#define CK_ELIDE_LOCK(NAME, LOCK) ck_elide_##NAME##_lock(LOCK)
+#define CK_ELIDE_UNLOCK(NAME, LOCK) ck_elide_##NAME##_unlock(LOCK)
+#define CK_ELIDE_TRYLOCK(NAME, LOCK) ck_elide_##NAME##_trylock(LOCK)
+
+/*
+ * Adaptive elision lock operations. In addition to name and pointer
+ * to the lock, you must pass in a pointer to an initialized
+ * ck_elide_config structure along with a per-thread stat structure.
+ */
+#define CK_ELIDE_LOCK_ADAPTIVE(NAME, STAT, CONFIG, LOCK) \
+ ck_elide_##NAME##_lock_adaptive(LOCK, STAT, CONFIG)
+
+#define CK_ELIDE_UNLOCK_ADAPTIVE(NAME, STAT, LOCK) \
+ ck_elide_##NAME##_unlock_adaptive(STAT, LOCK)
+
+#endif /* CK_ELIDE_H */
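
A minimal sketch of the elision macros above, wrapping a ck_spinlock_t under a
private "example" namespace so it does not collide with the adapters that
ck_spinlock.h already provides; the spinlock interface itself is declared
outside this hunk.

#include <ck_elide.h>
#include <ck_spinlock.h>

CK_ELIDE_PROTOTYPE(example, ck_spinlock_t,
    ck_spinlock_locked, ck_spinlock_lock,
    ck_spinlock_locked, ck_spinlock_unlock)

static ck_spinlock_t lock = CK_SPINLOCK_INITIALIZER;
static struct ck_elide_config config = CK_ELIDE_CONFIG_DEFAULT_INITIALIZER;

/* One statistics object per thread (GCC/Clang thread-local storage). */
static __thread struct ck_elide_stat elide_stat = CK_ELIDE_STAT_INITIALIZER;

static void
with_elision(void)
{

	CK_ELIDE_LOCK_ADAPTIVE(example, &elide_stat, &config, &lock);
	/* Critical section: elided when RTM is available and profitable. */
	CK_ELIDE_UNLOCK_ADAPTIVE(example, &elide_stat, &lock);
	return;
}
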
diff --git a/include/ck_epoch.h b/include/ck_epoch.h
new file mode 100644
index 0000000..e7ce5bc
--- /dev/null
+++ b/include/ck_epoch.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_EPOCH_H
+#define CK_EPOCH_H
+
+/*
+ * The implementation here is inspired from the work described in:
+ * Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
+ * of Cambridge Computing Laboratory.
+ */
+
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_stack.h>
+#include <ck_stdbool.h>
+
+#ifndef CK_EPOCH_LENGTH
+#define CK_EPOCH_LENGTH 4
+#endif
+
+/*
+ * This is used for sense detection with respect to concurrent
+ * epoch sections.
+ */
+#define CK_EPOCH_SENSE (2)
+
+struct ck_epoch_entry;
+typedef struct ck_epoch_entry ck_epoch_entry_t;
+typedef void ck_epoch_cb_t(ck_epoch_entry_t *);
+
+/*
+ * This should be embedded into objects you wish to be the target of
+ * ck_epoch_cb_t functions (with ck_epoch_call).
+ */
+struct ck_epoch_entry {
+ ck_epoch_cb_t *function;
+ ck_stack_entry_t stack_entry;
+};
+
+/*
+ * A section object may be passed to every begin-end pair to allow for
+ * forward progress guarantees within prolonged active sections.
+ */
+struct ck_epoch_section {
+ unsigned int bucket;
+};
+typedef struct ck_epoch_section ck_epoch_section_t;
+
+/*
+ * Return pointer to ck_epoch_entry container object.
+ */
+#define CK_EPOCH_CONTAINER(T, M, N) \
+ CK_CC_CONTAINER(struct ck_epoch_entry, T, M, N)
+
+struct ck_epoch_ref {
+ unsigned int epoch;
+ unsigned int count;
+};
+
+struct ck_epoch_record {
+ struct ck_epoch *global;
+ unsigned int state;
+ unsigned int epoch;
+ unsigned int active;
+ struct {
+ struct ck_epoch_ref bucket[CK_EPOCH_SENSE];
+ } local CK_CC_CACHELINE;
+ unsigned int n_pending;
+ unsigned int n_peak;
+ unsigned long n_dispatch;
+ ck_stack_t pending[CK_EPOCH_LENGTH];
+ ck_stack_entry_t record_next;
+} CK_CC_CACHELINE;
+typedef struct ck_epoch_record ck_epoch_record_t;
+
+struct ck_epoch {
+ unsigned int epoch;
+ char pad[CK_MD_CACHELINE - sizeof(unsigned int)];
+ ck_stack_t records;
+ unsigned int n_free;
+};
+typedef struct ck_epoch ck_epoch_t;
+
+/*
+ * Internal functions.
+ */
+void _ck_epoch_addref(ck_epoch_record_t *, ck_epoch_section_t *);
+void _ck_epoch_delref(ck_epoch_record_t *, ck_epoch_section_t *);
+
+/*
+ * Marks the beginning of an epoch-protected section.
+ */
+CK_CC_FORCE_INLINE static void
+ck_epoch_begin(ck_epoch_record_t *record, ck_epoch_section_t *section)
+{
+ struct ck_epoch *epoch = record->global;
+
+ /*
+	 * Only observe a new epoch if the thread is not recursing into a
+	 * read section.
+ */
+ if (record->active == 0) {
+ unsigned int g_epoch;
+
+ /*
+ * It is possible for loads to be re-ordered before the store
+ * is committed into the caller's epoch and active fields.
+ * For this reason, store to load serialization is necessary.
+ */
+#if defined(CK_MD_TSO)
+ ck_pr_fas_uint(&record->active, 1);
+ ck_pr_fence_atomic_load();
+#else
+ ck_pr_store_uint(&record->active, 1);
+ ck_pr_fence_memory();
+#endif
+
+ /*
+		 * This load is allowed to be re-ordered prior to setting the
+		 * active flag due to the monotonic nature of the global epoch.
+		 * However, stale values lead to measurable performance
+		 * degradation in some torture tests, so we disallow an early
+		 * load of the global epoch.
+ */
+ g_epoch = ck_pr_load_uint(&epoch->epoch);
+ ck_pr_store_uint(&record->epoch, g_epoch);
+ } else {
+ ck_pr_store_uint(&record->active, record->active + 1);
+ }
+
+ if (section != NULL)
+ _ck_epoch_addref(record, section);
+
+ return;
+}
+
+/*
+ * Marks the end of an epoch-protected section.
+ */
+CK_CC_FORCE_INLINE static void
+ck_epoch_end(ck_epoch_record_t *record, ck_epoch_section_t *section)
+{
+
+ ck_pr_fence_release();
+ ck_pr_store_uint(&record->active, record->active - 1);
+
+ if (section != NULL)
+ _ck_epoch_delref(record, section);
+
+ return;
+}
+
+/*
+ * Defers the execution of the function pointed to by the "function"
+ * argument until the global epoch counter has cycled. This allows for
+ * a non-blocking deferral.
+ */
+CK_CC_FORCE_INLINE static void
+ck_epoch_call(ck_epoch_record_t *record,
+ ck_epoch_entry_t *entry,
+ ck_epoch_cb_t *function)
+{
+ struct ck_epoch *epoch = record->global;
+ unsigned int e = ck_pr_load_uint(&epoch->epoch);
+ unsigned int offset = e & (CK_EPOCH_LENGTH - 1);
+
+ record->n_pending++;
+ entry->function = function;
+ ck_stack_push_spnc(&record->pending[offset], &entry->stack_entry);
+ return;
+}
+
+void ck_epoch_init(ck_epoch_t *);
+ck_epoch_record_t *ck_epoch_recycle(ck_epoch_t *);
+void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *);
+void ck_epoch_unregister(ck_epoch_record_t *);
+bool ck_epoch_poll(ck_epoch_record_t *);
+void ck_epoch_synchronize(ck_epoch_record_t *);
+void ck_epoch_barrier(ck_epoch_record_t *);
+void ck_epoch_reclaim(ck_epoch_record_t *);
+
+#endif /* CK_EPOCH_H */
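
To tie the declarations above together, here is a minimal sketch of an epoch-protected reader and a deferred free. The node type and its member names are illustrative, and CK_EPOCH_CONTAINER is assumed to generate the usual container-of conversion helper.

/* Sketch: deferring reclamation of a node with ck_epoch_call. */
#include <ck_epoch.h>
#include <stdlib.h>

struct node {
	int value;
	ck_epoch_entry_t epoch_entry;
};

/* Defines node_container(), mapping an epoch entry back to its node. */
CK_EPOCH_CONTAINER(struct node, epoch_entry, node_container)

static ck_epoch_t epoch;
static ck_epoch_record_t record;

static void
node_destroy(ck_epoch_entry_t *e)
{

	free(node_container(e));
}

static void
example(struct node *n)
{

	ck_epoch_init(&epoch);
	ck_epoch_register(&epoch, &record);

	/* Readers bracket access to shared structures with begin/end. */
	ck_epoch_begin(&record, NULL);
	/* ... read shared structures ... */
	ck_epoch_end(&record, NULL);

	/* Writers defer destruction until no reader can still hold a reference. */
	ck_epoch_call(&record, &n->epoch_entry, node_destroy);
	ck_epoch_barrier(&record);
}
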
diff --git a/include/ck_fifo.h b/include/ck_fifo.h
new file mode 100644
index 0000000..6d50070
--- /dev/null
+++ b/include/ck_fifo.h
@@ -0,0 +1,478 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_FIFO_H
+#define CK_FIFO_H
+
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_spinlock.h>
+#include <ck_stddef.h>
+
+#ifndef CK_F_FIFO_SPSC
+#define CK_F_FIFO_SPSC
+struct ck_fifo_spsc_entry {
+ void *value;
+ struct ck_fifo_spsc_entry *next;
+};
+typedef struct ck_fifo_spsc_entry ck_fifo_spsc_entry_t;
+
+struct ck_fifo_spsc {
+ ck_spinlock_t m_head;
+ struct ck_fifo_spsc_entry *head;
+ char pad[CK_MD_CACHELINE - sizeof(struct ck_fifo_spsc_entry *) - sizeof(ck_spinlock_t)];
+ ck_spinlock_t m_tail;
+ struct ck_fifo_spsc_entry *tail;
+ struct ck_fifo_spsc_entry *head_snapshot;
+ struct ck_fifo_spsc_entry *garbage;
+};
+typedef struct ck_fifo_spsc ck_fifo_spsc_t;
+
+CK_CC_INLINE static bool
+ck_fifo_spsc_enqueue_trylock(struct ck_fifo_spsc *fifo)
+{
+
+ return ck_spinlock_trylock(&fifo->m_tail);
+}
+
+CK_CC_INLINE static void
+ck_fifo_spsc_enqueue_lock(struct ck_fifo_spsc *fifo)
+{
+
+ ck_spinlock_lock(&fifo->m_tail);
+ return;
+}
+
+CK_CC_INLINE static void
+ck_fifo_spsc_enqueue_unlock(struct ck_fifo_spsc *fifo)
+{
+
+ ck_spinlock_unlock(&fifo->m_tail);
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_fifo_spsc_dequeue_trylock(struct ck_fifo_spsc *fifo)
+{
+
+ return ck_spinlock_trylock(&fifo->m_head);
+}
+
+CK_CC_INLINE static void
+ck_fifo_spsc_dequeue_lock(struct ck_fifo_spsc *fifo)
+{
+
+ ck_spinlock_lock(&fifo->m_head);
+ return;
+}
+
+CK_CC_INLINE static void
+ck_fifo_spsc_dequeue_unlock(struct ck_fifo_spsc *fifo)
+{
+
+ ck_spinlock_unlock(&fifo->m_head);
+ return;
+}
+
+CK_CC_INLINE static void
+ck_fifo_spsc_init(struct ck_fifo_spsc *fifo, struct ck_fifo_spsc_entry *stub)
+{
+
+ ck_spinlock_init(&fifo->m_head);
+ ck_spinlock_init(&fifo->m_tail);
+
+ stub->next = NULL;
+ fifo->head = fifo->tail = fifo->head_snapshot = fifo->garbage = stub;
+ return;
+}
+
+CK_CC_INLINE static void
+ck_fifo_spsc_deinit(struct ck_fifo_spsc *fifo, struct ck_fifo_spsc_entry **garbage)
+{
+
+ *garbage = fifo->head;
+ fifo->head = fifo->tail = NULL;
+ return;
+}
+
+CK_CC_INLINE static void
+ck_fifo_spsc_enqueue(struct ck_fifo_spsc *fifo,
+ struct ck_fifo_spsc_entry *entry,
+ void *value)
+{
+
+ entry->value = value;
+ entry->next = NULL;
+
+ /* If stub->next is visible, guarantee that entry is consistent. */
+ ck_pr_fence_store();
+ ck_pr_store_ptr(&fifo->tail->next, entry);
+ fifo->tail = entry;
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_fifo_spsc_dequeue(struct ck_fifo_spsc *fifo, void *value)
+{
+ struct ck_fifo_spsc_entry *entry;
+
+ /*
+ * The head pointer is guaranteed to always point to a stub entry.
+ * If the stub entry does not point to an entry, then the queue is
+ * empty.
+ */
+ entry = ck_pr_load_ptr(&fifo->head->next);
+ if (entry == NULL)
+ return false;
+
+ /* If entry is visible, guarantee store to value is visible. */
+ ck_pr_store_ptr_unsafe(value, entry->value);
+ ck_pr_fence_store();
+ ck_pr_store_ptr(&fifo->head, entry);
+ return true;
+}
+
+/*
+ * Recycle a node. This technique for recycling nodes is based on
+ * Dmitriy Vyukov's work.
+ */
+CK_CC_INLINE static struct ck_fifo_spsc_entry *
+ck_fifo_spsc_recycle(struct ck_fifo_spsc *fifo)
+{
+ struct ck_fifo_spsc_entry *garbage;
+
+ if (fifo->head_snapshot == fifo->garbage) {
+ fifo->head_snapshot = ck_pr_load_ptr(&fifo->head);
+ if (fifo->head_snapshot == fifo->garbage)
+ return NULL;
+ }
+
+ garbage = fifo->garbage;
+ fifo->garbage = garbage->next;
+ return garbage;
+}
+
+CK_CC_INLINE static bool
+ck_fifo_spsc_isempty(struct ck_fifo_spsc *fifo)
+{
+ struct ck_fifo_spsc_entry *head = ck_pr_load_ptr(&fifo->head);
+ return ck_pr_load_ptr(&head->next) == NULL;
+}
+
+#define CK_FIFO_SPSC_ISEMPTY(f) ((f)->head->next == NULL)
+#define CK_FIFO_SPSC_FIRST(f) ((f)->head->next)
+#define CK_FIFO_SPSC_NEXT(m) ((m)->next)
+#define CK_FIFO_SPSC_SPARE(f) ((f)->head)
+#define CK_FIFO_SPSC_FOREACH(fifo, entry) \
+ for ((entry) = CK_FIFO_SPSC_FIRST(fifo); \
+ (entry) != NULL; \
+ (entry) = CK_FIFO_SPSC_NEXT(entry))
+#define CK_FIFO_SPSC_FOREACH_SAFE(fifo, entry, T) \
+ for ((entry) = CK_FIFO_SPSC_FIRST(fifo); \
+ (entry) != NULL && ((T) = (entry)->next, 1); \
+ (entry) = (T))
+
+#endif /* CK_F_FIFO_SPSC */
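
A minimal single-producer/single-consumer sketch of the interface above; the stub allocation and the recycle-then-allocate policy are illustrative.

/* Sketch: SPSC queue with node recycling. */
#include <ck_fifo.h>
#include <stdlib.h>

static ck_fifo_spsc_t fifo;

static void
spsc_example(void *item)
{
	ck_fifo_spsc_entry_t *entry, *stub;
	void *value;

	stub = malloc(sizeof *stub);
	ck_fifo_spsc_init(&fifo, stub);

	/* Producer: prefer a recycled node, fall back to allocation. */
	entry = ck_fifo_spsc_recycle(&fifo);
	if (entry == NULL)
		entry = malloc(sizeof *entry);
	ck_fifo_spsc_enqueue(&fifo, entry, item);

	/* Consumer: returns false when the queue is empty. */
	if (ck_fifo_spsc_dequeue(&fifo, &value) == true) {
		/* ... use value ... */
	}
}
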
+
+#ifdef CK_F_PR_CAS_PTR_2
+#ifndef CK_F_FIFO_MPMC
+#define CK_F_FIFO_MPMC
+struct ck_fifo_mpmc_entry;
+struct ck_fifo_mpmc_pointer {
+ struct ck_fifo_mpmc_entry *pointer;
+ char *generation CK_CC_PACKED;
+} CK_CC_ALIGN(16);
+
+struct ck_fifo_mpmc_entry {
+ void *value;
+ struct ck_fifo_mpmc_pointer next;
+};
+typedef struct ck_fifo_mpmc_entry ck_fifo_mpmc_entry_t;
+
+struct ck_fifo_mpmc {
+ struct ck_fifo_mpmc_pointer head;
+ char pad[CK_MD_CACHELINE - sizeof(struct ck_fifo_mpmc_pointer)];
+ struct ck_fifo_mpmc_pointer tail;
+};
+typedef struct ck_fifo_mpmc ck_fifo_mpmc_t;
+
+CK_CC_INLINE static void
+ck_fifo_mpmc_init(struct ck_fifo_mpmc *fifo, struct ck_fifo_mpmc_entry *stub)
+{
+
+ stub->next.pointer = NULL;
+ stub->next.generation = NULL;
+ fifo->head.pointer = fifo->tail.pointer = stub;
+ fifo->head.generation = fifo->tail.generation = NULL;
+ return;
+}
+
+CK_CC_INLINE static void
+ck_fifo_mpmc_deinit(struct ck_fifo_mpmc *fifo, struct ck_fifo_mpmc_entry **garbage)
+{
+
+ *garbage = fifo->head.pointer;
+ fifo->head.pointer = fifo->tail.pointer = NULL;
+ return;
+}
+
+CK_CC_INLINE static void
+ck_fifo_mpmc_enqueue(struct ck_fifo_mpmc *fifo,
+ struct ck_fifo_mpmc_entry *entry,
+ void *value)
+{
+ struct ck_fifo_mpmc_pointer tail, next, update;
+
+ /*
+ * Prepare the upcoming node and make sure to commit the updates
+ * before publishing.
+ */
+ entry->value = value;
+ entry->next.pointer = NULL;
+ entry->next.generation = 0;
+ ck_pr_fence_store_atomic();
+
+ for (;;) {
+ tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
+ ck_pr_fence_load();
+ tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);
+ next.generation = ck_pr_load_ptr(&tail.pointer->next.generation);
+ ck_pr_fence_load();
+ next.pointer = ck_pr_load_ptr(&tail.pointer->next.pointer);
+
+ if (ck_pr_load_ptr(&fifo->tail.generation) != tail.generation)
+ continue;
+
+ if (next.pointer != NULL) {
+ /*
+ * If the tail pointer has an entry following it then
+ * it needs to be forwarded to the next entry. This
+ * helps us guarantee we are always operating on the
+ * last entry.
+ */
+ update.pointer = next.pointer;
+ update.generation = tail.generation + 1;
+ ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
+ } else {
+ /*
+ * Attempt to commit new entry to the end of the
+ * current tail.
+ */
+ update.pointer = entry;
+ update.generation = next.generation + 1;
+ if (ck_pr_cas_ptr_2(&tail.pointer->next, &next, &update) == true)
+ break;
+ }
+ }
+
+ ck_pr_fence_atomic();
+
+ /* After a successful insert, forward the tail to the new entry. */
+ update.generation = tail.generation + 1;
+ ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_fifo_mpmc_tryenqueue(struct ck_fifo_mpmc *fifo,
+ struct ck_fifo_mpmc_entry *entry,
+ void *value)
+{
+ struct ck_fifo_mpmc_pointer tail, next, update;
+
+ entry->value = value;
+ entry->next.pointer = NULL;
+ entry->next.generation = 0;
+
+ ck_pr_fence_store_atomic();
+
+ tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
+ ck_pr_fence_load();
+ tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);
+ next.generation = ck_pr_load_ptr(&tail.pointer->next.generation);
+ ck_pr_fence_load();
+ next.pointer = ck_pr_load_ptr(&tail.pointer->next.pointer);
+
+ if (ck_pr_load_ptr(&fifo->tail.generation) != tail.generation)
+ return false;
+
+ if (next.pointer != NULL) {
+ /*
+ * If the tail pointer has an entry following it then
+ * it needs to be forwarded to the next entry. This
+ * helps us guarantee we are always operating on the
+ * last entry.
+ */
+ update.pointer = next.pointer;
+ update.generation = tail.generation + 1;
+ ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
+ return false;
+ } else {
+ /*
+ * Attempt to commit new entry to the end of the
+ * current tail.
+ */
+ update.pointer = entry;
+ update.generation = next.generation + 1;
+ if (ck_pr_cas_ptr_2(&tail.pointer->next, &next, &update) == false)
+ return false;
+ }
+
+ ck_pr_fence_atomic();
+
+ /* After a successful insert, forward the tail to the new entry. */
+ update.generation = tail.generation + 1;
+ ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
+ return true;
+}
+
+CK_CC_INLINE static bool
+ck_fifo_mpmc_dequeue(struct ck_fifo_mpmc *fifo,
+ void *value,
+ struct ck_fifo_mpmc_entry **garbage)
+{
+ struct ck_fifo_mpmc_pointer head, tail, next, update;
+
+ for (;;) {
+ head.generation = ck_pr_load_ptr(&fifo->head.generation);
+ ck_pr_fence_load();
+ head.pointer = ck_pr_load_ptr(&fifo->head.pointer);
+ tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
+ ck_pr_fence_load();
+ tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);
+
+ next.generation = ck_pr_load_ptr(&head.pointer->next.generation);
+ ck_pr_fence_load();
+ next.pointer = ck_pr_load_ptr(&head.pointer->next.pointer);
+
+ update.pointer = next.pointer;
+ if (head.pointer == tail.pointer) {
+ /*
+ * The head is guaranteed to always point at a stub
+ * entry. If the stub entry has no references then the
+ * queue is empty.
+ */
+ if (next.pointer == NULL)
+ return false;
+
+ /* Forward the tail pointer if necessary. */
+ update.generation = tail.generation + 1;
+ ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
+ } else {
+ /*
+			 * It is possible for the head snapshot to have been
+			 * re-used. Avoid dereferencing it during enqueue
+			 * re-use.
+ */
+ if (next.pointer == NULL)
+ continue;
+
+ /* Save value before commit. */
+ *(void **)value = ck_pr_load_ptr(&next.pointer->value);
+
+ /* Forward the head pointer to the next entry. */
+ update.generation = head.generation + 1;
+ if (ck_pr_cas_ptr_2(&fifo->head, &head, &update) == true)
+ break;
+ }
+ }
+
+ *garbage = head.pointer;
+ return true;
+}
+
+CK_CC_INLINE static bool
+ck_fifo_mpmc_trydequeue(struct ck_fifo_mpmc *fifo,
+ void *value,
+ struct ck_fifo_mpmc_entry **garbage)
+{
+ struct ck_fifo_mpmc_pointer head, tail, next, update;
+
+ head.generation = ck_pr_load_ptr(&fifo->head.generation);
+ ck_pr_fence_load();
+ head.pointer = ck_pr_load_ptr(&fifo->head.pointer);
+
+ tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
+ ck_pr_fence_load();
+ tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);
+
+ next.generation = ck_pr_load_ptr(&head.pointer->next.generation);
+ ck_pr_fence_load();
+ next.pointer = ck_pr_load_ptr(&head.pointer->next.pointer);
+
+ update.pointer = next.pointer;
+ if (head.pointer == tail.pointer) {
+ /*
+ * The head is guaranteed to always point at a stub
+ * entry. If the stub entry has no references then the
+ * queue is empty.
+ */
+ if (next.pointer == NULL)
+ return false;
+
+ /* Forward the tail pointer if necessary. */
+ update.generation = tail.generation + 1;
+ ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
+ return false;
+ } else {
+ /*
+		 * It is possible for the head snapshot to have been
+		 * re-used. Avoid dereferencing it during enqueue.
+ */
+ if (next.pointer == NULL)
+ return false;
+
+ /* Save value before commit. */
+ *(void **)value = ck_pr_load_ptr(&next.pointer->value);
+
+ /* Forward the head pointer to the next entry. */
+ update.generation = head.generation + 1;
+ if (ck_pr_cas_ptr_2(&fifo->head, &head, &update) == false)
+ return false;
+ }
+
+ *garbage = head.pointer;
+ return true;
+}
+
+#define CK_FIFO_MPMC_ISEMPTY(f) ((f)->head.pointer->next.pointer == NULL)
+#define CK_FIFO_MPMC_FIRST(f) ((f)->head.pointer->next.pointer)
+#define CK_FIFO_MPMC_NEXT(m) ((m)->next.pointer)
+#define CK_FIFO_MPMC_FOREACH(fifo, entry) \
+ for ((entry) = CK_FIFO_MPMC_FIRST(fifo); \
+ (entry) != NULL; \
+ (entry) = CK_FIFO_MPMC_NEXT(entry))
+#define CK_FIFO_MPMC_FOREACH_SAFE(fifo, entry, T) \
+ for ((entry) = CK_FIFO_MPMC_FIRST(fifo); \
+ (entry) != NULL && ((T) = (entry)->next.pointer, 1); \
+ (entry) = (T))
+
+#endif /* CK_F_FIFO_MPMC */
+#endif /* CK_F_PR_CAS_PTR_2 */
+
+#endif /* CK_FIFO_H */
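
For the many-producer/many-consumer variant (compiled only when a double-width compare-and-swap, CK_F_PR_CAS_PTR_2, is available), a usage sketch follows. Dequeued nodes are handed back through the garbage pointer and must not be freed immediately; some safe memory reclamation scheme such as ck_epoch or ck_hp is assumed to take care of them.

/* Sketch: MPMC queue usage; garbage nodes need deferred reclamation. */
#include <ck_fifo.h>
#include <stdlib.h>

static ck_fifo_mpmc_t fifo;

static void
mpmc_example(void *item)
{
	ck_fifo_mpmc_entry_t *entry, *garbage;
	void *value;

	ck_fifo_mpmc_init(&fifo, malloc(sizeof(ck_fifo_mpmc_entry_t)));

	entry = malloc(sizeof *entry);
	ck_fifo_mpmc_enqueue(&fifo, entry, item);

	if (ck_fifo_mpmc_dequeue(&fifo, &value, &garbage) == true) {
		/* ... use value; defer reclamation of garbage ... */
	}
}
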
diff --git a/include/ck_hp.h b/include/ck_hp.h
new file mode 100644
index 0000000..c7d8cfb
--- /dev/null
+++ b/include/ck_hp.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_HP_H
+#define CK_HP_H
+
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_stack.h>
+
+#ifndef CK_HP_CACHE
+#define CK_HP_CACHE 512
+#endif
+
+struct ck_hp_hazard;
+typedef void (*ck_hp_destructor_t)(void *);
+
+struct ck_hp {
+ ck_stack_t subscribers;
+ unsigned int n_subscribers;
+ unsigned int n_free;
+ unsigned int threshold;
+ unsigned int degree;
+ ck_hp_destructor_t destroy;
+};
+typedef struct ck_hp ck_hp_t;
+
+struct ck_hp_hazard {
+ void *pointer;
+ void *data;
+ ck_stack_entry_t pending_entry;
+};
+typedef struct ck_hp_hazard ck_hp_hazard_t;
+
+enum {
+ CK_HP_USED = 0,
+ CK_HP_FREE = 1
+};
+
+struct ck_hp_record {
+ int state;
+ void **pointers;
+ void *cache[CK_HP_CACHE];
+ struct ck_hp *global;
+ ck_stack_t pending;
+ unsigned int n_pending;
+ ck_stack_entry_t global_entry;
+ unsigned int n_peak;
+ uint64_t n_reclamations;
+} CK_CC_CACHELINE;
+typedef struct ck_hp_record ck_hp_record_t;
+
+CK_CC_INLINE static void
+ck_hp_set(struct ck_hp_record *record, unsigned int i, void *pointer)
+{
+
+ ck_pr_store_ptr(&record->pointers[i], pointer);
+ return;
+}
+
+CK_CC_INLINE static void
+ck_hp_set_fence(struct ck_hp_record *record, unsigned int i, void *pointer)
+{
+
+#ifdef CK_MD_TSO
+ ck_pr_fas_ptr(&record->pointers[i], pointer);
+#else
+ ck_pr_store_ptr(&record->pointers[i], pointer);
+ ck_pr_fence_memory();
+#endif
+
+ return;
+}
+
+CK_CC_INLINE static void
+ck_hp_clear(struct ck_hp_record *record)
+{
+ void **pointers = record->pointers;
+ unsigned int i;
+
+ for (i = 0; i < record->global->degree; i++)
+ *pointers++ = NULL;
+
+ return;
+}
+
+void ck_hp_init(ck_hp_t *, unsigned int, unsigned int, ck_hp_destructor_t);
+void ck_hp_set_threshold(ck_hp_t *, unsigned int);
+void ck_hp_register(ck_hp_t *, ck_hp_record_t *, void **);
+void ck_hp_unregister(ck_hp_record_t *);
+ck_hp_record_t *ck_hp_recycle(ck_hp_t *);
+void ck_hp_reclaim(ck_hp_record_t *);
+void ck_hp_free(ck_hp_record_t *, ck_hp_hazard_t *, void *, void *);
+void ck_hp_retire(ck_hp_record_t *, ck_hp_hazard_t *, void *, void *);
+void ck_hp_purge(ck_hp_record_t *);
+
+#endif /* CK_HP_H */
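
A sketch of the intended hazard-pointer protocol: publish the pointer that is about to be dereferenced into a slot, re-validate it against the shared location, and only then use it. The ck_hp_init argument order (degree before threshold) is an assumption here.

/* Sketch: protecting a load with a single hazard pointer slot. */
#include <ck_hp.h>
#include <stdlib.h>

static ck_hp_t hp;
static ck_hp_record_t record;
static void *slots[1];

static void
object_destroy(void *p)
{

	free(p);
}

static void
hp_example(void **shared)
{
	void *snapshot;

	ck_hp_init(&hp, 1 /* degree */, 64 /* threshold */, object_destroy);
	ck_hp_register(&hp, &record, slots);

	/* Publish the pointer, then re-validate it before dereferencing. */
	do {
		snapshot = ck_pr_load_ptr(shared);
		ck_hp_set_fence(&record, 0, snapshot);
	} while (snapshot != ck_pr_load_ptr(shared));

	/* ... snapshot is now safe to dereference ... */

	ck_hp_clear(&record);
}
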
diff --git a/include/ck_hp_fifo.h b/include/ck_hp_fifo.h
new file mode 100644
index 0000000..fd78ae6
--- /dev/null
+++ b/include/ck_hp_fifo.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_HP_FIFO_H
+#define CK_HP_FIFO_H
+
+#include <ck_cc.h>
+#include <ck_hp.h>
+#include <ck_pr.h>
+#include <ck_stddef.h>
+
+#define CK_HP_FIFO_SLOTS_COUNT (2)
+#define CK_HP_FIFO_SLOTS_SIZE (sizeof(void *) * CK_HP_FIFO_SLOTS_COUNT)
+
+/*
+ * Though it is possible to embed the hazard pointer state directly into the
+ * data structure, the cost of doing so still needs to be measured. If we were
+ * to embed it, every deferred reclamation would also incur a cache
+ * invalidation when linking into the hazard pointer pending queue, which may
+ * lead to severe cache line bouncing.
+ */
+struct ck_hp_fifo_entry {
+ void *value;
+ ck_hp_hazard_t hazard;
+ struct ck_hp_fifo_entry *next;
+};
+typedef struct ck_hp_fifo_entry ck_hp_fifo_entry_t;
+
+struct ck_hp_fifo {
+ struct ck_hp_fifo_entry *head;
+ struct ck_hp_fifo_entry *tail;
+};
+typedef struct ck_hp_fifo ck_hp_fifo_t;
+
+CK_CC_INLINE static void
+ck_hp_fifo_init(struct ck_hp_fifo *fifo, struct ck_hp_fifo_entry *stub)
+{
+
+ fifo->head = fifo->tail = stub;
+ stub->next = NULL;
+ return;
+}
+
+CK_CC_INLINE static void
+ck_hp_fifo_deinit(struct ck_hp_fifo *fifo, struct ck_hp_fifo_entry **stub)
+{
+
+ *stub = fifo->head;
+ fifo->head = fifo->tail = NULL;
+ return;
+}
+
+CK_CC_INLINE static void
+ck_hp_fifo_enqueue_mpmc(ck_hp_record_t *record,
+ struct ck_hp_fifo *fifo,
+ struct ck_hp_fifo_entry *entry,
+ void *value)
+{
+ struct ck_hp_fifo_entry *tail, *next;
+
+ entry->value = value;
+ entry->next = NULL;
+ ck_pr_fence_store_atomic();
+
+ for (;;) {
+ tail = ck_pr_load_ptr(&fifo->tail);
+ ck_hp_set_fence(record, 0, tail);
+ if (tail != ck_pr_load_ptr(&fifo->tail))
+ continue;
+
+ next = ck_pr_load_ptr(&tail->next);
+ if (next != NULL) {
+ ck_pr_cas_ptr(&fifo->tail, tail, next);
+ continue;
+ } else if (ck_pr_cas_ptr(&fifo->tail->next, next, entry) == true)
+ break;
+ }
+
+ ck_pr_fence_atomic();
+ ck_pr_cas_ptr(&fifo->tail, tail, entry);
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_hp_fifo_tryenqueue_mpmc(ck_hp_record_t *record,
+ struct ck_hp_fifo *fifo,
+ struct ck_hp_fifo_entry *entry,
+ void *value)
+{
+ struct ck_hp_fifo_entry *tail, *next;
+
+ entry->value = value;
+ entry->next = NULL;
+ ck_pr_fence_store_atomic();
+
+ tail = ck_pr_load_ptr(&fifo->tail);
+ ck_hp_set_fence(record, 0, tail);
+ if (tail != ck_pr_load_ptr(&fifo->tail))
+ return false;
+
+ next = ck_pr_load_ptr(&tail->next);
+ if (next != NULL) {
+ ck_pr_cas_ptr(&fifo->tail, tail, next);
+ return false;
+ } else if (ck_pr_cas_ptr(&fifo->tail->next, next, entry) == false)
+ return false;
+
+ ck_pr_fence_atomic();
+ ck_pr_cas_ptr(&fifo->tail, tail, entry);
+ return true;
+}
+
+CK_CC_INLINE static struct ck_hp_fifo_entry *
+ck_hp_fifo_dequeue_mpmc(ck_hp_record_t *record,
+ struct ck_hp_fifo *fifo,
+ void *value)
+{
+ struct ck_hp_fifo_entry *head, *tail, *next;
+
+ for (;;) {
+ head = ck_pr_load_ptr(&fifo->head);
+ ck_pr_fence_load();
+ tail = ck_pr_load_ptr(&fifo->tail);
+ ck_hp_set_fence(record, 0, head);
+ if (head != ck_pr_load_ptr(&fifo->head))
+ continue;
+
+ next = ck_pr_load_ptr(&head->next);
+ ck_hp_set_fence(record, 1, next);
+ if (head != ck_pr_load_ptr(&fifo->head))
+ continue;
+
+ if (head == tail) {
+ if (next == NULL)
+ return NULL;
+
+ ck_pr_cas_ptr(&fifo->tail, tail, next);
+ continue;
+ } else if (ck_pr_cas_ptr(&fifo->head, head, next) == true)
+ break;
+ }
+
+ ck_pr_store_ptr_unsafe(value, next->value);
+ return head;
+}
+
+CK_CC_INLINE static struct ck_hp_fifo_entry *
+ck_hp_fifo_trydequeue_mpmc(ck_hp_record_t *record,
+ struct ck_hp_fifo *fifo,
+ void *value)
+{
+ struct ck_hp_fifo_entry *head, *tail, *next;
+
+ head = ck_pr_load_ptr(&fifo->head);
+ ck_pr_fence_load();
+ tail = ck_pr_load_ptr(&fifo->tail);
+ ck_hp_set_fence(record, 0, head);
+ if (head != ck_pr_load_ptr(&fifo->head))
+ return NULL;
+
+ next = ck_pr_load_ptr(&head->next);
+ ck_hp_set_fence(record, 1, next);
+ if (head != ck_pr_load_ptr(&fifo->head))
+ return NULL;
+
+ if (head == tail) {
+ if (next == NULL)
+ return NULL;
+
+ ck_pr_cas_ptr(&fifo->tail, tail, next);
+ return NULL;
+ } else if (ck_pr_cas_ptr(&fifo->head, head, next) == false)
+ return NULL;
+
+ ck_pr_store_ptr_unsafe(value, next->value);
+ return head;
+}
+
+#define CK_HP_FIFO_ISEMPTY(f) ((f)->head->next == NULL)
+#define CK_HP_FIFO_FIRST(f) ((f)->head->next)
+#define CK_HP_FIFO_NEXT(m) ((m)->next)
+#define CK_HP_FIFO_FOREACH(fifo, entry) \
+ for ((entry) = CK_HP_FIFO_FIRST(fifo); \
+ (entry) != NULL; \
+ (entry) = CK_HP_FIFO_NEXT(entry))
+#define CK_HP_FIFO_FOREACH_SAFE(fifo, entry, T) \
+ for ((entry) = CK_HP_FIFO_FIRST(fifo); \
+ (entry) != NULL && ((T) = (entry)->next, 1); \
+ (entry) = (T))
+
+#endif /* CK_HP_FIFO_H */
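
Combining a ck_hp record with the queue above, the calling pattern might look as follows. Each participating thread needs CK_HP_FIFO_SLOTS_COUNT hazard slots; the exact way ck_hp_free is invoked for a retired node below is an assumption rather than something this header specifies.

/* Sketch: MPMC queue protected by hazard pointers. */
#include <ck_hp_fifo.h>
#include <stdlib.h>

static ck_hp_t hp;
static ck_hp_fifo_t fifo;

static void
hp_fifo_example(void *item)
{
	ck_hp_record_t record;
	ck_hp_fifo_entry_t *entry, *retired;
	void **slots;
	void *value;

	ck_hp_init(&hp, CK_HP_FIFO_SLOTS_COUNT, 64, free);
	slots = malloc(CK_HP_FIFO_SLOTS_SIZE);
	ck_hp_register(&hp, &record, slots);

	ck_hp_fifo_init(&fifo, malloc(sizeof(ck_hp_fifo_entry_t)));

	entry = malloc(sizeof *entry);
	ck_hp_fifo_enqueue_mpmc(&record, &fifo, entry, item);

	retired = ck_hp_fifo_dequeue_mpmc(&record, &fifo, &value);
	if (retired != NULL) {
		/* Defer destruction until no hazard pointer references the node. */
		ck_hp_free(&record, &retired->hazard, retired, retired);
	}
}
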
diff --git a/include/ck_hp_stack.h b/include/ck_hp_stack.h
new file mode 100644
index 0000000..fb5a1e3
--- /dev/null
+++ b/include/ck_hp_stack.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_HP_STACK_H
+#define CK_HP_STACK_H
+
+#include <ck_cc.h>
+#include <ck_hp.h>
+#include <ck_pr.h>
+#include <ck_stack.h>
+#include <ck_stddef.h>
+
+#define CK_HP_STACK_SLOTS_COUNT 1
+#define CK_HP_STACK_SLOTS_SIZE sizeof(void *)
+
+CK_CC_INLINE static void
+ck_hp_stack_push_mpmc(struct ck_stack *target, struct ck_stack_entry *entry)
+{
+
+ ck_stack_push_upmc(target, entry);
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_hp_stack_trypush_mpmc(struct ck_stack *target, struct ck_stack_entry *entry)
+{
+
+ return ck_stack_trypush_upmc(target, entry);
+}
+
+CK_CC_INLINE static struct ck_stack_entry *
+ck_hp_stack_pop_mpmc(ck_hp_record_t *record, struct ck_stack *target)
+{
+ struct ck_stack_entry *entry, *update;
+
+ do {
+ entry = ck_pr_load_ptr(&target->head);
+ if (entry == NULL)
+ return NULL;
+
+ ck_hp_set_fence(record, 0, entry);
+ } while (entry != ck_pr_load_ptr(&target->head));
+
+ while (ck_pr_cas_ptr_value(&target->head, entry, entry->next, &entry) == false) {
+ if (entry == NULL)
+ return NULL;
+
+ ck_hp_set_fence(record, 0, entry);
+
+ update = ck_pr_load_ptr(&target->head);
+ while (entry != update) {
+ ck_hp_set_fence(record, 0, update);
+ entry = update;
+ update = ck_pr_load_ptr(&target->head);
+ if (update == NULL)
+ return NULL;
+ }
+ }
+
+ return entry;
+}
+
+CK_CC_INLINE static bool
+ck_hp_stack_trypop_mpmc(ck_hp_record_t *record, struct ck_stack *target, struct ck_stack_entry **r)
+{
+ struct ck_stack_entry *entry;
+
+ entry = ck_pr_load_ptr(&target->head);
+ if (entry == NULL)
+ return false;
+
+ ck_hp_set_fence(record, 0, entry);
+ if (entry != ck_pr_load_ptr(&target->head))
+ goto leave;
+
+ if (ck_pr_cas_ptr_value(&target->head, entry, entry->next, &entry) == false)
+ goto leave;
+
+ *r = entry;
+ return true;
+
+leave:
+ ck_hp_set(record, 0, NULL);
+ return false;
+}
+
+#endif /* CK_HP_STACK_H */
diff --git a/include/ck_hs.h b/include/ck_hs.h
new file mode 100644
index 0000000..b3eb046
--- /dev/null
+++ b/include/ck_hs.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_HS_H
+#define CK_HS_H
+
+#include <ck_cc.h>
+#include <ck_malloc.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_stdint.h>
+#include <ck_stdbool.h>
+#include <ck_stddef.h>
+
+/*
+ * Indicates a single-writer many-reader workload. Mutually
+ * exclusive with CK_HS_MODE_MPMC.
+ */
+#define CK_HS_MODE_SPMC 1
+
+/*
+ * Indicates that values to be stored are not pointers but
+ * values. Allows for full precision. Mutually exclusive
+ * with CK_HS_MODE_OBJECT.
+ */
+#define CK_HS_MODE_DIRECT 2
+
+/*
+ * Indicates that the values to be stored are pointers.
+ * Allows for space optimizations in the presence of pointer
+ * packing. Mutually exclusive with CK_HS_MODE_DIRECT.
+ */
+#define CK_HS_MODE_OBJECT 8
+
+/*
+ * Indicates a delete-heavy workload. This will reduce the
+ * need for garbage collection at the cost of approximately
+ * 12% to 20% increased memory usage.
+ */
+#define CK_HS_MODE_DELETE 16
+
+/* Currently unsupported. */
+#define CK_HS_MODE_MPMC (void)
+
+/*
+ * Hash callback function.
+ */
+typedef unsigned long ck_hs_hash_cb_t(const void *, unsigned long);
+
+/*
+ * Returns pointer to object if objects are equivalent.
+ */
+typedef bool ck_hs_compare_cb_t(const void *, const void *);
+
+#if defined(CK_MD_POINTER_PACK_ENABLE) && defined(CK_MD_VMA_BITS)
+#define CK_HS_PP
+#define CK_HS_KEY_MASK ((1U << ((sizeof(void *) * 8) - CK_MD_VMA_BITS)) - 1)
+#endif
+
+struct ck_hs_map;
+struct ck_hs {
+ struct ck_malloc *m;
+ struct ck_hs_map *map;
+ unsigned int mode;
+ unsigned long seed;
+ ck_hs_hash_cb_t *hf;
+ ck_hs_compare_cb_t *compare;
+};
+typedef struct ck_hs ck_hs_t;
+
+struct ck_hs_stat {
+ unsigned long tombstones;
+ unsigned long n_entries;
+ unsigned int probe_maximum;
+};
+
+struct ck_hs_iterator {
+ void **cursor;
+ unsigned long offset;
+};
+typedef struct ck_hs_iterator ck_hs_iterator_t;
+
+#define CK_HS_ITERATOR_INITIALIZER { NULL, 0 }
+
+/* Convenience wrapper to table hash function. */
+#define CK_HS_HASH(T, F, K) F((K), (T)->seed)
+
+typedef void *ck_hs_apply_fn_t(void *, void *);
+bool ck_hs_apply(ck_hs_t *, unsigned long, const void *, ck_hs_apply_fn_t *, void *);
+void ck_hs_iterator_init(ck_hs_iterator_t *);
+bool ck_hs_next(ck_hs_t *, ck_hs_iterator_t *, void **);
+bool ck_hs_move(ck_hs_t *, ck_hs_t *, ck_hs_hash_cb_t *,
+ ck_hs_compare_cb_t *, struct ck_malloc *);
+bool ck_hs_init(ck_hs_t *, unsigned int, ck_hs_hash_cb_t *,
+ ck_hs_compare_cb_t *, struct ck_malloc *, unsigned long, unsigned long);
+void ck_hs_destroy(ck_hs_t *);
+void *ck_hs_get(ck_hs_t *, unsigned long, const void *);
+bool ck_hs_put(ck_hs_t *, unsigned long, const void *);
+bool ck_hs_put_unique(ck_hs_t *, unsigned long, const void *);
+bool ck_hs_set(ck_hs_t *, unsigned long, const void *, void **);
+bool ck_hs_fas(ck_hs_t *, unsigned long, const void *, void **);
+void *ck_hs_remove(ck_hs_t *, unsigned long, const void *);
+bool ck_hs_grow(ck_hs_t *, unsigned long);
+bool ck_hs_rebuild(ck_hs_t *);
+bool ck_hs_gc(ck_hs_t *, unsigned long, unsigned long);
+unsigned long ck_hs_count(ck_hs_t *);
+bool ck_hs_reset(ck_hs_t *);
+bool ck_hs_reset_size(ck_hs_t *, unsigned long);
+void ck_hs_stat(ck_hs_t *, struct ck_hs_stat *);
+
+#endif /* CK_HS_H */
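
A sketch of initializing and probing the set. The hash and comparison callbacks, the allocator wrappers, and the capacity and seed values are illustrative stand-ins for anything matching ck_hs_hash_cb_t, ck_hs_compare_cb_t and struct ck_malloc.

/* Sketch: a string-keyed hash set in SPMC object mode. */
#include <ck_hs.h>
#include <stdlib.h>
#include <string.h>

static void *hs_malloc(size_t r) { return malloc(r); }
static void hs_free(void *p, size_t b, bool r) { (void)b; (void)r; free(p); }
static struct ck_malloc allocator = { .malloc = hs_malloc, .free = hs_free };

static unsigned long
hs_hash(const void *object, unsigned long seed)
{
	const char *s = object;
	unsigned long h = seed;

	while (*s != '\0')
		h = (h * 131) + (unsigned char)*s++;

	return h;
}

static bool
hs_compare(const void *a, const void *b)
{

	return strcmp(a, b) == 0;
}

static void
hs_example(const char *key)
{
	ck_hs_t hs;
	unsigned long h;

	ck_hs_init(&hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC,
	    hs_hash, hs_compare, &allocator, 128, 6602834);

	h = CK_HS_HASH(&hs, hs_hash, key);
	ck_hs_put(&hs, h, key);

	if (ck_hs_get(&hs, h, key) != NULL) {
		/* ... key is present ... */
	}
}
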
diff --git a/include/ck_ht.h b/include/ck_ht.h
new file mode 100644
index 0000000..a949d30
--- /dev/null
+++ b/include/ck_ht.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_HT_H
+#define CK_HT_H
+
+#include <ck_pr.h>
+
+#define CK_F_HT
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_STORE_64)
+#define CK_HT_TYPE uint64_t
+#define CK_HT_TYPE_LOAD ck_pr_load_64
+#define CK_HT_TYPE_STORE ck_pr_store_64
+#define CK_HT_TYPE_MAX UINT64_MAX
+#else
+#define CK_HT_TYPE uint32_t
+#define CK_HT_TYPE_LOAD ck_pr_load_32
+#define CK_HT_TYPE_STORE ck_pr_store_32
+#define CK_HT_TYPE_MAX UINT32_MAX
+#endif
+
+
+#include <ck_cc.h>
+#include <ck_malloc.h>
+#include <ck_md.h>
+#include <ck_stdint.h>
+#include <ck_stdbool.h>
+#include <ck_stddef.h>
+
+struct ck_ht_hash {
+ uint64_t value;
+};
+typedef struct ck_ht_hash ck_ht_hash_t;
+
+#define CK_HT_MODE_DIRECT 1U
+#define CK_HT_MODE_BYTESTRING 2U
+#define CK_HT_WORKLOAD_DELETE 4U
+
+#if defined(CK_MD_POINTER_PACK_ENABLE) && defined(CK_MD_VMA_BITS)
+#define CK_HT_PP
+#define CK_HT_KEY_LENGTH ((sizeof(void *) * 8) - CK_MD_VMA_BITS)
+#define CK_HT_KEY_MASK ((1U << CK_HT_KEY_LENGTH) - 1)
+#else
+#define CK_HT_KEY_LENGTH 65535U
+#endif
+
+struct ck_ht_entry {
+#ifdef CK_HT_PP
+ uintptr_t key;
+ uintptr_t value CK_CC_PACKED;
+} CK_CC_ALIGN(16);
+#else
+ uintptr_t key;
+ uintptr_t value;
+ CK_HT_TYPE key_length;
+ CK_HT_TYPE hash;
+} CK_CC_ALIGN(32);
+#endif
+typedef struct ck_ht_entry ck_ht_entry_t;
+
+/*
+ * The user is free to define their own stub values.
+ */
+#ifndef CK_HT_KEY_EMPTY
+#define CK_HT_KEY_EMPTY ((uintptr_t)0)
+#endif
+
+#ifndef CK_HT_KEY_TOMBSTONE
+#define CK_HT_KEY_TOMBSTONE (~CK_HT_KEY_EMPTY)
+#endif
+
+/*
+ * Hash callback function. The first argument is updated to contain a hash
+ * value, the second argument is the key, the third argument is the key
+ * length, and the final argument is the hash table seed value.
+ */
+typedef void ck_ht_hash_cb_t(ck_ht_hash_t *, const void *, size_t, uint64_t);
+
+struct ck_ht_map;
+struct ck_ht {
+ struct ck_malloc *m;
+ struct ck_ht_map *map;
+ unsigned int mode;
+ uint64_t seed;
+ ck_ht_hash_cb_t *h;
+};
+typedef struct ck_ht ck_ht_t;
+
+struct ck_ht_stat {
+ uint64_t probe_maximum;
+ uint64_t n_entries;
+};
+
+struct ck_ht_iterator {
+ struct ck_ht_entry *current;
+ uint64_t offset;
+};
+typedef struct ck_ht_iterator ck_ht_iterator_t;
+
+#define CK_HT_ITERATOR_INITIALIZER { NULL, 0 }
+
+CK_CC_INLINE static void
+ck_ht_iterator_init(struct ck_ht_iterator *iterator)
+{
+
+ iterator->current = NULL;
+ iterator->offset = 0;
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_ht_entry_empty(ck_ht_entry_t *entry)
+{
+
+ return entry->key == CK_HT_KEY_EMPTY;
+}
+
+CK_CC_INLINE static void
+ck_ht_entry_key_set_direct(ck_ht_entry_t *entry, uintptr_t key)
+{
+
+ entry->key = key;
+ return;
+}
+
+CK_CC_INLINE static void
+ck_ht_entry_key_set(ck_ht_entry_t *entry, const void *key, uint16_t key_length)
+{
+
+#ifdef CK_HT_PP
+ entry->key = (uintptr_t)key | ((uintptr_t)key_length << CK_MD_VMA_BITS);
+#else
+ entry->key = (uintptr_t)key;
+ entry->key_length = key_length;
+#endif
+
+ return;
+}
+
+CK_CC_INLINE static void *
+ck_ht_entry_key(ck_ht_entry_t *entry)
+{
+
+#ifdef CK_HT_PP
+ return (void *)(entry->key & (((uintptr_t)1 << CK_MD_VMA_BITS) - 1));
+#else
+ return (void *)entry->key;
+#endif
+}
+
+CK_CC_INLINE static uint16_t
+ck_ht_entry_key_length(ck_ht_entry_t *entry)
+{
+
+#ifdef CK_HT_PP
+ return entry->key >> CK_MD_VMA_BITS;
+#else
+ return entry->key_length;
+#endif
+}
+
+CK_CC_INLINE static void *
+ck_ht_entry_value(ck_ht_entry_t *entry)
+{
+
+#ifdef CK_HT_PP
+ return (void *)(entry->value & (((uintptr_t)1 << CK_MD_VMA_BITS) - 1));
+#else
+ return (void *)entry->value;
+#endif
+}
+
+CK_CC_INLINE static void
+ck_ht_entry_set(struct ck_ht_entry *entry,
+ ck_ht_hash_t h,
+ const void *key,
+ uint16_t key_length,
+ const void *value)
+{
+
+#ifdef CK_HT_PP
+ entry->key = (uintptr_t)key | ((uintptr_t)key_length << CK_MD_VMA_BITS);
+ entry->value = (uintptr_t)value | ((uintptr_t)(h.value >> 32) << CK_MD_VMA_BITS);
+#else
+ entry->key = (uintptr_t)key;
+ entry->value = (uintptr_t)value;
+ entry->key_length = key_length;
+ entry->hash = h.value;
+#endif
+
+ return;
+}
+
+CK_CC_INLINE static void
+ck_ht_entry_set_direct(struct ck_ht_entry *entry,
+ ck_ht_hash_t h,
+ uintptr_t key,
+ uintptr_t value)
+{
+
+ entry->key = key;
+ entry->value = value;
+
+#ifndef CK_HT_PP
+ entry->hash = h.value;
+#else
+ (void)h;
+#endif
+ return;
+}
+
+CK_CC_INLINE static uintptr_t
+ck_ht_entry_key_direct(ck_ht_entry_t *entry)
+{
+
+ return entry->key;
+}
+
+CK_CC_INLINE static uintptr_t
+ck_ht_entry_value_direct(ck_ht_entry_t *entry)
+{
+
+ return entry->value;
+}
+
+/*
+ * Iteration must occur without any concurrent mutations on
+ * the hash table.
+ */
+bool ck_ht_next(ck_ht_t *, ck_ht_iterator_t *, ck_ht_entry_t **entry);
+
+void ck_ht_stat(ck_ht_t *, struct ck_ht_stat *);
+void ck_ht_hash(ck_ht_hash_t *, ck_ht_t *, const void *, uint16_t);
+void ck_ht_hash_direct(ck_ht_hash_t *, ck_ht_t *, uintptr_t);
+bool ck_ht_init(ck_ht_t *, unsigned int, ck_ht_hash_cb_t *,
+ struct ck_malloc *, CK_HT_TYPE, uint64_t);
+void ck_ht_destroy(ck_ht_t *);
+bool ck_ht_set_spmc(ck_ht_t *, ck_ht_hash_t, ck_ht_entry_t *);
+bool ck_ht_put_spmc(ck_ht_t *, ck_ht_hash_t, ck_ht_entry_t *);
+bool ck_ht_get_spmc(ck_ht_t *, ck_ht_hash_t, ck_ht_entry_t *);
+bool ck_ht_gc(struct ck_ht *, unsigned long, unsigned long);
+bool ck_ht_grow_spmc(ck_ht_t *, CK_HT_TYPE);
+bool ck_ht_remove_spmc(ck_ht_t *, ck_ht_hash_t, ck_ht_entry_t *);
+bool ck_ht_reset_spmc(ck_ht_t *);
+bool ck_ht_reset_size_spmc(ck_ht_t *, CK_HT_TYPE);
+CK_HT_TYPE ck_ht_count(ck_ht_t *);
+
+#endif /* CK_HT_H */
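
A byte-string mode sketch. The hash callback is a simple FNV-style stand-in that merely matches the ck_ht_hash_cb_t signature, and the allocator wrapper mirrors the one shown for ck_hs.

/* Sketch: byte-string keyed hash table in SPMC mode. */
#include <ck_ht.h>
#include <stdlib.h>
#include <string.h>

static void *ht_malloc(size_t r) { return malloc(r); }
static void ht_free(void *p, size_t b, bool r) { (void)b; (void)r; free(p); }
static struct ck_malloc allocator = { .malloc = ht_malloc, .free = ht_free };

static void
ht_hash_cb(ck_ht_hash_t *h, const void *key, size_t length, uint64_t seed)
{
	const unsigned char *p = key;
	uint64_t v = seed ^ 14695981039346656037ULL;
	size_t i;

	for (i = 0; i < length; i++)
		v = (v ^ p[i]) * 1099511628211ULL;

	h->value = v;
}

static void
ht_example(const char *key, void *value)
{
	ck_ht_t ht;
	ck_ht_entry_t entry;
	ck_ht_hash_t h;
	uint16_t length = (uint16_t)strlen(key);

	ck_ht_init(&ht, CK_HT_MODE_BYTESTRING, ht_hash_cb, &allocator, 64, 6602834);

	ck_ht_hash(&h, &ht, key, length);
	ck_ht_entry_set(&entry, h, key, length, value);
	ck_ht_put_spmc(&ht, h, &entry);

	/* Lookups populate the entry with the stored key/value pair. */
	ck_ht_entry_key_set(&entry, key, length);
	if (ck_ht_get_spmc(&ht, h, &entry) == true) {
		/* ... ck_ht_entry_value(&entry) holds the mapping ... */
	}
}
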
diff --git a/include/ck_limits.h b/include/ck_limits.h
new file mode 100644
index 0000000..c874955
--- /dev/null
+++ b/include/ck_limits.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__linux__) && defined(__KERNEL__)
+#include <linux/kernel.h>
+
+#ifndef UINT8_MAX
+#define UINT8_MAX ((u8)(~0U))
+#endif
+#ifndef UINT16_MAX
+#define UINT16_MAX USHRT_MAX
+#endif
+#ifndef UINT32_MAX
+#define UINT32_MAX UINT_MAX
+#endif
+#ifndef UINT64_MAX
+#define UINT64_MAX ULLONG_MAX
+#endif
+
+#elif defined(__FreeBSD__) && defined(_KERNEL)
+#include <sys/stdint.h>
+#include <sys/limits.h>
+#else
+#include <limits.h>
+#endif /* __linux__ && __KERNEL__ */
diff --git a/include/ck_malloc.h b/include/ck_malloc.h
new file mode 100644
index 0000000..e14dde3
--- /dev/null
+++ b/include/ck_malloc.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_MALLOC_H
+#define CK_MALLOC_H
+
+#include <ck_stdbool.h>
+#include <sys/types.h>
+
+struct ck_malloc {
+ void *(*malloc)(size_t);
+ void *(*realloc)(void *, size_t, size_t, bool);
+ void (*free)(void *, size_t, bool);
+};
+
+#endif /* CK_MALLOC_H */
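
This structure is how ck_hs and ck_ht borrow memory from the caller. Below is a sketch of adapting the C library allocator; the extra size and boolean arguments are simply ignored, which is assumed to be acceptable only when immediate reclamation is safe.

/* Sketch: adapting malloc/realloc/free to struct ck_malloc. */
#include <ck_malloc.h>
#include <stdlib.h>

static void *
wrap_malloc(size_t size)
{

	return malloc(size);
}

static void *
wrap_realloc(void *p, size_t old_size, size_t new_size, bool defer)
{

	(void)old_size;
	(void)defer;
	return realloc(p, new_size);
}

static void
wrap_free(void *p, size_t size, bool defer)
{

	(void)size;
	(void)defer;
	free(p);
}

static struct ck_malloc allocator = {
	.malloc = wrap_malloc,
	.realloc = wrap_realloc,
	.free = wrap_free
};
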
diff --git a/include/ck_md.h.in b/include/ck_md.h.in
new file mode 100644
index 0000000..cb5783e
--- /dev/null
+++ b/include/ck_md.h.in
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2011-2012 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_MD_H
+#define CK_MD_H
+
+#ifndef CK_MD_CACHELINE
+#define CK_MD_CACHELINE (64)
+#endif
+
+#ifndef CK_MD_PAGESIZE
+#define CK_MD_PAGESIZE (4096)
+#endif
+
+#ifndef @RTM_ENABLE@
+#define @RTM_ENABLE@
+#endif /* @RTM_ENABLE@ */
+
+#ifndef @LSE_ENABLE@
+#define @LSE_ENABLE@
+#endif /* @LSE_ENABLE@ */
+
+#ifndef @POINTER_PACK_ENABLE@
+#define @POINTER_PACK_ENABLE@
+#endif /* @POINTER_PACK_ENABLE@ */
+
+#ifndef @VMA_BITS@
+#define @VMA_BITS@ @VMA_BITS_VALUE@
+#endif /* @VMA_BITS@ */
+
+#ifndef @MM@
+#define @MM@
+#endif /* @MM@ */
+
+#ifndef @DISABLE_DOUBLE@
+#define @DISABLE_DOUBLE@
+#endif /* @DISABLE_DOUBLE@ */
+
+#define CK_VERSION "@VERSION@"
+#define CK_GIT_SHA "@GIT_SHA@"
+
+#endif /* CK_MD_H */
diff --git a/include/ck_pflock.h b/include/ck_pflock.h
new file mode 100644
index 0000000..61b42bd
--- /dev/null
+++ b/include/ck_pflock.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2013 John Wittrock.
+ * Copyright 2013-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PFLOCK_H
+#define CK_PFLOCK_H
+
+/*
+ * This is an implementation of phase-fair locks derived from the work
+ * described in:
+ * Brandenburg, B. and Anderson, J. 2010. Spin-Based
+ * Reader-Writer Synchronization for Multiprocessor Real-Time Systems
+ */
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+
+struct ck_pflock {
+ uint32_t rin;
+ uint32_t rout;
+ uint32_t win;
+ uint32_t wout;
+};
+typedef struct ck_pflock ck_pflock_t;
+
+#define CK_PFLOCK_LSB 0xFFFFFFF0
+#define CK_PFLOCK_RINC 0x100 /* Reader increment value. */
+#define CK_PFLOCK_WBITS 0x3 /* Writer bits in reader. */
+#define CK_PFLOCK_PRES 0x2 /* Writer present bit. */
+#define CK_PFLOCK_PHID 0x1 /* Phase ID bit. */
+
+#define CK_PFLOCK_INITIALIZER {0, 0, 0, 0}
+
+CK_CC_INLINE static void
+ck_pflock_init(struct ck_pflock *pf)
+{
+
+ pf->rin = 0;
+ pf->rout = 0;
+ pf->win = 0;
+ pf->wout = 0;
+ ck_pr_barrier();
+
+ return;
+}
+
+CK_CC_INLINE static void
+ck_pflock_write_unlock(ck_pflock_t *pf)
+{
+
+ ck_pr_fence_unlock();
+
+ /* Migrate from write phase to read phase. */
+ ck_pr_and_32(&pf->rin, CK_PFLOCK_LSB);
+
+ /* Allow other writers to continue. */
+ ck_pr_faa_32(&pf->wout, 1);
+ return;
+}
+
+CK_CC_INLINE static void
+ck_pflock_write_lock(ck_pflock_t *pf)
+{
+ uint32_t ticket;
+
+ /* Acquire ownership of write-phase. */
+ ticket = ck_pr_faa_32(&pf->win, 1);
+ while (ck_pr_load_32(&pf->wout) != ticket)
+ ck_pr_stall();
+
+ /*
+	 * Acquire a ticket on the read-side in order to allow pending
+	 * readers to flush. This indicates to any incoming reader that
+	 * a write-phase is pending.
+ */
+ ticket = ck_pr_faa_32(&pf->rin,
+ (ticket & CK_PFLOCK_PHID) | CK_PFLOCK_PRES);
+
+ /* Wait for any pending readers to flush. */
+ while (ck_pr_load_32(&pf->rout) != ticket)
+ ck_pr_stall();
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_pflock_read_unlock(ck_pflock_t *pf)
+{
+
+ ck_pr_fence_unlock();
+ ck_pr_faa_32(&pf->rout, CK_PFLOCK_RINC);
+ return;
+}
+
+CK_CC_INLINE static void
+ck_pflock_read_lock(ck_pflock_t *pf)
+{
+ uint32_t w;
+
+ /*
+ * If no writer is present, then the operation has completed
+ * successfully.
+ */
+ w = ck_pr_faa_32(&pf->rin, CK_PFLOCK_RINC) & CK_PFLOCK_WBITS;
+ if (w == 0)
+ goto leave;
+
+ /* Wait for current write phase to complete. */
+ while ((ck_pr_load_32(&pf->rin) & CK_PFLOCK_WBITS) == w)
+ ck_pr_stall();
+
+leave:
+ /* Acquire semantics with respect to readers. */
+ ck_pr_fence_lock();
+ return;
+}
+
+#endif /* CK_PFLOCK_H */
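
Since every operation above is a self-contained inline function, usage reduces to bracketing the critical sections; a brief sketch:

/* Sketch: phase-fair reader/writer usage. */
#include <ck_pflock.h>

static ck_pflock_t pflock = CK_PFLOCK_INITIALIZER;

static void
reader(void)
{

	ck_pflock_read_lock(&pflock);
	/* ... read shared state ... */
	ck_pflock_read_unlock(&pflock);
}

static void
writer(void)
{

	ck_pflock_write_lock(&pflock);
	/* ... mutate shared state ... */
	ck_pflock_write_unlock(&pflock);
}
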
diff --git a/include/ck_pr.h b/include/ck_pr.h
new file mode 100644
index 0000000..9b7fc42
--- /dev/null
+++ b/include/ck_pr.h
@@ -0,0 +1,1219 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_H
+#define CK_PR_H
+
+#include <ck_cc.h>
+#include <ck_limits.h>
+#include <ck_md.h>
+#include <ck_stdint.h>
+#include <ck_stdbool.h>
+
+#ifndef CK_USE_CC_BUILTINS
+#if defined(__x86_64__)
+#include "gcc/x86_64/ck_pr.h"
+#elif defined(__x86__)
+#include "gcc/x86/ck_pr.h"
+#elif defined(__sparcv9__)
+#include "gcc/sparcv9/ck_pr.h"
+#elif defined(__ppc64__)
+#include "gcc/ppc64/ck_pr.h"
+#elif defined(__ppc__)
+#include "gcc/ppc/ck_pr.h"
+#elif defined(__arm__)
+#include "gcc/arm/ck_pr.h"
+#elif defined(__aarch64__)
+#include "gcc/aarch64/ck_pr.h"
+#elif !defined(__GNUC__)
+#error Your platform is unsupported
+#endif
+#endif /* !CK_USE_CC_BUILTINS */
+
+#if defined(__GNUC__)
+#include "gcc/ck_pr.h"
+#endif
+
+#define CK_PR_FENCE_EMIT(T) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_##T(void) \
+ { \
+ ck_pr_fence_strict_##T(); \
+ return; \
+ }
+#define CK_PR_FENCE_NOOP(T) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_##T(void) \
+ { \
+ ck_pr_barrier(); \
+ return; \
+ }
+
+/*
+ * None of the currently supported platforms allow for data-dependent
+ * load ordering.
+ */
+CK_PR_FENCE_NOOP(load_depends)
+#define ck_pr_fence_strict_load_depends ck_pr_fence_load_depends
+
+/*
+ * In memory models where atomic operations do not have serializing
+ * effects, atomic read-modify-write operations are modeled as stores.
+ */
+#if defined(CK_MD_RMO)
+/*
+ * Only stores to the same location have a global
+ * ordering.
+ */
+CK_PR_FENCE_EMIT(atomic)
+CK_PR_FENCE_EMIT(atomic_load)
+CK_PR_FENCE_EMIT(atomic_store)
+CK_PR_FENCE_EMIT(store_atomic)
+CK_PR_FENCE_EMIT(load_atomic)
+CK_PR_FENCE_EMIT(load_store)
+CK_PR_FENCE_EMIT(store_load)
+CK_PR_FENCE_EMIT(load)
+CK_PR_FENCE_EMIT(store)
+CK_PR_FENCE_EMIT(memory)
+CK_PR_FENCE_EMIT(acquire)
+CK_PR_FENCE_EMIT(release)
+CK_PR_FENCE_EMIT(acqrel)
+CK_PR_FENCE_EMIT(lock)
+CK_PR_FENCE_EMIT(unlock)
+#elif defined(CK_MD_PSO)
+/*
+ * Anything can be re-ordered with respect to stores.
+ * Otherwise, loads are executed in-order.
+ */
+CK_PR_FENCE_EMIT(atomic)
+CK_PR_FENCE_NOOP(atomic_load)
+CK_PR_FENCE_EMIT(atomic_store)
+CK_PR_FENCE_EMIT(store_atomic)
+CK_PR_FENCE_NOOP(load_atomic)
+CK_PR_FENCE_EMIT(load_store)
+CK_PR_FENCE_EMIT(store_load)
+CK_PR_FENCE_NOOP(load)
+CK_PR_FENCE_EMIT(store)
+CK_PR_FENCE_EMIT(memory)
+CK_PR_FENCE_EMIT(acquire)
+CK_PR_FENCE_EMIT(release)
+CK_PR_FENCE_EMIT(acqrel)
+CK_PR_FENCE_EMIT(lock)
+CK_PR_FENCE_EMIT(unlock)
+#elif defined(CK_MD_TSO)
+/*
+ * Only loads are re-ordered and only with respect to
+ * prior stores. Atomic operations are serializing.
+ */
+CK_PR_FENCE_NOOP(atomic)
+CK_PR_FENCE_NOOP(atomic_load)
+CK_PR_FENCE_NOOP(atomic_store)
+CK_PR_FENCE_NOOP(store_atomic)
+CK_PR_FENCE_NOOP(load_atomic)
+CK_PR_FENCE_NOOP(load_store)
+CK_PR_FENCE_EMIT(store_load)
+CK_PR_FENCE_NOOP(load)
+CK_PR_FENCE_NOOP(store)
+CK_PR_FENCE_EMIT(memory)
+CK_PR_FENCE_NOOP(acquire)
+CK_PR_FENCE_NOOP(release)
+CK_PR_FENCE_NOOP(acqrel)
+CK_PR_FENCE_NOOP(lock)
+CK_PR_FENCE_NOOP(unlock)
+#else
+#error "No memory model has been defined."
+#endif /* CK_MD_TSO */
+
+#undef CK_PR_FENCE_EMIT
+#undef CK_PR_FENCE_NOOP
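+
+/*
+ * A brief usage sketch of the fence functions generated above, in a
+ * publish/consume pattern; shared_value and shared_ready are hypothetical
+ * names.  Whether each fence expands to a strict hardware fence or only a
+ * compiler barrier is determined by the memory model selected above.
+ *
+ *	static int shared_value;
+ *	static int shared_ready;
+ *
+ *	static void
+ *	producer(void)
+ *	{
+ *		shared_value = 42;
+ *		ck_pr_fence_store();
+ *		ck_pr_store_int(&shared_ready, 1);
+ *	}
+ *
+ *	static int
+ *	consumer(void)
+ *	{
+ *		while (ck_pr_load_int(&shared_ready) == 0)
+ *			ck_pr_stall();
+ *		ck_pr_fence_load();
+ *		return shared_value;
+ *	}
+ */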
+
+#ifndef CK_F_PR_RFO
+#define CK_F_PR_RFO
+CK_CC_INLINE static void
+ck_pr_rfo(const void *m)
+{
+
+ (void)m;
+ return;
+}
+#endif /* CK_F_PR_RFO */
+
+#define CK_PR_STORE_SAFE(DST, VAL, TYPE) \
+ ck_pr_md_store_##TYPE( \
+ ((void)sizeof(*(DST) = (VAL)), (DST)), \
+ (VAL))
+
+#define ck_pr_store_ptr(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), ptr)
+#define ck_pr_store_char(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), char)
+#ifndef CK_PR_DISABLE_DOUBLE
+#define ck_pr_store_double(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), double)
+#endif
+#define ck_pr_store_uint(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), uint)
+#define ck_pr_store_int(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), int)
+#define ck_pr_store_32(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), 32)
+#define ck_pr_store_16(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), 16)
+#define ck_pr_store_8(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), 8)
+
+#define ck_pr_store_ptr_unsafe(DST, VAL) ck_pr_md_store_ptr((DST), (VAL))
+
+#ifdef CK_F_PR_LOAD_64
+#define ck_pr_store_64(DST, VAL) CK_PR_STORE_SAFE((DST), (VAL), 64)
+#endif /* CK_F_PR_LOAD_64 */
+
+#define CK_PR_LOAD_PTR_SAFE(SRC) (CK_CC_TYPEOF(*(SRC), (void *)))ck_pr_md_load_ptr((SRC))
+#define ck_pr_load_ptr(SRC) CK_PR_LOAD_PTR_SAFE((SRC))
+
+#define CK_PR_LOAD_SAFE(SRC, TYPE) ck_pr_md_load_##TYPE((SRC))
+#define ck_pr_load_char(SRC) CK_PR_LOAD_SAFE((SRC), char)
+#ifndef CK_PR_DISABLE_DOUBLE
+#define ck_pr_load_double(SRC) CK_PR_LOAD_SAFE((SRC), double)
+#endif
+#define ck_pr_load_uint(SRC) CK_PR_LOAD_SAFE((SRC), uint)
+#define ck_pr_load_int(SRC) CK_PR_LOAD_SAFE((SRC), int)
+#define ck_pr_load_32(SRC) CK_PR_LOAD_SAFE((SRC), 32)
+#define ck_pr_load_16(SRC) CK_PR_LOAD_SAFE((SRC), 16)
+#define ck_pr_load_8(SRC) CK_PR_LOAD_SAFE((SRC), 8)
+
+#ifdef CK_F_PR_LOAD_64
+#define ck_pr_load_64(SRC) CK_PR_LOAD_SAFE((SRC), 64)
+#endif /* CK_F_PR_LOAD_64 */
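+
+/*
+ * The (void)sizeof(*(DST) = (VAL)) expression in CK_PR_STORE_SAFE
+ * type-checks the assignment at compile time without evaluating it, and
+ * CK_PR_LOAD_PTR_SAFE casts the loaded value to the type of the addressed
+ * object via CK_CC_TYPEOF, so no cast is needed at the call site.  A short
+ * sketch, where struct foo, shared_foo and f are hypothetical; a store of
+ * an incompatible type such as ck_pr_store_ptr(&shared_foo, 42) draws a
+ * compile-time diagnostic:
+ *
+ *	static struct foo *shared_foo;
+ *	struct foo *f;
+ *
+ *	ck_pr_store_ptr(&shared_foo, f);
+ *	f = ck_pr_load_ptr(&shared_foo);
+ */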
+
+#define CK_PR_BIN(K, S, M, T, P, C) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S(M *target, T value) \
+ { \
+ T previous; \
+ C punt; \
+ punt = ck_pr_md_load_##S(target); \
+ previous = (T)punt; \
+ while (ck_pr_cas_##S##_value(target, \
+ (C)previous, \
+ (C)(previous P value), \
+ &previous) == false) \
+ ck_pr_stall(); \
+ \
+ return; \
+ }
+
+#define CK_PR_BIN_S(K, S, T, P) CK_PR_BIN(K, S, T, T, P, T)
+
+#if defined(CK_F_PR_LOAD_CHAR) && defined(CK_F_PR_CAS_CHAR_VALUE)
+
+#ifndef CK_F_PR_ADD_CHAR
+#define CK_F_PR_ADD_CHAR
+CK_PR_BIN_S(add, char, char, +)
+#endif /* CK_F_PR_ADD_CHAR */
+
+#ifndef CK_F_PR_SUB_CHAR
+#define CK_F_PR_SUB_CHAR
+CK_PR_BIN_S(sub, char, char, -)
+#endif /* CK_F_PR_SUB_CHAR */
+
+#ifndef CK_F_PR_AND_CHAR
+#define CK_F_PR_AND_CHAR
+CK_PR_BIN_S(and, char, char, &)
+#endif /* CK_F_PR_AND_CHAR */
+
+#ifndef CK_F_PR_XOR_CHAR
+#define CK_F_PR_XOR_CHAR
+CK_PR_BIN_S(xor, char, char, ^)
+#endif /* CK_F_PR_XOR_CHAR */
+
+#ifndef CK_F_PR_OR_CHAR
+#define CK_F_PR_OR_CHAR
+CK_PR_BIN_S(or, char, char, |)
+#endif /* CK_F_PR_OR_CHAR */
+
+#endif /* CK_F_PR_LOAD_CHAR && CK_F_PR_CAS_CHAR_VALUE */
+
+#if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE)
+
+#ifndef CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_INT
+CK_PR_BIN_S(add, int, int, +)
+#endif /* CK_F_PR_ADD_INT */
+
+#ifndef CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_INT
+CK_PR_BIN_S(sub, int, int, -)
+#endif /* CK_F_PR_SUB_INT */
+
+#ifndef CK_F_PR_AND_INT
+#define CK_F_PR_AND_INT
+CK_PR_BIN_S(and, int, int, &)
+#endif /* CK_F_PR_AND_INT */
+
+#ifndef CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_INT
+CK_PR_BIN_S(xor, int, int, ^)
+#endif /* CK_F_PR_XOR_INT */
+
+#ifndef CK_F_PR_OR_INT
+#define CK_F_PR_OR_INT
+CK_PR_BIN_S(or, int, int, |)
+#endif /* CK_F_PR_OR_INT */
+
+#endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */
+
+#if defined(CK_F_PR_LOAD_DOUBLE) && defined(CK_F_PR_CAS_DOUBLE_VALUE) && \
+ !defined(CK_PR_DISABLE_DOUBLE)
+
+#ifndef CK_F_PR_ADD_DOUBLE
+#define CK_F_PR_ADD_DOUBLE
+CK_PR_BIN_S(add, double, double, +)
+#endif /* CK_F_PR_ADD_DOUBLE */
+
+#ifndef CK_F_PR_SUB_DOUBLE
+#define CK_F_PR_SUB_DOUBLE
+CK_PR_BIN_S(sub, double, double, -)
+#endif /* CK_F_PR_SUB_DOUBLE */
+
+#endif /* CK_F_PR_LOAD_DOUBLE && CK_F_PR_CAS_DOUBLE_VALUE && !CK_PR_DISABLE_DOUBLE */
+
+#if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE)
+
+#ifndef CK_F_PR_ADD_UINT
+#define CK_F_PR_ADD_UINT
+CK_PR_BIN_S(add, uint, unsigned int, +)
+#endif /* CK_F_PR_ADD_UINT */
+
+#ifndef CK_F_PR_SUB_UINT
+#define CK_F_PR_SUB_UINT
+CK_PR_BIN_S(sub, uint, unsigned int, -)
+#endif /* CK_F_PR_SUB_UINT */
+
+#ifndef CK_F_PR_AND_UINT
+#define CK_F_PR_AND_UINT
+CK_PR_BIN_S(and, uint, unsigned int, &)
+#endif /* CK_F_PR_AND_UINT */
+
+#ifndef CK_F_PR_XOR_UINT
+#define CK_F_PR_XOR_UINT
+CK_PR_BIN_S(xor, uint, unsigned int, ^)
+#endif /* CK_F_PR_XOR_UINT */
+
+#ifndef CK_F_PR_OR_UINT
+#define CK_F_PR_OR_UINT
+CK_PR_BIN_S(or, uint, unsigned int, |)
+#endif /* CK_F_PR_OR_UINT */
+
+#endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */
+
+#if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE)
+
+#ifndef CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_PTR
+CK_PR_BIN(add, ptr, void, uintptr_t, +, void *)
+#endif /* CK_F_PR_ADD_PTR */
+
+#ifndef CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_PTR
+CK_PR_BIN(sub, ptr, void, uintptr_t, -, void *)
+#endif /* CK_F_PR_SUB_PTR */
+
+#ifndef CK_F_PR_AND_PTR
+#define CK_F_PR_AND_PTR
+CK_PR_BIN(and, ptr, void, uintptr_t, &, void *)
+#endif /* CK_F_PR_AND_PTR */
+
+#ifndef CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_PTR
+CK_PR_BIN(xor, ptr, void, uintptr_t, ^, void *)
+#endif /* CK_F_PR_XOR_PTR */
+
+#ifndef CK_F_PR_OR_PTR
+#define CK_F_PR_OR_PTR
+CK_PR_BIN(or, ptr, void, uintptr_t, |, void *)
+#endif /* CK_F_PR_OR_PTR */
+
+#endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */
+
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE)
+
+#ifndef CK_F_PR_ADD_64
+#define CK_F_PR_ADD_64
+CK_PR_BIN_S(add, 64, uint64_t, +)
+#endif /* CK_F_PR_ADD_64 */
+
+#ifndef CK_F_PR_SUB_64
+#define CK_F_PR_SUB_64
+CK_PR_BIN_S(sub, 64, uint64_t, -)
+#endif /* CK_F_PR_SUB_64 */
+
+#ifndef CK_F_PR_AND_64
+#define CK_F_PR_AND_64
+CK_PR_BIN_S(and, 64, uint64_t, &)
+#endif /* CK_F_PR_AND_64 */
+
+#ifndef CK_F_PR_XOR_64
+#define CK_F_PR_XOR_64
+CK_PR_BIN_S(xor, 64, uint64_t, ^)
+#endif /* CK_F_PR_XOR_64 */
+
+#ifndef CK_F_PR_OR_64
+#define CK_F_PR_OR_64
+CK_PR_BIN_S(or, 64, uint64_t, |)
+#endif /* CK_F_PR_OR_64 */
+
+#endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */
+
+#if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE)
+
+#ifndef CK_F_PR_ADD_32
+#define CK_F_PR_ADD_32
+CK_PR_BIN_S(add, 32, uint32_t, +)
+#endif /* CK_F_PR_ADD_32 */
+
+#ifndef CK_F_PR_SUB_32
+#define CK_F_PR_SUB_32
+CK_PR_BIN_S(sub, 32, uint32_t, -)
+#endif /* CK_F_PR_SUB_32 */
+
+#ifndef CK_F_PR_AND_32
+#define CK_F_PR_AND_32
+CK_PR_BIN_S(and, 32, uint32_t, &)
+#endif /* CK_F_PR_AND_32 */
+
+#ifndef CK_F_PR_XOR_32
+#define CK_F_PR_XOR_32
+CK_PR_BIN_S(xor, 32, uint32_t, ^)
+#endif /* CK_F_PR_XOR_32 */
+
+#ifndef CK_F_PR_OR_32
+#define CK_F_PR_OR_32
+CK_PR_BIN_S(or, 32, uint32_t, |)
+#endif /* CK_F_PR_OR_32 */
+
+#endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE)
+
+#ifndef CK_F_PR_ADD_16
+#define CK_F_PR_ADD_16
+CK_PR_BIN_S(add, 16, uint16_t, +)
+#endif /* CK_F_PR_ADD_16 */
+
+#ifndef CK_F_PR_SUB_16
+#define CK_F_PR_SUB_16
+CK_PR_BIN_S(sub, 16, uint16_t, -)
+#endif /* CK_F_PR_SUB_16 */
+
+#ifndef CK_F_PR_AND_16
+#define CK_F_PR_AND_16
+CK_PR_BIN_S(and, 16, uint16_t, &)
+#endif /* CK_F_PR_AND_16 */
+
+#ifndef CK_F_PR_XOR_16
+#define CK_F_PR_XOR_16
+CK_PR_BIN_S(xor, 16, uint16_t, ^)
+#endif /* CK_F_PR_XOR_16 */
+
+#ifndef CK_F_PR_OR_16
+#define CK_F_PR_OR_16
+CK_PR_BIN_S(or, 16, uint16_t, |)
+#endif /* CK_F_PR_OR_16 */
+
+#endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_CAS_8_VALUE)
+
+#ifndef CK_F_PR_ADD_8
+#define CK_F_PR_ADD_8
+CK_PR_BIN_S(add, 8, uint8_t, +)
+#endif /* CK_F_PR_ADD_8 */
+
+#ifndef CK_F_PR_SUB_8
+#define CK_F_PR_SUB_8
+CK_PR_BIN_S(sub, 8, uint8_t, -)
+#endif /* CK_F_PR_SUB_8 */
+
+#ifndef CK_F_PR_AND_8
+#define CK_F_PR_AND_8
+CK_PR_BIN_S(and, 8, uint8_t, &)
+#endif /* CK_F_PR_AND_8 */
+
+#ifndef CK_F_PR_XOR_8
+#define CK_F_PR_XOR_8
+CK_PR_BIN_S(xor, 8, uint8_t, ^)
+#endif /* CK_F_PR_XOR_8 */
+
+#ifndef CK_F_PR_OR_8
+#define CK_F_PR_OR_8
+CK_PR_BIN_S(or, 8, uint8_t, |)
+#endif /* CK_F_PR_OR_8 */
+
+#endif /* CK_F_PR_LOAD_8 && CK_F_PR_CAS_8_VALUE */
+
+#undef CK_PR_BIN_S
+#undef CK_PR_BIN
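+
+/*
+ * The generic functions generated above are compare-and-swap loops over
+ * ck_pr_cas_*_value (architecture headers may instead provide direct
+ * implementations).  A short sketch; counter and flags are hypothetical:
+ *
+ *	static unsigned int counter;
+ *	static unsigned int flags;
+ *
+ *	ck_pr_add_uint(&counter, 16);
+ *	ck_pr_or_uint(&flags, 0x4);
+ *	ck_pr_and_uint(&flags, ~0x4U);
+ */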
+
+#define CK_PR_BTX(K, S, M, T, P, C, R) \
+ CK_CC_INLINE static bool \
+ ck_pr_##K##_##S(M *target, unsigned int offset) \
+ { \
+ T previous; \
+ C punt; \
+ punt = ck_pr_md_load_##S(target); \
+ previous = (T)punt; \
+ while (ck_pr_cas_##S##_value(target, (C)previous, \
+ (C)(previous P (R ((T)1 << offset))), &previous) == false) \
+ ck_pr_stall(); \
+ return ((previous >> offset) & 1); \
+ }
+
+#define CK_PR_BTX_S(K, S, T, P, R) CK_PR_BTX(K, S, T, T, P, T, R)
+
+#if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE)
+
+#ifndef CK_F_PR_BTC_INT
+#define CK_F_PR_BTC_INT
+CK_PR_BTX_S(btc, int, int, ^,)
+#endif /* CK_F_PR_BTC_INT */
+
+#ifndef CK_F_PR_BTR_INT
+#define CK_F_PR_BTR_INT
+CK_PR_BTX_S(btr, int, int, &, ~)
+#endif /* CK_F_PR_BTR_INT */
+
+#ifndef CK_F_PR_BTS_INT
+#define CK_F_PR_BTS_INT
+CK_PR_BTX_S(bts, int, int, |,)
+#endif /* CK_F_PR_BTS_INT */
+
+#endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */
+
+#if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE)
+
+#ifndef CK_F_PR_BTC_UINT
+#define CK_F_PR_BTC_UINT
+CK_PR_BTX_S(btc, uint, unsigned int, ^,)
+#endif /* CK_F_PR_BTC_UINT */
+
+#ifndef CK_F_PR_BTR_UINT
+#define CK_F_PR_BTR_UINT
+CK_PR_BTX_S(btr, uint, unsigned int, &, ~)
+#endif /* CK_F_PR_BTR_UINT */
+
+#ifndef CK_F_PR_BTS_UINT
+#define CK_F_PR_BTS_UINT
+CK_PR_BTX_S(bts, uint, unsigned int, |,)
+#endif /* CK_F_PR_BTS_UINT */
+
+#endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */
+
+#if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE)
+
+#ifndef CK_F_PR_BTC_PTR
+#define CK_F_PR_BTC_PTR
+CK_PR_BTX(btc, ptr, void, uintptr_t, ^, void *,)
+#endif /* CK_F_PR_BTC_PTR */
+
+#ifndef CK_F_PR_BTR_PTR
+#define CK_F_PR_BTR_PTR
+CK_PR_BTX(btr, ptr, void, uintptr_t, &, void *, ~)
+#endif /* CK_F_PR_BTR_PTR */
+
+#ifndef CK_F_PR_BTS_PTR
+#define CK_F_PR_BTS_PTR
+CK_PR_BTX(bts, ptr, void, uintptr_t, |, void *,)
+#endif /* CK_F_PR_BTS_PTR */
+
+#endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */
+
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE)
+
+#ifndef CK_F_PR_BTC_64
+#define CK_F_PR_BTC_64
+CK_PR_BTX_S(btc, 64, uint64_t, ^,)
+#endif /* CK_F_PR_BTC_64 */
+
+#ifndef CK_F_PR_BTR_64
+#define CK_F_PR_BTR_64
+CK_PR_BTX_S(btr, 64, uint64_t, &, ~)
+#endif /* CK_F_PR_BTR_64 */
+
+#ifndef CK_F_PR_BTS_64
+#define CK_F_PR_BTS_64
+CK_PR_BTX_S(bts, 64, uint64_t, |,)
+#endif /* CK_F_PR_BTS_64 */
+
+#endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */
+
+#if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE)
+
+#ifndef CK_F_PR_BTC_32
+#define CK_F_PR_BTC_32
+CK_PR_BTX_S(btc, 32, uint32_t, ^,)
+#endif /* CK_F_PR_BTC_32 */
+
+#ifndef CK_F_PR_BTR_32
+#define CK_F_PR_BTR_32
+CK_PR_BTX_S(btr, 32, uint32_t, &, ~)
+#endif /* CK_F_PR_BTR_32 */
+
+#ifndef CK_F_PR_BTS_32
+#define CK_F_PR_BTS_32
+CK_PR_BTX_S(bts, 32, uint32_t, |,)
+#endif /* CK_F_PR_BTS_32 */
+
+#endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE)
+
+#ifndef CK_F_PR_BTC_16
+#define CK_F_PR_BTC_16
+CK_PR_BTX_S(btc, 16, uint16_t, ^,)
+#endif /* CK_F_PR_BTC_16 */
+
+#ifndef CK_F_PR_BTR_16
+#define CK_F_PR_BTR_16
+CK_PR_BTX_S(btr, 16, uint16_t, &, ~)
+#endif /* CK_F_PR_BTR_16 */
+
+#ifndef CK_F_PR_BTS_16
+#define CK_F_PR_BTS_16
+CK_PR_BTX_S(bts, 16, uint16_t, |,)
+#endif /* CK_F_PR_BTS_16 */
+
+#endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */
+
+#undef CK_PR_BTX_S
+#undef CK_PR_BTX
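+
+/*
+ * The bit test operations return the previous value of the targeted bit,
+ * which makes them usable as one-word claim flags: ck_pr_bts_uint returns
+ * false when the bit was previously clear, in which case the caller owns
+ * the slot until it clears the bit again with ck_pr_btr_uint.  A short
+ * sketch; claimed and do_work() are hypothetical, and the offset must be
+ * smaller than the width of the underlying type:
+ *
+ *	static unsigned int claimed;
+ *
+ *	if (ck_pr_bts_uint(&claimed, 3) == false) {
+ *		do_work();
+ *		ck_pr_btr_uint(&claimed, 3);
+ *	}
+ */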
+
+#define CK_PR_UNARY(K, X, S, M, T) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S(M *target) \
+ { \
+ ck_pr_##X##_##S(target, (T)1); \
+ return; \
+ }
+
+#define CK_PR_UNARY_Z(K, S, M, T, P, C, Z) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S##_zero(M *target, bool *zero) \
+ { \
+ T previous; \
+ C punt; \
+ punt = (C)ck_pr_md_load_##S(target); \
+ previous = (T)punt; \
+ while (ck_pr_cas_##S##_value(target, \
+ (C)previous, \
+ (C)(previous P 1), \
+ &previous) == false) \
+ ck_pr_stall(); \
+ *zero = previous == (T)Z; \
+ return; \
+ }
+
+#define CK_PR_UNARY_S(K, X, S, M) CK_PR_UNARY(K, X, S, M, M)
+#define CK_PR_UNARY_Z_S(K, S, M, P, Z) CK_PR_UNARY_Z(K, S, M, M, P, M, Z)
+
+#if defined(CK_F_PR_LOAD_CHAR) && defined(CK_F_PR_CAS_CHAR_VALUE)
+
+#ifndef CK_F_PR_INC_CHAR
+#define CK_F_PR_INC_CHAR
+CK_PR_UNARY_S(inc, add, char, char)
+#endif /* CK_F_PR_INC_CHAR */
+
+#ifndef CK_F_PR_INC_CHAR_ZERO
+#define CK_F_PR_INC_CHAR_ZERO
+CK_PR_UNARY_Z_S(inc, char, char, +, -1)
+#endif /* CK_F_PR_INC_CHAR_ZERO */
+
+#ifndef CK_F_PR_DEC_CHAR
+#define CK_F_PR_DEC_CHAR
+CK_PR_UNARY_S(dec, sub, char, char)
+#endif /* CK_F_PR_DEC_CHAR */
+
+#ifndef CK_F_PR_DEC_CHAR_ZERO
+#define CK_F_PR_DEC_CHAR_ZERO
+CK_PR_UNARY_Z_S(dec, char, char, -, 1)
+#endif /* CK_F_PR_DEC_CHAR_ZERO */
+
+#endif /* CK_F_PR_LOAD_CHAR && CK_F_PR_CAS_CHAR_VALUE */
+
+#if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE)
+
+#ifndef CK_F_PR_INC_INT
+#define CK_F_PR_INC_INT
+CK_PR_UNARY_S(inc, add, int, int)
+#endif /* CK_F_PR_INC_INT */
+
+#ifndef CK_F_PR_INC_INT_ZERO
+#define CK_F_PR_INC_INT_ZERO
+CK_PR_UNARY_Z_S(inc, int, int, +, -1)
+#endif /* CK_F_PR_INC_INT_ZERO */
+
+#ifndef CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_INT
+CK_PR_UNARY_S(dec, sub, int, int)
+#endif /* CK_F_PR_DEC_INT */
+
+#ifndef CK_F_PR_DEC_INT_ZERO
+#define CK_F_PR_DEC_INT_ZERO
+CK_PR_UNARY_Z_S(dec, int, int, -, 1)
+#endif /* CK_F_PR_DEC_INT_ZERO */
+
+#endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */
+
+#if defined(CK_F_PR_LOAD_DOUBLE) && defined(CK_F_PR_CAS_DOUBLE_VALUE) && \
+ !defined(CK_PR_DISABLE_DOUBLE)
+
+#ifndef CK_F_PR_INC_DOUBLE
+#define CK_F_PR_INC_DOUBLE
+CK_PR_UNARY_S(inc, add, double, double)
+#endif /* CK_F_PR_INC_DOUBLE */
+
+#ifndef CK_F_PR_DEC_DOUBLE
+#define CK_F_PR_DEC_DOUBLE
+CK_PR_UNARY_S(dec, sub, double, double)
+#endif /* CK_F_PR_DEC_DOUBLE */
+
+#endif /* CK_F_PR_LOAD_DOUBLE && CK_F_PR_CAS_DOUBLE_VALUE && !CK_PR_DISABLE_DOUBLE */
+
+#if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE)
+
+#ifndef CK_F_PR_INC_UINT
+#define CK_F_PR_INC_UINT
+CK_PR_UNARY_S(inc, add, uint, unsigned int)
+#endif /* CK_F_PR_INC_UINT */
+
+#ifndef CK_F_PR_INC_UINT_ZERO
+#define CK_F_PR_INC_UINT_ZERO
+CK_PR_UNARY_Z_S(inc, uint, unsigned int, +, UINT_MAX)
+#endif /* CK_F_PR_INC_UINT_ZERO */
+
+#ifndef CK_F_PR_DEC_UINT
+#define CK_F_PR_DEC_UINT
+CK_PR_UNARY_S(dec, sub, uint, unsigned int)
+#endif /* CK_F_PR_DEC_UINT */
+
+#ifndef CK_F_PR_DEC_UINT_ZERO
+#define CK_F_PR_DEC_UINT_ZERO
+CK_PR_UNARY_Z_S(dec, uint, unsigned int, -, 1)
+#endif /* CK_F_PR_DEC_UINT_ZERO */
+
+#endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */
+
+#if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE)
+
+#ifndef CK_F_PR_INC_PTR
+#define CK_F_PR_INC_PTR
+CK_PR_UNARY(inc, add, ptr, void, uintptr_t)
+#endif /* CK_F_PR_INC_PTR */
+
+#ifndef CK_F_PR_INC_PTR_ZERO
+#define CK_F_PR_INC_PTR_ZERO
+CK_PR_UNARY_Z(inc, ptr, void, uintptr_t, +, void *, UINT_MAX)
+#endif /* CK_F_PR_INC_PTR_ZERO */
+
+#ifndef CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_PTR
+CK_PR_UNARY(dec, sub, ptr, void, uintptr_t)
+#endif /* CK_F_PR_DEC_PTR */
+
+#ifndef CK_F_PR_DEC_PTR_ZERO
+#define CK_F_PR_DEC_PTR_ZERO
+CK_PR_UNARY_Z(dec, ptr, void, uintptr_t, -, void *, 1)
+#endif /* CK_F_PR_DEC_PTR_ZERO */
+
+#endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */
+
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE)
+
+#ifndef CK_F_PR_INC_64
+#define CK_F_PR_INC_64
+CK_PR_UNARY_S(inc, add, 64, uint64_t)
+#endif /* CK_F_PR_INC_64 */
+
+#ifndef CK_F_PR_INC_64_ZERO
+#define CK_F_PR_INC_64_ZERO
+CK_PR_UNARY_Z_S(inc, 64, uint64_t, +, UINT64_MAX)
+#endif /* CK_F_PR_INC_64_ZERO */
+
+#ifndef CK_F_PR_DEC_64
+#define CK_F_PR_DEC_64
+CK_PR_UNARY_S(dec, sub, 64, uint64_t)
+#endif /* CK_F_PR_DEC_64 */
+
+#ifndef CK_F_PR_DEC_64_ZERO
+#define CK_F_PR_DEC_64_ZERO
+CK_PR_UNARY_Z_S(dec, 64, uint64_t, -, 1)
+#endif /* CK_F_PR_DEC_64_ZERO */
+
+#endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */
+
+#if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE)
+
+#ifndef CK_F_PR_INC_32
+#define CK_F_PR_INC_32
+CK_PR_UNARY_S(inc, add, 32, uint32_t)
+#endif /* CK_F_PR_INC_32 */
+
+#ifndef CK_F_PR_INC_32_ZERO
+#define CK_F_PR_INC_32_ZERO
+CK_PR_UNARY_Z_S(inc, 32, uint32_t, +, UINT32_MAX)
+#endif /* CK_F_PR_INC_32_ZERO */
+
+#ifndef CK_F_PR_DEC_32
+#define CK_F_PR_DEC_32
+CK_PR_UNARY_S(dec, sub, 32, uint32_t)
+#endif /* CK_F_PR_DEC_32 */
+
+#ifndef CK_F_PR_DEC_32_ZERO
+#define CK_F_PR_DEC_32_ZERO
+CK_PR_UNARY_Z_S(dec, 32, uint32_t, -, 1)
+#endif /* CK_F_PR_DEC_32_ZERO */
+
+#endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE)
+
+#ifndef CK_F_PR_INC_16
+#define CK_F_PR_INC_16
+CK_PR_UNARY_S(inc, add, 16, uint16_t)
+#endif /* CK_F_PR_INC_16 */
+
+#ifndef CK_F_PR_INC_16_ZERO
+#define CK_F_PR_INC_16_ZERO
+CK_PR_UNARY_Z_S(inc, 16, uint16_t, +, UINT16_MAX)
+#endif /* CK_F_PR_INC_16_ZERO */
+
+#ifndef CK_F_PR_DEC_16
+#define CK_F_PR_DEC_16
+CK_PR_UNARY_S(dec, sub, 16, uint16_t)
+#endif /* CK_F_PR_DEC_16 */
+
+#ifndef CK_F_PR_DEC_16_ZERO
+#define CK_F_PR_DEC_16_ZERO
+CK_PR_UNARY_Z_S(dec, 16, uint16_t, -, 1)
+#endif /* CK_F_PR_DEC_16_ZERO */
+
+#endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_CAS_8_VALUE)
+
+#ifndef CK_F_PR_INC_8
+#define CK_F_PR_INC_8
+CK_PR_UNARY_S(inc, add, 8, uint8_t)
+#endif /* CK_F_PR_INC_8 */
+
+#ifndef CK_F_PR_INC_8_ZERO
+#define CK_F_PR_INC_8_ZERO
+CK_PR_UNARY_Z_S(inc, 8, uint8_t, +, UINT8_MAX)
+#endif /* CK_F_PR_INC_8_ZERO */
+
+#ifndef CK_F_PR_DEC_8
+#define CK_F_PR_DEC_8
+CK_PR_UNARY_S(dec, sub, 8, uint8_t)
+#endif /* CK_F_PR_DEC_8 */
+
+#ifndef CK_F_PR_DEC_8_ZERO
+#define CK_F_PR_DEC_8_ZERO
+CK_PR_UNARY_Z_S(dec, 8, uint8_t, -, 1)
+#endif /* CK_F_PR_DEC_8_ZERO */
+
+#endif /* CK_F_PR_LOAD_8 && CK_F_PR_CAS_8_VALUE */
+
+#undef CK_PR_UNARY_Z_S
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY_Z
+#undef CK_PR_UNARY
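+
+/*
+ * The _zero variants report whether the update brought the value to zero,
+ * which is the usual building block for reference counting.  A short
+ * sketch; struct object and object_destroy() are hypothetical:
+ *
+ *	struct object {
+ *		unsigned int ref;
+ *	};
+ *
+ *	static void
+ *	object_release(struct object *o)
+ *	{
+ *		bool zero;
+ *
+ *		ck_pr_dec_uint_zero(&o->ref, &zero);
+ *		if (zero == true)
+ *			object_destroy(o);
+ *	}
+ */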
+
+#define CK_PR_N(K, S, M, T, P, C) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S(M *target) \
+ { \
+ T previous; \
+ C punt; \
+ punt = (C)ck_pr_md_load_##S(target); \
+ previous = (T)punt; \
+ while (ck_pr_cas_##S##_value(target, \
+ (C)previous, \
+ (C)(P previous), \
+ &previous) == false) \
+ ck_pr_stall(); \
+ \
+ return; \
+ }
+
+#define CK_PR_N_Z(S, M, T, C) \
+ CK_CC_INLINE static void \
+ ck_pr_neg_##S##_zero(M *target, bool *zero) \
+ { \
+ T previous; \
+ C punt; \
+ punt = (C)ck_pr_md_load_##S(target); \
+ previous = (T)punt; \
+ while (ck_pr_cas_##S##_value(target, \
+ (C)previous, \
+ (C)(-previous), \
+ &previous) == false) \
+ ck_pr_stall(); \
+ \
+ *zero = previous == 0; \
+ return; \
+ }
+
+#define CK_PR_N_S(K, S, M, P) CK_PR_N(K, S, M, M, P, M)
+#define CK_PR_N_Z_S(S, M) CK_PR_N_Z(S, M, M, M)
+
+#if defined(CK_F_PR_LOAD_CHAR) && defined(CK_F_PR_CAS_CHAR_VALUE)
+
+#ifndef CK_F_PR_NOT_CHAR
+#define CK_F_PR_NOT_CHAR
+CK_PR_N_S(not, char, char, ~)
+#endif /* CK_F_PR_NOT_CHAR */
+
+#ifndef CK_F_PR_NEG_CHAR
+#define CK_F_PR_NEG_CHAR
+CK_PR_N_S(neg, char, char, -)
+#endif /* CK_F_PR_NEG_CHAR */
+
+#ifndef CK_F_PR_NEG_CHAR_ZERO
+#define CK_F_PR_NEG_CHAR_ZERO
+CK_PR_N_Z_S(char, char)
+#endif /* CK_F_PR_NEG_CHAR_ZERO */
+
+#endif /* CK_F_PR_LOAD_CHAR && CK_F_PR_CAS_CHAR_VALUE */
+
+#if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE)
+
+#ifndef CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_INT
+CK_PR_N_S(not, int, int, ~)
+#endif /* CK_F_PR_NOT_INT */
+
+#ifndef CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_INT
+CK_PR_N_S(neg, int, int, -)
+#endif /* CK_F_PR_NEG_INT */
+
+#ifndef CK_F_PR_NEG_INT_ZERO
+#define CK_F_PR_NEG_INT_ZERO
+CK_PR_N_Z_S(int, int)
+#endif /* CK_F_PR_NEG_INT_ZERO */
+
+#endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */
+
+#if defined(CK_F_PR_LOAD_DOUBLE) && defined(CK_F_PR_CAS_DOUBLE_VALUE) && \
+ !defined(CK_PR_DISABLE_DOUBLE)
+
+#ifndef CK_F_PR_NEG_DOUBLE
+#define CK_F_PR_NEG_DOUBLE
+CK_PR_N_S(neg, double, double, -)
+#endif /* CK_F_PR_NEG_DOUBLE */
+
+#endif /* CK_F_PR_LOAD_DOUBLE && CK_F_PR_CAS_DOUBLE_VALUE && !CK_PR_DISABLE_DOUBLE */
+
+#if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE)
+
+#ifndef CK_F_PR_NOT_UINT
+#define CK_F_PR_NOT_UINT
+CK_PR_N_S(not, uint, unsigned int, ~)
+#endif /* CK_F_PR_NOT_UINT */
+
+#ifndef CK_F_PR_NEG_UINT
+#define CK_F_PR_NEG_UINT
+CK_PR_N_S(neg, uint, unsigned int, -)
+#endif /* CK_F_PR_NEG_UINT */
+
+#ifndef CK_F_PR_NEG_UINT_ZERO
+#define CK_F_PR_NEG_UINT_ZERO
+CK_PR_N_Z_S(uint, unsigned int)
+#endif /* CK_F_PR_NEG_UINT_ZERO */
+
+#endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */
+
+#if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE)
+
+#ifndef CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_PTR
+CK_PR_N(not, ptr, void, uintptr_t, ~, void *)
+#endif /* CK_F_PR_NOT_PTR */
+
+#ifndef CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_PTR
+CK_PR_N(neg, ptr, void, uintptr_t, -, void *)
+#endif /* CK_F_PR_NEG_PTR */
+
+#ifndef CK_F_PR_NEG_PTR_ZERO
+#define CK_F_PR_NEG_PTR_ZERO
+CK_PR_N_Z(ptr, void, uintptr_t, void *)
+#endif /* CK_F_PR_NEG_PTR_ZERO */
+
+#endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */
+
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE)
+
+#ifndef CK_F_PR_NOT_64
+#define CK_F_PR_NOT_64
+CK_PR_N_S(not, 64, uint64_t, ~)
+#endif /* CK_F_PR_NOT_64 */
+
+#ifndef CK_F_PR_NEG_64
+#define CK_F_PR_NEG_64
+CK_PR_N_S(neg, 64, uint64_t, -)
+#endif /* CK_F_PR_NEG_64 */
+
+#ifndef CK_F_PR_NEG_64_ZERO
+#define CK_F_PR_NEG_64_ZERO
+CK_PR_N_Z_S(64, uint64_t)
+#endif /* CK_F_PR_NEG_64_ZERO */
+
+#endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */
+
+#if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE)
+
+#ifndef CK_F_PR_NOT_32
+#define CK_F_PR_NOT_32
+CK_PR_N_S(not, 32, uint32_t, ~)
+#endif /* CK_F_PR_NOT_32 */
+
+#ifndef CK_F_PR_NEG_32
+#define CK_F_PR_NEG_32
+CK_PR_N_S(neg, 32, uint32_t, -)
+#endif /* CK_F_PR_NEG_32 */
+
+#ifndef CK_F_PR_NEG_32_ZERO
+#define CK_F_PR_NEG_32_ZERO
+CK_PR_N_Z_S(32, uint32_t)
+#endif /* CK_F_PR_NEG_32_ZERO */
+
+#endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE)
+
+#ifndef CK_F_PR_NOT_16
+#define CK_F_PR_NOT_16
+CK_PR_N_S(not, 16, uint16_t, ~)
+#endif /* CK_F_PR_NOT_16 */
+
+#ifndef CK_F_PR_NEG_16
+#define CK_F_PR_NEG_16
+CK_PR_N_S(neg, 16, uint16_t, -)
+#endif /* CK_F_PR_NEG_16 */
+
+#ifndef CK_F_PR_NEG_16_ZERO
+#define CK_F_PR_NEG_16_ZERO
+CK_PR_N_Z_S(16, uint16_t)
+#endif /* CK_F_PR_NEG_16_ZERO */
+
+#endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_CAS_8_VALUE)
+
+#ifndef CK_F_PR_NOT_8
+#define CK_F_PR_NOT_8
+CK_PR_N_S(not, 8, uint8_t, ~)
+#endif /* CK_F_PR_NOT_8 */
+
+#ifndef CK_F_PR_NEG_8
+#define CK_F_PR_NEG_8
+CK_PR_N_S(neg, 8, uint8_t, -)
+#endif /* CK_F_PR_NEG_8 */
+
+#ifndef CK_F_PR_NEG_8_ZERO
+#define CK_F_PR_NEG_8_ZERO
+CK_PR_N_Z_S(8, uint8_t)
+#endif /* CK_F_PR_NEG_8_ZERO */
+
+#endif /* CK_F_PR_LOAD_8 && CK_F_PR_CAS_8_VALUE */
+
+#undef CK_PR_N_Z_S
+#undef CK_PR_N_S
+#undef CK_PR_N_Z
+#undef CK_PR_N
+
+#define CK_PR_FAA(S, M, T, C) \
+ CK_CC_INLINE static C \
+ ck_pr_faa_##S(M *target, T delta) \
+ { \
+ T previous; \
+ C punt; \
+ punt = (C)ck_pr_md_load_##S(target); \
+ previous = (T)punt; \
+ while (ck_pr_cas_##S##_value(target, \
+ (C)previous, \
+ (C)(previous + delta), \
+ &previous) == false) \
+ ck_pr_stall(); \
+ \
+ return ((C)previous); \
+ }
+
+#define CK_PR_FAS(S, M, C) \
+ CK_CC_INLINE static C \
+ ck_pr_fas_##S(M *target, C update) \
+ { \
+ C previous; \
+ previous = ck_pr_md_load_##S(target); \
+ while (ck_pr_cas_##S##_value(target, \
+ previous, \
+ update, \
+ &previous) == false) \
+ ck_pr_stall(); \
+ \
+ return (previous); \
+ }
+
+#define CK_PR_FAA_S(S, M) CK_PR_FAA(S, M, M, M)
+#define CK_PR_FAS_S(S, M) CK_PR_FAS(S, M, M)
+
+#if defined(CK_F_PR_LOAD_CHAR) && defined(CK_F_PR_CAS_CHAR_VALUE)
+
+#ifndef CK_F_PR_FAA_CHAR
+#define CK_F_PR_FAA_CHAR
+CK_PR_FAA_S(char, char)
+#endif /* CK_F_PR_FAA_CHAR */
+
+#ifndef CK_F_PR_FAS_CHAR
+#define CK_F_PR_FAS_CHAR
+CK_PR_FAS_S(char, char)
+#endif /* CK_F_PR_FAS_CHAR */
+
+#endif /* CK_F_PR_LOAD_CHAR && CK_F_PR_CAS_CHAR_VALUE */
+
+#if defined(CK_F_PR_LOAD_INT) && defined(CK_F_PR_CAS_INT_VALUE)
+
+#ifndef CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_INT
+CK_PR_FAA_S(int, int)
+#endif /* CK_F_PR_FAA_INT */
+
+#ifndef CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_INT
+CK_PR_FAS_S(int, int)
+#endif /* CK_F_PR_FAS_INT */
+
+#endif /* CK_F_PR_LOAD_INT && CK_F_PR_CAS_INT_VALUE */
+
+#if defined(CK_F_PR_LOAD_DOUBLE) && defined(CK_F_PR_CAS_DOUBLE_VALUE) && \
+ !defined(CK_PR_DISABLE_DOUBLE)
+
+#ifndef CK_F_PR_FAA_DOUBLE
+#define CK_F_PR_FAA_DOUBLE
+CK_PR_FAA_S(double, double)
+#endif /* CK_F_PR_FAA_DOUBLE */
+
+#ifndef CK_F_PR_FAS_DOUBLE
+#define CK_F_PR_FAS_DOUBLE
+CK_PR_FAS_S(double, double)
+#endif /* CK_F_PR_FAS_DOUBLE */
+
+#endif /* CK_F_PR_LOAD_DOUBLE && CK_F_PR_CAS_DOUBLE_VALUE && !CK_PR_DISABLE_DOUBLE */
+
+#if defined(CK_F_PR_LOAD_UINT) && defined(CK_F_PR_CAS_UINT_VALUE)
+
+#ifndef CK_F_PR_FAA_UINT
+#define CK_F_PR_FAA_UINT
+CK_PR_FAA_S(uint, unsigned int)
+#endif /* CK_F_PR_FAA_UINT */
+
+#ifndef CK_F_PR_FAS_UINT
+#define CK_F_PR_FAS_UINT
+CK_PR_FAS_S(uint, unsigned int)
+#endif /* CK_F_PR_FAS_UINT */
+
+#endif /* CK_F_PR_LOAD_UINT && CK_F_PR_CAS_UINT_VALUE */
+
+#if defined(CK_F_PR_LOAD_PTR) && defined(CK_F_PR_CAS_PTR_VALUE)
+
+#ifndef CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_PTR
+CK_PR_FAA(ptr, void, uintptr_t, void *)
+#endif /* CK_F_PR_FAA_PTR */
+
+#ifndef CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_PTR
+CK_PR_FAS(ptr, void, void *)
+#endif /* CK_F_PR_FAS_PTR */
+
+#endif /* CK_F_PR_LOAD_PTR && CK_F_PR_CAS_PTR_VALUE */
+
+#if defined(CK_F_PR_LOAD_64) && defined(CK_F_PR_CAS_64_VALUE)
+
+#ifndef CK_F_PR_FAA_64
+#define CK_F_PR_FAA_64
+CK_PR_FAA_S(64, uint64_t)
+#endif /* CK_F_PR_FAA_64 */
+
+#ifndef CK_F_PR_FAS_64
+#define CK_F_PR_FAS_64
+CK_PR_FAS_S(64, uint64_t)
+#endif /* CK_F_PR_FAS_64 */
+
+#endif /* CK_F_PR_LOAD_64 && CK_F_PR_CAS_64_VALUE */
+
+#if defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_CAS_32_VALUE)
+
+#ifndef CK_F_PR_FAA_32
+#define CK_F_PR_FAA_32
+CK_PR_FAA_S(32, uint32_t)
+#endif /* CK_F_PR_FAA_32 */
+
+#ifndef CK_F_PR_FAS_32
+#define CK_F_PR_FAS_32
+CK_PR_FAS_S(32, uint32_t)
+#endif /* CK_F_PR_FAS_32 */
+
+#endif /* CK_F_PR_LOAD_32 && CK_F_PR_CAS_32_VALUE */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_CAS_16_VALUE)
+
+#ifndef CK_F_PR_FAA_16
+#define CK_F_PR_FAA_16
+CK_PR_FAA_S(16, uint16_t)
+#endif /* CK_F_PR_FAA_16 */
+
+#ifndef CK_F_PR_FAS_16
+#define CK_F_PR_FAS_16
+CK_PR_FAS_S(16, uint16_t)
+#endif /* CK_F_PR_FAS_16 */
+
+#endif /* CK_F_PR_LOAD_16 && CK_F_PR_CAS_16_VALUE */
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_CAS_8_VALUE)
+
+#ifndef CK_F_PR_FAA_8
+#define CK_F_PR_FAA_8
+CK_PR_FAA_S(8, uint8_t)
+#endif /* CK_F_PR_FAA_8 */
+
+#ifndef CK_F_PR_FAS_8
+#define CK_F_PR_FAS_8
+CK_PR_FAS_S(8, uint8_t)
+#endif /* CK_F_PR_FAS_8 */
+
+#endif /* CK_F_PR_LOAD_8 && CK_F_PR_CAS_8_VALUE */
+
+#undef CK_PR_FAA_S
+#undef CK_PR_FAS_S
+#undef CK_PR_FAA
+#undef CK_PR_FAS
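+
+/*
+ * ck_pr_faa_* returns the value observed before the addition and
+ * ck_pr_fas_* returns the value it displaced, which makes the former a
+ * natural ticket dispenser and the latter a one-shot exchange.  A short
+ * sketch; ticket, current_cfg and new_cfg are hypothetical:
+ *
+ *	static unsigned int ticket;
+ *	static void *current_cfg;
+ *
+ *	unsigned int my_ticket = ck_pr_faa_uint(&ticket, 1);
+ *	void *old_cfg = ck_pr_fas_ptr(&current_cfg, new_cfg);
+ */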
+
+#endif /* CK_PR_H */
diff --git a/include/ck_queue.h b/include/ck_queue.h
new file mode 100644
index 0000000..c1e9872
--- /dev/null
+++ b/include/ck_queue.h
@@ -0,0 +1,428 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ * $FreeBSD: release/9.0.0/sys/sys/queue.h 221843 2011-05-13 15:49:23Z mdf $
+ */
+
+#ifndef CK_QUEUE_H
+#define CK_QUEUE_H
+
+#include <ck_pr.h>
+
+/*
+ * This file defines three types of data structures: singly-linked lists,
+ * singly-linked tail queues and lists.
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction. Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A singly-linked tail queue is headed by a pair of pointers, one to the
+ * head of the list and the other to the tail of the list. The elements are
+ * singly linked for minimum space and pointer manipulation overhead at the
+ * expense of O(n) removal for arbitrary elements. New elements can be added
+ * to the list after an existing element, at the head of the list, or at the
+ * end of the list. Elements being removed from the head of the tail queue
+ * should use the explicit macro for this purpose for optimum efficiency.
+ * A singly-linked tail queue may only be traversed in the forward direction.
+ * Singly-linked tail queues are ideal for applications with large datasets
+ * and few or no removals or for implementing a FIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * It is safe to use _FOREACH/_FOREACH_SAFE in the presence of concurrent
+ * modifications to the list. Writers to these lists must, on the other hand,
+ * implement writer-side synchronization. The _SWAP operations are not atomic.
+ * This facility is currently unsupported on architectures such as the Alpha,
+ * which require data-dependent load fences (ck_pr_fence_load_depends).
+ *
+ * CK_SLIST CK_LIST CK_STAILQ
+ * _HEAD + + +
+ * _HEAD_INITIALIZER + + +
+ * _ENTRY + + +
+ * _INIT + + +
+ * _EMPTY + + +
+ * _FIRST + + +
+ * _NEXT + + +
+ * _FOREACH + + +
+ * _FOREACH_SAFE + + +
+ * _INSERT_HEAD + + +
+ * _INSERT_BEFORE - + -
+ * _INSERT_AFTER + + +
+ * _INSERT_TAIL - - +
+ * _REMOVE_AFTER + - +
+ * _REMOVE_HEAD + - +
+ * _REMOVE + + +
+ * _SWAP + + +
+ * _MOVE + + +
+ */
+
+/*
+ * Singly-linked List declarations.
+ */
+#define CK_SLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define CK_SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define CK_SLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List functions.
+ */
+#define CK_SLIST_EMPTY(head) \
+ (ck_pr_load_ptr(&(head)->slh_first) == NULL)
+
+#define CK_SLIST_FIRST(head) \
+ (ck_pr_load_ptr(&(head)->slh_first))
+
+#define CK_SLIST_NEXT(elm, field) \
+ ck_pr_load_ptr(&((elm)->field.sle_next))
+
+#define CK_SLIST_FOREACH(var, head, field) \
+ for ((var) = CK_SLIST_FIRST((head)); \
+ (var) && (ck_pr_fence_load(), 1); \
+ (var) = CK_SLIST_NEXT((var), field))
+
+#define CK_SLIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = CK_SLIST_FIRST(head); \
+ (var) && (ck_pr_fence_load(), (tvar) = CK_SLIST_NEXT(var, field), 1);\
+ (var) = (tvar))
+
+#define CK_SLIST_FOREACH_PREVPTR(var, varp, head, field) \
+ for ((varp) = &(head)->slh_first; \
+ ((var) = ck_pr_load_ptr(varp)) != NULL && (ck_pr_fence_load(), 1); \
+ (varp) = &(var)->field.sle_next)
+
+#define CK_SLIST_INIT(head) do { \
+ ck_pr_store_ptr(&(head)->slh_first, NULL); \
+ ck_pr_fence_store(); \
+} while (0)
+
+#define CK_SLIST_INSERT_AFTER(a, b, field) do { \
+ (b)->field.sle_next = (a)->field.sle_next; \
+ ck_pr_fence_store(); \
+ ck_pr_store_ptr(&(a)->field.sle_next, b); \
+} while (0)
+
+#define CK_SLIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.sle_next = (head)->slh_first; \
+ ck_pr_fence_store(); \
+ ck_pr_store_ptr(&(head)->slh_first, elm); \
+} while (0)
+
+#define CK_SLIST_REMOVE_AFTER(elm, field) do { \
+ ck_pr_store_ptr(&(elm)->field.sle_next, \
+ (elm)->field.sle_next->field.sle_next); \
+} while (0)
+
+#define CK_SLIST_REMOVE(head, elm, type, field) do { \
+ if ((head)->slh_first == (elm)) { \
+ CK_SLIST_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->slh_first; \
+ while (curelm->field.sle_next != (elm)) \
+ curelm = curelm->field.sle_next; \
+ CK_SLIST_REMOVE_AFTER(curelm, field); \
+ } \
+} while (0)
+
+#define CK_SLIST_REMOVE_HEAD(head, field) do { \
+ ck_pr_store_ptr(&(head)->slh_first, \
+ (head)->slh_first->field.sle_next); \
+} while (0)
+
+#define CK_SLIST_MOVE(head1, head2, field) do { \
+ ck_pr_store_ptr(&(head1)->slh_first, (head2)->slh_first); \
+} while (0)
+
+/*
+ * This operation is not applied atomically.
+ */
+#define CK_SLIST_SWAP(a, b, type) do { \
+ struct type *swap_first = (a)->slh_first; \
+ (a)->slh_first = (b)->slh_first; \
+ (b)->slh_first = swap_first; \
+} while (0)
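+
+/*
+ * A minimal single-writer sketch of the macros above; struct entry, the
+ * entries head, e and consume() are hypothetical.  Readers traverse
+ * without locks, while insertions and removals are assumed to be
+ * serialized by the caller (e.g. under a writer-side spinlock).
+ *
+ *	struct entry {
+ *		int value;
+ *		CK_SLIST_ENTRY(entry) link;
+ *	};
+ *	static CK_SLIST_HEAD(entry_list, entry) entries =
+ *	    CK_SLIST_HEAD_INITIALIZER(entries);
+ *
+ *	Writer (serialized):
+ *		CK_SLIST_INSERT_HEAD(&entries, e, link);
+ *
+ *	Reader (no lock needed):
+ *		struct entry *cursor;
+ *
+ *		CK_SLIST_FOREACH(cursor, &entries, link)
+ *			consume(cursor->value);
+ */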
+
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define CK_STAILQ_HEAD(name, type) \
+struct name { \
+ struct type *stqh_first;/* first element */ \
+ struct type **stqh_last;/* addr of last next element */ \
+}
+
+#define CK_STAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).stqh_first }
+
+#define CK_STAILQ_ENTRY(type) \
+struct { \
+ struct type *stqe_next; /* next element */ \
+}
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define CK_STAILQ_CONCAT(head1, head2) do { \
+ if ((head2)->stqh_first == NULL) { \
+ ck_pr_store_ptr((head1)->stqh_last, (head2)->stqh_first); \
+ ck_pr_fence_store(); \
+ (head1)->stqh_last = (head2)->stqh_last; \
+ CK_STAILQ_INIT((head2)); \
+ } \
+} while (0)
+
+#define CK_STAILQ_EMPTY(head) (ck_pr_load_ptr(&(head)->stqh_first) == NULL)
+
+#define CK_STAILQ_FIRST(head) (ck_pr_load_ptr(&(head)->stqh_first))
+
+#define CK_STAILQ_FOREACH(var, head, field) \
+ for((var) = CK_STAILQ_FIRST((head)); \
+ (var) && (ck_pr_fence_load(), 1); \
+ (var) = CK_STAILQ_NEXT((var), field))
+
+#define CK_STAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = CK_STAILQ_FIRST((head)); \
+ (var) && (ck_pr_fence_load(), (tvar) = \
+ CK_STAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define CK_STAILQ_INIT(head) do { \
+ ck_pr_store_ptr(&(head)->stqh_first, NULL); \
+ ck_pr_fence_store(); \
+ (head)->stqh_last = &(head)->stqh_first; \
+} while (0)
+
+#define CK_STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
+ (elm)->field.stqe_next = (tqelm)->field.stqe_next; \
+ ck_pr_fence_store(); \
+ ck_pr_store_ptr(&(tqelm)->field.stqe_next, elm); \
+ if ((elm)->field.stqe_next == NULL) \
+ (head)->stqh_last = &(elm)->field.stqe_next; \
+} while (0)
+
+#define CK_STAILQ_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.stqe_next = (head)->stqh_first; \
+ ck_pr_fence_store(); \
+ ck_pr_store_ptr(&(head)->stqh_first, elm); \
+ if ((elm)->field.stqe_next == NULL) \
+ (head)->stqh_last = &(elm)->field.stqe_next; \
+} while (0)
+
+#define CK_STAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.stqe_next = NULL; \
+ ck_pr_fence_store(); \
+ ck_pr_store_ptr((head)->stqh_last, (elm)); \
+ (head)->stqh_last = &(elm)->field.stqe_next; \
+} while (0)
+
+#define CK_STAILQ_NEXT(elm, field) \
+ (ck_pr_load_ptr(&(elm)->field.stqe_next))
+
+#define CK_STAILQ_REMOVE(head, elm, type, field) do { \
+ if ((head)->stqh_first == (elm)) { \
+ CK_STAILQ_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->stqh_first; \
+ while (curelm->field.stqe_next != (elm)) \
+ curelm = curelm->field.stqe_next; \
+ CK_STAILQ_REMOVE_AFTER(head, curelm, field); \
+ } \
+} while (0)
+
+#define CK_STAILQ_REMOVE_AFTER(head, elm, field) do { \
+ ck_pr_store_ptr(&(elm)->field.stqe_next, \
+ (elm)->field.stqe_next->field.stqe_next); \
+ if ((elm)->field.stqe_next == NULL) \
+ (head)->stqh_last = &(elm)->field.stqe_next; \
+} while (0)
+
+#define CK_STAILQ_REMOVE_HEAD(head, field) do { \
+ ck_pr_store_ptr(&(head)->stqh_first, \
+ (head)->stqh_first->field.stqe_next); \
+ if ((head)->stqh_first == NULL) \
+ (head)->stqh_last = &(head)->stqh_first; \
+} while (0)
+
+#define CK_STAILQ_MOVE(head1, head2, field) do { \
+ ck_pr_store_ptr(&(head1)->stqh_first, (head2)->stqh_first); \
+ (head1)->stqh_last = (head2)->stqh_last; \
+ if ((head2)->stqh_last == &(head2)->stqh_first) \
+ (head1)->stqh_last = &(head1)->stqh_first; \
+} while (0)
+
+/*
+ * This operation is not applied atomically.
+ */
+#define CK_STAILQ_SWAP(head1, head2, type) do { \
+ struct type *swap_first = CK_STAILQ_FIRST(head1); \
+ struct type **swap_last = (head1)->stqh_last; \
+ CK_STAILQ_FIRST(head1) = CK_STAILQ_FIRST(head2); \
+ (head1)->stqh_last = (head2)->stqh_last; \
+ CK_STAILQ_FIRST(head2) = swap_first; \
+ (head2)->stqh_last = swap_last; \
+ if (CK_STAILQ_EMPTY(head1)) \
+ (head1)->stqh_last = &(head1)->stqh_first; \
+ if (CK_STAILQ_EMPTY(head2)) \
+ (head2)->stqh_last = &(head2)->stqh_first; \
+} while (0)
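+
+/*
+ * A brief FIFO sketch for the tail queue macros above; struct job, the
+ * jobs head and j are hypothetical.  As with the other containers, all
+ * modifications (including head removal) are assumed to be serialized on
+ * the writer side, while concurrent readers may traverse freely.
+ *
+ *	struct job {
+ *		void (*fn)(void *);
+ *		CK_STAILQ_ENTRY(job) link;
+ *	};
+ *	static CK_STAILQ_HEAD(job_queue, job) jobs =
+ *	    CK_STAILQ_HEAD_INITIALIZER(jobs);
+ *
+ *	Enqueue (serialized writer):
+ *		CK_STAILQ_INSERT_TAIL(&jobs, j, link);
+ *
+ *	Dequeue (also a writer operation):
+ *		j = CK_STAILQ_FIRST(&jobs);
+ *		if (j != NULL)
+ *			CK_STAILQ_REMOVE_HEAD(&jobs, link);
+ */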
+
+/*
+ * List declarations.
+ */
+#define CK_LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define CK_LIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define CK_LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+#define CK_LIST_FIRST(head) ck_pr_load_ptr(&(head)->lh_first)
+#define CK_LIST_EMPTY(head) (CK_LIST_FIRST(head) == NULL)
+#define CK_LIST_NEXT(elm, field) ck_pr_load_ptr(&(elm)->field.le_next)
+
+#define CK_LIST_FOREACH(var, head, field) \
+ for ((var) = CK_LIST_FIRST((head)); \
+ (var) && (ck_pr_fence_load(), 1); \
+ (var) = CK_LIST_NEXT((var), field))
+
+#define CK_LIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = CK_LIST_FIRST((head)); \
+ (var) && (ck_pr_fence_load(), (tvar) = CK_LIST_NEXT((var), field), 1);\
+ (var) = (tvar))
+
+#define CK_LIST_INIT(head) do { \
+ ck_pr_store_ptr(&(head)->lh_first, NULL); \
+ ck_pr_fence_store(); \
+} while (0)
+
+#define CK_LIST_INSERT_AFTER(listelm, elm, field) do { \
+ (elm)->field.le_next = (listelm)->field.le_next; \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+ ck_pr_fence_store(); \
+ if ((listelm)->field.le_next != NULL) \
+ (listelm)->field.le_next->field.le_prev = &(elm)->field.le_next;\
+ ck_pr_store_ptr(&(listelm)->field.le_next, elm); \
+} while (0)
+
+#define CK_LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ ck_pr_fence_store(); \
+ ck_pr_store_ptr((listelm)->field.le_prev, (elm)); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (0)
+
+#define CK_LIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.le_next = (head)->lh_first; \
+ ck_pr_fence_store(); \
+ if ((elm)->field.le_next != NULL) \
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next; \
+ ck_pr_store_ptr(&(head)->lh_first, elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (0)
+
+#define CK_LIST_REMOVE(elm, field) do { \
+ ck_pr_store_ptr((elm)->field.le_prev, (elm)->field.le_next); \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = (elm)->field.le_prev; \
+} while (0)
+
+#define CK_LIST_MOVE(head1, head2, field) do { \
+ ck_pr_store_ptr(&(head1)->lh_first, (head2)->lh_first); \
+ if ((head1)->lh_first != NULL) \
+ (head1)->lh_first->field.le_prev = &(head1)->lh_first; \
+} while (0)
+
+/*
+ * This operation is not applied atomically.
+ */
+#define CK_LIST_SWAP(head1, head2, type, field) do { \
+ struct type *swap_tmp = (head1)->lh_first; \
+ (head1)->lh_first = (head2)->lh_first; \
+ (head2)->lh_first = swap_tmp; \
+ if ((swap_tmp = (head1)->lh_first) != NULL) \
+ swap_tmp->field.le_prev = &(head1)->lh_first; \
+ if ((swap_tmp = (head2)->lh_first) != NULL) \
+ swap_tmp->field.le_prev = &(head2)->lh_first; \
+} while (0)
+
+#endif /* CK_QUEUE_H */
diff --git a/include/ck_rhs.h b/include/ck_rhs.h
new file mode 100644
index 0000000..2a21a73
--- /dev/null
+++ b/include/ck_rhs.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_RHS_H
+#define CK_RHS_H
+
+#include <ck_cc.h>
+#include <ck_malloc.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_stdint.h>
+#include <ck_stdbool.h>
+#include <ck_stddef.h>
+
+/*
+ * Indicates a single-writer, many-reader workload. Mutually
+ * exclusive with CK_RHS_MODE_MPMC.
+ */
+#define CK_RHS_MODE_SPMC 1
+
+/*
+ * Indicates that the values to be stored are direct values rather
+ * than pointers. Allows for full precision. Mutually exclusive
+ * with CK_RHS_MODE_OBJECT.
+ */
+#define CK_RHS_MODE_DIRECT 2
+
+/*
+ * Indicates that the values to be stored are pointers.
+ * Allows for space optimizations in the presence of pointer
+ * packing. Mutually exclusive with CK_RHS_MODE_DIRECT.
+ */
+#define CK_RHS_MODE_OBJECT 8
+
+/*
+ * Indicates that the workload is read-mostly, so get operations should
+ * be optimized over put and delete.
+ */
+#define CK_RHS_MODE_READ_MOSTLY 16
+
+/* Currently unsupported. */
+#define CK_RHS_MODE_MPMC (void)
+
+/*
+ * Hash callback function.
+ */
+typedef unsigned long ck_rhs_hash_cb_t(const void *, unsigned long);
+
+/*
+ * Returns true if the objects are equivalent.
+ */
+typedef bool ck_rhs_compare_cb_t(const void *, const void *);
+
+#if defined(CK_MD_POINTER_PACK_ENABLE) && defined(CK_MD_VMA_BITS)
+#define CK_RHS_PP
+#define CK_RHS_KEY_MASK ((1U << ((sizeof(void *) * 8) - CK_MD_VMA_BITS)) - 1)
+#endif
+
+struct ck_rhs_map;
+struct ck_rhs {
+ struct ck_malloc *m;
+ struct ck_rhs_map *map;
+ unsigned int mode;
+ unsigned int load_factor;
+ unsigned long seed;
+ ck_rhs_hash_cb_t *hf;
+ ck_rhs_compare_cb_t *compare;
+};
+typedef struct ck_rhs ck_rhs_t;
+
+struct ck_rhs_stat {
+ unsigned long n_entries;
+ unsigned int probe_maximum;
+};
+
+struct ck_rhs_iterator {
+ void **cursor;
+ unsigned long offset;
+};
+typedef struct ck_rhs_iterator ck_rhs_iterator_t;
+
+#define CK_RHS_ITERATOR_INITIALIZER { NULL, 0 }
+
+/* Convenience wrapper to table hash function. */
+#define CK_RHS_HASH(T, F, K) F((K), (T)->seed)
+
+typedef void *ck_rhs_apply_fn_t(void *, void *);
+bool ck_rhs_apply(ck_rhs_t *, unsigned long, const void *, ck_rhs_apply_fn_t *, void *);
+void ck_rhs_iterator_init(ck_rhs_iterator_t *);
+bool ck_rhs_next(ck_rhs_t *, ck_rhs_iterator_t *, void **);
+bool ck_rhs_move(ck_rhs_t *, ck_rhs_t *, ck_rhs_hash_cb_t *,
+ ck_rhs_compare_cb_t *, struct ck_malloc *);
+bool ck_rhs_init(ck_rhs_t *, unsigned int, ck_rhs_hash_cb_t *,
+ ck_rhs_compare_cb_t *, struct ck_malloc *, unsigned long, unsigned long);
+void ck_rhs_destroy(ck_rhs_t *);
+void *ck_rhs_get(ck_rhs_t *, unsigned long, const void *);
+bool ck_rhs_put(ck_rhs_t *, unsigned long, const void *);
+bool ck_rhs_put_unique(ck_rhs_t *, unsigned long, const void *);
+bool ck_rhs_set(ck_rhs_t *, unsigned long, const void *, void **);
+bool ck_rhs_fas(ck_rhs_t *, unsigned long, const void *, void **);
+void *ck_rhs_remove(ck_rhs_t *, unsigned long, const void *);
+bool ck_rhs_grow(ck_rhs_t *, unsigned long);
+bool ck_rhs_rebuild(ck_rhs_t *);
+bool ck_rhs_gc(ck_rhs_t *);
+unsigned long ck_rhs_count(ck_rhs_t *);
+bool ck_rhs_reset(ck_rhs_t *);
+bool ck_rhs_reset_size(ck_rhs_t *, unsigned long);
+void ck_rhs_stat(ck_rhs_t *, struct ck_rhs_stat *);
+bool ck_rhs_set_load_factor(ck_rhs_t *, unsigned int);
+
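+/*
+ * A minimal usage sketch.  The callback shapes follow the typedefs above;
+ * my_allocator stands in for a caller-provided struct ck_malloc (defined
+ * in ck_malloc.h and not repeated here), the final two ck_rhs_init
+ * arguments are taken to be the initial capacity and hash seed, and the
+ * remaining names are hypothetical.  Keys here are NUL-terminated strings.
+ *
+ *	static unsigned long
+ *	hash_cb(const void *key, unsigned long seed)
+ *	{
+ *		const char *s = key;
+ *		unsigned long h = seed;
+ *
+ *		while (*s != '\0')
+ *			h = (h << 5) + h + (unsigned char)*s++;
+ *		return h;
+ *	}
+ *
+ *	static bool
+ *	compare_cb(const void *a, const void *b)
+ *	{
+ *		return strcmp(a, b) == 0;
+ *	}
+ *
+ *	extern struct ck_malloc my_allocator;
+ *	static ck_rhs_t hs;
+ *
+ *	if (ck_rhs_init(&hs, CK_RHS_MODE_OBJECT | CK_RHS_MODE_SPMC,
+ *	    hash_cb, compare_cb, &my_allocator, 128, 0x9e3779b9) == false)
+ *		abort();
+ *
+ *	(void)ck_rhs_put(&hs, CK_RHS_HASH(&hs, hash_cb, key), key);
+ *	value = ck_rhs_get(&hs, CK_RHS_HASH(&hs, hash_cb, key), key);
+ */
+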
+#endif /* CK_RHS_H */
diff --git a/include/ck_ring.h b/include/ck_ring.h
new file mode 100644
index 0000000..8a2a791
--- /dev/null
+++ b/include/ck_ring.h
@@ -0,0 +1,656 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_RING_H
+#define CK_RING_H
+
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+#include <ck_string.h>
+
+/*
+ * Concurrent ring buffer.
+ */
+
+struct ck_ring {
+ unsigned int c_head;
+ char pad[CK_MD_CACHELINE - sizeof(unsigned int)];
+ unsigned int p_tail;
+ unsigned int p_head;
+ char _pad[CK_MD_CACHELINE - sizeof(unsigned int) * 2];
+ unsigned int size;
+ unsigned int mask;
+};
+typedef struct ck_ring ck_ring_t;
+
+struct ck_ring_buffer {
+ void *value;
+};
+typedef struct ck_ring_buffer ck_ring_buffer_t;
+
+CK_CC_INLINE static unsigned int
+ck_ring_size(const struct ck_ring *ring)
+{
+ unsigned int c, p;
+
+ c = ck_pr_load_uint(&ring->c_head);
+ p = ck_pr_load_uint(&ring->p_tail);
+ return (p - c) & ring->mask;
+}
+
+CK_CC_INLINE static unsigned int
+ck_ring_capacity(const struct ck_ring *ring)
+{
+ return ring->size;
+}
+
+CK_CC_INLINE static void
+ck_ring_init(struct ck_ring *ring, unsigned int size)
+{
+
+ ring->size = size;
+ ring->mask = size - 1;
+ ring->p_tail = 0;
+ ring->p_head = 0;
+ ring->c_head = 0;
+ return;
+}
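+
+/*
+ * The mask arithmetic assumes size is a power of two; because the enqueue
+ * predicate below treats producer + 1 == consumer (modulo size) as full,
+ * the ring holds at most size - 1 entries at any time.  A short
+ * initialization sketch (the names are hypothetical):
+ *
+ *	#define RING_SIZE 1024
+ *	static struct ck_ring ring;
+ *	static struct ck_ring_buffer ring_buffer[RING_SIZE];
+ *
+ *	ck_ring_init(&ring, RING_SIZE);
+ */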
+
+/*
+ * The _ck_ring_* namespace is internal only and must not be used externally.
+ */
+CK_CC_FORCE_INLINE static bool
+_ck_ring_enqueue_sp(struct ck_ring *ring,
+ void *CK_CC_RESTRICT buffer,
+ const void *CK_CC_RESTRICT entry,
+ unsigned int ts,
+ unsigned int *size)
+{
+ const unsigned int mask = ring->mask;
+ unsigned int consumer, producer, delta;
+
+ consumer = ck_pr_load_uint(&ring->c_head);
+ producer = ring->p_tail;
+ delta = producer + 1;
+ if (size != NULL)
+ *size = (producer - consumer) & mask;
+
+ if (CK_CC_UNLIKELY((delta & mask) == (consumer & mask)))
+ return false;
+
+ buffer = (char *)buffer + ts * (producer & mask);
+ memcpy(buffer, entry, ts);
+
+ /*
+ * Make sure to update slot value before indicating
+ * that the slot is available for consumption.
+ */
+ ck_pr_fence_store();
+ ck_pr_store_uint(&ring->p_tail, delta);
+ return true;
+}
+
+CK_CC_FORCE_INLINE static bool
+_ck_ring_enqueue_sp_size(struct ck_ring *ring,
+ void *CK_CC_RESTRICT buffer,
+ const void *CK_CC_RESTRICT entry,
+ unsigned int ts,
+ unsigned int *size)
+{
+ unsigned int sz;
+ bool r;
+
+ r = _ck_ring_enqueue_sp(ring, buffer, entry, ts, &sz);
+ *size = sz;
+ return r;
+}
+
+CK_CC_FORCE_INLINE static bool
+_ck_ring_dequeue_sc(struct ck_ring *ring,
+ const void *CK_CC_RESTRICT buffer,
+ void *CK_CC_RESTRICT target,
+ unsigned int size)
+{
+ const unsigned int mask = ring->mask;
+ unsigned int consumer, producer;
+
+ consumer = ring->c_head;
+ producer = ck_pr_load_uint(&ring->p_tail);
+
+ if (CK_CC_UNLIKELY(consumer == producer))
+ return false;
+
+ /*
+ * Make sure to serialize with respect to our snapshot
+ * of the producer counter.
+ */
+ ck_pr_fence_load();
+
+ buffer = (const char *)buffer + size * (consumer & mask);
+ memcpy(target, buffer, size);
+
+ /*
+ * Make sure copy is completed with respect to consumer
+ * update.
+ */
+ ck_pr_fence_store();
+ ck_pr_store_uint(&ring->c_head, consumer + 1);
+ return true;
+}
+
+CK_CC_FORCE_INLINE static bool
+_ck_ring_enqueue_mp(struct ck_ring *ring,
+ void *buffer,
+ const void *entry,
+ unsigned int ts,
+ unsigned int *size)
+{
+ const unsigned int mask = ring->mask;
+ unsigned int producer, consumer, delta;
+ bool r = true;
+
+ producer = ck_pr_load_uint(&ring->p_head);
+
+ do {
+ /*
+ * The snapshot of producer must be up to date with
+ * respect to consumer.
+ */
+ ck_pr_fence_load();
+ consumer = ck_pr_load_uint(&ring->c_head);
+
+ delta = producer + 1;
+ if (CK_CC_UNLIKELY((delta & mask) == (consumer & mask))) {
+ r = false;
+ goto leave;
+ }
+ } while (ck_pr_cas_uint_value(&ring->p_head,
+ producer,
+ delta,
+ &producer) == false);
+
+ buffer = (char *)buffer + ts * (producer & mask);
+ memcpy(buffer, entry, ts);
+
+ /*
+ * Wait until all concurrent producers have completed writing
+ * their data into the ring buffer.
+ */
+ while (ck_pr_load_uint(&ring->p_tail) != producer)
+ ck_pr_stall();
+
+ /*
+ * Ensure that copy is completed before updating shared producer
+ * counter.
+ */
+ ck_pr_fence_store();
+ ck_pr_store_uint(&ring->p_tail, delta);
+
+leave:
+ if (size != NULL)
+ *size = (producer - consumer) & mask;
+
+ return r;
+}
+
+CK_CC_FORCE_INLINE static bool
+_ck_ring_enqueue_mp_size(struct ck_ring *ring,
+ void *buffer,
+ const void *entry,
+ unsigned int ts,
+ unsigned int *size)
+{
+ unsigned int sz;
+ bool r;
+
+ r = _ck_ring_enqueue_mp(ring, buffer, entry, ts, &sz);
+ *size = sz;
+ return r;
+}
+
+CK_CC_FORCE_INLINE static bool
+_ck_ring_trydequeue_mc(struct ck_ring *ring,
+ const void *buffer,
+ void *data,
+ unsigned int size)
+{
+ const unsigned int mask = ring->mask;
+ unsigned int consumer, producer;
+
+ consumer = ck_pr_load_uint(&ring->c_head);
+ ck_pr_fence_load();
+ producer = ck_pr_load_uint(&ring->p_tail);
+
+ if (CK_CC_UNLIKELY(consumer == producer))
+ return false;
+
+ ck_pr_fence_load();
+
+ buffer = (const char *)buffer + size * (consumer & mask);
+ memcpy(data, buffer, size);
+
+ ck_pr_fence_store_atomic();
+ return ck_pr_cas_uint(&ring->c_head, consumer, consumer + 1);
+}
+
+CK_CC_FORCE_INLINE static bool
+_ck_ring_dequeue_mc(struct ck_ring *ring,
+ const void *buffer,
+ void *data,
+ unsigned int ts)
+{
+ const unsigned int mask = ring->mask;
+ unsigned int consumer, producer;
+
+ consumer = ck_pr_load_uint(&ring->c_head);
+
+ do {
+ const char *target;
+
+ /*
+ * Producer counter must represent state relative to
+ * our latest consumer snapshot.
+ */
+ ck_pr_fence_load();
+ producer = ck_pr_load_uint(&ring->p_tail);
+
+ if (CK_CC_UNLIKELY(consumer == producer))
+ return false;
+
+ ck_pr_fence_load();
+
+ target = (const char *)buffer + ts * (consumer & mask);
+ memcpy(data, target, ts);
+
+ /* Serialize load with respect to head update. */
+ ck_pr_fence_store_atomic();
+ } while (ck_pr_cas_uint_value(&ring->c_head,
+ consumer,
+ consumer + 1,
+ &consumer) == false);
+
+ return true;
+}
+
+/*
+ * The ck_ring_*_spsc namespace is the public interface for interacting with a
+ * ring buffer containing pointers. Correctness is only provided if there is up
+ * to one concurrent consumer and up to one concurrent producer.
+ */
+CK_CC_INLINE static bool
+ck_ring_enqueue_spsc_size(struct ck_ring *ring,
+ struct ck_ring_buffer *buffer,
+ const void *entry,
+ unsigned int *size)
+{
+
+ return _ck_ring_enqueue_sp_size(ring, buffer, &entry,
+ sizeof(entry), size);
+}
+
+CK_CC_INLINE static bool
+ck_ring_enqueue_spsc(struct ck_ring *ring,
+ struct ck_ring_buffer *buffer,
+ const void *entry)
+{
+
+ return _ck_ring_enqueue_sp(ring, buffer,
+ &entry, sizeof(entry), NULL);
+}
+
+CK_CC_INLINE static bool
+ck_ring_dequeue_spsc(struct ck_ring *ring,
+ const struct ck_ring_buffer *buffer,
+ void *data)
+{
+
+ return _ck_ring_dequeue_sc(ring, buffer,
+ (void **)data, sizeof(void *));
+}
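+
+/*
+ * A brief sketch of the SPSC interface, assuming the ring and ring_buffer
+ * from the initialization example above; entry, data and process() are
+ * hypothetical.
+ *
+ *	Producer thread:
+ *		while (ck_ring_enqueue_spsc(&ring, ring_buffer, entry) == false)
+ *			ck_pr_stall();
+ *
+ *	Consumer thread:
+ *		void *data;
+ *
+ *		while (ck_ring_dequeue_spsc(&ring, ring_buffer, &data) == true)
+ *			process(data);
+ */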
+
+/*
+ * The ck_ring_*_mpmc namespace is the public interface for interacting with a
+ * ring buffer containing pointers. Correctness is provided for any number of
+ * producers and consumers.
+ */
+CK_CC_INLINE static bool
+ck_ring_enqueue_mpmc(struct ck_ring *ring,
+ struct ck_ring_buffer *buffer,
+ const void *entry)
+{
+
+ return _ck_ring_enqueue_mp(ring, buffer, &entry,
+ sizeof(entry), NULL);
+}
+
+CK_CC_INLINE static bool
+ck_ring_enqueue_mpmc_size(struct ck_ring *ring,
+ struct ck_ring_buffer *buffer,
+ const void *entry,
+ unsigned int *size)
+{
+
+ return _ck_ring_enqueue_mp_size(ring, buffer, &entry,
+ sizeof(entry), size);
+}
+
+CK_CC_INLINE static bool
+ck_ring_trydequeue_mpmc(struct ck_ring *ring,
+ const struct ck_ring_buffer *buffer,
+ void *data)
+{
+
+ return _ck_ring_trydequeue_mc(ring,
+ buffer, (void **)data, sizeof(void *));
+}
+
+CK_CC_INLINE static bool
+ck_ring_dequeue_mpmc(struct ck_ring *ring,
+ const struct ck_ring_buffer *buffer,
+ void *data)
+{
+
+ return _ck_ring_dequeue_mc(ring, buffer, (void **)data,
+ sizeof(void *));
+}
+
+/*
+ * The ck_ring_*_spmc namespace is the public interface for interacting with a
+ * ring buffer containing pointers. Correctness is provided for any number of
+ * consumers with up to one concurrent producer.
+ */
+CK_CC_INLINE static bool
+ck_ring_enqueue_spmc_size(struct ck_ring *ring,
+ struct ck_ring_buffer *buffer,
+ const void *entry,
+ unsigned int *size)
+{
+
+ return _ck_ring_enqueue_sp_size(ring, buffer, &entry,
+ sizeof(entry), size);
+}
+
+CK_CC_INLINE static bool
+ck_ring_enqueue_spmc(struct ck_ring *ring,
+ struct ck_ring_buffer *buffer,
+ const void *entry)
+{
+
+ return _ck_ring_enqueue_sp(ring, buffer, &entry,
+ sizeof(entry), NULL);
+}
+
+CK_CC_INLINE static bool
+ck_ring_trydequeue_spmc(struct ck_ring *ring,
+ const struct ck_ring_buffer *buffer,
+ void *data)
+{
+
+ return _ck_ring_trydequeue_mc(ring, buffer, (void **)data, sizeof(void *));
+}
+
+CK_CC_INLINE static bool
+ck_ring_dequeue_spmc(struct ck_ring *ring,
+ const struct ck_ring_buffer *buffer,
+ void *data)
+{
+
+ return _ck_ring_dequeue_mc(ring, buffer, (void **)data, sizeof(void *));
+}
+
+/*
+ * The ck_ring_*_mpsc namespace is the public interface for interacting with a
+ * ring buffer containing pointers. Correctness is provided for any number of
+ * producers with up to one concurrent consumer.
+ */
+CK_CC_INLINE static bool
+ck_ring_enqueue_mpsc(struct ck_ring *ring,
+ struct ck_ring_buffer *buffer,
+ const void *entry)
+{
+
+ return _ck_ring_enqueue_mp(ring, buffer, &entry,
+ sizeof(entry), NULL);
+}
+
+CK_CC_INLINE static bool
+ck_ring_enqueue_mpsc_size(struct ck_ring *ring,
+ struct ck_ring_buffer *buffer,
+ const void *entry,
+ unsigned int *size)
+{
+
+ return _ck_ring_enqueue_mp_size(ring, buffer, &entry,
+ sizeof(entry), size);
+}
+
+CK_CC_INLINE static bool
+ck_ring_dequeue_mpsc(struct ck_ring *ring,
+ const struct ck_ring_buffer *buffer,
+ void *data)
+{
+
+ return _ck_ring_dequeue_sc(ring, buffer, (void **)data,
+ sizeof(void *));
+}
+
+/*
+ * CK_RING_PROTOTYPE is used to define a type-safe interface for inlining
+ * values of a particular type in the ring buffer.
+ */
+#define CK_RING_PROTOTYPE(name, type) \
+CK_CC_INLINE static bool \
+ck_ring_enqueue_spsc_size_##name(struct ck_ring *a, \
+ struct type *b, \
+ struct type *c, \
+ unsigned int *d) \
+{ \
+ \
+ return _ck_ring_enqueue_sp_size(a, b, c, \
+ sizeof(struct type), d); \
+} \
+ \
+CK_CC_INLINE static bool \
+ck_ring_enqueue_spsc_##name(struct ck_ring *a, \
+ struct type *b, \
+ struct type *c) \
+{ \
+ \
+ return _ck_ring_enqueue_sp(a, b, c, \
+ sizeof(struct type), NULL); \
+} \
+ \
+CK_CC_INLINE static bool \
+ck_ring_dequeue_spsc_##name(struct ck_ring *a, \
+ struct type *b, \
+ struct type *c) \
+{ \
+ \
+ return _ck_ring_dequeue_sc(a, b, c, \
+ sizeof(struct type)); \
+} \
+ \
+CK_CC_INLINE static bool \
+ck_ring_enqueue_spmc_size_##name(struct ck_ring *a, \
+ struct type *b, \
+ struct type *c, \
+ unsigned int *d) \
+{ \
+ \
+ return _ck_ring_enqueue_sp_size(a, b, c, \
+ sizeof(struct type), d); \
+} \
+ \
+CK_CC_INLINE static bool \
+ck_ring_enqueue_spmc_##name(struct ck_ring *a, \
+ struct type *b, \
+ struct type *c) \
+{ \
+ \
+ return _ck_ring_enqueue_sp(a, b, c, \
+ sizeof(struct type), NULL); \
+} \
+ \
+CK_CC_INLINE static bool \
+ck_ring_trydequeue_spmc_##name(struct ck_ring *a, \
+ struct type *b, \
+ struct type *c) \
+{ \
+ \
+ return _ck_ring_trydequeue_mc(a, \
+ b, c, sizeof(struct type)); \
+} \
+ \
+CK_CC_INLINE static bool \
+ck_ring_dequeue_spmc_##name(struct ck_ring *a, \
+ struct type *b, \
+ struct type *c) \
+{ \
+ \
+ return _ck_ring_dequeue_mc(a, b, c, \
+ sizeof(struct type)); \
+} \
+ \
+CK_CC_INLINE static bool \
+ck_ring_enqueue_mpsc_##name(struct ck_ring *a, \
+ struct type *b, \
+ struct type *c) \
+{ \
+ \
+ return _ck_ring_enqueue_mp(a, b, c, \
+ sizeof(struct type), NULL); \
+} \
+ \
+CK_CC_INLINE static bool \
+ck_ring_enqueue_mpsc_size_##name(struct ck_ring *a, \
+ struct type *b, \
+ struct type *c, \
+ unsigned int *d) \
+{ \
+ \
+ return _ck_ring_enqueue_mp_size(a, b, c, \
+ sizeof(struct type), d); \
+} \
+ \
+CK_CC_INLINE static bool \
+ck_ring_dequeue_mpsc_##name(struct ck_ring *a, \
+ struct type *b, \
+ struct type *c) \
+{ \
+ \
+ return _ck_ring_dequeue_sc(a, b, c, \
+ sizeof(struct type)); \
+} \
+ \
+CK_CC_INLINE static bool \
+ck_ring_enqueue_mpmc_size_##name(struct ck_ring *a, \
+ struct type *b, \
+ struct type *c, \
+ unsigned int *d) \
+{ \
+ \
+ return _ck_ring_enqueue_mp_size(a, b, c, \
+ sizeof(struct type), d); \
+} \
+ \
+CK_CC_INLINE static bool \
+ck_ring_enqueue_mpmc_##name(struct ck_ring *a, \
+ struct type *b, \
+ struct type *c) \
+{ \
+ \
+ return _ck_ring_enqueue_mp(a, b, c, \
+ sizeof(struct type), NULL); \
+} \
+ \
+CK_CC_INLINE static bool \
+ck_ring_trydequeue_mpmc_##name(struct ck_ring *a, \
+ struct type *b, \
+ struct type *c) \
+{ \
+ \
+ return _ck_ring_trydequeue_mc(a, \
+ b, c, sizeof(struct type)); \
+} \
+ \
+CK_CC_INLINE static bool \
+ck_ring_dequeue_mpmc_##name(struct ck_ring *a, \
+ struct type *b, \
+ struct type *c) \
+{ \
+ \
+ return _ck_ring_dequeue_mc(a, b, c, \
+ sizeof(struct type)); \
+}
+
+/*
+ * A single producer with one concurrent consumer.
+ */
+#define CK_RING_ENQUEUE_SPSC(name, a, b, c) \
+ ck_ring_enqueue_spsc_##name(a, b, c)
+#define CK_RING_ENQUEUE_SPSC_SIZE(name, a, b, c, d) \
+ ck_ring_enqueue_spsc_size_##name(a, b, c, d)
+#define CK_RING_DEQUEUE_SPSC(name, a, b, c) \
+ ck_ring_dequeue_spsc_##name(a, b, c)
+
+/*
+ * A single producer with any number of concurrent consumers.
+ */
+#define CK_RING_ENQUEUE_SPMC(name, a, b, c) \
+ ck_ring_enqueue_spmc_##name(a, b, c)
+#define CK_RING_ENQUEUE_SPMC_SIZE(name, a, b, c, d) \
+ ck_ring_enqueue_spmc_size_##name(a, b, c, d)
+#define CK_RING_TRYDEQUEUE_SPMC(name, a, b, c) \
+ ck_ring_trydequeue_spmc_##name(a, b, c)
+#define CK_RING_DEQUEUE_SPMC(name, a, b, c) \
+ ck_ring_dequeue_spmc_##name(a, b, c)
+
+/*
+ * Any number of concurrent producers with up to one
+ * concurrent consumer.
+ */
+#define CK_RING_ENQUEUE_MPSC(name, a, b, c) \
+ ck_ring_enqueue_mpsc_##name(a, b, c)
+#define CK_RING_ENQUEUE_MPSC_SIZE(name, a, b, c, d) \
+ ck_ring_enqueue_mpsc_size_##name(a, b, c, d)
+#define CK_RING_DEQUEUE_MPSC(name, a, b, c) \
+ ck_ring_dequeue_mpsc_##name(a, b, c)
+
+/*
+ * Any number of concurrent producers and consumers.
+ */
+#define CK_RING_ENQUEUE_MPMC(name, a, b, c) \
+ ck_ring_enqueue_mpmc_##name(a, b, c)
+#define CK_RING_ENQUEUE_MPMC_SIZE(name, a, b, c, d) \
+ ck_ring_enqueue_mpmc_size_##name(a, b, c, d)
+#define CK_RING_TRYDEQUEUE_MPMC(name, a, b, c) \
+ ck_ring_trydequeue_mpmc_##name(a, b, c)
+#define CK_RING_DEQUEUE_MPMC(name, a, b, c) \
+ ck_ring_dequeue_mpmc_##name(a, b, c)
+
+#endif /* CK_RING_H */
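
To illustrate the type-safe interface generated by CK_RING_PROTOTYPE, the following sketch (not part of the patch) inlines a hypothetical struct my_record directly in the ring and drives it through the MPSC convenience macros; ck_ring_init() with a power-of-two slot count is assumed to have been called before first use:

#include <ck_ring.h>
#include <stdbool.h>

struct my_record {
	unsigned int id;
	unsigned int value;
};

/* Generates ck_ring_enqueue_mpsc_my_record() and related functions. */
CK_RING_PROTOTYPE(my_record, my_record)

static ck_ring_t record_ring;
static struct my_record record_slots[256]; /* Power-of-two capacity. */

static bool
record_produce(struct my_record *r)
{

	/* Safe for any number of concurrent producers. */
	return CK_RING_ENQUEUE_MPSC(my_record, &record_ring, record_slots, r);
}

static bool
record_consume(struct my_record *r)
{

	/* At most one consumer may run at a time. */
	return CK_RING_DEQUEUE_MPSC(my_record, &record_ring, record_slots, r);
}

Records are copied in and out by value (sizeof(struct my_record) bytes per slot), so no lifetime management of pointed-to objects is needed.
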
diff --git a/include/ck_rwcohort.h b/include/ck_rwcohort.h
new file mode 100644
index 0000000..7519b5d
--- /dev/null
+++ b/include/ck_rwcohort.h
@@ -0,0 +1,317 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * Copyright 2013 Brendon Scheinman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_RWCOHORT_H
+#define CK_RWCOHORT_H
+
+/*
+ * This is an implementation of NUMA-aware reader-writer locks as described in:
+ * Calciu, I.; Dice, D.; Lev, Y.; Luchangco, V.; Marathe, V.; and Shavit, N. 2014.
+ * NUMA-Aware Reader-Writer Locks
+ */
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <ck_stddef.h>
+#include <ck_cohort.h>
+
+#define CK_RWCOHORT_WP_NAME(N) ck_rwcohort_wp_##N
+#define CK_RWCOHORT_WP_INSTANCE(N) struct CK_RWCOHORT_WP_NAME(N)
+#define CK_RWCOHORT_WP_INIT(N, RW, WL) ck_rwcohort_wp_##N##_init(RW, WL)
+#define CK_RWCOHORT_WP_READ_LOCK(N, RW, C, GC, LC) \
+ ck_rwcohort_wp_##N##_read_lock(RW, C, GC, LC)
+#define CK_RWCOHORT_WP_READ_UNLOCK(N, RW, C, GC, LC) \
+ ck_rwcohort_wp_##N##_read_unlock(RW)
+#define CK_RWCOHORT_WP_WRITE_LOCK(N, RW, C, GC, LC) \
+ ck_rwcohort_wp_##N##_write_lock(RW, C, GC, LC)
+#define CK_RWCOHORT_WP_WRITE_UNLOCK(N, RW, C, GC, LC) \
+ ck_rwcohort_wp_##N##_write_unlock(RW, C, GC, LC)
+#define CK_RWCOHORT_WP_DEFAULT_WAIT_LIMIT 1000
+
+#define CK_RWCOHORT_WP_PROTOTYPE(N) \
+ CK_RWCOHORT_WP_INSTANCE(N) { \
+ unsigned int read_counter; \
+ unsigned int write_barrier; \
+ unsigned int wait_limit; \
+ }; \
+ CK_CC_INLINE static void \
+ ck_rwcohort_wp_##N##_init(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
+ unsigned int wait_limit) \
+ { \
+ \
+ rw_cohort->read_counter = 0; \
+ rw_cohort->write_barrier = 0; \
+ rw_cohort->wait_limit = wait_limit; \
+ ck_pr_barrier(); \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_rwcohort_wp_##N##_write_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
+ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
+ void *local_context) \
+ { \
+ \
+ while (ck_pr_load_uint(&rw_cohort->write_barrier) > 0) \
+ ck_pr_stall(); \
+ \
+ CK_COHORT_LOCK(N, cohort, global_context, local_context); \
+ \
+ while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) \
+ ck_pr_stall(); \
+ \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_rwcohort_wp_##N##_write_unlock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
+ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
+ void *local_context) \
+ { \
+ \
+ (void)rw_cohort; \
+ CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_rwcohort_wp_##N##_read_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
+ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
+ void *local_context) \
+ { \
+ unsigned int wait_count = 0; \
+ bool raised = false; \
+ \
+ for (;;) { \
+ ck_pr_inc_uint(&rw_cohort->read_counter); \
+ ck_pr_fence_atomic_load(); \
+ if (CK_COHORT_LOCKED(N, cohort, global_context, \
+ local_context) == false) \
+ break; \
+ \
+ ck_pr_dec_uint(&rw_cohort->read_counter); \
+ while (CK_COHORT_LOCKED(N, cohort, global_context, \
+ local_context) == true) { \
+ ck_pr_stall(); \
+ if (++wait_count > rw_cohort->wait_limit && \
+ raised == false) { \
+ ck_pr_inc_uint(&rw_cohort->write_barrier); \
+ raised = true; \
+ } \
+ } \
+ } \
+ \
+ if (raised == true) \
+ ck_pr_dec_uint(&rw_cohort->write_barrier); \
+ \
+ ck_pr_fence_load(); \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_rwcohort_wp_##N##_read_unlock(CK_RWCOHORT_WP_INSTANCE(N) *cohort) \
+ { \
+ \
+ ck_pr_fence_load_atomic(); \
+ ck_pr_dec_uint(&cohort->read_counter); \
+ return; \
+ }
+
+#define CK_RWCOHORT_WP_INITIALIZER { \
+ .read_counter = 0, \
+ .write_barrier = 0, \
+ .wait_limit = 0 \
+}
+
+#define CK_RWCOHORT_RP_NAME(N) ck_rwcohort_rp_##N
+#define CK_RWCOHORT_RP_INSTANCE(N) struct CK_RWCOHORT_RP_NAME(N)
+#define CK_RWCOHORT_RP_INIT(N, RW, WL) ck_rwcohort_rp_##N##_init(RW, WL)
+#define CK_RWCOHORT_RP_READ_LOCK(N, RW, C, GC, LC) \
+ ck_rwcohort_rp_##N##_read_lock(RW, C, GC, LC)
+#define CK_RWCOHORT_RP_READ_UNLOCK(N, RW, C, GC, LC) \
+ ck_rwcohort_rp_##N##_read_unlock(RW)
+#define CK_RWCOHORT_RP_WRITE_LOCK(N, RW, C, GC, LC) \
+ ck_rwcohort_rp_##N##_write_lock(RW, C, GC, LC)
+#define CK_RWCOHORT_RP_WRITE_UNLOCK(N, RW, C, GC, LC) \
+ ck_rwcohort_rp_##N##_write_unlock(RW, C, GC, LC)
+#define CK_RWCOHORT_RP_DEFAULT_WAIT_LIMIT 1000
+
+#define CK_RWCOHORT_RP_PROTOTYPE(N) \
+ CK_RWCOHORT_RP_INSTANCE(N) { \
+ unsigned int read_counter; \
+ unsigned int read_barrier; \
+ unsigned int wait_limit; \
+ }; \
+ CK_CC_INLINE static void \
+ ck_rwcohort_rp_##N##_init(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
+ unsigned int wait_limit) \
+ { \
+ \
+ rw_cohort->read_counter = 0; \
+ rw_cohort->read_barrier = 0; \
+ rw_cohort->wait_limit = wait_limit; \
+ ck_pr_barrier(); \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_rwcohort_rp_##N##_write_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
+ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
+ void *local_context) \
+ { \
+ unsigned int wait_count = 0; \
+ bool raised = false; \
+ \
+ for (;;) { \
+ CK_COHORT_LOCK(N, cohort, global_context, local_context); \
+ if (ck_pr_load_uint(&rw_cohort->read_counter) == 0) \
+ break; \
+ \
+ CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
+ while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \
+ ck_pr_stall(); \
+ if (++wait_count > rw_cohort->wait_limit && \
+ raised == false) { \
+ ck_pr_inc_uint(&rw_cohort->read_barrier); \
+ raised = true; \
+ } \
+ } \
+ } \
+ \
+ if (raised == true) \
+ ck_pr_dec_uint(&rw_cohort->read_barrier); \
+ \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_rwcohort_rp_##N##_write_unlock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
+ CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \
+ { \
+ \
+ (void)rw_cohort; \
+ CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_rwcohort_rp_##N##_read_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
+ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
+ void *local_context) \
+ { \
+ \
+ while (ck_pr_load_uint(&rw_cohort->read_barrier) > 0) \
+ ck_pr_stall(); \
+ \
+ ck_pr_inc_uint(&rw_cohort->read_counter); \
+ ck_pr_fence_atomic_load(); \
+ \
+ while (CK_COHORT_LOCKED(N, cohort, global_context, \
+ local_context) == true) \
+ ck_pr_stall(); \
+ \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_rwcohort_rp_##N##_read_unlock(CK_RWCOHORT_RP_INSTANCE(N) *cohort) \
+ { \
+ \
+ ck_pr_fence_load_atomic(); \
+ ck_pr_dec_uint(&cohort->read_counter); \
+ return; \
+ }
+
+#define CK_RWCOHORT_RP_INITIALIZER { \
+ .read_counter = 0, \
+ .read_barrier = 0, \
+ .wait_limit = 0 \
+}
+
+#define CK_RWCOHORT_NEUTRAL_NAME(N) ck_rwcohort_neutral_##N
+#define CK_RWCOHORT_NEUTRAL_INSTANCE(N) struct CK_RWCOHORT_NEUTRAL_NAME(N)
+#define CK_RWCOHORT_NEUTRAL_INIT(N, RW) ck_rwcohort_neutral_##N##_init(RW)
+#define CK_RWCOHORT_NEUTRAL_READ_LOCK(N, RW, C, GC, LC) \
+ ck_rwcohort_neutral_##N##_read_lock(RW, C, GC, LC)
+#define CK_RWCOHORT_NEUTRAL_READ_UNLOCK(N, RW, C, GC, LC) \
+ ck_rwcohort_neutral_##N##_read_unlock(RW)
+#define CK_RWCOHORT_NEUTRAL_WRITE_LOCK(N, RW, C, GC, LC) \
+ ck_rwcohort_neutral_##N##_write_lock(RW, C, GC, LC)
+#define CK_RWCOHORT_NEUTRAL_WRITE_UNLOCK(N, RW, C, GC, LC) \
+ ck_rwcohort_neutral_##N##_write_unlock(RW, C, GC, LC)
+#define CK_RWCOHORT_NEUTRAL_DEFAULT_WAIT_LIMIT 1000
+
+#define CK_RWCOHORT_NEUTRAL_PROTOTYPE(N) \
+ CK_RWCOHORT_NEUTRAL_INSTANCE(N) { \
+ unsigned int read_counter; \
+ }; \
+ CK_CC_INLINE static void \
+ ck_rwcohort_neutral_##N##_init(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort) \
+ { \
+ \
+ rw_cohort->read_counter = 0; \
+ ck_pr_barrier(); \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_rwcohort_neutral_##N##_write_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort,\
+ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
+ void *local_context) \
+ { \
+ \
+ CK_COHORT_LOCK(N, cohort, global_context, local_context); \
+ while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \
+ ck_pr_stall(); \
+ } \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_rwcohort_neutral_##N##_write_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort,\
+ CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \
+ { \
+ \
+ (void)rw_cohort; \
+ CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_rwcohort_neutral_##N##_read_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \
+ CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
+ void *local_context) \
+ { \
+ \
+ CK_COHORT_LOCK(N, cohort, global_context, local_context); \
+ ck_pr_inc_uint(&rw_cohort->read_counter); \
+ CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_rwcohort_neutral_##N##_read_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *cohort) \
+ { \
+ \
+ ck_pr_fence_load_atomic(); \
+ ck_pr_dec_uint(&cohort->read_counter); \
+ return; \
+ }
+
+#define CK_RWCOHORT_NEUTRAL_INITIALIZER { \
+ .read_counter = 0, \
+}
+
+#endif /* CK_RWCOHORT_H */
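
A sketch of how the writer-preference (WP) variant above is intended to be used (not part of the patch): it assumes a cohort type named my_cohort has already been generated with CK_COHORT_PROTOTYPE from ck_cohort.h, and that its lock functions ignore the context pointers, which are therefore passed as NULL here.

CK_RWCOHORT_WP_PROTOTYPE(my_cohort)

static CK_RWCOHORT_WP_INSTANCE(my_cohort) rw;

static void
rw_setup(void)
{

	CK_RWCOHORT_WP_INIT(my_cohort, &rw, CK_RWCOHORT_WP_DEFAULT_WAIT_LIMIT);
}

static void
reader(CK_COHORT_INSTANCE(my_cohort) *cohort)
{

	CK_RWCOHORT_WP_READ_LOCK(my_cohort, &rw, cohort, NULL, NULL);
	/* Read-side critical section. */
	CK_RWCOHORT_WP_READ_UNLOCK(my_cohort, &rw, cohort, NULL, NULL);
}

static void
writer(CK_COHORT_INSTANCE(my_cohort) *cohort)
{

	CK_RWCOHORT_WP_WRITE_LOCK(my_cohort, &rw, cohort, NULL, NULL);
	/* Write-side critical section. */
	CK_RWCOHORT_WP_WRITE_UNLOCK(my_cohort, &rw, cohort, NULL, NULL);
}

Each thread passes the cohort instance associated with its own NUMA node, which is what gives the lock its locality benefits.
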
diff --git a/include/ck_rwlock.h b/include/ck_rwlock.h
new file mode 100644
index 0000000..b82b4b5
--- /dev/null
+++ b/include/ck_rwlock.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_RWLOCK_H
+#define CK_RWLOCK_H
+
+#include <ck_elide.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+#include <ck_stddef.h>
+
+struct ck_rwlock {
+ unsigned int writer;
+ unsigned int n_readers;
+};
+typedef struct ck_rwlock ck_rwlock_t;
+
+#define CK_RWLOCK_INITIALIZER {0, 0}
+
+CK_CC_INLINE static void
+ck_rwlock_init(struct ck_rwlock *rw)
+{
+
+ rw->writer = 0;
+ rw->n_readers = 0;
+ ck_pr_barrier();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_rwlock_write_unlock(ck_rwlock_t *rw)
+{
+
+ ck_pr_fence_unlock();
+ ck_pr_store_uint(&rw->writer, 0);
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_rwlock_locked_writer(ck_rwlock_t *rw)
+{
+ bool r;
+
+ r = ck_pr_load_uint(&rw->writer);
+ ck_pr_fence_acquire();
+ return r;
+}
+
+CK_CC_INLINE static void
+ck_rwlock_write_downgrade(ck_rwlock_t *rw)
+{
+
+ ck_pr_inc_uint(&rw->n_readers);
+ ck_rwlock_write_unlock(rw);
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_rwlock_locked(ck_rwlock_t *rw)
+{
+ bool l;
+
+ l = ck_pr_load_uint(&rw->n_readers) |
+ ck_pr_load_uint(&rw->writer);
+ ck_pr_fence_acquire();
+ return l;
+}
+
+CK_CC_INLINE static bool
+ck_rwlock_write_trylock(ck_rwlock_t *rw)
+{
+
+ if (ck_pr_fas_uint(&rw->writer, 1) != 0)
+ return false;
+
+ ck_pr_fence_atomic_load();
+
+ if (ck_pr_load_uint(&rw->n_readers) != 0) {
+ ck_rwlock_write_unlock(rw);
+ return false;
+ }
+
+ ck_pr_fence_lock();
+ return true;
+}
+
+CK_ELIDE_TRYLOCK_PROTOTYPE(ck_rwlock_write, ck_rwlock_t,
+ ck_rwlock_locked, ck_rwlock_write_trylock)
+
+CK_CC_INLINE static void
+ck_rwlock_write_lock(ck_rwlock_t *rw)
+{
+
+ while (ck_pr_fas_uint(&rw->writer, 1) != 0)
+ ck_pr_stall();
+
+ ck_pr_fence_atomic_load();
+
+ while (ck_pr_load_uint(&rw->n_readers) != 0)
+ ck_pr_stall();
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_ELIDE_PROTOTYPE(ck_rwlock_write, ck_rwlock_t,
+ ck_rwlock_locked, ck_rwlock_write_lock,
+ ck_rwlock_locked_writer, ck_rwlock_write_unlock)
+
+CK_CC_INLINE static bool
+ck_rwlock_read_trylock(ck_rwlock_t *rw)
+{
+
+ if (ck_pr_load_uint(&rw->writer) != 0)
+ return false;
+
+ ck_pr_inc_uint(&rw->n_readers);
+
+ /*
+ * Serialize with respect to concurrent write
+ * lock operation.
+ */
+ ck_pr_fence_atomic_load();
+
+ if (ck_pr_load_uint(&rw->writer) == 0) {
+ ck_pr_fence_lock();
+ return true;
+ }
+
+ ck_pr_dec_uint(&rw->n_readers);
+ return false;
+}
+
+CK_ELIDE_TRYLOCK_PROTOTYPE(ck_rwlock_read, ck_rwlock_t,
+ ck_rwlock_locked_writer, ck_rwlock_read_trylock)
+
+CK_CC_INLINE static void
+ck_rwlock_read_lock(ck_rwlock_t *rw)
+{
+
+ for (;;) {
+ while (ck_pr_load_uint(&rw->writer) != 0)
+ ck_pr_stall();
+
+ ck_pr_inc_uint(&rw->n_readers);
+
+ /*
+ * Serialize with respect to concurrent write
+ * lock operation.
+ */
+ ck_pr_fence_atomic_load();
+
+ if (ck_pr_load_uint(&rw->writer) == 0)
+ break;
+
+ ck_pr_dec_uint(&rw->n_readers);
+ }
+
+ /* Acquire semantics are necessary. */
+ ck_pr_fence_load();
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_rwlock_locked_reader(ck_rwlock_t *rw)
+{
+
+ ck_pr_fence_load();
+ return ck_pr_load_uint(&rw->n_readers);
+}
+
+CK_CC_INLINE static void
+ck_rwlock_read_unlock(ck_rwlock_t *rw)
+{
+
+ ck_pr_fence_load_atomic();
+ ck_pr_dec_uint(&rw->n_readers);
+ return;
+}
+
+CK_ELIDE_PROTOTYPE(ck_rwlock_read, ck_rwlock_t,
+ ck_rwlock_locked_writer, ck_rwlock_read_lock,
+ ck_rwlock_locked_reader, ck_rwlock_read_unlock)
+
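A minimal usage sketch for the non-recursive lock above (not part of the patch; the protected counter is illustrative):

#include <ck_rwlock.h>

static ck_rwlock_t lock = CK_RWLOCK_INITIALIZER;
static unsigned int shared_counter;

static unsigned int
counter_read(void)
{
	unsigned int snapshot;

	ck_rwlock_read_lock(&lock);
	snapshot = shared_counter;
	ck_rwlock_read_unlock(&lock);

	return snapshot;
}

static void
counter_bump(void)
{

	ck_rwlock_write_lock(&lock);
	shared_counter++;
	ck_rwlock_write_unlock(&lock);
}

Readers may run concurrently with each other; a writer excludes both readers and other writers.
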
+/*
+ * Recursive writer reader-writer lock implementation.
+ */
+struct ck_rwlock_recursive {
+ struct ck_rwlock rw;
+ unsigned int wc;
+};
+typedef struct ck_rwlock_recursive ck_rwlock_recursive_t;
+
+#define CK_RWLOCK_RECURSIVE_INITIALIZER {CK_RWLOCK_INITIALIZER, 0}
+
+CK_CC_INLINE static void
+ck_rwlock_recursive_write_lock(ck_rwlock_recursive_t *rw, unsigned int tid)
+{
+ unsigned int o;
+
+ o = ck_pr_load_uint(&rw->rw.writer);
+ if (o == tid)
+ goto leave;
+
+ while (ck_pr_cas_uint(&rw->rw.writer, 0, tid) == false)
+ ck_pr_stall();
+
+ ck_pr_fence_atomic_load();
+
+ while (ck_pr_load_uint(&rw->rw.n_readers) != 0)
+ ck_pr_stall();
+
+ ck_pr_fence_lock();
+leave:
+ rw->wc++;
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_rwlock_recursive_write_trylock(ck_rwlock_recursive_t *rw, unsigned int tid)
+{
+ unsigned int o;
+
+ o = ck_pr_load_uint(&rw->rw.writer);
+ if (o == tid)
+ goto leave;
+
+ if (ck_pr_cas_uint(&rw->rw.writer, 0, tid) == false)
+ return false;
+
+ ck_pr_fence_atomic_load();
+
+ if (ck_pr_load_uint(&rw->rw.n_readers) != 0) {
+ ck_pr_store_uint(&rw->rw.writer, 0);
+ return false;
+ }
+
+ ck_pr_fence_lock();
+leave:
+ rw->wc++;
+ return true;
+}
+
+CK_CC_INLINE static void
+ck_rwlock_recursive_write_unlock(ck_rwlock_recursive_t *rw)
+{
+
+ if (--rw->wc == 0) {
+ ck_pr_fence_unlock();
+ ck_pr_store_uint(&rw->rw.writer, 0);
+ }
+
+ return;
+}
+
+CK_CC_INLINE static void
+ck_rwlock_recursive_read_lock(ck_rwlock_recursive_t *rw)
+{
+
+ ck_rwlock_read_lock(&rw->rw);
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_rwlock_recursive_read_trylock(ck_rwlock_recursive_t *rw)
+{
+
+ return ck_rwlock_read_trylock(&rw->rw);
+}
+
+CK_CC_INLINE static void
+ck_rwlock_recursive_read_unlock(ck_rwlock_recursive_t *rw)
+{
+
+ ck_rwlock_read_unlock(&rw->rw);
+ return;
+}
+
+#endif /* CK_RWLOCK_H */
diff --git a/include/ck_sequence.h b/include/ck_sequence.h
new file mode 100644
index 0000000..6a482b9
--- /dev/null
+++ b/include/ck_sequence.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_SEQUENCE_H
+#define CK_SEQUENCE_H
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+
+struct ck_sequence {
+ unsigned int sequence;
+};
+typedef struct ck_sequence ck_sequence_t;
+
+#define CK_SEQUENCE_INITIALIZER { .sequence = 0 }
+
+CK_CC_INLINE static void
+ck_sequence_init(struct ck_sequence *sq)
+{
+
+ ck_pr_store_uint(&sq->sequence, 0);
+ return;
+}
+
+CK_CC_INLINE static unsigned int
+ck_sequence_read_begin(const struct ck_sequence *sq)
+{
+ unsigned int version;
+
+ for (;;) {
+ version = ck_pr_load_uint(&sq->sequence);
+
+ /*
+ * If a sequence is even then associated data may be in a
+ * consistent state.
+ */
+ if (CK_CC_LIKELY((version & 1) == 0))
+ break;
+
+ /*
+ * If a sequence is odd then a thread is in the middle of an
+ * update. Retry the read to avoid operating on inconsistent
+ * data.
+ */
+ ck_pr_stall();
+ }
+
+ ck_pr_fence_load();
+ return version;
+}
+
+CK_CC_INLINE static bool
+ck_sequence_read_retry(const struct ck_sequence *sq, unsigned int version)
+{
+
+ /*
+ * If the sequence number was updated then a read should be
+ * re-attempted.
+ */
+ ck_pr_fence_load();
+ return ck_pr_load_uint(&sq->sequence) != version;
+}
+
+#define CK_SEQUENCE_READ(seqlock, version) \
+ for (*(version) = 1; \
+ (*(version) != 0) && (*(version) = ck_sequence_read_begin(seqlock), 1); \
+ *(version) = ck_sequence_read_retry(seqlock, *(version)))
+
+/*
+ * This must be called after a successful mutex acquisition.
+ */
+CK_CC_INLINE static void
+ck_sequence_write_begin(struct ck_sequence *sq)
+{
+
+ /*
+ * Increment the sequence to an odd number to indicate
+ * the beginning of a write update.
+ */
+ ck_pr_store_uint(&sq->sequence, sq->sequence + 1);
+ ck_pr_fence_store();
+ return;
+}
+
+/*
+ * This must be called before mutex ownership is relinquished.
+ */
+CK_CC_INLINE static void
+ck_sequence_write_end(struct ck_sequence *sq)
+{
+
+ /*
+ * Increment the sequence to an even number to indicate
+ * completion of a write update.
+ */
+ ck_pr_fence_store();
+ ck_pr_store_uint(&sq->sequence, sq->sequence + 1);
+ return;
+}
+
+#endif /* CK_SEQUENCE_H */
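
A usage sketch of the seqlock protocol defined above (not part of the patch): a hypothetical pair of counters is updated by a single writer, or by writers serialized through an external mutex as the comments require, while readers retry until they observe a consistent snapshot.

#include <ck_sequence.h>

static struct ck_sequence seq = CK_SEQUENCE_INITIALIZER;
static unsigned int a, b;

static void
writer_update(void)
{

	/* The caller is assumed to hold the writer-side mutex. */
	ck_sequence_write_begin(&seq);
	a++;
	b++;
	ck_sequence_write_end(&seq);
}

static unsigned int
reader_sum(void)
{
	unsigned int version, x, y;

	do {
		version = ck_sequence_read_begin(&seq);
		x = a;
		y = b;
	} while (ck_sequence_read_retry(&seq, version));

	return x + y;
}

The CK_SEQUENCE_READ macro expresses the same retry loop as a for-statement wrapper.
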
diff --git a/include/ck_spinlock.h b/include/ck_spinlock.h
new file mode 100644
index 0000000..e9585f2
--- /dev/null
+++ b/include/ck_spinlock.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_SPINLOCK_H
+#define CK_SPINLOCK_H
+
+#include "spinlock/anderson.h"
+#include "spinlock/cas.h"
+#include "spinlock/clh.h"
+#include "spinlock/dec.h"
+#include "spinlock/fas.h"
+#include "spinlock/hclh.h"
+#include "spinlock/mcs.h"
+#include "spinlock/ticket.h"
+
+/*
+ * On the x86, x86_64, PPC64 and SPARC64 targets tested,
+ * ck_spinlock_fas had the lowest fast-path latency, or only
+ * negligible degradation relative to faster but less robust
+ * implementations.
+ */
+#define CK_SPINLOCK_INITIALIZER CK_SPINLOCK_FAS_INITIALIZER
+#define ck_spinlock_t ck_spinlock_fas_t
+#define ck_spinlock_init(x) ck_spinlock_fas_init(x)
+#define ck_spinlock_lock(x) ck_spinlock_fas_lock(x)
+#define ck_spinlock_lock_eb(x) ck_spinlock_fas_lock_eb(x)
+#define ck_spinlock_unlock(x) ck_spinlock_fas_unlock(x)
+#define ck_spinlock_locked(x) ck_spinlock_fas_locked(x)
+#define ck_spinlock_trylock(x) ck_spinlock_fas_trylock(x)
+
+CK_ELIDE_PROTOTYPE(ck_spinlock, ck_spinlock_t,
+ ck_spinlock_locked, ck_spinlock_lock,
+ ck_spinlock_locked, ck_spinlock_unlock)
+
+CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock, ck_spinlock_t,
+ ck_spinlock_locked, ck_spinlock_trylock)
+
+#endif /* CK_SPINLOCK_H */
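
A brief sketch of the default interface (not part of the patch), which resolves to the fetch-and-store lock per the mapping above:

#include <ck_spinlock.h>
#include <stdbool.h>

static ck_spinlock_t lock = CK_SPINLOCK_INITIALIZER;
static unsigned long counter;

static void
counter_increment(void)
{

	ck_spinlock_lock(&lock);
	counter++;
	ck_spinlock_unlock(&lock);
}

static bool
counter_try_increment(void)
{

	if (ck_spinlock_trylock(&lock) == false)
		return false;

	counter++;
	ck_spinlock_unlock(&lock);
	return true;
}
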
diff --git a/include/ck_stack.h b/include/ck_stack.h
new file mode 100644
index 0000000..eb2b685
--- /dev/null
+++ b/include/ck_stack.h
@@ -0,0 +1,357 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_STACK_H
+#define CK_STACK_H
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+#include <ck_stddef.h>
+
+struct ck_stack_entry {
+ struct ck_stack_entry *next;
+};
+typedef struct ck_stack_entry ck_stack_entry_t;
+
+struct ck_stack {
+ struct ck_stack_entry *head;
+ char *generation CK_CC_PACKED;
+} CK_CC_ALIASED;
+typedef struct ck_stack ck_stack_t;
+
+#define CK_STACK_INITIALIZER { NULL, NULL }
+
+#ifndef CK_F_STACK_PUSH_UPMC
+#define CK_F_STACK_PUSH_UPMC
+/*
+ * Stack producer operation safe for multiple unique producers and multiple consumers.
+ */
+CK_CC_INLINE static void
+ck_stack_push_upmc(struct ck_stack *target, struct ck_stack_entry *entry)
+{
+ struct ck_stack_entry *stack;
+
+ stack = ck_pr_load_ptr(&target->head);
+ entry->next = stack;
+ ck_pr_fence_store();
+
+ while (ck_pr_cas_ptr_value(&target->head, stack, entry, &stack) == false) {
+ entry->next = stack;
+ ck_pr_fence_store();
+ }
+
+ return;
+}
+#endif /* CK_F_STACK_PUSH_UPMC */
+
+#ifndef CK_F_STACK_TRYPUSH_UPMC
+#define CK_F_STACK_TRYPUSH_UPMC
+/*
+ * Stack producer operation for multiple unique producers and multiple consumers.
+ * Returns true on success and false on failure.
+ */
+CK_CC_INLINE static bool
+ck_stack_trypush_upmc(struct ck_stack *target, struct ck_stack_entry *entry)
+{
+ struct ck_stack_entry *stack;
+
+ stack = ck_pr_load_ptr(&target->head);
+ entry->next = stack;
+ ck_pr_fence_store();
+
+ return ck_pr_cas_ptr(&target->head, stack, entry);
+}
+#endif /* CK_F_STACK_TRYPUSH_UPMC */
+
+#ifndef CK_F_STACK_POP_UPMC
+#define CK_F_STACK_POP_UPMC
+/*
+ * Stack consumer operation safe for multiple unique producers and multiple consumers.
+ */
+CK_CC_INLINE static struct ck_stack_entry *
+ck_stack_pop_upmc(struct ck_stack *target)
+{
+ struct ck_stack_entry *entry, *next;
+
+ entry = ck_pr_load_ptr(&target->head);
+ if (entry == NULL)
+ return NULL;
+
+ ck_pr_fence_load();
+ next = entry->next;
+ while (ck_pr_cas_ptr_value(&target->head, entry, next, &entry) == false) {
+ if (entry == NULL)
+ break;
+
+ ck_pr_fence_load();
+ next = entry->next;
+ }
+
+ return entry;
+}
+#endif
+
+#ifndef CK_F_STACK_TRYPOP_UPMC
+#define CK_F_STACK_TRYPOP_UPMC
+/*
+ * Stack consumer operation for multiple unique producers and multiple consumers.
+ * Returns true on success and false on failure. The value pointed to by the second
+ * argument is set to a valid ck_stack_entry_t reference if true is returned. If
+ * false is returned, then the value pointed to by the second argument is undefined.
+ */
+CK_CC_INLINE static bool
+ck_stack_trypop_upmc(struct ck_stack *target, struct ck_stack_entry **r)
+{
+ struct ck_stack_entry *entry;
+
+ entry = ck_pr_load_ptr(&target->head);
+ if (entry == NULL)
+ return false;
+
+ ck_pr_fence_load();
+ if (ck_pr_cas_ptr(&target->head, entry, entry->next) == true) {
+ *r = entry;
+ return true;
+ }
+
+ return false;
+}
+#endif /* CK_F_STACK_TRYPOP_UPMC */
+
+#ifndef CK_F_STACK_BATCH_POP_UPMC
+#define CK_F_STACK_BATCH_POP_UPMC
+/*
+ * Pop all items off the stack.
+ */
+CK_CC_INLINE static struct ck_stack_entry *
+ck_stack_batch_pop_upmc(struct ck_stack *target)
+{
+ struct ck_stack_entry *entry;
+
+ entry = ck_pr_fas_ptr(&target->head, NULL);
+ ck_pr_fence_load();
+ return entry;
+}
+#endif /* CK_F_STACK_BATCH_POP_UPMC */
+
+#ifndef CK_F_STACK_PUSH_MPMC
+#define CK_F_STACK_PUSH_MPMC
+/*
+ * Stack producer operation safe for multiple producers and multiple consumers.
+ */
+CK_CC_INLINE static void
+ck_stack_push_mpmc(struct ck_stack *target, struct ck_stack_entry *entry)
+{
+
+ ck_stack_push_upmc(target, entry);
+ return;
+}
+#endif /* CK_F_STACK_PUSH_MPMC */
+
+#ifndef CK_F_STACK_TRYPUSH_MPMC
+#define CK_F_STACK_TRYPUSH_MPMC
+/*
+ * Stack producer operation safe for multiple producers and multiple consumers.
+ */
+CK_CC_INLINE static bool
+ck_stack_trypush_mpmc(struct ck_stack *target, struct ck_stack_entry *entry)
+{
+
+ return ck_stack_trypush_upmc(target, entry);
+}
+#endif /* CK_F_STACK_TRYPUSH_MPMC */
+
+#ifdef CK_F_PR_CAS_PTR_2_VALUE
+#ifndef CK_F_STACK_POP_MPMC
+#define CK_F_STACK_POP_MPMC
+/*
+ * Stack consumer operation safe for multiple producers and multiple consumers.
+ */
+CK_CC_INLINE static struct ck_stack_entry *
+ck_stack_pop_mpmc(struct ck_stack *target)
+{
+ struct ck_stack original, update;
+
+ original.generation = ck_pr_load_ptr(&target->generation);
+ ck_pr_fence_load();
+ original.head = ck_pr_load_ptr(&target->head);
+ if (original.head == NULL)
+ return NULL;
+
+ /* Order with respect to next pointer. */
+ ck_pr_fence_load();
+
+ update.generation = original.generation + 1;
+ update.head = original.head->next;
+
+ while (ck_pr_cas_ptr_2_value(target, &original, &update, &original) == false) {
+ if (original.head == NULL)
+ return NULL;
+
+ update.generation = original.generation + 1;
+
+ /* Order with respect to next pointer. */
+ ck_pr_fence_load();
+ update.head = original.head->next;
+ }
+
+ return original.head;
+}
+#endif /* CK_F_STACK_POP_MPMC */
+
+#ifndef CK_F_STACK_TRYPOP_MPMC
+#define CK_F_STACK_TRYPOP_MPMC
+CK_CC_INLINE static bool
+ck_stack_trypop_mpmc(struct ck_stack *target, struct ck_stack_entry **r)
+{
+ struct ck_stack original, update;
+
+ original.generation = ck_pr_load_ptr(&target->generation);
+ ck_pr_fence_load();
+ original.head = ck_pr_load_ptr(&target->head);
+ if (original.head == NULL)
+ return false;
+
+ update.generation = original.generation + 1;
+ ck_pr_fence_load();
+ update.head = original.head->next;
+
+ if (ck_pr_cas_ptr_2_value(target, &original, &update, &original) == true) {
+ *r = original.head;
+ return true;
+ }
+
+ return false;
+}
+#endif /* CK_F_STACK_TRYPOP_MPMC */
+#endif /* CK_F_PR_CAS_PTR_2_VALUE */
+
+#ifndef CK_F_STACK_BATCH_POP_MPMC
+#define CK_F_STACK_BATCH_POP_MPMC
+/*
+ * This is equivalent to the UP/MC version, as NULL does not need
+ * a generation count.
+ */
+CK_CC_INLINE static struct ck_stack_entry *
+ck_stack_batch_pop_mpmc(struct ck_stack *target)
+{
+
+ return ck_stack_batch_pop_upmc(target);
+}
+#endif /* CK_F_STACK_BATCH_POP_MPMC */
+
+#ifndef CK_F_STACK_PUSH_MPNC
+#define CK_F_STACK_PUSH_MPNC
+/*
+ * Stack producer operation safe with no concurrent consumers.
+ */
+CK_CC_INLINE static void
+ck_stack_push_mpnc(struct ck_stack *target, struct ck_stack_entry *entry)
+{
+ struct ck_stack_entry *stack;
+
+ entry->next = NULL;
+ ck_pr_fence_store_atomic();
+ stack = ck_pr_fas_ptr(&target->head, entry);
+ ck_pr_store_ptr(&entry->next, stack);
+ ck_pr_fence_store();
+
+ return;
+}
+#endif /* CK_F_STACK_PUSH_MPNC */
+
+/*
+ * Stack producer operation for single producer and no concurrent consumers.
+ */
+CK_CC_INLINE static void
+ck_stack_push_spnc(struct ck_stack *target, struct ck_stack_entry *entry)
+{
+
+ entry->next = target->head;
+ target->head = entry;
+ return;
+}
+
+/*
+ * Stack consumer operation for no concurrent producers and single consumer.
+ */
+CK_CC_INLINE static struct ck_stack_entry *
+ck_stack_pop_npsc(struct ck_stack *target)
+{
+ struct ck_stack_entry *n;
+
+ if (target->head == NULL)
+ return NULL;
+
+ n = target->head;
+ target->head = n->next;
+
+ return n;
+}
+
+/*
+ * Pop all items off a stack.
+ */
+CK_CC_INLINE static struct ck_stack_entry *
+ck_stack_batch_pop_npsc(struct ck_stack *target)
+{
+ struct ck_stack_entry *n;
+
+ n = target->head;
+ target->head = NULL;
+
+ return n;
+}
+
+/*
+ * Stack initialization function. Guarantees initialization across processors.
+ */
+CK_CC_INLINE static void
+ck_stack_init(struct ck_stack *stack)
+{
+
+ stack->head = NULL;
+ stack->generation = NULL;
+ return;
+}
+
+/* Defines a container_of function for ck_stack_entry_t. */
+#define CK_STACK_CONTAINER(T, M, N) CK_CC_CONTAINER(ck_stack_entry_t, T, M, N)
+
+#define CK_STACK_ISEMPTY(m) ((m)->head == NULL)
+#define CK_STACK_FIRST(s) ((s)->head)
+#define CK_STACK_NEXT(m) ((m)->next)
+#define CK_STACK_FOREACH(stack, entry) \
+ for ((entry) = CK_STACK_FIRST(stack); \
+ (entry) != NULL; \
+ (entry) = CK_STACK_NEXT(entry))
+#define CK_STACK_FOREACH_SAFE(stack, entry, T) \
+ for ((entry) = CK_STACK_FIRST(stack); \
+ (entry) != NULL && ((T) = (entry)->next, 1); \
+ (entry) = (T))
+
+#endif /* CK_STACK_H */
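
A usage sketch combining the UP/MC operations with CK_STACK_CONTAINER (not part of the patch; struct node and node_container are illustrative names):

#include <ck_stack.h>
#include <stdbool.h>
#include <stdlib.h>

struct node {
	int value;
	ck_stack_entry_t entry;
};

/* Generates node_container(), mapping an embedded entry back to its node. */
CK_STACK_CONTAINER(struct node, entry, node_container)

static ck_stack_t stack = CK_STACK_INITIALIZER;

static void
node_push(int v)
{
	struct node *n = malloc(sizeof *n);

	if (n == NULL)
		return;

	n->value = v;
	ck_stack_push_upmc(&stack, &n->entry);
}

static bool
node_pop(int *v)
{
	ck_stack_entry_t *e = ck_stack_pop_upmc(&stack);

	if (e == NULL)
		return false;

	*v = node_container(e)->value;
	/*
	 * Freeing the node here is only safe under a memory reclamation
	 * scheme such as ck_epoch; otherwise another consumer may still
	 * be dereferencing it inside the pop loop.
	 */
	return true;
}
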
diff --git a/include/ck_stdbool.h b/include/ck_stdbool.h
new file mode 100644
index 0000000..b9a7982
--- /dev/null
+++ b/include/ck_stdbool.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Olivier Houchard.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && defined(_KERNEL)
+#include <sys/types.h>
+#else
+#include <stdbool.h>
+#endif
diff --git a/include/ck_stddef.h b/include/ck_stddef.h
new file mode 100644
index 0000000..6019ea9
--- /dev/null
+++ b/include/ck_stddef.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Olivier Houchard.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && defined(_KERNEL)
+#include <sys/stddef.h>
+#else
+#include <stddef.h>
+#endif
diff --git a/include/ck_stdint.h b/include/ck_stdint.h
new file mode 100644
index 0000000..8f416a9
--- /dev/null
+++ b/include/ck_stdint.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__linux__) && defined(__KERNEL__)
+#include <linux/kernel.h>
+#include <linux/types.h>
+#elif defined(__FreeBSD__) && defined(_KERNEL)
+#include <sys/stdint.h>
+#else
+#include <stdint.h>
+#endif /* __linux__ && __KERNEL__ */
diff --git a/include/ck_stdlib.h b/include/ck_stdlib.h
new file mode 100644
index 0000000..c1ac69b
--- /dev/null
+++ b/include/ck_stdlib.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Olivier Houchard.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && defined(_KERNEL)
+#include <sys/systm.h>
+#else
+#include <stdlib.h>
+#endif
diff --git a/include/ck_string.h b/include/ck_string.h
new file mode 100644
index 0000000..8d2c252
--- /dev/null
+++ b/include/ck_string.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Olivier Houchard.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(__FreeBSD__) && defined(_KERNEL)
+#include <sys/systm.h>
+#else
+#include <string.h>
+#endif
diff --git a/include/ck_swlock.h b/include/ck_swlock.h
new file mode 100644
index 0000000..ad6d3a0
--- /dev/null
+++ b/include/ck_swlock.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright 2014 Jaidev Sridhar.
+ * Copyright 2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_SWLOCK_H
+#define CK_SWLOCK_H
+
+#include <ck_elide.h>
+#include <ck_limits.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+#include <ck_stddef.h>
+
+struct ck_swlock {
+ uint32_t value;
+};
+typedef struct ck_swlock ck_swlock_t;
+
+#define CK_SWLOCK_INITIALIZER {0}
+#define CK_SWLOCK_WRITER_BIT (1UL << 31)
+#define CK_SWLOCK_LATCH_BIT (1UL << 30)
+#define CK_SWLOCK_WRITER_MASK (CK_SWLOCK_LATCH_BIT | CK_SWLOCK_WRITER_BIT)
+#define CK_SWLOCK_READER_MASK (UINT32_MAX ^ CK_SWLOCK_WRITER_MASK)
+
+CK_CC_INLINE static void
+ck_swlock_init(struct ck_swlock *rw)
+{
+
+ rw->value = 0;
+ ck_pr_barrier();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_swlock_write_unlock(ck_swlock_t *rw)
+{
+
+ ck_pr_fence_unlock();
+ ck_pr_and_32(&rw->value, CK_SWLOCK_READER_MASK);
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_swlock_locked_writer(ck_swlock_t *rw)
+{
+ bool r;
+
+ r = ck_pr_load_32(&rw->value) & CK_SWLOCK_WRITER_BIT;
+ ck_pr_fence_acquire();
+ return r;
+}
+
+CK_CC_INLINE static void
+ck_swlock_write_downgrade(ck_swlock_t *rw)
+{
+
+ ck_pr_inc_32(&rw->value);
+ ck_swlock_write_unlock(rw);
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_swlock_locked(ck_swlock_t *rw)
+{
+ bool r;
+
+ r = ck_pr_load_32(&rw->value);
+ ck_pr_fence_acquire();
+ return r;
+}
+
+CK_CC_INLINE static bool
+ck_swlock_write_trylock(ck_swlock_t *rw)
+{
+ bool r;
+
+ r = ck_pr_cas_32(&rw->value, 0, CK_SWLOCK_WRITER_BIT);
+ ck_pr_fence_lock();
+ return r;
+}
+
+CK_ELIDE_TRYLOCK_PROTOTYPE(ck_swlock_write, ck_swlock_t,
+ ck_swlock_locked, ck_swlock_write_trylock)
+
+CK_CC_INLINE static void
+ck_swlock_write_lock(ck_swlock_t *rw)
+{
+
+ ck_pr_or_32(&rw->value, CK_SWLOCK_WRITER_BIT);
+ while (ck_pr_load_32(&rw->value) & CK_SWLOCK_READER_MASK)
+ ck_pr_stall();
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_swlock_write_latch(ck_swlock_t *rw)
+{
+
+ /* Publish intent to acquire lock. */
+ ck_pr_or_32(&rw->value, CK_SWLOCK_WRITER_BIT);
+
+ /* Stall until readers have seen the writer and cleared. */
+ while (ck_pr_cas_32(&rw->value, CK_SWLOCK_WRITER_BIT,
+ CK_SWLOCK_WRITER_MASK) == false) {
+ do {
+ ck_pr_stall();
+ } while (ck_pr_load_32(&rw->value) != CK_SWLOCK_WRITER_BIT);
+ }
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_swlock_write_unlatch(ck_swlock_t *rw)
+{
+
+ ck_pr_fence_unlock();
+ ck_pr_store_32(&rw->value, 0);
+ return;
+}
+
+CK_ELIDE_PROTOTYPE(ck_swlock_write, ck_swlock_t,
+ ck_swlock_locked, ck_swlock_write_lock,
+ ck_swlock_locked_writer, ck_swlock_write_unlock)
+
+CK_ELIDE_TRYLOCK_PROTOTYPE(ck_swlock_read, ck_swlock_t,
+ ck_swlock_locked_writer, ck_swlock_read_trylock)
+
+CK_CC_INLINE static bool
+ck_swlock_read_trylock(ck_swlock_t *rw)
+{
+ uint32_t l = ck_pr_load_32(&rw->value);
+
+ if (l & CK_SWLOCK_WRITER_BIT)
+ return false;
+
+ l = ck_pr_faa_32(&rw->value, 1) & CK_SWLOCK_WRITER_MASK;
+ if (l == CK_SWLOCK_WRITER_BIT)
+ ck_pr_dec_32(&rw->value);
+
+ ck_pr_fence_lock();
+ return l == 0;
+}
+
+CK_CC_INLINE static void
+ck_swlock_read_lock(ck_swlock_t *rw)
+{
+ uint32_t l;
+
+ for (;;) {
+ while (ck_pr_load_32(&rw->value) & CK_SWLOCK_WRITER_BIT)
+ ck_pr_stall();
+
+ l = ck_pr_faa_32(&rw->value, 1) & CK_SWLOCK_WRITER_MASK;
+ if (l == 0)
+ break;
+
+ /*
+ * If the latch bit has not been set, then the writer will
+ * have observed the reader and will wait for the read-side
+ * critical section to complete.
+ */
+ if (l == CK_SWLOCK_WRITER_BIT)
+ ck_pr_dec_32(&rw->value);
+ }
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_swlock_locked_reader(ck_swlock_t *rw)
+{
+
+ ck_pr_fence_load();
+ return ck_pr_load_32(&rw->value) & CK_SWLOCK_READER_MASK;
+}
+
+CK_CC_INLINE static void
+ck_swlock_read_unlock(ck_swlock_t *rw)
+{
+
+ ck_pr_fence_unlock();
+ ck_pr_dec_32(&rw->value);
+ return;
+}
+
+CK_ELIDE_PROTOTYPE(ck_swlock_read, ck_swlock_t,
+ ck_swlock_locked_writer, ck_swlock_read_lock,
+ ck_swlock_locked_reader, ck_swlock_read_unlock)
+
+#endif /* CK_SWLOCK_H */
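
A sketch of the single-writer discipline (not part of the patch): any number of threads may take the read side, while the write side is intended for a single, or externally serialized, writer.

#include <ck_swlock.h>

static ck_swlock_t lock = CK_SWLOCK_INITIALIZER;
static unsigned int shared_value;

static unsigned int
value_read(void)
{
	unsigned int v;

	ck_swlock_read_lock(&lock);
	v = shared_value;
	ck_swlock_read_unlock(&lock);

	return v;
}

static void
value_write(unsigned int v)
{

	/* Assumed to be called only from the single writer thread. */
	ck_swlock_write_lock(&lock);
	shared_value = v;
	ck_swlock_write_unlock(&lock);
}
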
diff --git a/include/ck_tflock.h b/include/ck_tflock.h
new file mode 100644
index 0000000..a1872ae
--- /dev/null
+++ b/include/ck_tflock.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_TFLOCK_TICKET_H
+#define CK_TFLOCK_TICKET_H
+
+/*
+ * This is an implementation of task-fair locks derived from the work
+ * described in:
+ * John M. Mellor-Crummey and Michael L. Scott. 1991.
+ * Scalable reader-writer synchronization for shared-memory
+ * multiprocessors. SIGPLAN Not. 26, 7 (April 1991), 106-113.
+ */
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+
+struct ck_tflock_ticket {
+ uint32_t request;
+ uint32_t completion;
+};
+typedef struct ck_tflock_ticket ck_tflock_ticket_t;
+
+#define CK_TFLOCK_TICKET_INITIALIZER { 0, 0 }
+
+#define CK_TFLOCK_TICKET_RC_INCR 0x10000U /* Read-side increment. */
+#define CK_TFLOCK_TICKET_WC_INCR 0x1U /* Write-side increment. */
+#define CK_TFLOCK_TICKET_W_MASK 0xffffU /* Write-side mask. */
+#define CK_TFLOCK_TICKET_WC_TOPMSK 0x8000U /* Write clear mask for overflow. */
+#define CK_TFLOCK_TICKET_RC_TOPMSK 0x80000000U /* Read clear mask for overflow. */
+
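+/*
+ * Atomically adds "delta" to *target after clearing the bits in "mask"
+ * from the observed value, so that a ticket increment cannot carry out
+ * of its half of the counter.  Returns the value observed immediately
+ * before the update.
+ */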
+CK_CC_INLINE static uint32_t
+ck_tflock_ticket_fca_32(uint32_t *target, uint32_t mask, uint32_t delta)
+{
+ uint32_t snapshot = ck_pr_load_32(target);
+ uint32_t goal;
+
+ for (;;) {
+ goal = (snapshot & ~mask) + delta;
+ if (ck_pr_cas_32_value(target, snapshot, goal, &snapshot) == true)
+ break;
+
+ ck_pr_stall();
+ }
+
+ return snapshot;
+}
+
+CK_CC_INLINE static void
+ck_tflock_ticket_init(struct ck_tflock_ticket *pf)
+{
+
+ pf->request = pf->completion = 0;
+ ck_pr_barrier();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_tflock_ticket_write_lock(struct ck_tflock_ticket *lock)
+{
+ uint32_t previous;
+
+ previous = ck_tflock_ticket_fca_32(&lock->request, CK_TFLOCK_TICKET_WC_TOPMSK,
+ CK_TFLOCK_TICKET_WC_INCR);
+ ck_pr_fence_atomic_load();
+ while (ck_pr_load_32(&lock->completion) != previous)
+ ck_pr_stall();
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_tflock_ticket_write_unlock(struct ck_tflock_ticket *lock)
+{
+
+ ck_pr_fence_unlock();
+ ck_tflock_ticket_fca_32(&lock->completion, CK_TFLOCK_TICKET_WC_TOPMSK,
+ CK_TFLOCK_TICKET_WC_INCR);
+ return;
+}
+
+CK_CC_INLINE static void
+ck_tflock_ticket_read_lock(struct ck_tflock_ticket *lock)
+{
+ uint32_t previous;
+
+ previous = ck_tflock_ticket_fca_32(&lock->request,
+ CK_TFLOCK_TICKET_RC_TOPMSK, CK_TFLOCK_TICKET_RC_INCR) &
+ CK_TFLOCK_TICKET_W_MASK;
+
+ ck_pr_fence_atomic_load();
+
+ while ((ck_pr_load_32(&lock->completion) &
+ CK_TFLOCK_TICKET_W_MASK) != previous) {
+ ck_pr_stall();
+ }
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_tflock_ticket_read_unlock(struct ck_tflock_ticket *lock)
+{
+
+ ck_pr_fence_unlock();
+ ck_tflock_ticket_fca_32(&lock->completion, CK_TFLOCK_TICKET_RC_TOPMSK,
+ CK_TFLOCK_TICKET_RC_INCR);
+ return;
+}
+
+#endif /* CK_TFLOCK_TICKET_H */
diff --git a/include/gcc/aarch64/ck_f_pr.h b/include/gcc/aarch64/ck_f_pr.h
new file mode 100644
index 0000000..93ecee0
--- /dev/null
+++ b/include/gcc/aarch64/ck_f_pr.h
@@ -0,0 +1,167 @@
+/* DO NOT EDIT. This is auto-generated from feature.sh */
+#define CK_F_PR_ADD_16
+#define CK_F_PR_ADD_32
+#define CK_F_PR_ADD_64
+#define CK_F_PR_ADD_8
+#define CK_F_PR_ADD_CHAR
+#define CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_SHORT
+#define CK_F_PR_ADD_UINT
+#define CK_F_PR_AND_16
+#define CK_F_PR_AND_32
+#define CK_F_PR_AND_64
+#define CK_F_PR_AND_8
+#define CK_F_PR_AND_CHAR
+#define CK_F_PR_AND_INT
+#define CK_F_PR_AND_PTR
+#define CK_F_PR_AND_SHORT
+#define CK_F_PR_AND_UINT
+#define CK_F_PR_BARRIER
+#define CK_F_PR_CAS_16
+#define CK_F_PR_CAS_16_VALUE
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_VALUE
+#define CK_F_PR_CAS_64
+#define CK_F_PR_CAS_64_VALUE
+#define CK_F_PR_CAS_64_2
+#define CK_F_PR_CAS_64_2_VALUE
+#define CK_F_PR_CAS_DOUBLE
+#define CK_F_PR_CAS_DOUBLE_VALUE
+#define CK_F_PR_CAS_8
+#define CK_F_PR_CAS_8_VALUE
+#define CK_F_PR_CAS_CHAR
+#define CK_F_PR_CAS_CHAR_VALUE
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_CAS_PTR
+#define CK_F_PR_CAS_PTR_2
+#define CK_F_PR_CAS_PTR_2_VALUE
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_CAS_SHORT
+#define CK_F_PR_CAS_SHORT_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_DEC_16
+#define CK_F_PR_DEC_32
+#define CK_F_PR_DEC_64
+#define CK_F_PR_DEC_8
+#define CK_F_PR_DEC_CHAR
+#define CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_SHORT
+#define CK_F_PR_DEC_UINT
+#define CK_F_PR_FAA_16
+#define CK_F_PR_FAA_32
+#define CK_F_PR_FAA_64
+#define CK_F_PR_FAA_8
+#define CK_F_PR_FAA_CHAR
+#define CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_SHORT
+#define CK_F_PR_FAA_UINT
+#define CK_F_PR_FAS_16
+#define CK_F_PR_FAS_32
+#define CK_F_PR_FAS_64
+#define CK_F_PR_FAS_8
+#define CK_F_PR_FAS_CHAR
+#define CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_SHORT
+#define CK_F_PR_FAS_UINT
+#define CK_F_PR_FENCE_ATOMIC
+#define CK_F_PR_FENCE_ATOMIC_LOAD
+#define CK_F_PR_FENCE_ATOMIC_STORE
+#define CK_F_PR_FENCE_LOAD
+#define CK_F_PR_FENCE_LOAD_ATOMIC
+#define CK_F_PR_FENCE_LOAD_DEPENDS
+#define CK_F_PR_FENCE_LOAD_STORE
+#define CK_F_PR_FENCE_MEMORY
+#define CK_F_PR_FENCE_STORE
+#define CK_F_PR_FENCE_STORE_ATOMIC
+#define CK_F_PR_FENCE_STORE_LOAD
+#define CK_F_PR_FENCE_STRICT_ATOMIC
+#define CK_F_PR_FENCE_STRICT_ATOMIC_LOAD
+#define CK_F_PR_FENCE_STRICT_ATOMIC_STORE
+#define CK_F_PR_FENCE_STRICT_LOAD
+#define CK_F_PR_FENCE_STRICT_LOAD_ATOMIC
+#define CK_F_PR_FENCE_STRICT_LOAD_STORE
+#define CK_F_PR_FENCE_STRICT_MEMORY
+#define CK_F_PR_FENCE_STRICT_STORE
+#define CK_F_PR_FENCE_STRICT_STORE_ATOMIC
+#define CK_F_PR_FENCE_STRICT_STORE_LOAD
+#define CK_F_PR_INC_16
+#define CK_F_PR_INC_32
+#define CK_F_PR_INC_64
+#define CK_F_PR_INC_8
+#define CK_F_PR_INC_CHAR
+#define CK_F_PR_INC_INT
+#define CK_F_PR_INC_PTR
+#define CK_F_PR_INC_SHORT
+#define CK_F_PR_INC_UINT
+#define CK_F_PR_LOAD_16
+#define CK_F_PR_LOAD_32
+#define CK_F_PR_LOAD_64
+#define CK_F_PR_LOAD_DOUBLE
+#define CK_F_PR_LOAD_8
+#define CK_F_PR_LOAD_CHAR
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_PTR
+#define CK_F_PR_LOAD_SHORT
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_NEG_16
+#define CK_F_PR_NEG_32
+#define CK_F_PR_NEG_64
+#define CK_F_PR_NEG_8
+#define CK_F_PR_NEG_CHAR
+#define CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_SHORT
+#define CK_F_PR_NEG_UINT
+#define CK_F_PR_NOT_16
+#define CK_F_PR_NOT_32
+#define CK_F_PR_NOT_64
+#define CK_F_PR_NOT_8
+#define CK_F_PR_NOT_CHAR
+#define CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_SHORT
+#define CK_F_PR_NOT_UINT
+#define CK_F_PR_OR_16
+#define CK_F_PR_OR_32
+#define CK_F_PR_OR_64
+#define CK_F_PR_OR_8
+#define CK_F_PR_OR_CHAR
+#define CK_F_PR_OR_INT
+#define CK_F_PR_OR_PTR
+#define CK_F_PR_OR_SHORT
+#define CK_F_PR_OR_UINT
+#define CK_F_PR_STALL
+#define CK_F_PR_STORE_16
+#define CK_F_PR_STORE_32
+#define CK_F_PR_STORE_64
+#define CK_F_PR_STORE_DOUBLE
+#define CK_F_PR_STORE_8
+#define CK_F_PR_STORE_CHAR
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_STORE_SHORT
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_SUB_16
+#define CK_F_PR_SUB_32
+#define CK_F_PR_SUB_64
+#define CK_F_PR_SUB_8
+#define CK_F_PR_SUB_CHAR
+#define CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_SHORT
+#define CK_F_PR_SUB_UINT
+#define CK_F_PR_XOR_16
+#define CK_F_PR_XOR_32
+#define CK_F_PR_XOR_64
+#define CK_F_PR_XOR_8
+#define CK_F_PR_XOR_CHAR
+#define CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_SHORT
+#define CK_F_PR_XOR_UINT
diff --git a/include/gcc/aarch64/ck_pr.h b/include/gcc/aarch64/ck_pr.h
new file mode 100644
index 0000000..e739c4d
--- /dev/null
+++ b/include/gcc/aarch64/ck_pr.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright 2009-2016 Samy Al Bahra.
+ * Copyright 2013-2016 Olivier Houchard.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_AARCH64_H
+#define CK_PR_AARCH64_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+#include <ck_md.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+/*
+ * Minimum interface requirement met.
+ */
+#define CK_F_PR
+
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+
+ __asm__ __volatile__("" ::: "memory");
+ return;
+}
+
+#define CK_DMB_SY __asm __volatile("dmb ish" : : "r" (0) : "memory")
+#define CK_DMB_LD __asm __volatile("dmb ishld" : : "r" (0) : "memory")
+#define CK_DMB_ST __asm __volatile("dmb ishst" : : "r" (0) : "memory")
+
+#define CK_PR_FENCE(T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ I; \
+ }
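+
+/*
+ * For example, CK_PR_FENCE(memory, CK_DMB_SY) below expands to
+ * ck_pr_fence_strict_memory(), which issues a full "dmb ish".
+ */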
+
+CK_PR_FENCE(atomic, CK_DMB_ST)
+CK_PR_FENCE(atomic_store, CK_DMB_ST)
+CK_PR_FENCE(atomic_load, CK_DMB_SY)
+CK_PR_FENCE(store_atomic, CK_DMB_ST)
+CK_PR_FENCE(load_atomic, CK_DMB_SY)
+CK_PR_FENCE(store, CK_DMB_ST)
+CK_PR_FENCE(store_load, CK_DMB_SY)
+CK_PR_FENCE(load, CK_DMB_LD)
+CK_PR_FENCE(load_store, CK_DMB_SY)
+CK_PR_FENCE(memory, CK_DMB_SY)
+CK_PR_FENCE(acquire, CK_DMB_SY)
+CK_PR_FENCE(release, CK_DMB_SY)
+CK_PR_FENCE(acqrel, CK_DMB_SY)
+CK_PR_FENCE(lock, CK_DMB_SY)
+CK_PR_FENCE(unlock, CK_DMB_SY)
+
+#undef CK_PR_FENCE
+
+#undef CK_DMB_SY
+#undef CK_DMB_LD
+#undef CK_DMB_ST
+
+#define CK_PR_LOAD(S, M, T, I) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ long r = 0; \
+ __asm__ __volatile__(I " %w0, [%1];" \
+ : "=r" (r) \
+ : "r" (target) \
+ : "memory"); \
+ return ((T)r); \
+ }
+#define CK_PR_LOAD_64(S, M, T, I) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ long r = 0; \
+ __asm__ __volatile__(I " %0, [%1];" \
+ : "=r" (r) \
+ : "r" (target) \
+ : "memory"); \
+ return ((T)r); \
+ }
+
+
+CK_PR_LOAD_64(ptr, void, void *, "ldr")
+
+#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, I)
+#define CK_PR_LOAD_S_64(S, T, I) CK_PR_LOAD_64(S, T, T, I)
+
+CK_PR_LOAD_S_64(64, uint64_t, "ldr")
+CK_PR_LOAD_S(32, uint32_t, "ldr")
+CK_PR_LOAD_S(16, uint16_t, "ldrh")
+CK_PR_LOAD_S(8, uint8_t, "ldrb")
+CK_PR_LOAD_S(uint, unsigned int, "ldr")
+CK_PR_LOAD_S(int, int, "ldr")
+CK_PR_LOAD_S(short, short, "ldrh")
+CK_PR_LOAD_S(char, char, "ldrb")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_LOAD_S_64(double, double, "ldr")
+#endif
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD_S_64
+#undef CK_PR_LOAD
+#undef CK_PR_LOAD_64
+
+#define CK_PR_STORE(S, M, T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %w1, [%0]" \
+ : \
+ : "r" (target), \
+ "r" (v) \
+ : "memory"); \
+ return; \
+ }
+#define CK_PR_STORE_64(S, M, T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %1, [%0]" \
+ : \
+ : "r" (target), \
+ "r" (v) \
+ : "memory"); \
+ return; \
+ }
+
+CK_PR_STORE_64(ptr, void, const void *, "str")
+
+#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, I)
+#define CK_PR_STORE_S_64(S, T, I) CK_PR_STORE_64(S, T, T, I)
+
+CK_PR_STORE_S_64(64, uint64_t, "str")
+CK_PR_STORE_S(32, uint32_t, "str")
+CK_PR_STORE_S(16, uint16_t, "strh")
+CK_PR_STORE_S(8, uint8_t, "strb")
+CK_PR_STORE_S(uint, unsigned int, "str")
+CK_PR_STORE_S(int, int, "str")
+CK_PR_STORE_S(short, short, "strh")
+CK_PR_STORE_S(char, char, "strb")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_STORE_S_64(double, double, "str")
+#endif
+
+#undef CK_PR_STORE_S
+#undef CK_PR_STORE_S_64
+#undef CK_PR_STORE
+#undef CK_PR_STORE_64
+
+#ifdef CK_MD_LSE_ENABLE
+#include "ck_pr_lse.h"
+#else
+#include "ck_pr_llsc.h"
+#endif
+
+/*
+ * ck_pr_neg_*() functions can only be implemented via LL/SC, as there are no
+ * LSE alternatives.
+ */
+#define CK_PR_NEG(N, M, T, W, R) \
+ CK_CC_INLINE static void \
+ ck_pr_neg_##N(M *target) \
+ { \
+ T previous = 0; \
+ T tmp = 0; \
+ __asm__ __volatile__("1:" \
+ "ldxr" W " %" R "0, [%2];" \
+ "neg %" R "0, %" R "0;" \
+ "stxr" W " %w1, %" R "0, [%2];" \
+ "cbnz %w1, 1b;" \
+ : "=&r" (previous), \
+ "=&r" (tmp) \
+ : "r" (target) \
+ : "memory", "cc"); \
+ return; \
+ }
+
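+/*
+ * For example, CK_PR_NEG(64, uint64_t, uint64_t, "", "") below emits
+ * ck_pr_neg_64() as an ldxr/neg/stxr retry loop, even when LSE atomics
+ * are enabled.
+ */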
+CK_PR_NEG(ptr, void, void *, "", "")
+CK_PR_NEG(64, uint64_t, uint64_t, "", "")
+
+#define CK_PR_NEG_S(S, T, W) \
+ CK_PR_NEG(S, T, T, W, "w") \
+
+CK_PR_NEG_S(32, uint32_t, "")
+CK_PR_NEG_S(uint, unsigned int, "")
+CK_PR_NEG_S(int, int, "")
+CK_PR_NEG_S(16, uint16_t, "h")
+CK_PR_NEG_S(8, uint8_t, "b")
+CK_PR_NEG_S(short, short, "h")
+CK_PR_NEG_S(char, char, "b")
+
+#undef CK_PR_NEG_S
+#undef CK_PR_NEG
+
+#endif /* CK_PR_AARCH64_H */
+
diff --git a/include/gcc/aarch64/ck_pr_llsc.h b/include/gcc/aarch64/ck_pr_llsc.h
new file mode 100644
index 0000000..aa4e309
--- /dev/null
+++ b/include/gcc/aarch64/ck_pr_llsc.h
@@ -0,0 +1,352 @@
+/*
+ * Copyright 2009-2016 Samy Al Bahra.
+ * Copyright 2013-2016 Olivier Houchard.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_AARCH64_LLSC_H
+#define CK_PR_AARCH64_LLSC_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+CK_CC_INLINE static bool
+ck_pr_cas_64_2_value(uint64_t target[2], uint64_t compare[2], uint64_t set[2], uint64_t value[2])
+{
+ uint64_t tmp1, tmp2;
+
+ __asm__ __volatile__("1:"
+ "ldxp %0, %1, [%4];"
+ "mov %2, %0;"
+ "mov %3, %1;"
+ "eor %0, %0, %5;"
+ "eor %1, %1, %6;"
+ "orr %1, %0, %1;"
+ "mov %w0, #0;"
+ "cbnz %1, 2f;"
+ "stxp %w0, %7, %8, [%4];"
+ "cbnz %w0, 1b;"
+ "mov %w0, #1;"
+ "2:"
+ : "=&r" (tmp1), "=&r" (tmp2), "=&r" (value[0]), "=&r" (value[1])
+ : "r" (target), "r" (compare[0]), "r" (compare[1]), "r" (set[0]), "r" (set[1])
+ : "cc", "memory");
+
+ return (tmp1);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2_value(void *target, void *compare, void *set, void *value)
+{
+ return (ck_pr_cas_64_2_value(CK_CPP_CAST(uint64_t *, target),
+ CK_CPP_CAST(uint64_t *, compare),
+ CK_CPP_CAST(uint64_t *, set),
+ CK_CPP_CAST(uint64_t *, value)));
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_64_2(uint64_t target[2], uint64_t compare[2], uint64_t set[2])
+{
+ uint64_t tmp1, tmp2;
+
+ __asm__ __volatile__("1:"
+ "ldxp %0, %1, [%2];"
+ "eor %0, %0, %3;"
+ "eor %1, %1, %4;"
+ "orr %1, %0, %1;"
+ "mov %w0, #0;"
+ "cbnz %1, 2f;"
+ "stxp %w0, %5, %6, [%2];"
+ "cbnz %w0, 1b;"
+ "mov %w0, #1;"
+ "2:"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (target), "r" (compare[0]), "r" (compare[1]), "r" (set[0]), "r" (set[1])
+ : "cc", "memory");
+
+ return (tmp1);
+}
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2(void *target, void *compare, void *set)
+{
+ return (ck_pr_cas_64_2(CK_CPP_CAST(uint64_t *, target),
+ CK_CPP_CAST(uint64_t *, compare),
+ CK_CPP_CAST(uint64_t *, set)));
+}
+
+
+#define CK_PR_CAS(N, M, T, W, R) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N##_value(M *target, T compare, T set, M *value) \
+ { \
+ T previous; \
+ T tmp; \
+ __asm__ __volatile__("1:" \
+ "ldxr" W " %" R "0, [%2];" \
+ "cmp %" R "0, %" R "4;" \
+ "b.ne 2f;" \
+ "stxr" W " %w1, %" R "3, [%2];" \
+ "cbnz %w1, 1b;" \
+ "2:" \
+ : "=&r" (previous), \
+ "=&r" (tmp) \
+ : "r" (target), \
+ "r" (set), \
+ "r" (compare) \
+ : "memory", "cc"); \
+ *(T *)value = previous; \
+ return (previous == compare); \
+ } \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N(M *target, T compare, T set) \
+ { \
+ T previous; \
+ T tmp; \
+ __asm__ __volatile__( \
+ "1:" \
+ "ldxr" W " %" R "0, [%2];" \
+ "cmp %" R "0, %" R "4;" \
+ "b.ne 2f;" \
+ "stxr" W " %w1, %" R "3, [%2];" \
+ "cbnz %w1, 1b;" \
+ "2:" \
+ : "=&r" (previous), \
+ "=&r" (tmp) \
+ : "r" (target), \
+ "r" (set), \
+ "r" (compare) \
+ : "memory", "cc"); \
+ return (previous == compare); \
+ }
+
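+/*
+ * For example, CK_PR_CAS_S(32, uint32_t, "", "w") below generates
+ * ck_pr_cas_32() and ck_pr_cas_32_value(), each built on an ldxr/stxr
+ * retry loop.
+ */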
+CK_PR_CAS(ptr, void, void *, "", "")
+
+#define CK_PR_CAS_S(N, M, W, R) CK_PR_CAS(N, M, M, W, R)
+CK_PR_CAS_S(64, uint64_t, "", "")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_CAS_S(double, double, "", "")
+#endif
+CK_PR_CAS_S(32, uint32_t, "", "w")
+CK_PR_CAS_S(uint, unsigned int, "", "w")
+CK_PR_CAS_S(int, int, "", "w")
+CK_PR_CAS_S(16, uint16_t, "h", "w")
+CK_PR_CAS_S(8, uint8_t, "b", "w")
+CK_PR_CAS_S(short, short, "h", "w")
+CK_PR_CAS_S(char, char, "b", "w")
+
+
+#undef CK_PR_CAS_S
+#undef CK_PR_CAS
+
+#define CK_PR_FAS(N, M, T, W, R) \
+ CK_CC_INLINE static T \
+ ck_pr_fas_##N(M *target, T v) \
+ { \
+ T previous; \
+ T tmp; \
+ __asm__ __volatile__("1:" \
+ "ldxr" W " %" R "0, [%2];" \
+ "stxr" W " %w1, %" R "3, [%2];"\
+ "cbnz %w1, 1b;" \
+ : "=&r" (previous), \
+ "=&r" (tmp) \
+ : "r" (target), \
+ "r" (v) \
+ : "memory", "cc"); \
+ return (previous); \
+ }
+
+CK_PR_FAS(64, uint64_t, uint64_t, "", "")
+CK_PR_FAS(32, uint32_t, uint32_t, "", "w")
+CK_PR_FAS(ptr, void, void *, "", "")
+CK_PR_FAS(int, int, int, "", "w")
+CK_PR_FAS(uint, unsigned int, unsigned int, "", "w")
+CK_PR_FAS(16, uint16_t, uint16_t, "h", "w")
+CK_PR_FAS(8, uint8_t, uint8_t, "b", "w")
+CK_PR_FAS(short, short, short, "h", "w")
+CK_PR_FAS(char, char, char, "b", "w")
+
+
+#undef CK_PR_FAS
+
+#define CK_PR_UNARY(O, N, M, T, I, W, R) \
+ CK_CC_INLINE static void \
+ ck_pr_##O##_##N(M *target) \
+ { \
+ T previous = 0; \
+ T tmp = 0; \
+ __asm__ __volatile__("1:" \
+ "ldxr" W " %" R "0, [%2];" \
+ I ";" \
+ "stxr" W " %w1, %" R "0, [%2];" \
+ "cbnz %w1, 1b;" \
+ : "=&r" (previous), \
+ "=&r" (tmp) \
+ : "r" (target) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+CK_PR_UNARY(inc, ptr, void, void *, "add %0, %0, #1", "", "")
+CK_PR_UNARY(dec, ptr, void, void *, "sub %0, %0, #1", "", "")
+CK_PR_UNARY(not, ptr, void, void *, "mvn %0, %0", "", "")
+CK_PR_UNARY(inc, 64, uint64_t, uint64_t, "add %0, %0, #1", "", "")
+CK_PR_UNARY(dec, 64, uint64_t, uint64_t, "sub %0, %0, #1", "", "")
+CK_PR_UNARY(not, 64, uint64_t, uint64_t, "mvn %0, %0", "", "")
+
+#define CK_PR_UNARY_S(S, T, W) \
+ CK_PR_UNARY(inc, S, T, T, "add %w0, %w0, #1", W, "w") \
+ CK_PR_UNARY(dec, S, T, T, "sub %w0, %w0, #1", W, "w") \
+ CK_PR_UNARY(not, S, T, T, "mvn %w0, %w0", W, "w") \
+
+CK_PR_UNARY_S(32, uint32_t, "")
+CK_PR_UNARY_S(uint, unsigned int, "")
+CK_PR_UNARY_S(int, int, "")
+CK_PR_UNARY_S(16, uint16_t, "h")
+CK_PR_UNARY_S(8, uint8_t, "b")
+CK_PR_UNARY_S(short, short, "h")
+CK_PR_UNARY_S(char, char, "b")
+
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY
+
+#define CK_PR_BINARY(O, N, M, T, I, W, R) \
+ CK_CC_INLINE static void \
+ ck_pr_##O##_##N(M *target, T delta) \
+ { \
+ T previous; \
+ T tmp; \
+ __asm__ __volatile__("1:" \
+ "ldxr" W " %" R "0, [%2];"\
+ I " %" R "0, %" R "0, %" R "3;" \
+ "stxr" W " %w1, %" R "0, [%2];" \
+ "cbnz %w1, 1b;" \
+ : "=&r" (previous), \
+ "=&r" (tmp) \
+ : "r" (target), \
+ "r" (delta) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+CK_PR_BINARY(and, ptr, void, uintptr_t, "and", "", "")
+CK_PR_BINARY(add, ptr, void, uintptr_t, "add", "", "")
+CK_PR_BINARY(or, ptr, void, uintptr_t, "orr", "", "")
+CK_PR_BINARY(sub, ptr, void, uintptr_t, "sub", "", "")
+CK_PR_BINARY(xor, ptr, void, uintptr_t, "eor", "", "")
+CK_PR_BINARY(and, 64, uint64_t, uint64_t, "and", "", "")
+CK_PR_BINARY(add, 64, uint64_t, uint64_t, "add", "", "")
+CK_PR_BINARY(or, 64, uint64_t, uint64_t, "orr", "", "")
+CK_PR_BINARY(sub, 64, uint64_t, uint64_t, "sub", "", "")
+CK_PR_BINARY(xor, 64, uint64_t, uint64_t, "eor", "", "")
+
+#define CK_PR_BINARY_S(S, T, W) \
+ CK_PR_BINARY(and, S, T, T, "and", W, "w") \
+ CK_PR_BINARY(add, S, T, T, "add", W, "w") \
+ CK_PR_BINARY(or, S, T, T, "orr", W, "w") \
+ CK_PR_BINARY(sub, S, T, T, "sub", W, "w") \
+ CK_PR_BINARY(xor, S, T, T, "eor", W, "w")
+
+CK_PR_BINARY_S(32, uint32_t, "")
+CK_PR_BINARY_S(uint, unsigned int, "")
+CK_PR_BINARY_S(int, int, "")
+CK_PR_BINARY_S(16, uint16_t, "h")
+CK_PR_BINARY_S(8, uint8_t, "b")
+CK_PR_BINARY_S(short, short, "h")
+CK_PR_BINARY_S(char, char, "b")
+
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+CK_CC_INLINE static void *
+ck_pr_faa_ptr(void *target, uintptr_t delta)
+{
+ uintptr_t previous, r, tmp;
+
+ __asm__ __volatile__("1:"
+ "ldxr %0, [%3];"
+ "add %1, %4, %0;"
+ "stxr %w2, %1, [%3];"
+ "cbnz %w2, 1b;"
+ : "=&r" (previous),
+ "=&r" (r),
+ "=&r" (tmp)
+ : "r" (target),
+ "r" (delta)
+ : "memory", "cc");
+
+ return (void *)(previous);
+}
+
+CK_CC_INLINE static uint64_t
+ck_pr_faa_64(uint64_t *target, uint64_t delta)
+{
+ uint64_t previous, r, tmp;
+
+ __asm__ __volatile__("1:"
+ "ldxr %0, [%3];"
+ "add %1, %4, %0;"
+ "stxr %w2, %1, [%3];"
+ "cbnz %w2, 1b;"
+ : "=&r" (previous),
+ "=&r" (r),
+ "=&r" (tmp)
+ : "r" (target),
+ "r" (delta)
+ : "memory", "cc");
+
+ return (previous);
+}
+
+#define CK_PR_FAA(S, T, W) \
+ CK_CC_INLINE static T \
+ ck_pr_faa_##S(T *target, T delta) \
+ { \
+ T previous, r, tmp; \
+ __asm__ __volatile__("1:" \
+ "ldxr" W " %w0, [%3];" \
+ "add %w1, %w4, %w0;" \
+ "stxr" W " %w2, %w1, [%3];" \
+ "cbnz %w2, 1b;" \
+ : "=&r" (previous), \
+ "=&r" (r), \
+ "=&r" (tmp) \
+ : "r" (target), \
+ "r" (delta) \
+ : "memory", "cc"); \
+ return (previous); \
+ }
+
+CK_PR_FAA(32, uint32_t, "")
+CK_PR_FAA(uint, unsigned int, "")
+CK_PR_FAA(int, int, "")
+CK_PR_FAA(16, uint16_t, "h")
+CK_PR_FAA(8, uint8_t, "b")
+CK_PR_FAA(short, short, "h")
+CK_PR_FAA(char, char, "b")
+
+#undef CK_PR_FAA
+
+#endif /* CK_PR_AARCH64_LLSC_H */
diff --git a/include/gcc/aarch64/ck_pr_lse.h b/include/gcc/aarch64/ck_pr_lse.h
new file mode 100644
index 0000000..e2c9554
--- /dev/null
+++ b/include/gcc/aarch64/ck_pr_lse.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2009-2016 Samy Al Bahra.
+ * Copyright 2013-2016 Olivier Houchard.
+ * Copyright 2016 Alexey Kopytov.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_AARCH64_LSE_H
+#define CK_PR_AARCH64_LSE_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+CK_CC_INLINE static bool
+ck_pr_cas_64_2_value(uint64_t target[2], uint64_t compare[2], uint64_t set[2], uint64_t value[2])
+{
+ uint64_t tmp1;
+ uint64_t tmp2;
+ register uint64_t x0 __asm__ ("x0") = compare[0];
+ register uint64_t x1 __asm__ ("x1") = compare[1];
+ register uint64_t x2 __asm__ ("x2") = set[0];
+ register uint64_t x3 __asm__ ("x3") = set[1];
+
+ __asm__ __volatile__("casp %0, %1, %4, %5, [%6];"
+ "eor %2, %0, %7;"
+ "eor %3, %1, %8;"
+ "orr %2, %2, %3;"
+ : "+&r" (x0), "+&r" (x1), "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (x2), "r" (x3), "r" (target), "r" (compare[0]), "r" (compare[1])
+ : "memory");
+
+ value[0] = x0;
+ value[1] = x1;
+
+	return (tmp1 == 0);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2_value(void *target, void *compare, void *set, void *value)
+{
+ return (ck_pr_cas_64_2_value(CK_CPP_CAST(uint64_t *, target),
+ CK_CPP_CAST(uint64_t *, compare),
+ CK_CPP_CAST(uint64_t *, set),
+ CK_CPP_CAST(uint64_t *, value)));
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_64_2(uint64_t target[2], uint64_t compare[2], uint64_t set[2])
+{
+ register uint64_t x0 __asm__ ("x0") = compare[0];
+ register uint64_t x1 __asm__ ("x1") = compare[1];
+ register uint64_t x2 __asm__ ("x2") = set[0];
+ register uint64_t x3 __asm__ ("x3") = set[1];
+
+ __asm__ __volatile__("casp %0, %1, %2, %3, [%4];"
+ "eor %0, %0, %5;"
+ "eor %1, %1, %6;"
+ "orr %0, %0, %1;"
+ : "+&r" (x0), "+&r" (x1)
+ : "r" (x2), "r" (x3), "r" (target), "r" (compare[0]), "r" (compare[1])
+ : "memory");
+
+	return (x0 == 0);
+}
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2(void *target, void *compare, void *set)
+{
+ return (ck_pr_cas_64_2(CK_CPP_CAST(uint64_t *, target),
+ CK_CPP_CAST(uint64_t *, compare),
+ CK_CPP_CAST(uint64_t *, set)));
+}
+
+
+#define CK_PR_CAS(N, M, T, W, R) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N##_value(M *target, T compare, T set, M *value) \
+ { \
+ *(T *)value = compare; \
+ __asm__ __volatile__( \
+ "cas" W " %" R "0, %" R "2, [%1];" \
+ : "+&r" (*(T *)value) \
+ : "r" (target), \
+ "r" (set) \
+ : "memory"); \
+ return (*(T *)value == compare); \
+ } \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N(M *target, T compare, T set) \
+ { \
+ T previous = compare; \
+ __asm__ __volatile__( \
+ "cas" W " %" R "0, %" R "2, [%1];" \
+ : "+&r" (previous) \
+ : "r" (target), \
+ "r" (set) \
+ : "memory"); \
+ return (previous == compare); \
+ }
+
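+/*
+ * Unlike the LL/SC variants, these expand to a single ARMv8.1 "cas"
+ * instruction; for example, CK_PR_CAS_S(32, uint32_t, "", "w") below
+ * generates ck_pr_cas_32() and ck_pr_cas_32_value() around
+ * "cas %w0, %w2, [%1]".
+ */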
+CK_PR_CAS(ptr, void, void *, "", "")
+
+#define CK_PR_CAS_S(N, M, W, R) CK_PR_CAS(N, M, M, W, R)
+CK_PR_CAS_S(64, uint64_t, "", "")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_CAS_S(double, double, "", "")
+#endif
+CK_PR_CAS_S(32, uint32_t, "", "w")
+CK_PR_CAS_S(uint, unsigned int, "", "w")
+CK_PR_CAS_S(int, int, "", "w")
+CK_PR_CAS_S(16, uint16_t, "h", "w")
+CK_PR_CAS_S(8, uint8_t, "b", "w")
+CK_PR_CAS_S(short, short, "h", "w")
+CK_PR_CAS_S(char, char, "b", "w")
+
+
+#undef CK_PR_CAS_S
+#undef CK_PR_CAS
+
+#define CK_PR_FAS(N, M, T, W, R) \
+ CK_CC_INLINE static T \
+ ck_pr_fas_##N(M *target, T v) \
+ { \
+ T previous; \
+ __asm__ __volatile__( \
+ "swp" W " %" R "2, %" R "0, [%1];" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (v) \
+ : "memory"); \
+ return (previous); \
+ }
+
+CK_PR_FAS(64, uint64_t, uint64_t, "", "")
+CK_PR_FAS(32, uint32_t, uint32_t, "", "w")
+CK_PR_FAS(ptr, void, void *, "", "")
+CK_PR_FAS(int, int, int, "", "w")
+CK_PR_FAS(uint, unsigned int, unsigned int, "", "w")
+CK_PR_FAS(16, uint16_t, uint16_t, "h", "w")
+CK_PR_FAS(8, uint8_t, uint8_t, "b", "w")
+CK_PR_FAS(short, short, short, "h", "w")
+CK_PR_FAS(char, char, char, "b", "w")
+
+
+#undef CK_PR_FAS
+
+#define CK_PR_UNARY(O, N, M, T, I, W, R, S) \
+ CK_CC_INLINE static void \
+ ck_pr_##O##_##N(M *target) \
+ { \
+ __asm__ __volatile__(I ";" \
+ "st" S W " " R "0, [%0];" \
+ : \
+ : "r" (target) \
+ : "x0", "memory"); \
+ return; \
+ }
+
+CK_PR_UNARY(inc, ptr, void, void *, "mov x0, 1", "", "x", "add")
+CK_PR_UNARY(dec, ptr, void, void *, "mov x0, -1", "", "x", "add")
+CK_PR_UNARY(not, ptr, void, void *, "mov x0, -1", "", "x", "eor")
+CK_PR_UNARY(inc, 64, uint64_t, uint64_t, "mov x0, 1", "", "x", "add")
+CK_PR_UNARY(dec, 64, uint64_t, uint64_t, "mov x0, -1", "", "x", "add")
+CK_PR_UNARY(not, 64, uint64_t, uint64_t, "mov x0, -1", "", "x", "eor")
+
+#define CK_PR_UNARY_S(S, T, W) \
+ CK_PR_UNARY(inc, S, T, T, "mov w0, 1", W, "w", "add") \
+ CK_PR_UNARY(dec, S, T, T, "mov w0, -1", W, "w", "add") \
+ CK_PR_UNARY(not, S, T, T, "mov w0, -1", W, "w", "eor") \
+
+CK_PR_UNARY_S(32, uint32_t, "")
+CK_PR_UNARY_S(uint, unsigned int, "")
+CK_PR_UNARY_S(int, int, "")
+CK_PR_UNARY_S(16, uint16_t, "h")
+CK_PR_UNARY_S(8, uint8_t, "b")
+CK_PR_UNARY_S(short, short, "h")
+CK_PR_UNARY_S(char, char, "b")
+
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY
+
+#define CK_PR_BINARY(O, N, M, T, S, W, R, I) \
+ CK_CC_INLINE static void \
+ ck_pr_##O##_##N(M *target, T delta) \
+ { \
+ __asm__ __volatile__(I ";" \
+ "st" S W " %" R "0, [%1];" \
+ : "+&r" (delta) \
+ : "r" (target) \
+ : "memory"); \
+ return; \
+ }
+
+CK_PR_BINARY(and, ptr, void, uintptr_t, "clr", "", "", "mvn %0, %0")
+CK_PR_BINARY(add, ptr, void, uintptr_t, "add", "", "", "")
+CK_PR_BINARY(or, ptr, void, uintptr_t, "set", "", "", "")
+CK_PR_BINARY(sub, ptr, void, uintptr_t, "add", "", "", "neg %0, %0")
+CK_PR_BINARY(xor, ptr, void, uintptr_t, "eor", "", "", "")
+CK_PR_BINARY(and, 64, uint64_t, uint64_t, "clr", "", "", "mvn %0, %0")
+CK_PR_BINARY(add, 64, uint64_t, uint64_t, "add", "", "", "")
+CK_PR_BINARY(or, 64, uint64_t, uint64_t, "set", "", "", "")
+CK_PR_BINARY(sub, 64, uint64_t, uint64_t, "add", "", "", "neg %0, %0")
+CK_PR_BINARY(xor, 64, uint64_t, uint64_t, "eor", "", "", "")
+
+#define CK_PR_BINARY_S(S, T, W) \
+ CK_PR_BINARY(and, S, T, T, "clr", W, "w", "mvn %w0, %w0") \
+ CK_PR_BINARY(add, S, T, T, "add", W, "w", "") \
+ CK_PR_BINARY(or, S, T, T, "set", W, "w", "") \
+ CK_PR_BINARY(sub, S, T, T, "add", W, "w", "neg %w0, %w0") \
+ CK_PR_BINARY(xor, S, T, T, "eor", W, "w", "")
+
+CK_PR_BINARY_S(32, uint32_t, "")
+CK_PR_BINARY_S(uint, unsigned int, "")
+CK_PR_BINARY_S(int, int, "")
+CK_PR_BINARY_S(16, uint16_t, "h")
+CK_PR_BINARY_S(8, uint8_t, "b")
+CK_PR_BINARY_S(short, short, "h")
+CK_PR_BINARY_S(char, char, "b")
+
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+CK_CC_INLINE static void *
+ck_pr_faa_ptr(void *target, uintptr_t delta)
+{
+ uintptr_t previous;
+
+ __asm__ __volatile__(
+ "ldadd %2, %0, [%1];"
+ : "=r" (previous)
+ : "r" (target),
+ "r" (delta)
+ : "memory");
+
+ return (void *)(previous);
+}
+
+CK_CC_INLINE static uint64_t
+ck_pr_faa_64(uint64_t *target, uint64_t delta)
+{
+ uint64_t previous;
+
+ __asm__ __volatile__(
+ "ldadd %2, %0, [%1];"
+ : "=r" (previous)
+ : "r" (target),
+ "r" (delta)
+ : "memory");
+
+ return (previous);
+}
+
+#define CK_PR_FAA(S, T, W) \
+ CK_CC_INLINE static T \
+ ck_pr_faa_##S(T *target, T delta) \
+ { \
+ T previous; \
+ __asm__ __volatile__( \
+ "ldadd" W " %w2, %w0, [%1];" \
+ : "=r" (previous) \
+ : "r" (target), \
+ "r" (delta) \
+ : "memory"); \
+ return (previous); \
+ }
+
+CK_PR_FAA(32, uint32_t, "")
+CK_PR_FAA(uint, unsigned int, "")
+CK_PR_FAA(int, int, "")
+CK_PR_FAA(16, uint16_t, "h")
+CK_PR_FAA(8, uint8_t, "b")
+CK_PR_FAA(short, short, "h")
+CK_PR_FAA(char, char, "b")
+
+#undef CK_PR_FAA
+
+#endif /* CK_PR_AARCH64_LSE_H */
diff --git a/include/gcc/arm/ck_f_pr.h b/include/gcc/arm/ck_f_pr.h
new file mode 100644
index 0000000..c508f85
--- /dev/null
+++ b/include/gcc/arm/ck_f_pr.h
@@ -0,0 +1,162 @@
+/* DO NOT EDIT. This is auto-generated from feature.sh */
+#define CK_F_PR_ADD_16
+#define CK_F_PR_ADD_32
+#define CK_F_PR_ADD_8
+#define CK_F_PR_ADD_CHAR
+#define CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_SHORT
+#define CK_F_PR_ADD_UINT
+#define CK_F_PR_AND_16
+#define CK_F_PR_AND_32
+#define CK_F_PR_AND_8
+#define CK_F_PR_AND_CHAR
+#define CK_F_PR_AND_INT
+#define CK_F_PR_AND_PTR
+#define CK_F_PR_AND_SHORT
+#define CK_F_PR_AND_UINT
+#define CK_F_PR_BARRIER
+#define CK_F_PR_CAS_16
+#define CK_F_PR_CAS_16_VALUE
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_VALUE
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
+#define CK_F_PR_CAS_64
+#define CK_F_PR_CAS_64_VALUE
+#define CK_F_PR_CAS_DOUBLE
+#define CK_F_PR_CAS_DOUBLE_VALUE
+#endif
+#define CK_F_PR_CAS_8
+#define CK_F_PR_CAS_8_VALUE
+#define CK_F_PR_CAS_CHAR
+#define CK_F_PR_CAS_CHAR_VALUE
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_CAS_PTR
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
+#define CK_F_PR_CAS_PTR_2
+#define CK_F_PR_CAS_PTR_2_VALUE
+#endif
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_CAS_SHORT
+#define CK_F_PR_CAS_SHORT_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_DEC_16
+#define CK_F_PR_DEC_32
+#define CK_F_PR_DEC_8
+#define CK_F_PR_DEC_CHAR
+#define CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_SHORT
+#define CK_F_PR_DEC_UINT
+#define CK_F_PR_FAA_16
+#define CK_F_PR_FAA_32
+#define CK_F_PR_FAA_8
+#define CK_F_PR_FAA_CHAR
+#define CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_SHORT
+#define CK_F_PR_FAA_UINT
+#define CK_F_PR_FAS_16
+#define CK_F_PR_FAS_32
+#define CK_F_PR_FAS_8
+#define CK_F_PR_FAS_CHAR
+#define CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_SHORT
+#define CK_F_PR_FAS_UINT
+#define CK_F_PR_FENCE_ATOMIC
+#define CK_F_PR_FENCE_ATOMIC_LOAD
+#define CK_F_PR_FENCE_ATOMIC_STORE
+#define CK_F_PR_FENCE_LOAD
+#define CK_F_PR_FENCE_LOAD_ATOMIC
+#define CK_F_PR_FENCE_LOAD_DEPENDS
+#define CK_F_PR_FENCE_LOAD_STORE
+#define CK_F_PR_FENCE_MEMORY
+#define CK_F_PR_FENCE_STORE
+#define CK_F_PR_FENCE_STORE_ATOMIC
+#define CK_F_PR_FENCE_STORE_LOAD
+#define CK_F_PR_FENCE_STRICT_ATOMIC
+#define CK_F_PR_FENCE_STRICT_ATOMIC_LOAD
+#define CK_F_PR_FENCE_STRICT_ATOMIC_STORE
+#define CK_F_PR_FENCE_STRICT_LOAD
+#define CK_F_PR_FENCE_STRICT_LOAD_ATOMIC
+#define CK_F_PR_FENCE_STRICT_LOAD_STORE
+#define CK_F_PR_FENCE_STRICT_MEMORY
+#define CK_F_PR_FENCE_STRICT_STORE
+#define CK_F_PR_FENCE_STRICT_STORE_ATOMIC
+#define CK_F_PR_FENCE_STRICT_STORE_LOAD
+#define CK_F_PR_INC_16
+#define CK_F_PR_INC_32
+#define CK_F_PR_INC_8
+#define CK_F_PR_INC_CHAR
+#define CK_F_PR_INC_INT
+#define CK_F_PR_INC_PTR
+#define CK_F_PR_INC_SHORT
+#define CK_F_PR_INC_UINT
+#define CK_F_PR_LOAD_16
+#define CK_F_PR_LOAD_32
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
+#define CK_F_PR_LOAD_64
+#define CK_F_PR_LOAD_DOUBLE
+#endif
+#define CK_F_PR_LOAD_8
+#define CK_F_PR_LOAD_CHAR
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_PTR
+#define CK_F_PR_LOAD_SHORT
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_NEG_16
+#define CK_F_PR_NEG_32
+#define CK_F_PR_NEG_8
+#define CK_F_PR_NEG_CHAR
+#define CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_SHORT
+#define CK_F_PR_NEG_UINT
+#define CK_F_PR_NOT_16
+#define CK_F_PR_NOT_32
+#define CK_F_PR_NOT_8
+#define CK_F_PR_NOT_CHAR
+#define CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_SHORT
+#define CK_F_PR_NOT_UINT
+#define CK_F_PR_OR_16
+#define CK_F_PR_OR_32
+#define CK_F_PR_OR_8
+#define CK_F_PR_OR_CHAR
+#define CK_F_PR_OR_INT
+#define CK_F_PR_OR_PTR
+#define CK_F_PR_OR_SHORT
+#define CK_F_PR_OR_UINT
+#define CK_F_PR_STALL
+#define CK_F_PR_STORE_16
+#define CK_F_PR_STORE_32
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
+#define CK_F_PR_STORE_64
+#define CK_F_PR_STORE_DOUBLE
+#endif
+#define CK_F_PR_STORE_8
+#define CK_F_PR_STORE_CHAR
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_STORE_SHORT
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_SUB_16
+#define CK_F_PR_SUB_32
+#define CK_F_PR_SUB_8
+#define CK_F_PR_SUB_CHAR
+#define CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_SHORT
+#define CK_F_PR_SUB_UINT
+#define CK_F_PR_XOR_16
+#define CK_F_PR_XOR_32
+#define CK_F_PR_XOR_8
+#define CK_F_PR_XOR_CHAR
+#define CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_SHORT
+#define CK_F_PR_XOR_UINT
diff --git a/include/gcc/arm/ck_pr.h b/include/gcc/arm/ck_pr.h
new file mode 100644
index 0000000..841ca21
--- /dev/null
+++ b/include/gcc/arm/ck_pr.h
@@ -0,0 +1,563 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * Copyright 2013-2015 Olivier Houchard.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_ARM_H
+#define CK_PR_ARM_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+#include <ck_md.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+/*
+ * Minimum interface requirement met.
+ */
+#define CK_F_PR
+
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+
+ __asm__ __volatile__("" ::: "memory");
+ return;
+}
+
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
+#define CK_ISB __asm __volatile("isb" : : "r" (0) : "memory")
+#define CK_DMB __asm __volatile("dmb" : : "r" (0) : "memory")
+#define CK_DSB __asm __volatile("dsb" : : "r" (0) : "memory")
+/* FreeBSD's toolchain doesn't accept dmb st, so use the opcode instead */
+#ifdef __FreeBSD__
+#define CK_DMB_ST __asm __volatile(".word 0xf57ff05e" : : "r" (0) : "memory")
+#else
+#define CK_DMB_ST __asm __volatile("dmb st" : : "r" (0) : "memory")
+#endif /* __FreeBSD__ */
+#else
+/* armv6 doesn't have dsb/dmb/isb, and no way to wait only for stores */
+#define CK_ISB \
+ __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
+#define CK_DSB \
+ __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
+#define CK_DMB \
+ __asm __volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory")
+#define CK_DMB_ST CK_DMB
+#endif
+
+#define CK_PR_FENCE(T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ I; \
+ }
+
+CK_PR_FENCE(atomic, CK_DMB_ST)
+CK_PR_FENCE(atomic_store, CK_DMB_ST)
+CK_PR_FENCE(atomic_load, CK_DMB_ST)
+CK_PR_FENCE(store_atomic, CK_DMB_ST)
+CK_PR_FENCE(load_atomic, CK_DMB)
+CK_PR_FENCE(store, CK_DMB_ST)
+CK_PR_FENCE(store_load, CK_DMB)
+CK_PR_FENCE(load, CK_DMB)
+CK_PR_FENCE(load_store, CK_DMB)
+CK_PR_FENCE(memory, CK_DMB)
+CK_PR_FENCE(acquire, CK_DMB)
+CK_PR_FENCE(release, CK_DMB)
+CK_PR_FENCE(acqrel, CK_DMB)
+CK_PR_FENCE(lock, CK_DMB)
+CK_PR_FENCE(unlock, CK_DMB)
+
+#undef CK_PR_FENCE
+
+#undef CK_ISB
+#undef CK_DSB
+#undef CK_DMB
+#undef CK_DMB_ST
+
+#define CK_PR_LOAD(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ long r = 0; \
+ __asm__ __volatile__(I " %0, [%1];" \
+ : "=r" (r) \
+ : "r" (target) \
+ : "memory"); \
+ return ((T)r); \
+ }
+
+CK_PR_LOAD(ptr, void, void *, uint32_t, "ldr")
+
+#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)
+
+CK_PR_LOAD_S(32, uint32_t, "ldr")
+CK_PR_LOAD_S(16, uint16_t, "ldrh")
+CK_PR_LOAD_S(8, uint8_t, "ldrb")
+CK_PR_LOAD_S(uint, unsigned int, "ldr")
+CK_PR_LOAD_S(int, int, "ldr")
+CK_PR_LOAD_S(short, short, "ldrh")
+CK_PR_LOAD_S(char, char, "ldrb")
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD
+
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
+
+#define CK_PR_DOUBLE_LOAD(T, N) \
+CK_CC_INLINE static T \
+ck_pr_md_load_##N(const T *target) \
+{ \
+ register T ret; \
+ \
+ __asm __volatile("ldrexd %0, [%1]" \
+ : "=&r" (ret) \
+ : "r" (target) \
+ : "memory", "cc"); \
+ return (ret); \
+}
+
+CK_PR_DOUBLE_LOAD(uint64_t, 64)
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_DOUBLE_LOAD(double, double)
+#endif
+#undef CK_PR_DOUBLE_LOAD
+#endif
+
+#define CK_PR_STORE(S, M, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %1, [%0]" \
+ : \
+ : "r" (target), \
+ "r" (v) \
+ : "memory"); \
+ return; \
+ }
+
+CK_PR_STORE(ptr, void, const void *, uint32_t, "str")
+
+#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I)
+
+CK_PR_STORE_S(32, uint32_t, "str")
+CK_PR_STORE_S(16, uint16_t, "strh")
+CK_PR_STORE_S(8, uint8_t, "strb")
+CK_PR_STORE_S(uint, unsigned int, "str")
+CK_PR_STORE_S(int, int, "str")
+CK_PR_STORE_S(short, short, "strh")
+CK_PR_STORE_S(char, char, "strb")
+
+#undef CK_PR_STORE_S
+#undef CK_PR_STORE
+
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__)
+
+#define CK_PR_DOUBLE_STORE(T, N) \
+CK_CC_INLINE static void \
+ck_pr_md_store_##N(const T *target, T value) \
+{ \
+ T tmp; \
+ uint32_t flag; \
+ __asm __volatile("1: \n" \
+ "ldrexd %0, [%2]\n" \
+ "strexd %1, %3, [%2]\n" \
+ "teq %1, #0\n" \
+ "it ne \n" \
+ "bne 1b\n" \
+ : "=&r" (tmp), "=&r" (flag) \
+ : "r" (target), "r" (value) \
+ : "memory", "cc"); \
+}
+
+CK_PR_DOUBLE_STORE(uint64_t, 64)
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_DOUBLE_STORE(double, double)
+#endif
+
+#undef CK_PR_DOUBLE_STORE
+
+#define CK_PR_DOUBLE_CAS_VALUE(T, N) \
+CK_CC_INLINE static bool \
+ck_pr_cas_##N##_value(T *target, T compare, T set, T *value) \
+{ \
+ T previous; \
+ int tmp; \
+ \
+ __asm__ __volatile__("1:" \
+ "ldrexd %0, [%4];" \
+ "cmp %Q0, %Q2;" \
+ "ittt eq;" \
+ "cmpeq %R0, %R2;" \
+ "strexdeq %1, %3, [%4];" \
+ "cmpeq %1, #1;" \
+ "beq 1b;" \
+ :"=&r" (previous), "=&r" (tmp) \
+ : "r" (compare), "r" (set) , \
+ "r"(target) \
+ : "memory", "cc"); \
+ *value = previous; \
+ return (*value == compare); \
+}
+
+CK_PR_DOUBLE_CAS_VALUE(uint64_t, 64)
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_DOUBLE_CAS_VALUE(double, double)
+#endif
+
+#undef CK_PR_DOUBLE_CAS_VALUE
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2_value(void *target, void *compare, void *set, void *value)
+{
+ uint32_t *_compare = CK_CPP_CAST(uint32_t *, compare);
+ uint32_t *_set = CK_CPP_CAST(uint32_t *, set);
+ uint64_t __compare = ((uint64_t)_compare[0]) | ((uint64_t)_compare[1] << 32);
+ uint64_t __set = ((uint64_t)_set[0]) | ((uint64_t)_set[1] << 32);
+
+ return (ck_pr_cas_64_value(CK_CPP_CAST(uint64_t *, target),
+ __compare,
+ __set,
+ CK_CPP_CAST(uint64_t *, value)));
+}
+
+#define CK_PR_DOUBLE_CAS(T, N) \
+CK_CC_INLINE static bool \
+ck_pr_cas_##N(T *target, T compare, T set) \
+{ \
+ int ret; \
+ T tmp; \
+ \
+ __asm__ __volatile__("1:" \
+ "mov %0, #0;" \
+ "ldrexd %1, [%4];" \
+ "cmp %Q1, %Q2;" \
+ "itttt eq;" \
+ "cmpeq %R1, %R2;" \
+ "strexdeq %1, %3, [%4];" \
+ "moveq %0, #1;" \
+ "cmpeq %1, #1;" \
+ "beq 1b;" \
+ : "=&r" (ret), "=&r" (tmp) \
+ : "r" (compare), "r" (set) , \
+ "r"(target) \
+ : "memory", "cc"); \
+ \
+ return (ret); \
+}
+
+CK_PR_DOUBLE_CAS(uint64_t, 64)
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_DOUBLE_CAS(double, double)
+#endif
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2(void *target, void *compare, void *set)
+{
+ uint32_t *_compare = CK_CPP_CAST(uint32_t *, compare);
+ uint32_t *_set = CK_CPP_CAST(uint32_t *, set);
+ uint64_t __compare = ((uint64_t)_compare[0]) | ((uint64_t)_compare[1] << 32);
+ uint64_t __set = ((uint64_t)_set[0]) | ((uint64_t)_set[1] << 32);
+ return (ck_pr_cas_64(CK_CPP_CAST(uint64_t *, target),
+ __compare,
+ __set));
+}
+
+#endif
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *value)
+{
+ void *previous, *tmp;
+ __asm__ __volatile__("1:"
+ "ldrex %0, [%2];"
+ "cmp %0, %4;"
+ "itt eq;"
+ "strexeq %1, %3, [%2];"
+ "cmpeq %1, #1;"
+ "beq 1b;"
+ : "=&r" (previous),
+ "=&r" (tmp)
+ : "r" (target),
+ "r" (set),
+ "r" (compare)
+ : "memory", "cc");
+ *(void **)value = previous;
+ return (previous == compare);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr(void *target, void *compare, void *set)
+{
+ void *previous, *tmp;
+ __asm__ __volatile__("1:"
+ "ldrex %0, [%2];"
+ "cmp %0, %4;"
+ "itt eq;"
+ "strexeq %1, %3, [%2];"
+ "cmpeq %1, #1;"
+ "beq 1b;"
+ : "=&r" (previous),
+ "=&r" (tmp)
+ : "r" (target),
+ "r" (set),
+ "r" (compare)
+ : "memory", "cc");
+ return (previous == compare);
+}
+
+#define CK_PR_CAS(N, T, W) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N##_value(T *target, T compare, T set, T *value) \
+ { \
+ T previous = 0, tmp = 0; \
+ __asm__ __volatile__("1:" \
+ "ldrex" W " %0, [%2];" \
+ "cmp %0, %4;" \
+ "itt eq;" \
+ "strex" W "eq %1, %3, [%2];" \
+ "cmpeq %1, #1;" \
+ "beq 1b;" \
+ /* \
+ * Using "+&" instead of "=&" to avoid bogus \
+ * clang warnings. \
+ */ \
+ : "+&r" (previous), \
+ "+&r" (tmp) \
+ : "r" (target), \
+ "r" (set), \
+ "r" (compare) \
+ : "memory", "cc"); \
+ *value = previous; \
+ return (previous == compare); \
+ } \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N(T *target, T compare, T set) \
+ { \
+ T previous = 0, tmp = 0; \
+ __asm__ __volatile__("1:" \
+ "ldrex" W " %0, [%2];" \
+ "cmp %0, %4;" \
+ "itt eq;" \
+ "strex" W "eq %1, %3, [%2];" \
+ "cmpeq %1, #1;" \
+ "beq 1b;" \
+ : "+&r" (previous), \
+ "+&r" (tmp) \
+ : "r" (target), \
+ "r" (set), \
+ "r" (compare) \
+ : "memory", "cc"); \
+ return (previous == compare); \
+ }
+
+CK_PR_CAS(32, uint32_t, "")
+CK_PR_CAS(uint, unsigned int, "")
+CK_PR_CAS(int, int, "")
+CK_PR_CAS(16, uint16_t, "h")
+CK_PR_CAS(8, uint8_t, "b")
+CK_PR_CAS(short, short, "h")
+CK_PR_CAS(char, char, "b")
+
+
+#undef CK_PR_CAS
+
+#define CK_PR_FAS(N, M, T, W) \
+ CK_CC_INLINE static T \
+ ck_pr_fas_##N(M *target, T v) \
+ { \
+ T previous = 0; \
+ T tmp = 0; \
+ __asm__ __volatile__("1:" \
+ "ldrex" W " %0, [%2];" \
+ "strex" W " %1, %3, [%2];" \
+ "cmp %1, #0;" \
+ "bne 1b;" \
+ : "+&r" (previous), \
+ "+&r" (tmp) \
+ : "r" (target), \
+ "r" (v) \
+ : "memory", "cc"); \
+ return (previous); \
+ }
+
+CK_PR_FAS(32, uint32_t, uint32_t, "")
+CK_PR_FAS(ptr, void, void *, "")
+CK_PR_FAS(int, int, int, "")
+CK_PR_FAS(uint, unsigned int, unsigned int, "")
+CK_PR_FAS(16, uint16_t, uint16_t, "h")
+CK_PR_FAS(8, uint8_t, uint8_t, "b")
+CK_PR_FAS(short, short, short, "h")
+CK_PR_FAS(char, char, char, "b")
+
+
+#undef CK_PR_FAS
+
+#define CK_PR_UNARY(O, N, M, T, I, W) \
+ CK_CC_INLINE static void \
+ ck_pr_##O##_##N(M *target) \
+ { \
+ T previous = 0; \
+ T tmp = 0; \
+ __asm__ __volatile__("1:" \
+ "ldrex" W " %0, [%2];" \
+ I ";" \
+ "strex" W " %1, %0, [%2];" \
+ "cmp %1, #0;" \
+ "bne 1b;" \
+ : "+&r" (previous), \
+ "+&r" (tmp) \
+ : "r" (target) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+CK_PR_UNARY(inc, ptr, void, void *, "add %0, %0, #1", "")
+CK_PR_UNARY(dec, ptr, void, void *, "sub %0, %0, #1", "")
+CK_PR_UNARY(not, ptr, void, void *, "mvn %0, %0", "")
+CK_PR_UNARY(neg, ptr, void, void *, "neg %0, %0", "")
+
+#define CK_PR_UNARY_S(S, T, W) \
+ CK_PR_UNARY(inc, S, T, T, "add %0, %0, #1", W) \
+ CK_PR_UNARY(dec, S, T, T, "sub %0, %0, #1", W) \
+ CK_PR_UNARY(not, S, T, T, "mvn %0, %0", W) \
+ CK_PR_UNARY(neg, S, T, T, "neg %0, %0", W) \
+
+CK_PR_UNARY_S(32, uint32_t, "")
+CK_PR_UNARY_S(uint, unsigned int, "")
+CK_PR_UNARY_S(int, int, "")
+CK_PR_UNARY_S(16, uint16_t, "h")
+CK_PR_UNARY_S(8, uint8_t, "b")
+CK_PR_UNARY_S(short, short, "h")
+CK_PR_UNARY_S(char, char, "b")
+
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY
+
+#define CK_PR_BINARY(O, N, M, T, I, W) \
+ CK_CC_INLINE static void \
+ ck_pr_##O##_##N(M *target, T delta) \
+ { \
+ T previous = 0; \
+ T tmp = 0; \
+ __asm__ __volatile__("1:" \
+ "ldrex" W " %0, [%2];" \
+ I " %0, %0, %3;" \
+ "strex" W " %1, %0, [%2];" \
+ "cmp %1, #0;" \
+ "bne 1b;" \
+ : "+&r" (previous), \
+ "+&r" (tmp) \
+ : "r" (target), \
+ "r" (delta) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+CK_PR_BINARY(and, ptr, void, uintptr_t, "and", "")
+CK_PR_BINARY(add, ptr, void, uintptr_t, "add", "")
+CK_PR_BINARY(or, ptr, void, uintptr_t, "orr", "")
+CK_PR_BINARY(sub, ptr, void, uintptr_t, "sub", "")
+CK_PR_BINARY(xor, ptr, void, uintptr_t, "eor", "")
+
+#define CK_PR_BINARY_S(S, T, W) \
+ CK_PR_BINARY(and, S, T, T, "and", W) \
+ CK_PR_BINARY(add, S, T, T, "add", W) \
+ CK_PR_BINARY(or, S, T, T, "orr", W) \
+ CK_PR_BINARY(sub, S, T, T, "sub", W) \
+ CK_PR_BINARY(xor, S, T, T, "eor", W)
+
+CK_PR_BINARY_S(32, uint32_t, "")
+CK_PR_BINARY_S(uint, unsigned int, "")
+CK_PR_BINARY_S(int, int, "")
+CK_PR_BINARY_S(16, uint16_t, "h")
+CK_PR_BINARY_S(8, uint8_t, "b")
+CK_PR_BINARY_S(short, short, "h")
+CK_PR_BINARY_S(char, char, "b")
+
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+CK_CC_INLINE static void *
+ck_pr_faa_ptr(void *target, uintptr_t delta)
+{
+ uintptr_t previous, r, tmp;
+
+ __asm__ __volatile__("1:"
+ "ldrex %0, [%3];"
+ "add %1, %4, %0;"
+ "strex %2, %1, [%3];"
+ "cmp %2, #0;"
+ "bne 1b;"
+ : "=&r" (previous),
+ "=&r" (r),
+ "=&r" (tmp)
+ : "r" (target),
+ "r" (delta)
+ : "memory", "cc");
+
+ return (void *)(previous);
+}
+
+#define CK_PR_FAA(S, T, W) \
+ CK_CC_INLINE static T \
+ ck_pr_faa_##S(T *target, T delta) \
+ { \
+ T previous = 0, r = 0, tmp = 0; \
+ __asm__ __volatile__("1:" \
+ "ldrex" W " %0, [%3];" \
+ "add %1, %4, %0;" \
+ "strex" W " %2, %1, [%3];" \
+ "cmp %2, #0;" \
+ "bne 1b;" \
+ : "+&r" (previous), \
+ "+&r" (r), \
+ "+&r" (tmp) \
+ : "r" (target), \
+ "r" (delta) \
+ : "memory", "cc"); \
+ return (previous); \
+ }
+
+CK_PR_FAA(32, uint32_t, "")
+CK_PR_FAA(uint, unsigned int, "")
+CK_PR_FAA(int, int, "")
+CK_PR_FAA(16, uint16_t, "h")
+CK_PR_FAA(8, uint8_t, "b")
+CK_PR_FAA(short, short, "h")
+CK_PR_FAA(char, char, "b")
+
+#undef CK_PR_FAA
+
+#endif /* CK_PR_ARM_H */
+
diff --git a/include/gcc/ck_cc.h b/include/gcc/ck_cc.h
new file mode 100644
index 0000000..a14a4b5
--- /dev/null
+++ b/include/gcc/ck_cc.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * Copyright 2014 Paul Khuong.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_GCC_CC_H
+#define CK_GCC_CC_H
+
+#include <ck_md.h>
+
+#ifdef __SUNPRO_C
+#define CK_CC_UNUSED
+#define CK_CC_USED
+#define CK_CC_IMM
+#define CK_CC_IMM_U32
+#else
+#define CK_CC_UNUSED __attribute__((unused))
+#define CK_CC_USED __attribute__((used))
+#define CK_CC_IMM "i"
+#if defined(__x86_64__) || defined(__x86__)
+#define CK_CC_IMM_U32 "Z"
+#define CK_CC_IMM_S32 "e"
+#else
+#define CK_CC_IMM_U32 CK_CC_IMM
+#define CK_CC_IMM_S32 CK_CC_IMM
+#endif /* __x86_64__ || __x86__ */
+#endif
+
+#ifdef __OPTIMIZE__
+#define CK_CC_INLINE CK_CC_UNUSED inline
+#else
+#define CK_CC_INLINE CK_CC_UNUSED
+#endif
+
+#define CK_CC_FORCE_INLINE CK_CC_UNUSED __attribute__((always_inline)) inline
+#define CK_CC_RESTRICT __restrict__
+
+/*
+ * Packed attribute.
+ */
+#define CK_CC_PACKED __attribute__((packed))
+
+/*
+ * Weak reference.
+ */
+#define CK_CC_WEAKREF __attribute__((weakref))
+
+/*
+ * Alignment attribute.
+ */
+#define CK_CC_ALIGN(B) __attribute__((aligned(B)))
+
+/*
+ * Cache align.
+ */
+#define CK_CC_CACHELINE CK_CC_ALIGN(CK_MD_CACHELINE)
+
+/*
+ * These are functions which should be avoided.
+ */
+#ifdef __freestanding__
+#pragma GCC poison malloc free
+#endif
+
+/*
+ * Branch execution hints.
+ */
+#define CK_CC_LIKELY(x) (__builtin_expect(!!(x), 1))
+#define CK_CC_UNLIKELY(x) (__builtin_expect(!!(x), 0))
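+
+/*
+ * For example: if (CK_CC_UNLIKELY(ptr == NULL)) { ...rare error path... }
+ */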
+
+/*
+ * Some compilers are overly strict regarding aliasing semantics.
+ * Unfortunately, in many cases it makes more sense to pay the aliasing
+ * cost rather than incur overly expensive register spillage.
+ */
+#define CK_CC_ALIASED __attribute__((__may_alias__))
+
+/*
+ * Compile-time typeof
+ */
+#define CK_CC_TYPEOF(X, DEFAULT) __typeof__(X)
+
+/*
+ * Portability wrappers for bitwise ops.
+ */
+
+#define CK_F_CC_FFS
+#define CK_F_CC_CLZ
+#define CK_F_CC_CTZ
+#define CK_F_CC_POPCOUNT
+
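+/*
+ * Examples (32-bit unsigned int): ck_cc_ffs(0x10) == 5 (one-based index
+ * of the least significant set bit, 0 if no bit is set),
+ * ck_cc_clz(0x10) == 27, ck_cc_ctz(0x10) == 4 and
+ * ck_cc_popcount(0x10) == 1.  As with the underlying builtins,
+ * ck_cc_clz() and ck_cc_ctz() are undefined for a zero argument.
+ */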
+CK_CC_INLINE static int
+ck_cc_ffs(unsigned int x)
+{
+
+ return __builtin_ffs(x);
+}
+
+CK_CC_INLINE static int
+ck_cc_clz(unsigned int x)
+{
+
+ return __builtin_clz(x);
+}
+
+CK_CC_INLINE static int
+ck_cc_ctz(unsigned int x)
+{
+
+ return __builtin_ctz(x);
+}
+
+CK_CC_INLINE static int
+ck_cc_popcount(unsigned int x)
+{
+
+ return __builtin_popcount(x);
+}
+
+#endif /* CK_GCC_CC_H */
diff --git a/include/gcc/ck_f_pr.h b/include/gcc/ck_f_pr.h
new file mode 100644
index 0000000..0ef0d10
--- /dev/null
+++ b/include/gcc/ck_f_pr.h
@@ -0,0 +1,105 @@
+/* DO NOT EDIT. This is auto-generated from feature.sh */
+#define CK_F_PR_ADD_16
+#define CK_F_PR_ADD_32
+#define CK_F_PR_ADD_64
+#define CK_F_PR_ADD_8
+#define CK_F_PR_ADD_CHAR
+#define CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_UINT
+#define CK_F_PR_AND_16
+#define CK_F_PR_AND_32
+#define CK_F_PR_AND_64
+#define CK_F_PR_AND_8
+#define CK_F_PR_AND_CHAR
+#define CK_F_PR_AND_INT
+#define CK_F_PR_AND_PTR
+#define CK_F_PR_AND_UINT
+#define CK_F_PR_CAS_16
+#define CK_F_PR_CAS_16_VALUE
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_VALUE
+#define CK_F_PR_CAS_64
+#define CK_F_PR_CAS_64_VALUE
+#define CK_F_PR_CAS_8
+#define CK_F_PR_CAS_8_VALUE
+#define CK_F_PR_CAS_CHAR
+#define CK_F_PR_CAS_CHAR_VALUE
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_CAS_PTR
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_DEC_16
+#define CK_F_PR_DEC_32
+#define CK_F_PR_DEC_64
+#define CK_F_PR_DEC_8
+#define CK_F_PR_DEC_CHAR
+#define CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_UINT
+#define CK_F_PR_FAA_16
+#define CK_F_PR_FAA_32
+#define CK_F_PR_FAA_64
+#define CK_F_PR_FAA_8
+#define CK_F_PR_FAA_CHAR
+#define CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_UINT
+#define CK_F_PR_FENCE_LOAD
+#define CK_F_PR_FENCE_LOAD_DEPENDS
+#define CK_F_PR_FENCE_MEMORY
+#define CK_F_PR_FENCE_STORE
+#define CK_F_PR_FENCE_STRICT_LOAD
+#define CK_F_PR_FENCE_STRICT_MEMORY
+#define CK_F_PR_FENCE_STRICT_STORE
+#define CK_F_PR_INC_16
+#define CK_F_PR_INC_32
+#define CK_F_PR_INC_64
+#define CK_F_PR_INC_8
+#define CK_F_PR_INC_CHAR
+#define CK_F_PR_INC_INT
+#define CK_F_PR_INC_PTR
+#define CK_F_PR_INC_UINT
+#define CK_F_PR_LOAD_16
+#define CK_F_PR_LOAD_32
+#define CK_F_PR_LOAD_64
+#define CK_F_PR_LOAD_8
+#define CK_F_PR_LOAD_CHAR
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_PTR
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_OR_16
+#define CK_F_PR_OR_32
+#define CK_F_PR_OR_64
+#define CK_F_PR_OR_8
+#define CK_F_PR_OR_CHAR
+#define CK_F_PR_OR_INT
+#define CK_F_PR_OR_PTR
+#define CK_F_PR_OR_UINT
+#define CK_F_PR_STALL
+#define CK_F_PR_STORE_16
+#define CK_F_PR_STORE_32
+#define CK_F_PR_STORE_64
+#define CK_F_PR_STORE_8
+#define CK_F_PR_STORE_CHAR
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_SUB_16
+#define CK_F_PR_SUB_32
+#define CK_F_PR_SUB_64
+#define CK_F_PR_SUB_8
+#define CK_F_PR_SUB_CHAR
+#define CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_UINT
+#define CK_F_PR_XOR_16
+#define CK_F_PR_XOR_32
+#define CK_F_PR_XOR_64
+#define CK_F_PR_XOR_8
+#define CK_F_PR_XOR_CHAR
+#define CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_UINT
diff --git a/include/gcc/ck_pr.h b/include/gcc/ck_pr.h
new file mode 100644
index 0000000..084d423
--- /dev/null
+++ b/include/gcc/ck_pr.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2010 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_GCC_H
+#define CK_PR_GCC_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+
+CK_CC_INLINE static void
+ck_pr_barrier(void)
+{
+
+ __asm__ __volatile__("" ::: "memory");
+ return;
+}
+
+#ifndef CK_F_PR
+#define CK_F_PR
+
+#include <ck_stdbool.h>
+#include <ck_stdint.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+#define CK_PR_ACCESS(x) (*(volatile __typeof__(x) *)&(x))
+
+#define CK_PR_LOAD(S, M, T) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ T r; \
+ ck_pr_barrier(); \
+ r = CK_PR_ACCESS(*(const T *)target); \
+ ck_pr_barrier(); \
+ return (r); \
+ } \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ ck_pr_barrier(); \
+ CK_PR_ACCESS(*(T *)target) = v; \
+ ck_pr_barrier(); \
+ return; \
+ }
+
+CK_CC_INLINE static void *
+ck_pr_md_load_ptr(const void *target)
+{
+ void *r;
+
+ ck_pr_barrier();
+ r = CK_CC_DECONST_PTR(CK_PR_ACCESS(target));
+ ck_pr_barrier();
+
+ return r;
+}
+
+CK_CC_INLINE static void
+ck_pr_md_store_ptr(void *target, const void *v)
+{
+
+ ck_pr_barrier();
+ CK_PR_ACCESS(target) = CK_CC_DECONST_PTR(v);
+ ck_pr_barrier();
+ return;
+}
+
+#define CK_PR_LOAD_S(S, T) CK_PR_LOAD(S, T, T)
+
+CK_PR_LOAD_S(char, char)
+CK_PR_LOAD_S(uint, unsigned int)
+CK_PR_LOAD_S(int, int)
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_LOAD_S(double, double)
+#endif
+CK_PR_LOAD_S(64, uint64_t)
+CK_PR_LOAD_S(32, uint32_t)
+CK_PR_LOAD_S(16, uint16_t)
+CK_PR_LOAD_S(8, uint8_t)
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD
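For reference, a hand-expanded sketch of what CK_PR_LOAD_S(32, uint32_t) above produces; this is illustration only, not an addition to the header.

/* Hand expansion of CK_PR_LOAD_S(32, uint32_t): a compiler barrier on either
 * side of a volatile access, for both the load and the store direction. */
CK_CC_INLINE static uint32_t
ck_pr_md_load_32(const uint32_t *target)
{
	uint32_t r;

	ck_pr_barrier();
	r = CK_PR_ACCESS(*(const uint32_t *)target);
	ck_pr_barrier();
	return (r);
}

CK_CC_INLINE static void
ck_pr_md_store_32(uint32_t *target, uint32_t v)
{

	ck_pr_barrier();
	CK_PR_ACCESS(*(uint32_t *)target) = v;
	ck_pr_barrier();
	return;
}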
+
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+
+ ck_pr_barrier();
+}
+
+/*
+ * Load and store fences are equivalent to full fences in the GCC port.
+ */
+#define CK_PR_FENCE(T) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ __sync_synchronize(); \
+ }
+
+CK_PR_FENCE(atomic)
+CK_PR_FENCE(atomic_atomic)
+CK_PR_FENCE(atomic_load)
+CK_PR_FENCE(atomic_store)
+CK_PR_FENCE(store_atomic)
+CK_PR_FENCE(load_atomic)
+CK_PR_FENCE(load)
+CK_PR_FENCE(load_load)
+CK_PR_FENCE(load_store)
+CK_PR_FENCE(store)
+CK_PR_FENCE(store_store)
+CK_PR_FENCE(store_load)
+CK_PR_FENCE(memory)
+CK_PR_FENCE(acquire)
+CK_PR_FENCE(release)
+CK_PR_FENCE(acqrel)
+CK_PR_FENCE(lock)
+CK_PR_FENCE(unlock)
+
+#undef CK_PR_FENCE
+
+/*
+ * Atomic compare and swap.
+ */
+#define CK_PR_CAS(S, M, T) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##S(M *target, T compare, T set) \
+ { \
+ bool z; \
+ z = __sync_bool_compare_and_swap((T *)target, compare, set); \
+ return z; \
+ }
+
+CK_PR_CAS(ptr, void, void *)
+
+#define CK_PR_CAS_S(S, T) CK_PR_CAS(S, T, T)
+
+CK_PR_CAS_S(char, char)
+CK_PR_CAS_S(int, int)
+CK_PR_CAS_S(uint, unsigned int)
+CK_PR_CAS_S(64, uint64_t)
+CK_PR_CAS_S(32, uint32_t)
+CK_PR_CAS_S(16, uint16_t)
+CK_PR_CAS_S(8, uint8_t)
+
+#undef CK_PR_CAS_S
+#undef CK_PR_CAS
+
+/*
+ * Compare and swap; *v is set to the old value of the target.
+ */
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *v)
+{
+ set = __sync_val_compare_and_swap((void **)target, compare, set);
+ *(void **)v = set;
+ return (set == compare);
+}
+
+#define CK_PR_CAS_O(S, T) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##S##_value(T *target, T compare, T set, T *v) \
+ { \
+ set = __sync_val_compare_and_swap(target, compare, set);\
+ *v = set; \
+ return (set == compare); \
+ }
+
+CK_PR_CAS_O(char, char)
+CK_PR_CAS_O(int, int)
+CK_PR_CAS_O(uint, unsigned int)
+CK_PR_CAS_O(64, uint64_t)
+CK_PR_CAS_O(32, uint32_t)
+CK_PR_CAS_O(16, uint16_t)
+CK_PR_CAS_O(8, uint8_t)
+
+#undef CK_PR_CAS_O
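A minimal sketch of the usual retry pattern built on the value-returning CAS above, written against the public ck_pr_* names that the top-level ck_pr.h layers over these definitions; atomic_max is an illustrative helper, not part of CK.

/* Atomically raise *target to at least "candidate". */
static void
atomic_max(unsigned int *target, unsigned int candidate)
{
	unsigned int snapshot = ck_pr_load_uint(target);

	while (snapshot < candidate &&
	    ck_pr_cas_uint_value(target, snapshot, candidate, &snapshot) == false) {
		/* On failure, snapshot holds the value that won; re-check it. */
	}

	return;
}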
+
+/*
+ * Atomic fetch-and-add operations.
+ */
+#define CK_PR_FAA(S, M, T) \
+ CK_CC_INLINE static T \
+ ck_pr_faa_##S(M *target, T d) \
+ { \
+ d = __sync_fetch_and_add((T *)target, d); \
+ return (d); \
+ }
+
+CK_PR_FAA(ptr, void, void *)
+
+#define CK_PR_FAA_S(S, T) CK_PR_FAA(S, T, T)
+
+CK_PR_FAA_S(char, char)
+CK_PR_FAA_S(uint, unsigned int)
+CK_PR_FAA_S(int, int)
+CK_PR_FAA_S(64, uint64_t)
+CK_PR_FAA_S(32, uint32_t)
+CK_PR_FAA_S(16, uint16_t)
+CK_PR_FAA_S(8, uint8_t)
+
+#undef CK_PR_FAA_S
+#undef CK_PR_FAA
+
+/*
+ * Atomic store-only binary operations.
+ */
+#define CK_PR_BINARY(K, S, M, T) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S(M *target, T d) \
+ { \
+ d = __sync_fetch_and_##K((T *)target, d); \
+ return; \
+ }
+
+#define CK_PR_BINARY_S(K, S, T) CK_PR_BINARY(K, S, T, T)
+
+#define CK_PR_GENERATE(K) \
+ CK_PR_BINARY(K, ptr, void, void *) \
+ CK_PR_BINARY_S(K, char, char) \
+ CK_PR_BINARY_S(K, int, int) \
+ CK_PR_BINARY_S(K, uint, unsigned int) \
+ CK_PR_BINARY_S(K, 64, uint64_t) \
+ CK_PR_BINARY_S(K, 32, uint32_t) \
+ CK_PR_BINARY_S(K, 16, uint16_t) \
+ CK_PR_BINARY_S(K, 8, uint8_t)
+
+CK_PR_GENERATE(add)
+CK_PR_GENERATE(sub)
+CK_PR_GENERATE(and)
+CK_PR_GENERATE(or)
+CK_PR_GENERATE(xor)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+#define CK_PR_UNARY(S, M, T) \
+ CK_CC_INLINE static void \
+ ck_pr_inc_##S(M *target) \
+ { \
+ ck_pr_add_##S(target, (T)1); \
+ return; \
+ } \
+ CK_CC_INLINE static void \
+ ck_pr_dec_##S(M *target) \
+ { \
+ ck_pr_sub_##S(target, (T)1); \
+ return; \
+ }
+
+#define CK_PR_UNARY_S(S, M) CK_PR_UNARY(S, M, M)
+
+CK_PR_UNARY(ptr, void, void *)
+CK_PR_UNARY_S(char, char)
+CK_PR_UNARY_S(int, int)
+CK_PR_UNARY_S(uint, unsigned int)
+CK_PR_UNARY_S(64, uint64_t)
+CK_PR_UNARY_S(32, uint32_t)
+CK_PR_UNARY_S(16, uint16_t)
+CK_PR_UNARY_S(8, uint8_t)
+
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY
+#endif /* !CK_F_PR */
+#endif /* CK_PR_GCC_H */
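A small usage sketch for the __sync-based fetch-and-add and increment operations generated by this port, assuming the public ck_pr_* wrapper names from ck_pr.h; the event counter is illustrative.

static unsigned int events;

/* Returns the count observed before this call's increment. */
static unsigned int
record_event(void)
{

	return ck_pr_faa_uint(&events, 1);
}

/* Store-only variant for callers that do not need the previous value. */
static void
record_event_quietly(void)
{

	ck_pr_inc_uint(&events);
	return;
}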
diff --git a/include/gcc/ppc/ck_f_pr.h b/include/gcc/ppc/ck_f_pr.h
new file mode 100644
index 0000000..0aec33e
--- /dev/null
+++ b/include/gcc/ppc/ck_f_pr.h
@@ -0,0 +1,79 @@
+/* DO NOT EDIT. This is auto-generated from feature.sh */
+#define CK_F_PR_ADD_32
+#define CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_UINT
+#define CK_F_PR_AND_32
+#define CK_F_PR_AND_INT
+#define CK_F_PR_AND_PTR
+#define CK_F_PR_AND_UINT
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_VALUE
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_CAS_PTR
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_DEC_32
+#define CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_UINT
+#define CK_F_PR_FAA_32
+#define CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_UINT
+#define CK_F_PR_FAS_32
+#define CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_UINT
+#define CK_F_PR_FENCE_LOAD
+#define CK_F_PR_FENCE_LOAD_DEPENDS
+#define CK_F_PR_FENCE_MEMORY
+#define CK_F_PR_FENCE_STORE
+#define CK_F_PR_FENCE_STRICT_LOAD
+#define CK_F_PR_FENCE_STRICT_LOAD_DEPENDS
+#define CK_F_PR_FENCE_STRICT_MEMORY
+#define CK_F_PR_FENCE_STRICT_STORE
+#define CK_F_PR_INC_32
+#define CK_F_PR_INC_INT
+#define CK_F_PR_INC_PTR
+#define CK_F_PR_INC_UINT
+#define CK_F_PR_LOAD_16
+#define CK_F_PR_LOAD_32
+#define CK_F_PR_LOAD_8
+#define CK_F_PR_LOAD_CHAR
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_PTR
+#define CK_F_PR_LOAD_SHORT
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_NEG_32
+#define CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_UINT
+#define CK_F_PR_NOT_32
+#define CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_UINT
+#define CK_F_PR_OR_32
+#define CK_F_PR_OR_INT
+#define CK_F_PR_OR_PTR
+#define CK_F_PR_OR_UINT
+#define CK_F_PR_STALL
+#define CK_F_PR_STORE_16
+#define CK_F_PR_STORE_32
+#define CK_F_PR_STORE_8
+#define CK_F_PR_STORE_CHAR
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_STORE_SHORT
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_SUB_32
+#define CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_UINT
+#define CK_F_PR_XOR_32
+#define CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_UINT
+
diff --git a/include/gcc/ppc/ck_pr.h b/include/gcc/ppc/ck_pr.h
new file mode 100644
index 0000000..cd7935d
--- /dev/null
+++ b/include/gcc/ppc/ck_pr.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * Copyright 2012 João Fernandes.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_PPC_H
+#define CK_PR_PPC_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+#include <ck_md.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+/*
+ * Minimum interface requirement met.
+ */
+#define CK_F_PR
+
+/*
+ * This bounces the hardware thread from low to medium
+ * priority. The benefit of this approach is unclear, but it
+ * is the pattern used by the Linux kernel.
+ */
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+
+ __asm__ __volatile__("or 1, 1, 1;"
+ "or 2, 2, 2;" ::: "memory");
+ return;
+}
+
+#define CK_PR_FENCE(T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ __asm__ __volatile__(I ::: "memory"); \
+ }
+
+CK_PR_FENCE(atomic, "lwsync")
+CK_PR_FENCE(atomic_store, "lwsync")
+CK_PR_FENCE(atomic_load, "sync")
+CK_PR_FENCE(store_atomic, "lwsync")
+CK_PR_FENCE(load_atomic, "lwsync")
+CK_PR_FENCE(store, "lwsync")
+CK_PR_FENCE(store_load, "sync")
+CK_PR_FENCE(load, "lwsync")
+CK_PR_FENCE(load_store, "lwsync")
+CK_PR_FENCE(memory, "sync")
+CK_PR_FENCE(acquire, "lwsync")
+CK_PR_FENCE(release, "lwsync")
+CK_PR_FENCE(acqrel, "lwsync")
+CK_PR_FENCE(lock, "lwsync")
+CK_PR_FENCE(unlock, "lwsync")
+
+#undef CK_PR_FENCE
+
+#define CK_PR_LOAD(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ T r; \
+ __asm__ __volatile__(I "%U1%X1 %0, %1" \
+ : "=r" (r) \
+ : "m" (*(const C *)target) \
+ : "memory"); \
+ return (r); \
+ }
+
+CK_PR_LOAD(ptr, void, void *, uint32_t, "lwz")
+
+#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)
+
+CK_PR_LOAD_S(32, uint32_t, "lwz")
+CK_PR_LOAD_S(16, uint16_t, "lhz")
+CK_PR_LOAD_S(8, uint8_t, "lbz")
+CK_PR_LOAD_S(uint, unsigned int, "lwz")
+CK_PR_LOAD_S(int, int, "lwz")
+CK_PR_LOAD_S(short, short, "lhz")
+CK_PR_LOAD_S(char, char, "lbz")
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD
+
+#define CK_PR_STORE(S, M, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I "%U0%X0 %1, %0" \
+ : "=m" (*(C *)target) \
+ : "r" (v) \
+ : "memory"); \
+ return; \
+ }
+
+CK_PR_STORE(ptr, void, const void *, uint32_t, "stw")
+
+#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I)
+
+CK_PR_STORE_S(32, uint32_t, "stw")
+CK_PR_STORE_S(16, uint16_t, "sth")
+CK_PR_STORE_S(8, uint8_t, "stb")
+CK_PR_STORE_S(uint, unsigned int, "stw")
+CK_PR_STORE_S(int, int, "stw")
+CK_PR_STORE_S(short, short, "sth")
+CK_PR_STORE_S(char, char, "stb")
+
+#undef CK_PR_STORE_S
+#undef CK_PR_STORE
+
+#define CK_PR_CAS(N, T, M) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N##_value(M *target, T compare, T set, M *value) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "lwarx %0, 0, %1;" \
+ "cmpw 0, %0, %3;" \
+ "bne- 2f;" \
+ "stwcx. %2, 0, %1;" \
+ "bne- 1b;" \
+ "2:" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (set), \
+ "r" (compare) \
+ : "memory", "cc"); \
+ *(T *)value = previous; \
+ return (previous == compare); \
+ } \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N(M *target, T compare, T set) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "lwarx %0, 0, %1;" \
+ "cmpw 0, %0, %3;" \
+ "bne- 2f;" \
+ "stwcx. %2, 0, %1;" \
+ "bne- 1b;" \
+ "2:" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (set), \
+ "r" (compare) \
+ : "memory", "cc"); \
+ return (previous == compare); \
+ }
+
+CK_PR_CAS(ptr, void *, void)
+#define CK_PR_CAS_S(a, b) CK_PR_CAS(a, b, b)
+CK_PR_CAS_S(32, uint32_t)
+CK_PR_CAS_S(uint, unsigned int)
+CK_PR_CAS_S(int, int)
+
+#undef CK_PR_CAS_S
+#undef CK_PR_CAS
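The lwarx/stwcx. pairs above implement compare-and-swap as a load-reserved/store-conditional retry loop. The following is a conceptual C rendering only; load_reserved() and store_conditional() are hypothetical helpers standing in for lwarx and stwcx., which have no portable C equivalent.

/* Illustration of the loop structure in the inline assembly above.
 * load_reserved() and store_conditional() are hypothetical: stwcx. only
 * succeeds if the reservation taken by the matching lwarx still holds. */
static bool
cas_32_concept(uint32_t *target, uint32_t compare, uint32_t set)
{
	uint32_t previous;

	do {
		previous = load_reserved(target);	/* lwarx         */
		if (previous != compare)
			return false;			/* cmpw; bne- 2f */
	} while (store_conditional(target, set) == false); /* stwcx.; bne- 1b */

	return true;
}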
+
+#define CK_PR_FAS(N, M, T, W) \
+ CK_CC_INLINE static T \
+ ck_pr_fas_##N(M *target, T v) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "l" W "arx %0, 0, %1;" \
+ "st" W "cx. %2, 0, %1;" \
+ "bne- 1b;" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (v) \
+ : "memory", "cc"); \
+ return (previous); \
+ }
+
+CK_PR_FAS(32, uint32_t, uint32_t, "w")
+CK_PR_FAS(ptr, void, void *, "w")
+CK_PR_FAS(int, int, int, "w")
+CK_PR_FAS(uint, unsigned int, unsigned int, "w")
+
+#undef CK_PR_FAS
+
+#define CK_PR_UNARY(O, N, M, T, I, W) \
+ CK_CC_INLINE static void \
+ ck_pr_##O##_##N(M *target) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "l" W "arx %0, 0, %1;" \
+ I ";" \
+ "st" W "cx. %0, 0, %1;" \
+ "bne- 1b;" \
+ : "=&r" (previous) \
+ : "r" (target) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+CK_PR_UNARY(inc, ptr, void, void *, "addic %0, %0, 1", "w")
+CK_PR_UNARY(dec, ptr, void, void *, "addic %0, %0, -1", "w")
+CK_PR_UNARY(not, ptr, void, void *, "not %0, %0", "w")
+CK_PR_UNARY(neg, ptr, void, void *, "neg %0, %0", "w")
+
+#define CK_PR_UNARY_S(S, T, W) \
+ CK_PR_UNARY(inc, S, T, T, "addic %0, %0, 1", W) \
+ CK_PR_UNARY(dec, S, T, T, "addic %0, %0, -1", W) \
+ CK_PR_UNARY(not, S, T, T, "not %0, %0", W) \
+ CK_PR_UNARY(neg, S, T, T, "neg %0, %0", W)
+
+CK_PR_UNARY_S(32, uint32_t, "w")
+CK_PR_UNARY_S(uint, unsigned int, "w")
+CK_PR_UNARY_S(int, int, "w")
+
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY
+
+#define CK_PR_BINARY(O, N, M, T, I, W) \
+ CK_CC_INLINE static void \
+ ck_pr_##O##_##N(M *target, T delta) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "l" W "arx %0, 0, %1;" \
+ I " %0, %2, %0;" \
+ "st" W "cx. %0, 0, %1;" \
+ "bne- 1b;" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (delta) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+CK_PR_BINARY(and, ptr, void, uintptr_t, "and", "w")
+CK_PR_BINARY(add, ptr, void, uintptr_t, "add", "w")
+CK_PR_BINARY(or, ptr, void, uintptr_t, "or", "w")
+CK_PR_BINARY(sub, ptr, void, uintptr_t, "sub", "w")
+CK_PR_BINARY(xor, ptr, void, uintptr_t, "xor", "w")
+
+#define CK_PR_BINARY_S(S, T, W) \
+ CK_PR_BINARY(and, S, T, T, "and", W) \
+ CK_PR_BINARY(add, S, T, T, "add", W) \
+ CK_PR_BINARY(or, S, T, T, "or", W) \
+ CK_PR_BINARY(sub, S, T, T, "subf", W) \
+ CK_PR_BINARY(xor, S, T, T, "xor", W)
+
+CK_PR_BINARY_S(32, uint32_t, "w")
+CK_PR_BINARY_S(uint, unsigned int, "w")
+CK_PR_BINARY_S(int, int, "w")
+
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+CK_CC_INLINE static void *
+ck_pr_faa_ptr(void *target, uintptr_t delta)
+{
+ uintptr_t previous, r;
+
+ __asm__ __volatile__("1:"
+ "lwarx %0, 0, %2;"
+ "add %1, %3, %0;"
+ "stwcx. %1, 0, %2;"
+ "bne- 1b;"
+ : "=&r" (previous),
+ "=&r" (r)
+ : "r" (target),
+ "r" (delta)
+ : "memory", "cc");
+
+ return (void *)(previous);
+}
+
+#define CK_PR_FAA(S, T, W) \
+ CK_CC_INLINE static T \
+ ck_pr_faa_##S(T *target, T delta) \
+ { \
+ T previous, r; \
+ __asm__ __volatile__("1:" \
+ "l" W "arx %0, 0, %2;" \
+ "add %1, %3, %0;" \
+ "st" W "cx. %1, 0, %2;" \
+ "bne- 1b;" \
+ : "=&r" (previous), \
+ "=&r" (r) \
+ : "r" (target), \
+ "r" (delta) \
+ : "memory", "cc"); \
+ return (previous); \
+ }
+
+CK_PR_FAA(32, uint32_t, "w")
+CK_PR_FAA(uint, unsigned int, "w")
+CK_PR_FAA(int, int, "w")
+
+#undef CK_PR_FAA
+
+#endif /* CK_PR_PPC_H */
+
diff --git a/include/gcc/ppc64/ck_f_pr.h b/include/gcc/ppc64/ck_f_pr.h
new file mode 100644
index 0000000..cd54a28
--- /dev/null
+++ b/include/gcc/ppc64/ck_f_pr.h
@@ -0,0 +1,97 @@
+/* DO NOT EDIT. This is auto-generated from feature.sh */
+#define CK_F_PR_ADD_32
+#define CK_F_PR_ADD_64
+#define CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_UINT
+#define CK_F_PR_AND_32
+#define CK_F_PR_AND_64
+#define CK_F_PR_AND_INT
+#define CK_F_PR_AND_PTR
+#define CK_F_PR_AND_UINT
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_VALUE
+#define CK_F_PR_CAS_64
+#define CK_F_PR_CAS_64_VALUE
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_CAS_PTR
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_DEC_32
+#define CK_F_PR_DEC_64
+#define CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_UINT
+#define CK_F_PR_FAA_32
+#define CK_F_PR_FAA_64
+#define CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_UINT
+#define CK_F_PR_FAS_32
+#define CK_F_PR_FAS_64
+#define CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_UINT
+#define CK_F_PR_FAS_DOUBLE
+#define CK_F_PR_FENCE_LOAD
+#define CK_F_PR_FENCE_LOAD_DEPENDS
+#define CK_F_PR_FENCE_MEMORY
+#define CK_F_PR_FENCE_STORE
+#define CK_F_PR_FENCE_STRICT_LOAD
+#define CK_F_PR_FENCE_STRICT_LOAD_DEPENDS
+#define CK_F_PR_FENCE_STRICT_MEMORY
+#define CK_F_PR_FENCE_STRICT_STORE
+#define CK_F_PR_INC_32
+#define CK_F_PR_INC_64
+#define CK_F_PR_INC_INT
+#define CK_F_PR_INC_PTR
+#define CK_F_PR_INC_UINT
+#define CK_F_PR_LOAD_16
+#define CK_F_PR_LOAD_32
+#define CK_F_PR_LOAD_64
+#define CK_F_PR_LOAD_8
+#define CK_F_PR_LOAD_CHAR
+#define CK_F_PR_LOAD_DOUBLE
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_PTR
+#define CK_F_PR_LOAD_SHORT
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_NEG_32
+#define CK_F_PR_NEG_64
+#define CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_UINT
+#define CK_F_PR_NOT_32
+#define CK_F_PR_NOT_64
+#define CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_UINT
+#define CK_F_PR_OR_32
+#define CK_F_PR_OR_64
+#define CK_F_PR_OR_INT
+#define CK_F_PR_OR_PTR
+#define CK_F_PR_OR_UINT
+#define CK_F_PR_STALL
+#define CK_F_PR_STORE_16
+#define CK_F_PR_STORE_32
+#define CK_F_PR_STORE_64
+#define CK_F_PR_STORE_8
+#define CK_F_PR_STORE_CHAR
+#define CK_F_PR_STORE_DOUBLE
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_STORE_SHORT
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_SUB_32
+#define CK_F_PR_SUB_64
+#define CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_UINT
+#define CK_F_PR_XOR_32
+#define CK_F_PR_XOR_64
+#define CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_UINT
+
diff --git a/include/gcc/ppc64/ck_pr.h b/include/gcc/ppc64/ck_pr.h
new file mode 100644
index 0000000..3f5e5db
--- /dev/null
+++ b/include/gcc/ppc64/ck_pr.h
@@ -0,0 +1,427 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_PPC64_H
+#define CK_PR_PPC64_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+#include <ck_md.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+/*
+ * Minimum interface requirement met.
+ */
+#define CK_F_PR
+
+/*
+ * This bounces the hardware thread from low to medium
+ * priority. The benefit of this approach is unclear, but it
+ * is the pattern used by the Linux kernel.
+ */
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+
+ __asm__ __volatile__("or 1, 1, 1;"
+ "or 2, 2, 2;" ::: "memory");
+ return;
+}
+
+#define CK_PR_FENCE(T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ __asm__ __volatile__(I ::: "memory"); \
+ }
+
+/*
+ * These are derived from:
+ * http://www.ibm.com/developerworks/systems/articles/powerpc.html
+ */
+CK_PR_FENCE(atomic, "lwsync")
+CK_PR_FENCE(atomic_store, "lwsync")
+CK_PR_FENCE(atomic_load, "sync")
+CK_PR_FENCE(store_atomic, "lwsync")
+CK_PR_FENCE(load_atomic, "lwsync")
+CK_PR_FENCE(store, "lwsync")
+CK_PR_FENCE(store_load, "sync")
+CK_PR_FENCE(load, "lwsync")
+CK_PR_FENCE(load_store, "lwsync")
+CK_PR_FENCE(memory, "sync")
+CK_PR_FENCE(acquire, "lwsync")
+CK_PR_FENCE(release, "lwsync")
+CK_PR_FENCE(acqrel, "lwsync")
+CK_PR_FENCE(lock, "lwsync")
+CK_PR_FENCE(unlock, "lwsync")
+
+#undef CK_PR_FENCE
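A minimal message-passing sketch on top of the fences above, assuming the non-strict ck_pr_fence_*() and ck_pr_load/store wrappers that the top-level ck_pr.h builds over these strict definitions; the data/flag pair is illustrative.

static uint64_t data;
static uint64_t flag;

static void
producer(uint64_t v)
{

	ck_pr_store_64(&data, v);
	ck_pr_fence_release();		/* lwsync: order data store before flag store */
	ck_pr_store_64(&flag, 1);
	return;
}

static uint64_t
consumer(void)
{

	while (ck_pr_load_64(&flag) == 0)
		ck_pr_stall();

	ck_pr_fence_acquire();		/* lwsync: order flag load before data load */
	return ck_pr_load_64(&data);
}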
+
+#define CK_PR_LOAD(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ T r; \
+ __asm__ __volatile__(I "%U1%X1 %0, %1" \
+ : "=r" (r) \
+ : "m" (*(const C *)target) \
+ : "memory"); \
+ return (r); \
+ }
+
+CK_PR_LOAD(ptr, void, void *, uint64_t, "ld")
+
+#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)
+
+CK_PR_LOAD_S(64, uint64_t, "ld")
+CK_PR_LOAD_S(32, uint32_t, "lwz")
+CK_PR_LOAD_S(16, uint16_t, "lhz")
+CK_PR_LOAD_S(8, uint8_t, "lbz")
+CK_PR_LOAD_S(uint, unsigned int, "lwz")
+CK_PR_LOAD_S(int, int, "lwz")
+CK_PR_LOAD_S(short, short, "lhz")
+CK_PR_LOAD_S(char, char, "lbz")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_LOAD_S(double, double, "ld")
+#endif
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD
+
+#define CK_PR_STORE(S, M, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I "%U0%X0 %1, %0" \
+ : "=m" (*(C *)target) \
+ : "r" (v) \
+ : "memory"); \
+ return; \
+ }
+
+CK_PR_STORE(ptr, void, const void *, uint64_t, "std")
+
+#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I)
+
+CK_PR_STORE_S(64, uint64_t, "std")
+CK_PR_STORE_S(32, uint32_t, "stw")
+CK_PR_STORE_S(16, uint16_t, "sth")
+CK_PR_STORE_S(8, uint8_t, "stb")
+CK_PR_STORE_S(uint, unsigned int, "stw")
+CK_PR_STORE_S(int, int, "stw")
+CK_PR_STORE_S(short, short, "sth")
+CK_PR_STORE_S(char, char, "stb")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_STORE_S(double, double, "std")
+#endif
+
+#undef CK_PR_STORE_S
+#undef CK_PR_STORE
+
+CK_CC_INLINE static bool
+ck_pr_cas_64_value(uint64_t *target, uint64_t compare, uint64_t set, uint64_t *value)
+{
+ uint64_t previous;
+
+ __asm__ __volatile__("1:"
+ "ldarx %0, 0, %1;"
+ "cmpd 0, %0, %3;"
+ "bne- 2f;"
+ "stdcx. %2, 0, %1;"
+ "bne- 1b;"
+ "2:"
+ : "=&r" (previous)
+ : "r" (target),
+ "r" (set),
+ "r" (compare)
+ : "memory", "cc");
+
+ *value = previous;
+ return (previous == compare);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *value)
+{
+ void *previous;
+
+ __asm__ __volatile__("1:"
+ "ldarx %0, 0, %1;"
+ "cmpd 0, %0, %3;"
+ "bne- 2f;"
+ "stdcx. %2, 0, %1;"
+ "bne- 1b;"
+ "2:"
+ : "=&r" (previous)
+ : "r" (target),
+ "r" (set),
+ "r" (compare)
+ : "memory", "cc");
+
+ ck_pr_md_store_ptr(value, previous);
+ return (previous == compare);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_64(uint64_t *target, uint64_t compare, uint64_t set)
+{
+ uint64_t previous;
+
+ __asm__ __volatile__("1:"
+ "ldarx %0, 0, %1;"
+ "cmpd 0, %0, %3;"
+ "bne- 2f;"
+ "stdcx. %2, 0, %1;"
+ "bne- 1b;"
+ "2:"
+ : "=&r" (previous)
+ : "r" (target),
+ "r" (set),
+ "r" (compare)
+ : "memory", "cc");
+
+ return (previous == compare);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr(void *target, void *compare, void *set)
+{
+ void *previous;
+
+ __asm__ __volatile__("1:"
+ "ldarx %0, 0, %1;"
+ "cmpd 0, %0, %3;"
+ "bne- 2f;"
+ "stdcx. %2, 0, %1;"
+ "bne- 1b;"
+ "2:"
+ : "=&r" (previous)
+ : "r" (target),
+ "r" (set),
+ "r" (compare)
+ : "memory", "cc");
+
+ return (previous == compare);
+}
+
+#define CK_PR_CAS(N, T) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N##_value(T *target, T compare, T set, T *value) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "lwarx %0, 0, %1;" \
+ "cmpw 0, %0, %3;" \
+ "bne- 2f;" \
+ "stwcx. %2, 0, %1;" \
+ "bne- 1b;" \
+ "2:" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (set), \
+ "r" (compare) \
+ : "memory", "cc"); \
+ *value = previous; \
+ return (previous == compare); \
+ } \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N(T *target, T compare, T set) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "lwarx %0, 0, %1;" \
+ "cmpw 0, %0, %3;" \
+ "bne- 2f;" \
+ "stwcx. %2, 0, %1;" \
+ "bne- 1b;" \
+ "2:" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (set), \
+ "r" (compare) \
+ : "memory", "cc"); \
+ return (previous == compare); \
+ }
+
+CK_PR_CAS(32, uint32_t)
+CK_PR_CAS(uint, unsigned int)
+CK_PR_CAS(int, int)
+
+#undef CK_PR_CAS
+
+#define CK_PR_FAS(N, M, T, W) \
+ CK_CC_INLINE static T \
+ ck_pr_fas_##N(M *target, T v) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "l" W "arx %0, 0, %1;" \
+ "st" W "cx. %2, 0, %1;" \
+ "bne- 1b;" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (v) \
+ : "memory", "cc"); \
+ return (previous); \
+ }
+
+CK_PR_FAS(64, uint64_t, uint64_t, "d")
+CK_PR_FAS(32, uint32_t, uint32_t, "w")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_FAS(double, double, double, "d")
+#endif
+CK_PR_FAS(ptr, void, void *, "d")
+CK_PR_FAS(int, int, int, "w")
+CK_PR_FAS(uint, unsigned int, unsigned int, "w")
+
+#undef CK_PR_FAS
+
+#define CK_PR_UNARY(O, N, M, T, I, W) \
+ CK_CC_INLINE static void \
+ ck_pr_##O##_##N(M *target) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "l" W "arx %0, 0, %1;" \
+ I ";" \
+ "st" W "cx. %0, 0, %1;" \
+ "bne- 1b;" \
+ : "=&r" (previous) \
+ : "r" (target) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+CK_PR_UNARY(inc, ptr, void, void *, "addic %0, %0, 1", "d")
+CK_PR_UNARY(dec, ptr, void, void *, "addic %0, %0, -1", "d")
+CK_PR_UNARY(not, ptr, void, void *, "not %0, %0", "d")
+CK_PR_UNARY(neg, ptr, void, void *, "neg %0, %0", "d")
+
+#define CK_PR_UNARY_S(S, T, W) \
+ CK_PR_UNARY(inc, S, T, T, "addic %0, %0, 1", W) \
+ CK_PR_UNARY(dec, S, T, T, "addic %0, %0, -1", W) \
+ CK_PR_UNARY(not, S, T, T, "not %0, %0", W) \
+ CK_PR_UNARY(neg, S, T, T, "neg %0, %0", W)
+
+CK_PR_UNARY_S(64, uint64_t, "d")
+CK_PR_UNARY_S(32, uint32_t, "w")
+CK_PR_UNARY_S(uint, unsigned int, "w")
+CK_PR_UNARY_S(int, int, "w")
+
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY
+
+#define CK_PR_BINARY(O, N, M, T, I, W) \
+ CK_CC_INLINE static void \
+ ck_pr_##O##_##N(M *target, T delta) \
+ { \
+ T previous; \
+ __asm__ __volatile__("1:" \
+ "l" W "arx %0, 0, %1;" \
+ I " %0, %2, %0;" \
+ "st" W "cx. %0, 0, %1;" \
+ "bne- 1b;" \
+ : "=&r" (previous) \
+ : "r" (target), \
+ "r" (delta) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+CK_PR_BINARY(and, ptr, void, uintptr_t, "and", "d")
+CK_PR_BINARY(add, ptr, void, uintptr_t, "add", "d")
+CK_PR_BINARY(or, ptr, void, uintptr_t, "or", "d")
+CK_PR_BINARY(sub, ptr, void, uintptr_t, "sub", "d")
+CK_PR_BINARY(xor, ptr, void, uintptr_t, "xor", "d")
+
+#define CK_PR_BINARY_S(S, T, W) \
+ CK_PR_BINARY(and, S, T, T, "and", W) \
+ CK_PR_BINARY(add, S, T, T, "add", W) \
+ CK_PR_BINARY(or, S, T, T, "or", W) \
+ CK_PR_BINARY(sub, S, T, T, "subf", W) \
+ CK_PR_BINARY(xor, S, T, T, "xor", W)
+
+CK_PR_BINARY_S(64, uint64_t, "d")
+CK_PR_BINARY_S(32, uint32_t, "w")
+CK_PR_BINARY_S(uint, unsigned int, "w")
+CK_PR_BINARY_S(int, int, "w")
+
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+CK_CC_INLINE static void *
+ck_pr_faa_ptr(void *target, uintptr_t delta)
+{
+ uintptr_t previous, r;
+
+ __asm__ __volatile__("1:"
+ "ldarx %0, 0, %2;"
+ "add %1, %3, %0;"
+ "stdcx. %1, 0, %2;"
+ "bne- 1b;"
+ : "=&r" (previous),
+ "=&r" (r)
+ : "r" (target),
+ "r" (delta)
+ : "memory", "cc");
+
+ return (void *)(previous);
+}
+
+#define CK_PR_FAA(S, T, W) \
+ CK_CC_INLINE static T \
+ ck_pr_faa_##S(T *target, T delta) \
+ { \
+ T previous, r; \
+ __asm__ __volatile__("1:" \
+ "l" W "arx %0, 0, %2;" \
+ "add %1, %3, %0;" \
+ "st" W "cx. %1, 0, %2;" \
+ "bne- 1b;" \
+ : "=&r" (previous), \
+ "=&r" (r) \
+ : "r" (target), \
+ "r" (delta) \
+ : "memory", "cc"); \
+ return (previous); \
+ }
+
+CK_PR_FAA(64, uint64_t, "d")
+CK_PR_FAA(32, uint32_t, "w")
+CK_PR_FAA(uint, unsigned int, "w")
+CK_PR_FAA(int, int, "w")
+
+#undef CK_PR_FAA
+
+#endif /* CK_PR_PPC64_H */
diff --git a/include/gcc/sparcv9/ck_f_pr.h b/include/gcc/sparcv9/ck_f_pr.h
new file mode 100644
index 0000000..0398680
--- /dev/null
+++ b/include/gcc/sparcv9/ck_f_pr.h
@@ -0,0 +1,26 @@
+#define CK_F_PR_CAS_64
+#define CK_F_PR_CAS_64_VALUE
+#define CK_F_PR_CAS_PTR
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_FAS_32
+#define CK_F_PR_FAS_UINT
+#define CK_F_PR_FAS_INT
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_STORE_64
+#define CK_F_PR_STORE_32
+#define CK_F_PR_STORE_DOUBLE
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_LOAD_64
+#define CK_F_PR_LOAD_32
+#define CK_F_PR_LOAD_DOUBLE
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_PTR
+
diff --git a/include/gcc/sparcv9/ck_pr.h b/include/gcc/sparcv9/ck_pr.h
new file mode 100644
index 0000000..767af6a
--- /dev/null
+++ b/include/gcc/sparcv9/ck_pr.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2009, 2010 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_SPARCV9_H
+#define CK_PR_SPARCV9_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+#include <ck_md.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+/*
+ * Minimum interface requirement met.
+ */
+#define CK_F_PR
+
+/*
+ * At the very least, order loads.
+ */
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+
+ __asm__ __volatile__("membar #LoadLoad" ::: "memory");
+ return;
+}
+
+#define CK_PR_FENCE(T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ __asm__ __volatile__(I ::: "memory"); \
+ }
+
+/*
+ * Atomic operations are treated as both load and store
+ * operations on SPARCv9.
+ */
+CK_PR_FENCE(atomic, "membar #StoreStore")
+CK_PR_FENCE(atomic_store, "membar #StoreStore")
+CK_PR_FENCE(atomic_load, "membar #StoreLoad")
+CK_PR_FENCE(store_atomic, "membar #StoreStore")
+CK_PR_FENCE(load_atomic, "membar #LoadStore")
+CK_PR_FENCE(store, "membar #StoreStore")
+CK_PR_FENCE(store_load, "membar #StoreLoad")
+CK_PR_FENCE(load, "membar #LoadLoad")
+CK_PR_FENCE(load_store, "membar #LoadStore")
+CK_PR_FENCE(memory, "membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
+CK_PR_FENCE(acquire, "membar #LoadLoad | #LoadStore")
+CK_PR_FENCE(release, "membar #LoadStore | #StoreStore")
+CK_PR_FENCE(acqrel, "membar #LoadLoad | #LoadStore | #StoreStore")
+CK_PR_FENCE(lock, "membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
+CK_PR_FENCE(unlock, "membar #LoadStore | #StoreStore")
+
+#undef CK_PR_FENCE
+
+#define CK_PR_LOAD(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ T r; \
+ __asm__ __volatile__(I " [%1], %0" \
+ : "=&r" (r) \
+ : "r" (target) \
+ : "memory"); \
+ return (r); \
+ }
+
+CK_PR_LOAD(ptr, void, void *, uint64_t, "ldx")
+
+#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)
+
+CK_PR_LOAD_S(64, uint64_t, "ldx")
+CK_PR_LOAD_S(32, uint32_t, "lduw")
+CK_PR_LOAD_S(uint, unsigned int, "lduw")
+CK_PR_LOAD_S(double, double, "ldx")
+CK_PR_LOAD_S(int, int, "ldsw")
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD
+
+#define CK_PR_STORE(S, M, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %0, [%1]" \
+ : \
+ : "r" (v), \
+ "r" (target) \
+ : "memory"); \
+ return; \
+ }
+
+CK_PR_STORE(ptr, void, const void *, uint64_t, "stx")
+
+#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I)
+
+CK_PR_STORE_S(8, uint8_t, "stub")
+CK_PR_STORE_S(64, uint64_t, "stx")
+CK_PR_STORE_S(32, uint32_t, "stuw")
+CK_PR_STORE_S(uint, unsigned int, "stuw")
+CK_PR_STORE_S(double, double, "stx")
+CK_PR_STORE_S(int, int, "stsw")
+
+#undef CK_PR_STORE_S
+#undef CK_PR_STORE
+
+CK_CC_INLINE static bool
+ck_pr_cas_64_value(uint64_t *target, uint64_t compare, uint64_t set, uint64_t *value)
+{
+
+ __asm__ __volatile__("casx [%1], %2, %0"
+ : "+&r" (set)
+ : "r" (target),
+ "r" (compare)
+ : "memory");
+
+ *value = set;
+ return (compare == set);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_64(uint64_t *target, uint64_t compare, uint64_t set)
+{
+
+ __asm__ __volatile__("casx [%1], %2, %0"
+ : "+&r" (set)
+ : "r" (target),
+ "r" (compare)
+ : "memory");
+
+ return (compare == set);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr(void *target, void *compare, void *set)
+{
+
+ return ck_pr_cas_64(target, (uint64_t)compare, (uint64_t)set);
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_value(void *target, void *compare, void *set, void *previous)
+{
+
+ return ck_pr_cas_64_value(target, (uint64_t)compare, (uint64_t)set, previous);
+}
+
+#define CK_PR_CAS(N, T) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N##_value(T *target, T compare, T set, T *value) \
+ { \
+ __asm__ __volatile__("cas [%1], %2, %0" \
+ : "+&r" (set) \
+ : "r" (target), \
+ "r" (compare) \
+ : "memory"); \
+ *value = set; \
+ return (compare == set); \
+ } \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##N(T *target, T compare, T set) \
+ { \
+ __asm__ __volatile__("cas [%1], %2, %0" \
+ : "+&r" (set) \
+ : "r" (target), \
+ "r" (compare) \
+ : "memory"); \
+ return (compare == set); \
+ }
+
+CK_PR_CAS(32, uint32_t)
+CK_PR_CAS(uint, unsigned int)
+CK_PR_CAS(int, int)
+
+#undef CK_PR_CAS
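A minimal sketch of the classic lock-free push built on the casx-based pointer CAS above, assuming the public ck_pr_load_ptr(), ck_pr_fence_store() and ck_pr_cas_ptr() wrappers from ck_pr.h; the node type and head parameter are illustrative.

struct node {
	struct node *next;
};

static void
push(struct node **head, struct node *n)
{

	do {
		n->next = ck_pr_load_ptr(head);
		/* Publish n->next before the head can be swung to n. */
		ck_pr_fence_store();
	} while (ck_pr_cas_ptr(head, n->next, n) == false);

	return;
}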
+
+#define CK_PR_FAS(N, T) \
+ CK_CC_INLINE static T \
+ ck_pr_fas_##N(T *target, T update) \
+ { \
+ \
+ __asm__ __volatile__("swap [%1], %0" \
+ : "+&r" (update) \
+ : "r" (target) \
+ : "memory"); \
+ return (update); \
+ }
+
+CK_PR_FAS(int, int)
+CK_PR_FAS(uint, unsigned int)
+CK_PR_FAS(32, uint32_t)
+
+#undef CK_PR_FAS
+
+#endif /* CK_PR_SPARCV9_H */
+
diff --git a/include/gcc/x86/ck_f_pr.h b/include/gcc/x86/ck_f_pr.h
new file mode 100644
index 0000000..f82c66b
--- /dev/null
+++ b/include/gcc/x86/ck_f_pr.h
@@ -0,0 +1,152 @@
+/* DO NOT EDIT. This is auto-generated from feature.sh */
+#define CK_F_PR_ADD_16
+#define CK_F_PR_ADD_32
+#define CK_F_PR_ADD_8
+#define CK_F_PR_ADD_CHAR
+#define CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_UINT
+#define CK_F_PR_AND_16
+#define CK_F_PR_AND_32
+#define CK_F_PR_AND_8
+#define CK_F_PR_AND_CHAR
+#define CK_F_PR_AND_INT
+#define CK_F_PR_AND_PTR
+#define CK_F_PR_AND_UINT
+#define CK_F_PR_BTC_16
+#define CK_F_PR_BTC_32
+#define CK_F_PR_BTC_INT
+#define CK_F_PR_BTC_PTR
+#define CK_F_PR_BTC_UINT
+#define CK_F_PR_BTR_16
+#define CK_F_PR_BTR_32
+#define CK_F_PR_BTR_INT
+#define CK_F_PR_BTR_PTR
+#define CK_F_PR_BTR_UINT
+#define CK_F_PR_BTS_16
+#define CK_F_PR_BTS_32
+#define CK_F_PR_BTS_INT
+#define CK_F_PR_BTS_PTR
+#define CK_F_PR_BTS_UINT
+#define CK_F_PR_CAS_16
+#define CK_F_PR_CAS_16_VALUE
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_VALUE
+#define CK_F_PR_CAS_8
+#define CK_F_PR_CAS_8_VALUE
+#define CK_F_PR_CAS_CHAR
+#define CK_F_PR_CAS_CHAR_VALUE
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_CAS_PTR
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_DEC_16
+#define CK_F_PR_DEC_16_ZERO
+#define CK_F_PR_DEC_32
+#define CK_F_PR_DEC_32_ZERO
+#define CK_F_PR_DEC_8
+#define CK_F_PR_DEC_8_ZERO
+#define CK_F_PR_DEC_CHAR
+#define CK_F_PR_DEC_CHAR_ZERO
+#define CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_INT_ZERO
+#define CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_PTR_ZERO
+#define CK_F_PR_DEC_UINT
+#define CK_F_PR_DEC_UINT_ZERO
+#define CK_F_PR_FAA_16
+#define CK_F_PR_FAA_32
+#define CK_F_PR_FAA_8
+#define CK_F_PR_FAA_CHAR
+#define CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_UINT
+#define CK_F_PR_FAS_16
+#define CK_F_PR_FAS_32
+#define CK_F_PR_FAS_8
+#define CK_F_PR_FAS_CHAR
+#define CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_UINT
+#define CK_F_PR_FENCE_LOAD
+#define CK_F_PR_FENCE_LOAD_DEPENDS
+#define CK_F_PR_FENCE_MEMORY
+#define CK_F_PR_FENCE_STORE
+#define CK_F_PR_FENCE_STRICT_LOAD
+#define CK_F_PR_FENCE_STRICT_LOAD_DEPENDS
+#define CK_F_PR_FENCE_STRICT_MEMORY
+#define CK_F_PR_FENCE_STRICT_STORE
+#define CK_F_PR_INC_16
+#define CK_F_PR_INC_16_ZERO
+#define CK_F_PR_INC_32
+#define CK_F_PR_INC_32_ZERO
+#define CK_F_PR_INC_8
+#define CK_F_PR_INC_8_ZERO
+#define CK_F_PR_INC_CHAR
+#define CK_F_PR_INC_CHAR_ZERO
+#define CK_F_PR_INC_INT
+#define CK_F_PR_INC_INT_ZERO
+#define CK_F_PR_INC_PTR
+#define CK_F_PR_INC_PTR_ZERO
+#define CK_F_PR_INC_UINT
+#define CK_F_PR_INC_UINT_ZERO
+#define CK_F_PR_LOAD_16
+#define CK_F_PR_LOAD_32
+#define CK_F_PR_LOAD_8
+#define CK_F_PR_LOAD_CHAR
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_PTR
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_NEG_16
+#define CK_F_PR_NEG_16_ZERO
+#define CK_F_PR_NEG_32
+#define CK_F_PR_NEG_32_ZERO
+#define CK_F_PR_NEG_8
+#define CK_F_PR_NEG_8_ZERO
+#define CK_F_PR_NEG_CHAR
+#define CK_F_PR_NEG_CHAR_ZERO
+#define CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_INT_ZERO
+#define CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_PTR_ZERO
+#define CK_F_PR_NEG_UINT
+#define CK_F_PR_NEG_UINT_ZERO
+#define CK_F_PR_NOT_16
+#define CK_F_PR_NOT_32
+#define CK_F_PR_NOT_8
+#define CK_F_PR_NOT_CHAR
+#define CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_UINT
+#define CK_F_PR_OR_16
+#define CK_F_PR_OR_32
+#define CK_F_PR_OR_8
+#define CK_F_PR_OR_CHAR
+#define CK_F_PR_OR_INT
+#define CK_F_PR_OR_PTR
+#define CK_F_PR_OR_UINT
+#define CK_F_PR_STALL
+#define CK_F_PR_STORE_16
+#define CK_F_PR_STORE_32
+#define CK_F_PR_STORE_8
+#define CK_F_PR_STORE_CHAR
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_SUB_16
+#define CK_F_PR_SUB_32
+#define CK_F_PR_SUB_8
+#define CK_F_PR_SUB_CHAR
+#define CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_UINT
+#define CK_F_PR_XOR_16
+#define CK_F_PR_XOR_32
+#define CK_F_PR_XOR_8
+#define CK_F_PR_XOR_CHAR
+#define CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_UINT
+
diff --git a/include/gcc/x86/ck_pr.h b/include/gcc/x86/ck_pr.h
new file mode 100644
index 0000000..a04cebf
--- /dev/null
+++ b/include/gcc/x86/ck_pr.h
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * Copyright 2011 Devon H. O'Dell <devon.odell@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_X86_H
+#define CK_PR_X86_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_stdint.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+/* Minimum requirements for the CK_PR interface are met. */
+#define CK_F_PR
+
+#ifdef CK_MD_UMP
+#define CK_PR_LOCK_PREFIX
+#else
+#define CK_PR_LOCK_PREFIX "lock "
+#endif
+
+/*
+ * Prevent speculative execution in busy-wait loops (P4 <=),
+ * or otherwise act as a predefined delay.
+ */
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+ __asm__ __volatile__("pause" ::: "memory");
+ return;
+}
+
+#define CK_PR_FENCE(T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ __asm__ __volatile__(I ::: "memory"); \
+ }
+
+CK_PR_FENCE(atomic, "sfence")
+CK_PR_FENCE(atomic_store, "sfence")
+CK_PR_FENCE(atomic_load, "mfence")
+CK_PR_FENCE(store_atomic, "sfence")
+CK_PR_FENCE(load_atomic, "mfence")
+CK_PR_FENCE(load, "lfence")
+CK_PR_FENCE(load_store, "mfence")
+CK_PR_FENCE(store, "sfence")
+CK_PR_FENCE(store_load, "mfence")
+CK_PR_FENCE(memory, "mfence")
+CK_PR_FENCE(release, "mfence")
+CK_PR_FENCE(acquire, "mfence")
+CK_PR_FENCE(acqrel, "mfence")
+CK_PR_FENCE(lock, "mfence")
+CK_PR_FENCE(unlock, "mfence")
+
+#undef CK_PR_FENCE
+
+/*
+ * Atomic fetch-and-store operations.
+ */
+#define CK_PR_FAS(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_fas_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %0, %1" \
+ : "+m" (*(C *)target), \
+ "+q" (v) \
+ : \
+ : "memory"); \
+ return v; \
+ }
+
+CK_PR_FAS(ptr, void, void *, char, "xchgl")
+
+#define CK_PR_FAS_S(S, T, I) CK_PR_FAS(S, T, T, T, I)
+
+CK_PR_FAS_S(char, char, "xchgb")
+CK_PR_FAS_S(uint, unsigned int, "xchgl")
+CK_PR_FAS_S(int, int, "xchgl")
+CK_PR_FAS_S(32, uint32_t, "xchgl")
+CK_PR_FAS_S(16, uint16_t, "xchgw")
+CK_PR_FAS_S(8, uint8_t, "xchgb")
+
+#undef CK_PR_FAS_S
+#undef CK_PR_FAS
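A minimal test-and-set spinlock sketch over the xchg-based fetch-and-store above, combined with ck_pr_stall(); the ck_pr_* names are the public wrappers from ck_pr.h and the lock word is illustrative.

static unsigned int lock_word;

static void
spin_lock(void)
{

	while (ck_pr_fas_uint(&lock_word, 1) == 1)
		ck_pr_stall();		/* "pause" while the lock is held */

	return;
}

static void
spin_unlock(void)
{

	ck_pr_fence_unlock();
	ck_pr_store_uint(&lock_word, 0);
	return;
}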
+
+#define CK_PR_LOAD(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ T r; \
+ __asm__ __volatile__(I " %1, %0" \
+ : "=q" (r) \
+ : "m" (*(const C *)target) \
+ : "memory"); \
+ return (r); \
+ }
+
+CK_PR_LOAD(ptr, void, void *, char, "movl")
+
+#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)
+
+CK_PR_LOAD_S(char, char, "movb")
+CK_PR_LOAD_S(uint, unsigned int, "movl")
+CK_PR_LOAD_S(int, int, "movl")
+CK_PR_LOAD_S(32, uint32_t, "movl")
+CK_PR_LOAD_S(16, uint16_t, "movw")
+CK_PR_LOAD_S(8, uint8_t, "movb")
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD
+
+#define CK_PR_STORE(S, M, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %1, %0" \
+ : "=m" (*(C *)target) \
+ : CK_CC_IMM "q" (v) \
+ : "memory"); \
+ return; \
+ }
+
+CK_PR_STORE(ptr, void, const void *, char, "movl")
+
+#define CK_PR_STORE_S(S, T, I) CK_PR_STORE(S, T, T, T, I)
+
+CK_PR_STORE_S(char, char, "movb")
+CK_PR_STORE_S(uint, unsigned int, "movl")
+CK_PR_STORE_S(int, int, "movl")
+CK_PR_STORE_S(32, uint32_t, "movl")
+CK_PR_STORE_S(16, uint16_t, "movw")
+CK_PR_STORE_S(8, uint8_t, "movb")
+
+#undef CK_PR_STORE_S
+#undef CK_PR_STORE
+
+/*
+ * Atomic fetch-and-add operations.
+ */
+#define CK_PR_FAA(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_faa_##S(M *target, T d) \
+ { \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0" \
+ : "+m" (*(C *)target), \
+ "+q" (d) \
+ : \
+ : "memory", "cc"); \
+ return (d); \
+ }
+
+CK_PR_FAA(ptr, void, uintptr_t, char, "xaddl")
+
+#define CK_PR_FAA_S(S, T, I) CK_PR_FAA(S, T, T, T, I)
+
+CK_PR_FAA_S(char, char, "xaddb")
+CK_PR_FAA_S(uint, unsigned int, "xaddl")
+CK_PR_FAA_S(int, int, "xaddl")
+CK_PR_FAA_S(32, uint32_t, "xaddl")
+CK_PR_FAA_S(16, uint16_t, "xaddw")
+CK_PR_FAA_S(8, uint8_t, "xaddb")
+
+#undef CK_PR_FAA_S
+#undef CK_PR_FAA
+
+/*
+ * Atomic store-only unary operations.
+ */
+#define CK_PR_UNARY(K, S, T, C, I) \
+ CK_PR_UNARY_R(K, S, T, C, I) \
+ CK_PR_UNARY_V(K, S, T, C, I)
+
+#define CK_PR_UNARY_R(K, S, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S(T *target) \
+ { \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0" \
+ : "+m" (*(C *)target) \
+ : \
+ : "memory", "cc"); \
+ return; \
+ }
+
+#define CK_PR_UNARY_V(K, S, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S##_zero(T *target, bool *r) \
+ { \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0; setz %1" \
+ : "+m" (*(C *)target), \
+ "=m" (*r) \
+ : \
+ : "memory", "cc"); \
+ return; \
+ }
+
+#define CK_PR_UNARY_S(K, S, T, I) CK_PR_UNARY(K, S, T, T, I)
+
+#define CK_PR_GENERATE(K) \
+ CK_PR_UNARY(K, ptr, void, char, #K "l") \
+ CK_PR_UNARY_S(K, char, char, #K "b") \
+ CK_PR_UNARY_S(K, int, int, #K "l") \
+ CK_PR_UNARY_S(K, uint, unsigned int, #K "l") \
+ CK_PR_UNARY_S(K, 32, uint32_t, #K "l") \
+ CK_PR_UNARY_S(K, 16, uint16_t, #K "w") \
+ CK_PR_UNARY_S(K, 8, uint8_t, #K "b")
+
+CK_PR_GENERATE(inc)
+CK_PR_GENERATE(dec)
+CK_PR_GENERATE(neg)
+
+/* "not" does not affect the condition flags, so no _zero variant is generated. */
+#undef CK_PR_UNARY_V
+#define CK_PR_UNARY_V(a, b, c, d, e)
+CK_PR_GENERATE(not)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY_V
+#undef CK_PR_UNARY_R
+#undef CK_PR_UNARY
+
+/*
+ * Atomic store-only binary operations.
+ */
+#define CK_PR_BINARY(K, S, M, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S(M *target, T d) \
+ { \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0" \
+ : "+m" (*(C *)target) \
+ : CK_CC_IMM "q" (d) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+#define CK_PR_BINARY_S(K, S, T, I) CK_PR_BINARY(K, S, T, T, T, I)
+
+#define CK_PR_GENERATE(K) \
+ CK_PR_BINARY(K, ptr, void, uintptr_t, char, #K "l") \
+ CK_PR_BINARY_S(K, char, char, #K "b") \
+ CK_PR_BINARY_S(K, int, int, #K "l") \
+ CK_PR_BINARY_S(K, uint, unsigned int, #K "l") \
+ CK_PR_BINARY_S(K, 32, uint32_t, #K "l") \
+ CK_PR_BINARY_S(K, 16, uint16_t, #K "w") \
+ CK_PR_BINARY_S(K, 8, uint8_t, #K "b")
+
+CK_PR_GENERATE(add)
+CK_PR_GENERATE(sub)
+CK_PR_GENERATE(and)
+CK_PR_GENERATE(or)
+CK_PR_GENERATE(xor)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+/*
+ * Atomic compare and swap.
+ */
+#define CK_PR_CAS(S, M, T, C, I) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##S(M *target, T compare, T set) \
+ { \
+ bool z; \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %2, %0; setz %1" \
+ : "+m" (*(C *)target), \
+ "=a" (z) \
+ : "q" (set), \
+ "a" (compare) \
+ : "memory", "cc"); \
+ return z; \
+ }
+
+CK_PR_CAS(ptr, void, void *, char, "cmpxchgl")
+
+#define CK_PR_CAS_S(S, T, I) CK_PR_CAS(S, T, T, T, I)
+
+CK_PR_CAS_S(char, char, "cmpxchgb")
+CK_PR_CAS_S(int, int, "cmpxchgl")
+CK_PR_CAS_S(uint, unsigned int, "cmpxchgl")
+CK_PR_CAS_S(32, uint32_t, "cmpxchgl")
+CK_PR_CAS_S(16, uint16_t, "cmpxchgw")
+CK_PR_CAS_S(8, uint8_t, "cmpxchgb")
+
+#undef CK_PR_CAS_S
+#undef CK_PR_CAS
+
+/*
+ * Compare and swap; *v is set to the old value of the target.
+ */
+#define CK_PR_CAS_O(S, M, T, C, I, R) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##S##_value(M *target, T compare, T set, M *v) \
+ { \
+ bool z; \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg" I " %3, %0;" \
+ "mov %% " R ", %2;" \
+ "setz %1;" \
+ : "+m" (*(C *)target), \
+ "=a" (z), \
+ "=m" (*(C *)v) \
+ : "q" (set), \
+ "a" (compare) \
+ : "memory", "cc"); \
+ return (bool)z; \
+ }
+
+CK_PR_CAS_O(ptr, void, void *, char, "l", "eax")
+
+#define CK_PR_CAS_O_S(S, T, I, R) \
+ CK_PR_CAS_O(S, T, T, T, I, R)
+
+CK_PR_CAS_O_S(char, char, "b", "al")
+CK_PR_CAS_O_S(int, int, "l", "eax")
+CK_PR_CAS_O_S(uint, unsigned int, "l", "eax")
+CK_PR_CAS_O_S(32, uint32_t, "l", "eax")
+CK_PR_CAS_O_S(16, uint16_t, "w", "ax")
+CK_PR_CAS_O_S(8, uint8_t, "b", "al")
+
+#undef CK_PR_CAS_O_S
+#undef CK_PR_CAS_O
+
+/*
+ * Atomic bit test operations.
+ */
+#define CK_PR_BT(K, S, T, P, C, I) \
+ CK_CC_INLINE static bool \
+ ck_pr_##K##_##S(T *target, unsigned int b) \
+ { \
+ bool c; \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I "; setc %1" \
+ : "+m" (*(C *)target), \
+ "=q" (c) \
+ : "q" ((P)b) \
+ : "memory", "cc"); \
+ return (bool)c; \
+ }
+
+#define CK_PR_BT_S(K, S, T, I) CK_PR_BT(K, S, T, T, T, I)
+
+#define CK_PR_GENERATE(K) \
+ CK_PR_BT(K, ptr, void, uint32_t, char, #K "l %2, %0") \
+ CK_PR_BT_S(K, uint, unsigned int, #K "l %2, %0") \
+ CK_PR_BT_S(K, int, int, #K "l %2, %0") \
+ CK_PR_BT_S(K, 32, uint32_t, #K "l %2, %0") \
+ CK_PR_BT_S(K, 16, uint16_t, #K "w %w2, %0")
+
+CK_PR_GENERATE(btc)
+CK_PR_GENERATE(bts)
+CK_PR_GENERATE(btr)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_BT
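A brief sketch of the bit-test-and-set operation above used to claim slots in a small bitmap; claim_slot and the bitmap itself are illustrative, not part of CK.

static uint32_t bitmap[4];	/* 128 slots */

/* Returns true if this call claimed slot i, false if it was already taken. */
static bool
claim_slot(unsigned int i)
{

	return ck_pr_bts_32(&bitmap[i / 32], i % 32) == false;
}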
+
+#endif /* CK_PR_X86_H */
+
diff --git a/include/gcc/x86_64/ck_f_pr.h b/include/gcc/x86_64/ck_f_pr.h
new file mode 100644
index 0000000..545f5fd
--- /dev/null
+++ b/include/gcc/x86_64/ck_f_pr.h
@@ -0,0 +1,202 @@
+/* DO NOT EDIT. This is auto-generated from feature.sh */
+#define CK_F_PR_ADD_16
+#define CK_F_PR_ADD_32
+#define CK_F_PR_ADD_64
+#define CK_F_PR_ADD_8
+#define CK_F_PR_ADD_CHAR
+#define CK_F_PR_ADD_INT
+#define CK_F_PR_ADD_PTR
+#define CK_F_PR_ADD_UINT
+#define CK_F_PR_AND_16
+#define CK_F_PR_AND_32
+#define CK_F_PR_AND_64
+#define CK_F_PR_AND_8
+#define CK_F_PR_AND_CHAR
+#define CK_F_PR_AND_INT
+#define CK_F_PR_AND_PTR
+#define CK_F_PR_AND_UINT
+#define CK_F_PR_BTC_16
+#define CK_F_PR_BTC_32
+#define CK_F_PR_BTC_64
+#define CK_F_PR_BTC_INT
+#define CK_F_PR_BTC_PTR
+#define CK_F_PR_BTC_UINT
+#define CK_F_PR_BTR_16
+#define CK_F_PR_BTR_32
+#define CK_F_PR_BTR_64
+#define CK_F_PR_BTR_INT
+#define CK_F_PR_BTR_PTR
+#define CK_F_PR_BTR_UINT
+#define CK_F_PR_BTS_16
+#define CK_F_PR_BTS_32
+#define CK_F_PR_BTS_64
+#define CK_F_PR_BTS_INT
+#define CK_F_PR_BTS_PTR
+#define CK_F_PR_BTS_UINT
+#define CK_F_PR_CAS_16
+#define CK_F_PR_CAS_16_8
+#define CK_F_PR_CAS_16_8_VALUE
+#define CK_F_PR_CAS_16_VALUE
+#define CK_F_PR_CAS_32
+#define CK_F_PR_CAS_32_4
+#define CK_F_PR_CAS_32_4_VALUE
+#define CK_F_PR_CAS_32_VALUE
+#define CK_F_PR_CAS_64
+#define CK_F_PR_CAS_64_2
+#define CK_F_PR_CAS_64_2_VALUE
+#define CK_F_PR_CAS_64_VALUE
+#define CK_F_PR_CAS_8
+#define CK_F_PR_CAS_8_16
+#define CK_F_PR_CAS_8_16_VALUE
+#define CK_F_PR_CAS_8_VALUE
+#define CK_F_PR_CAS_CHAR
+#define CK_F_PR_CAS_CHAR_16
+#define CK_F_PR_CAS_CHAR_16_VALUE
+#define CK_F_PR_CAS_CHAR_VALUE
+#define CK_F_PR_CAS_INT
+#define CK_F_PR_CAS_INT_4
+#define CK_F_PR_CAS_INT_4_VALUE
+#define CK_F_PR_CAS_INT_VALUE
+#define CK_F_PR_CAS_PTR
+#define CK_F_PR_CAS_PTR_2
+#define CK_F_PR_CAS_PTR_2_VALUE
+#define CK_F_PR_CAS_PTR_VALUE
+#define CK_F_PR_CAS_DOUBLE
+#define CK_F_PR_CAS_DOUBLE_2
+#define CK_F_PR_CAS_DOUBLE_VALUE
+#define CK_F_PR_CAS_UINT
+#define CK_F_PR_CAS_UINT_4
+#define CK_F_PR_CAS_UINT_4_VALUE
+#define CK_F_PR_CAS_UINT_VALUE
+#define CK_F_PR_DEC_16
+#define CK_F_PR_DEC_16_ZERO
+#define CK_F_PR_DEC_32
+#define CK_F_PR_DEC_32_ZERO
+#define CK_F_PR_DEC_64
+#define CK_F_PR_DEC_64_ZERO
+#define CK_F_PR_DEC_8
+#define CK_F_PR_DEC_8_ZERO
+#define CK_F_PR_DEC_CHAR
+#define CK_F_PR_DEC_CHAR_ZERO
+#define CK_F_PR_DEC_INT
+#define CK_F_PR_DEC_INT_ZERO
+#define CK_F_PR_DEC_PTR
+#define CK_F_PR_DEC_PTR_ZERO
+#define CK_F_PR_DEC_UINT
+#define CK_F_PR_DEC_UINT_ZERO
+#define CK_F_PR_FAA_16
+#define CK_F_PR_FAA_32
+#define CK_F_PR_FAA_64
+#define CK_F_PR_FAA_8
+#define CK_F_PR_FAA_CHAR
+#define CK_F_PR_FAA_INT
+#define CK_F_PR_FAA_PTR
+#define CK_F_PR_FAA_UINT
+#define CK_F_PR_FAS_16
+#define CK_F_PR_FAS_32
+#define CK_F_PR_FAS_64
+#define CK_F_PR_FAS_8
+#define CK_F_PR_FAS_CHAR
+#define CK_F_PR_FAS_INT
+#define CK_F_PR_FAS_PTR
+#define CK_F_PR_FAS_UINT
+#define CK_F_PR_FAS_DOUBLE
+#define CK_F_PR_FENCE_LOAD
+#define CK_F_PR_FENCE_LOAD_DEPENDS
+#define CK_F_PR_FENCE_MEMORY
+#define CK_F_PR_FENCE_STORE
+#define CK_F_PR_FENCE_STRICT_LOAD
+#define CK_F_PR_FENCE_STRICT_LOAD_DEPENDS
+#define CK_F_PR_FENCE_STRICT_MEMORY
+#define CK_F_PR_FENCE_STRICT_STORE
+#define CK_F_PR_INC_16
+#define CK_F_PR_INC_16_ZERO
+#define CK_F_PR_INC_32
+#define CK_F_PR_INC_32_ZERO
+#define CK_F_PR_INC_64
+#define CK_F_PR_INC_64_ZERO
+#define CK_F_PR_INC_8
+#define CK_F_PR_INC_8_ZERO
+#define CK_F_PR_INC_CHAR
+#define CK_F_PR_INC_CHAR_ZERO
+#define CK_F_PR_INC_INT
+#define CK_F_PR_INC_INT_ZERO
+#define CK_F_PR_INC_PTR
+#define CK_F_PR_INC_PTR_ZERO
+#define CK_F_PR_INC_UINT
+#define CK_F_PR_INC_UINT_ZERO
+#define CK_F_PR_LOAD_16
+#define CK_F_PR_LOAD_16_8
+#define CK_F_PR_LOAD_32
+#define CK_F_PR_LOAD_32_4
+#define CK_F_PR_LOAD_64
+#define CK_F_PR_LOAD_64_2
+#define CK_F_PR_LOAD_8
+#define CK_F_PR_LOAD_8_16
+#define CK_F_PR_LOAD_CHAR
+#define CK_F_PR_LOAD_CHAR_16
+#define CK_F_PR_LOAD_INT
+#define CK_F_PR_LOAD_INT_4
+#define CK_F_PR_LOAD_PTR
+#define CK_F_PR_LOAD_PTR_2
+#define CK_F_PR_LOAD_DOUBLE
+#define CK_F_PR_LOAD_UINT
+#define CK_F_PR_LOAD_UINT_4
+#define CK_F_PR_NEG_16
+#define CK_F_PR_NEG_16_ZERO
+#define CK_F_PR_NEG_32
+#define CK_F_PR_NEG_32_ZERO
+#define CK_F_PR_NEG_64
+#define CK_F_PR_NEG_64_ZERO
+#define CK_F_PR_NEG_8
+#define CK_F_PR_NEG_8_ZERO
+#define CK_F_PR_NEG_CHAR
+#define CK_F_PR_NEG_CHAR_ZERO
+#define CK_F_PR_NEG_INT
+#define CK_F_PR_NEG_INT_ZERO
+#define CK_F_PR_NEG_PTR
+#define CK_F_PR_NEG_PTR_ZERO
+#define CK_F_PR_NEG_UINT
+#define CK_F_PR_NEG_UINT_ZERO
+#define CK_F_PR_NOT_16
+#define CK_F_PR_NOT_32
+#define CK_F_PR_NOT_64
+#define CK_F_PR_NOT_8
+#define CK_F_PR_NOT_CHAR
+#define CK_F_PR_NOT_INT
+#define CK_F_PR_NOT_PTR
+#define CK_F_PR_NOT_UINT
+#define CK_F_PR_OR_16
+#define CK_F_PR_OR_32
+#define CK_F_PR_OR_64
+#define CK_F_PR_OR_8
+#define CK_F_PR_OR_CHAR
+#define CK_F_PR_OR_INT
+#define CK_F_PR_OR_PTR
+#define CK_F_PR_OR_UINT
+#define CK_F_PR_STORE_16
+#define CK_F_PR_STORE_32
+#define CK_F_PR_STORE_64
+#define CK_F_PR_STORE_8
+#define CK_F_PR_STORE_CHAR
+#define CK_F_PR_STORE_INT
+#define CK_F_PR_STORE_DOUBLE
+#define CK_F_PR_STORE_PTR
+#define CK_F_PR_STORE_UINT
+#define CK_F_PR_SUB_16
+#define CK_F_PR_SUB_32
+#define CK_F_PR_SUB_64
+#define CK_F_PR_SUB_8
+#define CK_F_PR_SUB_CHAR
+#define CK_F_PR_SUB_INT
+#define CK_F_PR_SUB_PTR
+#define CK_F_PR_SUB_UINT
+#define CK_F_PR_XOR_16
+#define CK_F_PR_XOR_32
+#define CK_F_PR_XOR_64
+#define CK_F_PR_XOR_8
+#define CK_F_PR_XOR_CHAR
+#define CK_F_PR_XOR_INT
+#define CK_F_PR_XOR_PTR
+#define CK_F_PR_XOR_UINT
+
diff --git a/include/gcc/x86_64/ck_pr.h b/include/gcc/x86_64/ck_pr.h
new file mode 100644
index 0000000..532d593
--- /dev/null
+++ b/include/gcc/x86_64/ck_pr.h
@@ -0,0 +1,585 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_X86_64_H
+#define CK_PR_X86_64_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_stdint.h>
+
+/*
+ * The following represent supported atomic operations.
+ * These operations may be emulated.
+ */
+#include "ck_f_pr.h"
+
+/*
+ * Support for TSX extensions.
+ */
+#ifdef CK_MD_RTM_ENABLE
+#include "ck_pr_rtm.h"
+#endif
+
+/* Minimum requirements for the CK_PR interface are met. */
+#define CK_F_PR
+
+#ifdef CK_MD_UMP
+#define CK_PR_LOCK_PREFIX
+#else
+#define CK_PR_LOCK_PREFIX "lock "
+#endif
+
+/*
+ * Prevent speculative execution in busy-wait loops (P4 <=)
+ * or "predefined delay".
+ */
+CK_CC_INLINE static void
+ck_pr_stall(void)
+{
+ __asm__ __volatile__("pause" ::: "memory");
+ return;
+}
+
+#define CK_PR_FENCE(T, I) \
+ CK_CC_INLINE static void \
+ ck_pr_fence_strict_##T(void) \
+ { \
+ __asm__ __volatile__(I ::: "memory"); \
+ }
+
+CK_PR_FENCE(atomic, "sfence")
+CK_PR_FENCE(atomic_store, "sfence")
+CK_PR_FENCE(atomic_load, "mfence")
+CK_PR_FENCE(store_atomic, "sfence")
+CK_PR_FENCE(load_atomic, "mfence")
+CK_PR_FENCE(load, "lfence")
+CK_PR_FENCE(load_store, "mfence")
+CK_PR_FENCE(store, "sfence")
+CK_PR_FENCE(store_load, "mfence")
+CK_PR_FENCE(memory, "mfence")
+CK_PR_FENCE(release, "mfence")
+CK_PR_FENCE(acquire, "mfence")
+CK_PR_FENCE(acqrel, "mfence")
+CK_PR_FENCE(lock, "mfence")
+CK_PR_FENCE(unlock, "mfence")
+
+#undef CK_PR_FENCE
+
+/*
+ * Read for ownership. Older compilers will generate the 32-bit
+ * 3DNow! variant, which is binary compatible with the x86-64 variant
+ * of prefetchw.
+ */
+#ifndef CK_F_PR_RFO
+#define CK_F_PR_RFO
+CK_CC_INLINE static void
+ck_pr_rfo(const void *m)
+{
+
+ __asm__ __volatile__("prefetchw (%0)"
+ :
+ : "r" (m)
+ : "memory");
+
+ return;
+}
+#endif /* CK_F_PR_RFO */
+
+/*
+ * Atomic fetch-and-store operations.
+ */
+#define CK_PR_FAS(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_fas_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %0, %1" \
+ : "+m" (*(C *)target), \
+ "+q" (v) \
+ : \
+ : "memory"); \
+ return v; \
+ }
+
+CK_PR_FAS(ptr, void, void *, char, "xchgq")
+
+#define CK_PR_FAS_S(S, T, I) CK_PR_FAS(S, T, T, T, I)
+
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_FAS_S(double, double, "xchgq")
+#endif
+CK_PR_FAS_S(char, char, "xchgb")
+CK_PR_FAS_S(uint, unsigned int, "xchgl")
+CK_PR_FAS_S(int, int, "xchgl")
+CK_PR_FAS_S(64, uint64_t, "xchgq")
+CK_PR_FAS_S(32, uint32_t, "xchgl")
+CK_PR_FAS_S(16, uint16_t, "xchgw")
+CK_PR_FAS_S(8, uint8_t, "xchgb")
+
+#undef CK_PR_FAS_S
+#undef CK_PR_FAS
+
+/*
+ * Atomic load-from-memory operations.
+ */
+#define CK_PR_LOAD(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_md_load_##S(const M *target) \
+ { \
+ T r; \
+ __asm__ __volatile__(I " %1, %0" \
+ : "=q" (r) \
+ : "m" (*(const C *)target) \
+ : "memory"); \
+ return (r); \
+ }
+
+CK_PR_LOAD(ptr, void, void *, char, "movq")
+
+#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)
+
+CK_PR_LOAD_S(char, char, "movb")
+CK_PR_LOAD_S(uint, unsigned int, "movl")
+CK_PR_LOAD_S(int, int, "movl")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_LOAD_S(double, double, "movq")
+#endif
+CK_PR_LOAD_S(64, uint64_t, "movq")
+CK_PR_LOAD_S(32, uint32_t, "movl")
+CK_PR_LOAD_S(16, uint16_t, "movw")
+CK_PR_LOAD_S(8, uint8_t, "movb")
+
+#undef CK_PR_LOAD_S
+#undef CK_PR_LOAD
+
+CK_CC_INLINE static void
+ck_pr_load_64_2(const uint64_t target[2], uint64_t v[2])
+{
+ __asm__ __volatile__("movq %%rdx, %%rcx;"
+ "movq %%rax, %%rbx;"
+ CK_PR_LOCK_PREFIX "cmpxchg16b %2;"
+ : "=a" (v[0]),
+ "=d" (v[1])
+ : "m" (*(const uint64_t *)target)
+ : "rbx", "rcx", "memory", "cc");
+ return;
+}
+
+CK_CC_INLINE static void
+ck_pr_load_ptr_2(const void *t, void *v)
+{
+ ck_pr_load_64_2(CK_CPP_CAST(const uint64_t *, t),
+ CK_CPP_CAST(uint64_t *, v));
+ return;
+}
+
+#define CK_PR_LOAD_2(S, W, T) \
+ CK_CC_INLINE static void \
+ ck_pr_md_load_##S##_##W(const T t[2], T v[2]) \
+ { \
+ ck_pr_load_64_2((const uint64_t *)(const void *)t, \
+ (uint64_t *)(void *)v); \
+ return; \
+ }
+
+CK_PR_LOAD_2(char, 16, char)
+CK_PR_LOAD_2(int, 4, int)
+CK_PR_LOAD_2(uint, 4, unsigned int)
+CK_PR_LOAD_2(32, 4, uint32_t)
+CK_PR_LOAD_2(16, 8, uint16_t)
+CK_PR_LOAD_2(8, 16, uint8_t)
+
+#undef CK_PR_LOAD_2
+
+/*
+ * Atomic store-to-memory operations.
+ */
+#define CK_PR_STORE_IMM(S, M, T, C, I, K) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %1, %0" \
+ : "=m" (*(C *)target) \
+ : K "q" (v) \
+ : "memory"); \
+ return; \
+ }
+
+#define CK_PR_STORE(S, M, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_md_store_##S(M *target, T v) \
+ { \
+ __asm__ __volatile__(I " %1, %0" \
+ : "=m" (*(C *)target) \
+ : "q" (v) \
+ : "memory"); \
+ return; \
+ }
+
+CK_PR_STORE_IMM(ptr, void, const void *, char, "movq", CK_CC_IMM_U32)
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_STORE(double, double, double, double, "movq")
+#endif
+
+#define CK_PR_STORE_S(S, T, I, K) CK_PR_STORE_IMM(S, T, T, T, I, K)
+
+CK_PR_STORE_S(char, char, "movb", CK_CC_IMM_S32)
+CK_PR_STORE_S(int, int, "movl", CK_CC_IMM_S32)
+CK_PR_STORE_S(uint, unsigned int, "movl", CK_CC_IMM_U32)
+CK_PR_STORE_S(64, uint64_t, "movq", CK_CC_IMM_U32)
+CK_PR_STORE_S(32, uint32_t, "movl", CK_CC_IMM_U32)
+CK_PR_STORE_S(16, uint16_t, "movw", CK_CC_IMM_U32)
+CK_PR_STORE_S(8, uint8_t, "movb", CK_CC_IMM_U32)
+
+#undef CK_PR_STORE_S
+#undef CK_PR_STORE_IMM
+#undef CK_PR_STORE
+
+/*
+ * Atomic fetch-and-add operations.
+ */
+#define CK_PR_FAA(S, M, T, C, I) \
+ CK_CC_INLINE static T \
+ ck_pr_faa_##S(M *target, T d) \
+ { \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0" \
+ : "+m" (*(C *)target), \
+ "+q" (d) \
+ : \
+ : "memory", "cc"); \
+ return (d); \
+ }
+
+CK_PR_FAA(ptr, void, uintptr_t, char, "xaddq")
+
+#define CK_PR_FAA_S(S, T, I) CK_PR_FAA(S, T, T, T, I)
+
+CK_PR_FAA_S(char, char, "xaddb")
+CK_PR_FAA_S(uint, unsigned int, "xaddl")
+CK_PR_FAA_S(int, int, "xaddl")
+CK_PR_FAA_S(64, uint64_t, "xaddq")
+CK_PR_FAA_S(32, uint32_t, "xaddl")
+CK_PR_FAA_S(16, uint16_t, "xaddw")
+CK_PR_FAA_S(8, uint8_t, "xaddb")
+
+#undef CK_PR_FAA_S
+#undef CK_PR_FAA
+
+/*
+ * Atomic store-only unary operations.
+ */
+#define CK_PR_UNARY(K, S, T, C, I) \
+ CK_PR_UNARY_R(K, S, T, C, I) \
+ CK_PR_UNARY_V(K, S, T, C, I)
+
+#define CK_PR_UNARY_R(K, S, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S(T *target) \
+ { \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0" \
+ : "+m" (*(C *)target) \
+ : \
+ : "memory", "cc"); \
+ return; \
+ }
+
+#define CK_PR_UNARY_V(K, S, T, C, I) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S##_zero(T *target, bool *r) \
+ { \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0; setz %1" \
+ : "+m" (*(C *)target), \
+ "=m" (*r) \
+ : \
+ : "memory", "cc"); \
+ return; \
+ }
+
+
+#define CK_PR_UNARY_S(K, S, T, I) CK_PR_UNARY(K, S, T, T, I)
+
+#define CK_PR_GENERATE(K) \
+ CK_PR_UNARY(K, ptr, void, char, #K "q") \
+ CK_PR_UNARY_S(K, char, char, #K "b") \
+ CK_PR_UNARY_S(K, int, int, #K "l") \
+ CK_PR_UNARY_S(K, uint, unsigned int, #K "l") \
+ CK_PR_UNARY_S(K, 64, uint64_t, #K "q") \
+ CK_PR_UNARY_S(K, 32, uint32_t, #K "l") \
+ CK_PR_UNARY_S(K, 16, uint16_t, #K "w") \
+ CK_PR_UNARY_S(K, 8, uint8_t, #K "b")
+
+CK_PR_GENERATE(inc)
+CK_PR_GENERATE(dec)
+CK_PR_GENERATE(neg)
+
+/* not does not affect condition flags. */
+#undef CK_PR_UNARY_V
+#define CK_PR_UNARY_V(a, b, c, d, e)
+CK_PR_GENERATE(not)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY_V
+#undef CK_PR_UNARY_R
+#undef CK_PR_UNARY
+
+/*
+ * Atomic store-only binary operations.
+ */
+#define CK_PR_BINARY(K, S, M, T, C, I, O) \
+ CK_CC_INLINE static void \
+ ck_pr_##K##_##S(M *target, T d) \
+ { \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0" \
+ : "+m" (*(C *)target) \
+ : O "q" (d) \
+ : "memory", "cc"); \
+ return; \
+ }
+
+#define CK_PR_BINARY_S(K, S, T, I, O) CK_PR_BINARY(K, S, T, T, T, I, O)
+
+#define CK_PR_GENERATE(K) \
+ CK_PR_BINARY(K, ptr, void, uintptr_t, char, #K "q", CK_CC_IMM_U32) \
+ CK_PR_BINARY_S(K, char, char, #K "b", CK_CC_IMM_S32) \
+ CK_PR_BINARY_S(K, int, int, #K "l", CK_CC_IMM_S32) \
+ CK_PR_BINARY_S(K, uint, unsigned int, #K "l", CK_CC_IMM_U32) \
+ CK_PR_BINARY_S(K, 64, uint64_t, #K "q", CK_CC_IMM_U32) \
+ CK_PR_BINARY_S(K, 32, uint32_t, #K "l", CK_CC_IMM_U32) \
+ CK_PR_BINARY_S(K, 16, uint16_t, #K "w", CK_CC_IMM_U32) \
+ CK_PR_BINARY_S(K, 8, uint8_t, #K "b", CK_CC_IMM_U32)
+
+CK_PR_GENERATE(add)
+CK_PR_GENERATE(sub)
+CK_PR_GENERATE(and)
+CK_PR_GENERATE(or)
+CK_PR_GENERATE(xor)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+/*
+ * Atomic compare and swap.
+ */
+#define CK_PR_CAS(S, M, T, C, I) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##S(M *target, T compare, T set) \
+ { \
+ bool z; \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I " %2, %0; setz %1" \
+ : "+m" (*(C *)target), \
+ "=a" (z) \
+ : "q" (set), \
+ "a" (compare) \
+ : "memory", "cc"); \
+ return z; \
+ }
+
+CK_PR_CAS(ptr, void, void *, char, "cmpxchgq")
+
+#define CK_PR_CAS_S(S, T, I) CK_PR_CAS(S, T, T, T, I)
+
+CK_PR_CAS_S(char, char, "cmpxchgb")
+CK_PR_CAS_S(int, int, "cmpxchgl")
+CK_PR_CAS_S(uint, unsigned int, "cmpxchgl")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_CAS_S(double, double, "cmpxchgq")
+#endif
+CK_PR_CAS_S(64, uint64_t, "cmpxchgq")
+CK_PR_CAS_S(32, uint32_t, "cmpxchgl")
+CK_PR_CAS_S(16, uint16_t, "cmpxchgw")
+CK_PR_CAS_S(8, uint8_t, "cmpxchgb")
+
+#undef CK_PR_CAS_S
+#undef CK_PR_CAS
+
+/*
+ * Compare and swap, set *v to old value of target.
+ */
+#define CK_PR_CAS_O(S, M, T, C, I, R) \
+ CK_CC_INLINE static bool \
+ ck_pr_cas_##S##_value(M *target, T compare, T set, M *v) \
+ { \
+ bool z; \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg" I " %3, %0;" \
+ "mov %% " R ", %2;" \
+ "setz %1;" \
+ : "+m" (*(C *)target), \
+ "=a" (z), \
+ "=m" (*(C *)v) \
+ : "q" (set), \
+ "a" (compare) \
+ : "memory", "cc"); \
+ return z; \
+ }
+
+CK_PR_CAS_O(ptr, void, void *, char, "q", "rax")
+
+#define CK_PR_CAS_O_S(S, T, I, R) \
+ CK_PR_CAS_O(S, T, T, T, I, R)
+
+CK_PR_CAS_O_S(char, char, "b", "al")
+CK_PR_CAS_O_S(int, int, "l", "eax")
+CK_PR_CAS_O_S(uint, unsigned int, "l", "eax")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_CAS_O_S(double, double, "q", "rax")
+#endif
+CK_PR_CAS_O_S(64, uint64_t, "q", "rax")
+CK_PR_CAS_O_S(32, uint32_t, "l", "eax")
+CK_PR_CAS_O_S(16, uint16_t, "w", "ax")
+CK_PR_CAS_O_S(8, uint8_t, "b", "al")
+
+#undef CK_PR_CAS_O_S
+#undef CK_PR_CAS_O
+
+/*
+ * Contrary to the C interface, alignment requirements are those of uint64_t[2].
+ */
+CK_CC_INLINE static bool
+ck_pr_cas_64_2(uint64_t target[2], uint64_t compare[2], uint64_t set[2])
+{
+ bool z;
+
+ __asm__ __volatile__("movq 0(%4), %%rax;"
+ "movq 8(%4), %%rdx;"
+ CK_PR_LOCK_PREFIX "cmpxchg16b %0; setz %1"
+ : "+m" (*target),
+ "=q" (z)
+ : "b" (set[0]),
+ "c" (set[1]),
+ "q" (compare)
+ : "memory", "cc", "%rax", "%rdx");
+ return z;
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2(void *t, void *c, void *s)
+{
+ return ck_pr_cas_64_2(CK_CPP_CAST(uint64_t *, t),
+ CK_CPP_CAST(uint64_t *, c),
+ CK_CPP_CAST(uint64_t *, s));
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_64_2_value(uint64_t target[2],
+ uint64_t compare[2],
+ uint64_t set[2],
+ uint64_t v[2])
+{
+ bool z;
+
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg16b %0;"
+ "setz %3"
+ : "+m" (*target),
+ "=a" (v[0]),
+ "=d" (v[1]),
+ "=q" (z)
+ : "a" (compare[0]),
+ "d" (compare[1]),
+ "b" (set[0]),
+ "c" (set[1])
+ : "memory", "cc");
+ return z;
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2_value(void *t, void *c, void *s, void *v)
+{
+ return ck_pr_cas_64_2_value(CK_CPP_CAST(uint64_t *,t),
+ CK_CPP_CAST(uint64_t *,c),
+ CK_CPP_CAST(uint64_t *,s),
+ CK_CPP_CAST(uint64_t *,v));
+}
+
+#define CK_PR_CAS_V(S, W, T) \
+CK_CC_INLINE static bool \
+ck_pr_cas_##S##_##W(T t[W], T c[W], T s[W]) \
+{ \
+ return ck_pr_cas_64_2((uint64_t *)(void *)t, \
+ (uint64_t *)(void *)c, \
+ (uint64_t *)(void *)s); \
+} \
+CK_CC_INLINE static bool \
+ck_pr_cas_##S##_##W##_value(T *t, T c[W], T s[W], T *v) \
+{ \
+ return ck_pr_cas_64_2_value((uint64_t *)(void *)t, \
+ (uint64_t *)(void *)c, \
+ (uint64_t *)(void *)s, \
+ (uint64_t *)(void *)v); \
+}
+
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_CAS_V(double, 2, double)
+#endif
+CK_PR_CAS_V(char, 16, char)
+CK_PR_CAS_V(int, 4, int)
+CK_PR_CAS_V(uint, 4, unsigned int)
+CK_PR_CAS_V(32, 4, uint32_t)
+CK_PR_CAS_V(16, 8, uint16_t)
+CK_PR_CAS_V(8, 16, uint8_t)
+
+#undef CK_PR_CAS_V
+
+/*
+ * Atomic bit test operations.
+ */
+#define CK_PR_BT(K, S, T, P, C, I) \
+ CK_CC_INLINE static bool \
+ ck_pr_##K##_##S(T *target, unsigned int b) \
+ { \
+ bool c; \
+ __asm__ __volatile__(CK_PR_LOCK_PREFIX I "; setc %1" \
+ : "+m" (*(C *)target), \
+ "=q" (c) \
+ : "q" ((P)b) \
+ : "memory", "cc"); \
+ return c; \
+ }
+
+#define CK_PR_BT_S(K, S, T, I) CK_PR_BT(K, S, T, T, T, I)
+
+#define CK_PR_GENERATE(K) \
+ CK_PR_BT(K, ptr, void, uint64_t, char, #K "q %2, %0") \
+ CK_PR_BT_S(K, uint, unsigned int, #K "l %2, %0") \
+ CK_PR_BT_S(K, int, int, #K "l %2, %0") \
+ CK_PR_BT_S(K, 64, uint64_t, #K "q %2, %0") \
+ CK_PR_BT_S(K, 32, uint32_t, #K "l %2, %0") \
+ CK_PR_BT_S(K, 16, uint16_t, #K "w %w2, %0")
+
+CK_PR_GENERATE(btc)
+CK_PR_GENERATE(bts)
+CK_PR_GENERATE(btr)
+
+#undef CK_PR_GENERATE
+#undef CK_PR_BT
+
+#endif /* CK_PR_X86_64_H */
+
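
An illustrative usage sketch of the primitives generated above (not from the upstream tree), assuming the Concurrency Kit headers are on the include path; the counter and slot variables and the chosen bit index are purely illustrative.

#include <ck_pr.h>
#include <stdbool.h>
#include <stdio.h>

static unsigned int counter = 0;
static int slot = 0;

int
main(void)
{
        unsigned int before;
        bool bit_was_set;
        int previous;

        /* Atomically add 1 and observe the previous value. */
        before = ck_pr_faa_uint(&counter, 1);

        /* Compare-and-swap 0 -> 42, capturing the old value in previous. */
        if (ck_pr_cas_int_value(&slot, 0, 42, &previous) == true)
                printf("CAS succeeded, old value was %d\n", previous);

        /* Atomically set bit 3 of counter; the return value is the bit's prior state. */
        bit_was_set = ck_pr_bts_uint(&counter, 3);

        printf("counter: %u -> %u, bit 3 was %s\n",
            before, ck_pr_load_uint(&counter), bit_was_set ? "set" : "clear");
        return 0;
}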
diff --git a/include/gcc/x86_64/ck_pr_rtm.h b/include/gcc/x86_64/ck_pr_rtm.h
new file mode 100644
index 0000000..45c7b9d
--- /dev/null
+++ b/include/gcc/x86_64/ck_pr_rtm.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2012,2013 Intel Corporation
+ * Author: Andi Kleen
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that: (1) source code distributions
+ * retain the above copyright notice and this paragraph in its entirety, (2)
+ * distributions including binary code include the above copyright notice and
+ * this paragraph in its entirety in the documentation or other materials
+ * provided with the distribution
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef CK_PR_X86_64_RTM_H
+#define CK_PR_X86_64_RTM_H
+
+#ifndef CK_PR_X86_64_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+#define CK_F_PR_RTM
+
+#include <ck_cc.h>
+#include <ck_stdbool.h>
+
+#define CK_PR_RTM_STARTED (~0U)
+#define CK_PR_RTM_EXPLICIT (1 << 0)
+#define CK_PR_RTM_RETRY (1 << 1)
+#define CK_PR_RTM_CONFLICT (1 << 2)
+#define CK_PR_RTM_CAPACITY (1 << 3)
+#define CK_PR_RTM_DEBUG (1 << 4)
+#define CK_PR_RTM_NESTED (1 << 5)
+#define CK_PR_RTM_CODE(x) (((x) >> 24) & 0xFF)
+
+CK_CC_INLINE static unsigned int
+ck_pr_rtm_begin(void)
+{
+ unsigned int r = CK_PR_RTM_STARTED;
+
+ __asm__ __volatile__(".byte 0xc7,0xf8;"
+ ".long 0;"
+ : "+a" (r)
+ :
+ : "memory");
+
+ return r;
+}
+
+CK_CC_INLINE static void
+ck_pr_rtm_end(void)
+{
+
+ __asm__ __volatile__(".byte 0x0f,0x01,0xd5" ::: "memory");
+ return;
+}
+
+CK_CC_INLINE static void
+ck_pr_rtm_abort(const unsigned int status)
+{
+
+ __asm__ __volatile__(".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory");
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_pr_rtm_test(void)
+{
+ bool r;
+
+ __asm__ __volatile__(".byte 0x0f,0x01,0xd6;"
+ "setnz %0"
+ : "=r" (r)
+ :
+ : "memory");
+
+ return r;
+}
+
+#endif /* CK_PR_X86_64_RTM_H */
+
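
A hedged sketch of the usual RTM begin/fallback pattern these wrappers support, assuming the library was configured with CK_MD_RTM_ENABLE defined and the CPU implements TSX; the fallback spinlock and the shared counter are illustrative, not part of the upstream sources.

#include <ck_pr.h>
#include <ck_spinlock.h>

static ck_spinlock_fas_t fallback = CK_SPINLOCK_FAS_INITIALIZER;
static unsigned int shared;

void
increment(void)
{
        unsigned int status;

        status = ck_pr_rtm_begin();
        if (status == CK_PR_RTM_STARTED) {
                /* Abort if the fallback lock is held, to remain serializable. */
                if (ck_spinlock_fas_locked(&fallback) == true)
                        ck_pr_rtm_abort(0);

                shared++;
                ck_pr_rtm_end();
                return;
        }

        /* Transaction aborted or RTM unavailable: take the fallback lock. */
        ck_spinlock_fas_lock(&fallback);
        shared++;
        ck_spinlock_fas_unlock(&fallback);
}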
diff --git a/include/spinlock/anderson.h b/include/spinlock/anderson.h
new file mode 100644
index 0000000..bebc5d8
--- /dev/null
+++ b/include/spinlock/anderson.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_SPINLOCK_ANDERSON_H
+#define CK_SPINLOCK_ANDERSON_H
+
+#include <ck_cc.h>
+#include <ck_limits.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+
+#ifndef CK_F_SPINLOCK_ANDERSON
+#define CK_F_SPINLOCK_ANDERSON
+/*
+ * This is an implementation of Anderson's array-based queuing lock.
+ */
+struct ck_spinlock_anderson_thread {
+ unsigned int locked;
+ unsigned int position;
+};
+typedef struct ck_spinlock_anderson_thread ck_spinlock_anderson_thread_t;
+
+struct ck_spinlock_anderson {
+ struct ck_spinlock_anderson_thread *slots;
+ unsigned int count;
+ unsigned int wrap;
+ unsigned int mask;
+ char pad[CK_MD_CACHELINE - sizeof(unsigned int) * 3 - sizeof(void *)];
+ unsigned int next;
+};
+typedef struct ck_spinlock_anderson ck_spinlock_anderson_t;
+
+CK_CC_INLINE static void
+ck_spinlock_anderson_init(struct ck_spinlock_anderson *lock,
+ struct ck_spinlock_anderson_thread *slots,
+ unsigned int count)
+{
+ unsigned int i;
+
+ slots[0].locked = false;
+ slots[0].position = 0;
+ for (i = 1; i < count; i++) {
+ slots[i].locked = true;
+ slots[i].position = i;
+ }
+
+ lock->slots = slots;
+ lock->count = count;
+ lock->mask = count - 1;
+ lock->next = 0;
+
+ /*
+	 * If the number of threads is not a power of two, then compute an
+	 * appropriate wrap-around value for the case where the next-slot
+	 * counter overflows.
+ */
+ if (count & (count - 1))
+ lock->wrap = (UINT_MAX % count) + 1;
+ else
+ lock->wrap = 0;
+
+ ck_pr_barrier();
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_anderson_locked(struct ck_spinlock_anderson *lock)
+{
+ unsigned int position;
+ bool r;
+
+ position = ck_pr_load_uint(&lock->next) & lock->mask;
+ r = ck_pr_load_uint(&lock->slots[position].locked);
+ ck_pr_fence_acquire();
+ return r;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_anderson_lock(struct ck_spinlock_anderson *lock,
+ struct ck_spinlock_anderson_thread **slot)
+{
+ unsigned int position, next;
+ unsigned int count = lock->count;
+
+ /*
+ * If count is not a power of 2, then it is possible for an overflow
+ * to reallocate beginning slots to more than one thread. To avoid this
+ * use a compare-and-swap.
+ */
+ if (lock->wrap != 0) {
+ position = ck_pr_load_uint(&lock->next);
+
+ do {
+ if (position == UINT_MAX)
+ next = lock->wrap;
+ else
+ next = position + 1;
+ } while (ck_pr_cas_uint_value(&lock->next, position,
+ next, &position) == false);
+
+ position %= count;
+ } else {
+ position = ck_pr_faa_uint(&lock->next, 1);
+ position &= lock->mask;
+ }
+
+ /* Serialize with respect to previous thread's store. */
+ ck_pr_fence_load();
+
+ /*
+ * Spin until slot is marked as unlocked. First slot is initialized to
+ * false.
+ */
+ while (ck_pr_load_uint(&lock->slots[position].locked) == true)
+ ck_pr_stall();
+
+ /* Prepare slot for potential re-use by another thread. */
+ ck_pr_store_uint(&lock->slots[position].locked, true);
+ ck_pr_fence_lock();
+
+ *slot = lock->slots + position;
+ return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_anderson_unlock(struct ck_spinlock_anderson *lock,
+ struct ck_spinlock_anderson_thread *slot)
+{
+ unsigned int position;
+
+ ck_pr_fence_unlock();
+
+ /* Mark next slot as available. */
+ if (lock->wrap == 0)
+ position = (slot->position + 1) & lock->mask;
+ else
+ position = (slot->position + 1) % lock->count;
+
+ ck_pr_store_uint(&lock->slots[position].locked, false);
+ return;
+}
+#endif /* CK_F_SPINLOCK_ANDERSON */
+#endif /* CK_SPINLOCK_ANDERSON_H */
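
A usage sketch for the array-based queuing lock above (illustrative, not from the upstream tree): one slot is allocated per thread that may ever contend, and lock() hands back the slot to release. N_THREADS is an assumption; a power of two avoids the wrap-around path.

#include <ck_spinlock.h>
#include <stdlib.h>

#define N_THREADS 8	/* Illustrative upper bound on contending threads. */

static ck_spinlock_anderson_t lock;
static ck_spinlock_anderson_thread_t *slots;

void
setup(void)
{
        /* One slot per thread that may ever contend for the lock. */
        slots = malloc(sizeof(*slots) * N_THREADS);
        ck_spinlock_anderson_init(&lock, slots, N_THREADS);
}

void
critical_section(void)
{
        ck_spinlock_anderson_thread_t *slot;

        ck_spinlock_anderson_lock(&lock, &slot);
        /* ... critical section; the acquired slot is released below ... */
        ck_spinlock_anderson_unlock(&lock, slot);
}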
diff --git a/include/spinlock/cas.h b/include/spinlock/cas.h
new file mode 100644
index 0000000..ff6d723
--- /dev/null
+++ b/include/spinlock/cas.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_SPINLOCK_CAS_H
+#define CK_SPINLOCK_CAS_H
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_elide.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+
+#ifndef CK_F_SPINLOCK_CAS
+#define CK_F_SPINLOCK_CAS
+/*
+ * This is a simple CACAS (TATAS) spinlock implementation.
+ */
+struct ck_spinlock_cas {
+ unsigned int value;
+};
+typedef struct ck_spinlock_cas ck_spinlock_cas_t;
+
+#define CK_SPINLOCK_CAS_INITIALIZER {false}
+
+CK_CC_INLINE static void
+ck_spinlock_cas_init(struct ck_spinlock_cas *lock)
+{
+
+ lock->value = false;
+ ck_pr_barrier();
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_cas_trylock(struct ck_spinlock_cas *lock)
+{
+ unsigned int value;
+
+ value = ck_pr_fas_uint(&lock->value, true);
+ ck_pr_fence_lock();
+ return !value;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_cas_locked(struct ck_spinlock_cas *lock)
+{
+ bool r = ck_pr_load_uint(&lock->value);
+
+ ck_pr_fence_acquire();
+ return r;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_cas_lock(struct ck_spinlock_cas *lock)
+{
+
+ while (ck_pr_cas_uint(&lock->value, false, true) == false) {
+ while (ck_pr_load_uint(&lock->value) == true)
+ ck_pr_stall();
+ }
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_cas_lock_eb(struct ck_spinlock_cas *lock)
+{
+ ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
+
+ while (ck_pr_cas_uint(&lock->value, false, true) == false)
+ ck_backoff_eb(&backoff);
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_cas_unlock(struct ck_spinlock_cas *lock)
+{
+
+ /* Set lock state to unlocked. */
+ ck_pr_fence_unlock();
+ ck_pr_store_uint(&lock->value, false);
+ return;
+}
+
+CK_ELIDE_PROTOTYPE(ck_spinlock_cas, ck_spinlock_cas_t,
+ ck_spinlock_cas_locked, ck_spinlock_cas_lock,
+ ck_spinlock_cas_locked, ck_spinlock_cas_unlock)
+
+CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_cas, ck_spinlock_cas_t,
+ ck_spinlock_cas_locked, ck_spinlock_cas_trylock)
+
+#endif /* CK_F_SPINLOCK_CAS */
+#endif /* CK_SPINLOCK_CAS_H */
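
A minimal sketch of the CACAS lock's interface (illustrative, not from the upstream tree), assuming the ck_spinlock.h umbrella header; the counter is a stand-in for any shared state.

#include <ck_spinlock.h>
#include <stdbool.h>

static ck_spinlock_cas_t lock = CK_SPINLOCK_CAS_INITIALIZER;
static unsigned long counter;

void
slow_path(void)
{
        ck_spinlock_cas_lock(&lock);	/* Spins: test-and-test-and-set. */
        counter++;
        ck_spinlock_cas_unlock(&lock);
}

bool
fast_path(void)
{
        if (ck_spinlock_cas_trylock(&lock) == false)
                return false;

        counter++;
        ck_spinlock_cas_unlock(&lock);
        return true;
}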
diff --git a/include/spinlock/clh.h b/include/spinlock/clh.h
new file mode 100644
index 0000000..1133806
--- /dev/null
+++ b/include/spinlock/clh.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_SPINLOCK_CLH_H
+#define CK_SPINLOCK_CLH_H
+
+#include <ck_cc.h>
+#include <ck_limits.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+#include <ck_stddef.h>
+
+#ifndef CK_F_SPINLOCK_CLH
+#define CK_F_SPINLOCK_CLH
+
+struct ck_spinlock_clh {
+ unsigned int wait;
+ struct ck_spinlock_clh *previous;
+};
+typedef struct ck_spinlock_clh ck_spinlock_clh_t;
+
+CK_CC_INLINE static void
+ck_spinlock_clh_init(struct ck_spinlock_clh **lock, struct ck_spinlock_clh *unowned)
+{
+
+ unowned->previous = NULL;
+ unowned->wait = false;
+ *lock = unowned;
+ ck_pr_barrier();
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_clh_locked(struct ck_spinlock_clh **queue)
+{
+ struct ck_spinlock_clh *head;
+ bool r;
+
+ head = ck_pr_load_ptr(queue);
+ r = ck_pr_load_uint(&head->wait);
+ ck_pr_fence_acquire();
+ return r;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_clh_lock(struct ck_spinlock_clh **queue, struct ck_spinlock_clh *thread)
+{
+ struct ck_spinlock_clh *previous;
+
+ /* Indicate to the next thread on queue that they will have to block. */
+ thread->wait = true;
+ ck_pr_fence_store_atomic();
+
+ /*
+ * Mark current request as last request. Save reference to previous
+ * request.
+ */
+ previous = ck_pr_fas_ptr(queue, thread);
+ thread->previous = previous;
+
+ /* Wait until previous thread is done with lock. */
+ ck_pr_fence_load();
+ while (ck_pr_load_uint(&previous->wait) == true)
+ ck_pr_stall();
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_clh_unlock(struct ck_spinlock_clh **thread)
+{
+ struct ck_spinlock_clh *previous;
+
+ /*
+ * If there are waiters, they are spinning on the current node wait
+ * flag. The flag is cleared so that the successor may complete an
+ * acquisition. If the caller is pre-empted then the predecessor field
+ * may be updated by a successor's lock operation. In order to avoid
+ * this, save a copy of the predecessor before setting the flag.
+ */
+ previous = thread[0]->previous;
+
+ /*
+	 * We have to pay this cost anyway, so use it as a compiler barrier too.
+ */
+ ck_pr_fence_unlock();
+ ck_pr_store_uint(&(*thread)->wait, false);
+
+ /*
+ * Predecessor is guaranteed not to be spinning on previous request,
+ * so update caller to use previous structure. This allows successor
+ * all the time in the world to successfully read updated wait flag.
+ */
+ *thread = previous;
+ return;
+}
+#endif /* CK_F_SPINLOCK_CLH */
+#endif /* CK_SPINLOCK_CLH_H */
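
A sketch of CLH usage under the node-recycling discipline described in the unlock comment above (illustrative, not from the upstream tree): the queue is seeded with one unowned node, and after unlock the caller's node pointer refers to the predecessor's node, which is reused rather than freed. The malloc'd nodes are assumptions for illustration.

#include <ck_spinlock.h>
#include <stdlib.h>

static ck_spinlock_clh_t *queue;

void
setup(void)
{
        /* Seed the queue with one "unowned" node. */
        ck_spinlock_clh_init(&queue, malloc(sizeof(ck_spinlock_clh_t)));
}

/* Each thread allocates one node and keeps the pointer for its lifetime. */
void
critical_section(ck_spinlock_clh_t **node)
{
        ck_spinlock_clh_lock(&queue, *node);
        /* ... critical section ... */
        ck_spinlock_clh_unlock(node);
        /*
         * *node now refers to the predecessor's node: reuse it on the
         * next acquisition rather than freeing it.
         */
}

A thread would allocate its node once, e.g. ck_spinlock_clh_t *node = malloc(sizeof *node), and then call critical_section(&node) for every acquisition.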
diff --git a/include/spinlock/dec.h b/include/spinlock/dec.h
new file mode 100644
index 0000000..11d36dd
--- /dev/null
+++ b/include/spinlock/dec.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_SPINLOCK_DEC_H
+#define CK_SPINLOCK_DEC_H
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_elide.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+
+#ifndef CK_F_SPINLOCK_DEC
+#define CK_F_SPINLOCK_DEC
+/*
+ * This is similar to the CACAS lock but makes use of an atomic decrement
+ * operation to check if the lock value was decremented to 0 from 1. The
+ * idea is that a decrement operation is cheaper than a compare-and-swap.
+ */
+struct ck_spinlock_dec {
+ unsigned int value;
+};
+typedef struct ck_spinlock_dec ck_spinlock_dec_t;
+
+#define CK_SPINLOCK_DEC_INITIALIZER {1}
+
+CK_CC_INLINE static void
+ck_spinlock_dec_init(struct ck_spinlock_dec *lock)
+{
+
+ lock->value = 1;
+ ck_pr_barrier();
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_dec_trylock(struct ck_spinlock_dec *lock)
+{
+ unsigned int value;
+
+ value = ck_pr_fas_uint(&lock->value, 0);
+ ck_pr_fence_lock();
+ return value == 1;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_dec_locked(struct ck_spinlock_dec *lock)
+{
+ bool r;
+
+ r = ck_pr_load_uint(&lock->value) != 1;
+ ck_pr_fence_acquire();
+ return r;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_dec_lock(struct ck_spinlock_dec *lock)
+{
+ bool r;
+
+ for (;;) {
+ /*
+ * Only one thread is guaranteed to decrement lock to 0.
+ * Overflow must be protected against. No more than
+ * UINT_MAX lock requests can happen while the lock is held.
+ */
+ ck_pr_dec_uint_zero(&lock->value, &r);
+ if (r == true)
+ break;
+
+ /* Load value without generating write cycles. */
+ while (ck_pr_load_uint(&lock->value) != 1)
+ ck_pr_stall();
+ }
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_dec_lock_eb(struct ck_spinlock_dec *lock)
+{
+ ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
+ bool r;
+
+ for (;;) {
+ ck_pr_dec_uint_zero(&lock->value, &r);
+ if (r == true)
+ break;
+
+ ck_backoff_eb(&backoff);
+ }
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_dec_unlock(struct ck_spinlock_dec *lock)
+{
+
+ ck_pr_fence_unlock();
+
+ /*
+ * Unconditionally set lock value to 1 so someone can decrement lock
+ * to 0.
+ */
+ ck_pr_store_uint(&lock->value, 1);
+ return;
+}
+
+CK_ELIDE_PROTOTYPE(ck_spinlock_dec, ck_spinlock_dec_t,
+ ck_spinlock_dec_locked, ck_spinlock_dec_lock,
+ ck_spinlock_dec_locked, ck_spinlock_dec_unlock)
+
+CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_dec, ck_spinlock_dec_t,
+ ck_spinlock_dec_locked, ck_spinlock_dec_trylock)
+
+#endif /* CK_F_SPINLOCK_DEC */
+#endif /* CK_SPINLOCK_DEC_H */
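
The interface mirrors the CACAS lock above, so this sketch (illustrative, not from the upstream tree) instead shows the trylock fast path combined with the exponential-back-off fallback; the hit counter is a stand-in for any shared state.

#include <ck_spinlock.h>

static ck_spinlock_dec_t lock = CK_SPINLOCK_DEC_INITIALIZER;
static unsigned long hits;

void
record_hit(void)
{
        /* Uncontended case: a single atomic fetch-and-store. */
        if (ck_spinlock_dec_trylock(&lock) == false) {
                /* Contended: acquire with exponential back-off. */
                ck_spinlock_dec_lock_eb(&lock);
        }

        hits++;
        ck_spinlock_dec_unlock(&lock);
}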
diff --git a/include/spinlock/fas.h b/include/spinlock/fas.h
new file mode 100644
index 0000000..4e6c123
--- /dev/null
+++ b/include/spinlock/fas.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_SPINLOCK_FAS_H
+#define CK_SPINLOCK_FAS_H
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_elide.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+
+#ifndef CK_F_SPINLOCK_FAS
+#define CK_F_SPINLOCK_FAS
+
+struct ck_spinlock_fas {
+ unsigned int value;
+};
+typedef struct ck_spinlock_fas ck_spinlock_fas_t;
+
+#define CK_SPINLOCK_FAS_INITIALIZER {false}
+
+CK_CC_INLINE static void
+ck_spinlock_fas_init(struct ck_spinlock_fas *lock)
+{
+
+ lock->value = false;
+ ck_pr_barrier();
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_fas_trylock(struct ck_spinlock_fas *lock)
+{
+ bool value;
+
+ value = ck_pr_fas_uint(&lock->value, true);
+ ck_pr_fence_lock();
+
+ return !value;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_fas_locked(struct ck_spinlock_fas *lock)
+{
+ bool r;
+
+ r = ck_pr_load_uint(&lock->value);
+ ck_pr_fence_acquire();
+ return r;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_fas_lock(struct ck_spinlock_fas *lock)
+{
+
+ while (ck_pr_fas_uint(&lock->value, true) == true) {
+ while (ck_pr_load_uint(&lock->value) == true)
+ ck_pr_stall();
+ }
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_fas_lock_eb(struct ck_spinlock_fas *lock)
+{
+ ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
+
+ while (ck_pr_fas_uint(&lock->value, true) == true)
+ ck_backoff_eb(&backoff);
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_fas_unlock(struct ck_spinlock_fas *lock)
+{
+
+ ck_pr_fence_unlock();
+ ck_pr_store_uint(&lock->value, false);
+ return;
+}
+
+CK_ELIDE_PROTOTYPE(ck_spinlock_fas, ck_spinlock_fas_t,
+ ck_spinlock_fas_locked, ck_spinlock_fas_lock,
+ ck_spinlock_fas_locked, ck_spinlock_fas_unlock)
+
+CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_fas, ck_spinlock_fas_t,
+ ck_spinlock_fas_locked, ck_spinlock_fas_trylock)
+
+#endif /* CK_F_SPINLOCK_FAS */
+#endif /* CK_SPINLOCK_FAS_H */
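
A small self-contained example for the fetch-and-store lock (illustrative, not from the upstream tree), assuming POSIX threads; compile with -pthread and the Concurrency Kit include path. Thread and iteration counts are arbitrary.

#include <ck_spinlock.h>
#include <pthread.h>
#include <stdio.h>

static ck_spinlock_fas_t lock = CK_SPINLOCK_FAS_INITIALIZER;
static unsigned long counter;

static void *
worker(void *arg)
{
        int i;

        (void)arg;
        for (i = 0; i < 1000000; i++) {
                ck_spinlock_fas_lock(&lock);
                counter++;
                ck_spinlock_fas_unlock(&lock);
        }

        return NULL;
}

int
main(void)
{
        pthread_t t[2];
        int i;

        for (i = 0; i < 2; i++)
                pthread_create(&t[i], NULL, worker, NULL);
        for (i = 0; i < 2; i++)
                pthread_join(t[i], NULL);

        printf("%lu\n", counter);	/* Expect 2000000. */
        return 0;
}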
diff --git a/include/spinlock/hclh.h b/include/spinlock/hclh.h
new file mode 100644
index 0000000..296448b
--- /dev/null
+++ b/include/spinlock/hclh.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2013-2015 Olivier Houchard
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_SPINLOCK_HCLH_H
+#define CK_SPINLOCK_HCLH_H
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+#include <ck_stddef.h>
+
+#ifndef CK_F_SPINLOCK_HCLH
+#define CK_F_SPINLOCK_HCLH
+struct ck_spinlock_hclh {
+ unsigned int wait;
+ unsigned int splice;
+ int cluster_id;
+ struct ck_spinlock_hclh *previous;
+};
+typedef struct ck_spinlock_hclh ck_spinlock_hclh_t;
+
+CK_CC_INLINE static void
+ck_spinlock_hclh_init(struct ck_spinlock_hclh **lock,
+ struct ck_spinlock_hclh *unowned,
+ int cluster_id)
+{
+
+ unowned->previous = NULL;
+ unowned->wait = false;
+ unowned->splice = false;
+ unowned->cluster_id = cluster_id;
+ *lock = unowned;
+ ck_pr_barrier();
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_hclh_locked(struct ck_spinlock_hclh **queue)
+{
+ struct ck_spinlock_hclh *head;
+ bool r;
+
+ head = ck_pr_load_ptr(queue);
+ r = ck_pr_load_uint(&head->wait);
+ ck_pr_fence_acquire();
+ return r;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_queue,
+ struct ck_spinlock_hclh **local_queue,
+ struct ck_spinlock_hclh *thread)
+{
+ struct ck_spinlock_hclh *previous, *local_tail;
+
+ /* Indicate to the next thread on queue that they will have to block. */
+ thread->wait = true;
+ thread->splice = false;
+ thread->cluster_id = (*local_queue)->cluster_id;
+
+ /* Serialize with respect to update of local queue. */
+ ck_pr_fence_store_atomic();
+
+ /* Mark current request as last request. Save reference to previous request. */
+ previous = ck_pr_fas_ptr(local_queue, thread);
+ thread->previous = previous;
+
+ /* Wait until previous thread from the local queue is done with lock. */
+ ck_pr_fence_load();
+ if (previous->previous != NULL &&
+ previous->cluster_id == thread->cluster_id) {
+ while (ck_pr_load_uint(&previous->wait) == true)
+ ck_pr_stall();
+
+ /* We're head of the global queue, we're done */
+ if (ck_pr_load_uint(&previous->splice) == false)
+ return;
+ }
+
+ /* Now we need to splice the local queue into the global queue. */
+ local_tail = ck_pr_load_ptr(local_queue);
+ previous = ck_pr_fas_ptr(glob_queue, local_tail);
+
+ ck_pr_store_uint(&local_tail->splice, true);
+
+ /* Wait until previous thread from the global queue is done with lock. */
+ while (ck_pr_load_uint(&previous->wait) == true)
+ ck_pr_stall();
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_hclh_unlock(struct ck_spinlock_hclh **thread)
+{
+ struct ck_spinlock_hclh *previous;
+
+ /*
+ * If there are waiters, they are spinning on the current node wait
+ * flag. The flag is cleared so that the successor may complete an
+ * acquisition. If the caller is pre-empted then the predecessor field
+ * may be updated by a successor's lock operation. In order to avoid
+ * this, save a copy of the predecessor before setting the flag.
+ */
+ previous = thread[0]->previous;
+
+	/* We have to pay this cost anyway, so use it as a compiler barrier too. */
+ ck_pr_fence_unlock();
+ ck_pr_store_uint(&(*thread)->wait, false);
+
+ /*
+ * Predecessor is guaranteed not to be spinning on previous request,
+ * so update caller to use previous structure. This allows successor
+ * all the time in the world to successfully read updated wait flag.
+ */
+ *thread = previous;
+ return;
+}
+#endif /* CK_F_SPINLOCK_HCLH */
+#endif /* CK_SPINLOCK_HCLH_H */
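
A sketch of the hierarchical CLH setup implied by the interface above (illustrative, not from the upstream tree): one global queue plus one local queue per cluster, each seeded with an unowned node. N_CLUSTERS and the -1 cluster id chosen for the global seed are assumptions for illustration; as with CLH, each thread keeps one node and recycles the predecessor's node after unlock.

#include <ck_spinlock.h>
#include <stdlib.h>

#define N_CLUSTERS 2	/* Illustrative: e.g. one local queue per NUMA node. */

static ck_spinlock_hclh_t *glob_queue;
static ck_spinlock_hclh_t *local_queue[N_CLUSTERS];

void
setup(void)
{
        int i;

        /* Seed the global queue and each per-cluster local queue. */
        ck_spinlock_hclh_init(&glob_queue, malloc(sizeof(ck_spinlock_hclh_t)), -1);
        for (i = 0; i < N_CLUSTERS; i++) {
                ck_spinlock_hclh_init(&local_queue[i],
                    malloc(sizeof(ck_spinlock_hclh_t)), i);
        }
}

/* Each thread keeps a node; cluster identifies the caller's NUMA domain. */
void
critical_section(ck_spinlock_hclh_t **node, int cluster)
{
        ck_spinlock_hclh_lock(&glob_queue, &local_queue[cluster], *node);
        /* ... critical section ... */
        ck_spinlock_hclh_unlock(node);
        /* As with CLH, *node now points at a recyclable predecessor node. */
}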
diff --git a/include/spinlock/mcs.h b/include/spinlock/mcs.h
new file mode 100644
index 0000000..262c720
--- /dev/null
+++ b/include/spinlock/mcs.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_SPINLOCK_MCS_H
+#define CK_SPINLOCK_MCS_H
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+#include <ck_stddef.h>
+
+#ifndef CK_F_SPINLOCK_MCS
+#define CK_F_SPINLOCK_MCS
+
+struct ck_spinlock_mcs {
+ unsigned int locked;
+ struct ck_spinlock_mcs *next;
+};
+typedef struct ck_spinlock_mcs * ck_spinlock_mcs_t;
+typedef struct ck_spinlock_mcs ck_spinlock_mcs_context_t;
+
+#define CK_SPINLOCK_MCS_INITIALIZER (NULL)
+
+CK_CC_INLINE static void
+ck_spinlock_mcs_init(struct ck_spinlock_mcs **queue)
+{
+
+ *queue = NULL;
+ ck_pr_barrier();
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_mcs_trylock(struct ck_spinlock_mcs **queue,
+ struct ck_spinlock_mcs *node)
+{
+ bool r;
+
+ node->locked = true;
+ node->next = NULL;
+ ck_pr_fence_store_atomic();
+
+ r = ck_pr_cas_ptr(queue, NULL, node);
+ ck_pr_fence_lock();
+ return r;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_mcs_locked(struct ck_spinlock_mcs **queue)
+{
+ bool r;
+
+ r = ck_pr_load_ptr(queue) != NULL;
+ ck_pr_fence_acquire();
+ return r;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_mcs_lock(struct ck_spinlock_mcs **queue,
+ struct ck_spinlock_mcs *node)
+{
+ struct ck_spinlock_mcs *previous;
+
+ /*
+ * In the case that there is a successor, let them know they must
+ * wait for us to unlock.
+ */
+ node->locked = true;
+ node->next = NULL;
+ ck_pr_fence_store_atomic();
+
+ /*
+ * Swap current tail with current lock request. If the swap operation
+ * returns NULL, it means the queue was empty. If the queue was empty,
+ * then the operation is complete.
+ */
+ previous = ck_pr_fas_ptr(queue, node);
+ if (previous != NULL) {
+ /*
+ * Let the previous lock holder know that we are waiting on
+ * them.
+ */
+ ck_pr_store_ptr(&previous->next, node);
+ while (ck_pr_load_uint(&node->locked) == true)
+ ck_pr_stall();
+ }
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_mcs_unlock(struct ck_spinlock_mcs **queue,
+ struct ck_spinlock_mcs *node)
+{
+ struct ck_spinlock_mcs *next;
+
+ ck_pr_fence_unlock();
+
+ next = ck_pr_load_ptr(&node->next);
+ if (next == NULL) {
+ /*
+		 * If there is no request following us, then it is a possibility
+ * that we are the current tail. In this case, we may just
+ * mark the spinlock queue as empty.
+ */
+ if (ck_pr_load_ptr(queue) == node &&
+ ck_pr_cas_ptr(queue, node, NULL) == true) {
+ return;
+ }
+
+ /*
+ * If the node is not the current tail then a lock operation
+ * is in-progress. In this case, busy-wait until the queue is
+ * in a consistent state to wake up the incoming lock
+ * request.
+ */
+ for (;;) {
+ next = ck_pr_load_ptr(&node->next);
+ if (next != NULL)
+ break;
+
+ ck_pr_stall();
+ }
+ }
+
+ /* Allow the next lock operation to complete. */
+ ck_pr_store_uint(&next->locked, false);
+ return;
+}
+#endif /* CK_F_SPINLOCK_MCS */
+#endif /* CK_SPINLOCK_MCS_H */
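
A sketch of MCS usage (illustrative, not from the upstream tree): the queue tail is the lock, and each acquisition supplies a context node that must remain live until the matching unlock returns.

#include <ck_spinlock.h>
#include <stdbool.h>

static ck_spinlock_mcs_t queue = CK_SPINLOCK_MCS_INITIALIZER;
static unsigned long counter;

void
critical_section(void)
{
        ck_spinlock_mcs_context_t node;	/* Must stay live until the unlock. */

        ck_spinlock_mcs_lock(&queue, &node);
        counter++;
        ck_spinlock_mcs_unlock(&queue, &node);
}

bool
try_critical_section(void)
{
        ck_spinlock_mcs_context_t node;

        if (ck_spinlock_mcs_trylock(&queue, &node) == false)
                return false;

        counter++;
        ck_spinlock_mcs_unlock(&queue, &node);
        return true;
}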
diff --git a/include/spinlock/ticket.h b/include/spinlock/ticket.h
new file mode 100644
index 0000000..3358547
--- /dev/null
+++ b/include/spinlock/ticket.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_SPINLOCK_TICKET_H
+#define CK_SPINLOCK_TICKET_H
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_elide.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+
+#ifndef CK_F_SPINLOCK_TICKET
+#define CK_F_SPINLOCK_TICKET
+/*
+ * If 16-bit or 32-bit increment is supported, implement support for
+ * trylock functionality on availability of 32-bit or 64-bit fetch-and-add
+ * and compare-and-swap. This code path is only applied to x86*.
+ */
+#if defined(CK_MD_TSO) && (defined(__x86__) || defined(__x86_64__))
+#if defined(CK_F_PR_FAA_32) && defined(CK_F_PR_INC_16) && defined(CK_F_PR_CAS_32)
+#define CK_SPINLOCK_TICKET_TYPE uint32_t
+#define CK_SPINLOCK_TICKET_TYPE_BASE uint16_t
+#define CK_SPINLOCK_TICKET_INC(x) ck_pr_inc_16(x)
+#define CK_SPINLOCK_TICKET_CAS(x, y, z) ck_pr_cas_32(x, y, z)
+#define CK_SPINLOCK_TICKET_FAA(x, y) ck_pr_faa_32(x, y)
+#define CK_SPINLOCK_TICKET_LOAD(x) ck_pr_load_32(x)
+#define CK_SPINLOCK_TICKET_INCREMENT (0x00010000UL)
+#define CK_SPINLOCK_TICKET_MASK (0xFFFFUL)
+#define CK_SPINLOCK_TICKET_SHIFT (16)
+#elif defined(CK_F_PR_FAA_64) && defined(CK_F_PR_INC_32) && defined(CK_F_PR_CAS_64)
+#define CK_SPINLOCK_TICKET_TYPE uint64_t
+#define CK_SPINLOCK_TICKET_TYPE_BASE uint32_t
+#define CK_SPINLOCK_TICKET_INC(x) ck_pr_inc_32(x)
+#define CK_SPINLOCK_TICKET_CAS(x, y, z) ck_pr_cas_64(x, y, z)
+#define CK_SPINLOCK_TICKET_FAA(x, y) ck_pr_faa_64(x, y)
+#define CK_SPINLOCK_TICKET_LOAD(x) ck_pr_load_64(x)
+#define CK_SPINLOCK_TICKET_INCREMENT (0x0000000100000000ULL)
+#define CK_SPINLOCK_TICKET_MASK (0xFFFFFFFFULL)
+#define CK_SPINLOCK_TICKET_SHIFT (32)
+#endif
+#endif /* CK_MD_TSO */
+
+#if defined(CK_SPINLOCK_TICKET_TYPE)
+#define CK_F_SPINLOCK_TICKET_TRYLOCK
+
+struct ck_spinlock_ticket {
+ CK_SPINLOCK_TICKET_TYPE value;
+};
+typedef struct ck_spinlock_ticket ck_spinlock_ticket_t;
+#define CK_SPINLOCK_TICKET_INITIALIZER { .value = 0 }
+
+CK_CC_INLINE static void
+ck_spinlock_ticket_init(struct ck_spinlock_ticket *ticket)
+{
+
+ ticket->value = 0;
+ ck_pr_barrier();
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_ticket_locked(struct ck_spinlock_ticket *ticket)
+{
+ CK_SPINLOCK_TICKET_TYPE request, position;
+
+ request = CK_SPINLOCK_TICKET_LOAD(&ticket->value);
+ position = request & CK_SPINLOCK_TICKET_MASK;
+ request >>= CK_SPINLOCK_TICKET_SHIFT;
+
+ ck_pr_fence_acquire();
+ return request != position;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
+{
+ CK_SPINLOCK_TICKET_TYPE request, position;
+
+ /* Get our ticket number and set next ticket number. */
+ request = CK_SPINLOCK_TICKET_FAA(&ticket->value,
+ CK_SPINLOCK_TICKET_INCREMENT);
+
+ position = request & CK_SPINLOCK_TICKET_MASK;
+ request >>= CK_SPINLOCK_TICKET_SHIFT;
+
+ while (request != position) {
+ ck_pr_stall();
+ position = CK_SPINLOCK_TICKET_LOAD(&ticket->value) &
+ CK_SPINLOCK_TICKET_MASK;
+ }
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
+{
+ CK_SPINLOCK_TICKET_TYPE request, position;
+ ck_backoff_t backoff;
+
+ /* Get our ticket number and set next ticket number. */
+ request = CK_SPINLOCK_TICKET_FAA(&ticket->value,
+ CK_SPINLOCK_TICKET_INCREMENT);
+
+ position = request & CK_SPINLOCK_TICKET_MASK;
+ request >>= CK_SPINLOCK_TICKET_SHIFT;
+
+ while (request != position) {
+ ck_pr_stall();
+ position = CK_SPINLOCK_TICKET_LOAD(&ticket->value) &
+ CK_SPINLOCK_TICKET_MASK;
+
+ backoff = (request - position) & CK_SPINLOCK_TICKET_MASK;
+ backoff <<= c;
+ ck_backoff_eb(&backoff);
+ }
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_ticket_trylock(struct ck_spinlock_ticket *ticket)
+{
+ CK_SPINLOCK_TICKET_TYPE snapshot, request, position;
+
+ snapshot = CK_SPINLOCK_TICKET_LOAD(&ticket->value);
+ position = snapshot & CK_SPINLOCK_TICKET_MASK;
+ request = snapshot >> CK_SPINLOCK_TICKET_SHIFT;
+
+ if (position != request)
+ return false;
+
+ if (CK_SPINLOCK_TICKET_CAS(&ticket->value,
+ snapshot, snapshot + CK_SPINLOCK_TICKET_INCREMENT) == false) {
+ return false;
+ }
+
+ ck_pr_fence_lock();
+ return true;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
+{
+
+ ck_pr_fence_unlock();
+ CK_SPINLOCK_TICKET_INC((CK_SPINLOCK_TICKET_TYPE_BASE *)(void *)&ticket->value);
+ return;
+}
+
+#undef CK_SPINLOCK_TICKET_TYPE
+#undef CK_SPINLOCK_TICKET_TYPE_BASE
+#undef CK_SPINLOCK_TICKET_INC
+#undef CK_SPINLOCK_TICKET_FAA
+#undef CK_SPINLOCK_TICKET_LOAD
+#undef CK_SPINLOCK_TICKET_INCREMENT
+#undef CK_SPINLOCK_TICKET_MASK
+#undef CK_SPINLOCK_TICKET_SHIFT
+#else
+/*
+ * MESI benefits from cacheline padding between next and current. This avoids
+ * invalidation of current from the cache due to incoming lock requests.
+ */
+struct ck_spinlock_ticket {
+ unsigned int next;
+ unsigned int position;
+};
+typedef struct ck_spinlock_ticket ck_spinlock_ticket_t;
+
+#define CK_SPINLOCK_TICKET_INITIALIZER {.next = 0, .position = 0}
+
+CK_CC_INLINE static void
+ck_spinlock_ticket_init(struct ck_spinlock_ticket *ticket)
+{
+
+ ticket->next = 0;
+ ticket->position = 0;
+ ck_pr_barrier();
+
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_spinlock_ticket_locked(struct ck_spinlock_ticket *ticket)
+{
+ bool r;
+
+ r = ck_pr_load_uint(&ticket->position) !=
+ ck_pr_load_uint(&ticket->next);
+ ck_pr_fence_acquire();
+ return r;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
+{
+ unsigned int request;
+
+ /* Get our ticket number and set next ticket number. */
+ request = ck_pr_faa_uint(&ticket->next, 1);
+
+ /*
+ * Busy-wait until our ticket number is current.
+ * We can get away without a fence here assuming
+ * our position counter does not overflow.
+ */
+ while (ck_pr_load_uint(&ticket->position) != request)
+ ck_pr_stall();
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
+{
+ ck_backoff_t backoff;
+ unsigned int request, position;
+
+ request = ck_pr_faa_uint(&ticket->next, 1);
+
+ for (;;) {
+ position = ck_pr_load_uint(&ticket->position);
+ if (position == request)
+ break;
+
+ backoff = request - position;
+ backoff <<= c;
+
+ /*
+ * Ideally, back off from generating cache traffic for at least as
+ * long as it takes the pending lock acquisition and release
+ * operations ahead of us to complete (assuming an empty critical
+ * section).
+ */
+ ck_backoff_eb(&backoff);
+ }
+
+ ck_pr_fence_lock();
+ return;
+}
+
+CK_CC_INLINE static void
+ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
+{
+ unsigned int update;
+
+ ck_pr_fence_unlock();
+
+ /*
+ * Update the current ticket value so the next lock request can
+ * proceed. Overflow is assumed to roll over, which is only an
+ * issue if there are 2^32 pending lock requests.
+ */
+ update = ck_pr_load_uint(&ticket->position);
+ ck_pr_store_uint(&ticket->position, update + 1);
+ return;
+}
+#endif /* !CK_F_SPINLOCK_TICKET_TRYLOCK */
+
+CK_ELIDE_PROTOTYPE(ck_spinlock_ticket, ck_spinlock_ticket_t,
+ ck_spinlock_ticket_locked, ck_spinlock_ticket_lock,
+ ck_spinlock_ticket_locked, ck_spinlock_ticket_unlock)
+
+CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_ticket, ck_spinlock_ticket_t,
+ ck_spinlock_ticket_locked, ck_spinlock_ticket_trylock)
+
+#endif /* CK_F_SPINLOCK_TICKET */
+#endif /* CK_SPINLOCK_TICKET_H */
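For orientation, a minimal usage sketch of the interface defined above; it relies only on the functions and the CK_SPINLOCK_TICKET_INITIALIZER macro from this header, the helper names are illustrative, and the backoff shift passed to ck_spinlock_ticket_lock_pb is an arbitrary example value.

#include <ck_spinlock.h>

static ck_spinlock_ticket_t lock = CK_SPINLOCK_TICKET_INITIALIZER;
static unsigned long counter;

static void
counter_add(unsigned long delta)
{

	/* FIFO acquisition: take a ticket and spin until it comes up. */
	ck_spinlock_ticket_lock(&lock);
	counter += delta;
	ck_spinlock_ticket_unlock(&lock);
	return;
}

static void
counter_add_pb(unsigned long delta)
{

	/*
	 * Same acquisition, but each waiter backs off in proportion to the
	 * number of tickets ahead of it; the shift of 1 is arbitrary.
	 */
	ck_spinlock_ticket_lock_pb(&lock, 1);
	counter += delta;
	ck_spinlock_ticket_unlock(&lock);
	return;
}

static bool
counter_add_try(unsigned long delta)
{

#ifdef CK_F_SPINLOCK_TICKET_TRYLOCK
	/* The trylock operation only exists for the fetch-and-add variant. */
	if (ck_spinlock_ticket_trylock(&lock) == false)
		return false;

	counter += delta;
	ck_spinlock_ticket_unlock(&lock);
	return true;
#else
	counter_add(delta);
	return true;
#endif
}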
diff --git a/regressions/Makefile b/regressions/Makefile
new file mode 100644
index 0000000..3195e52
--- /dev/null
+++ b/regressions/Makefile
@@ -0,0 +1,125 @@
+DIR=array \
+ backoff \
+ barrier \
+ bitmap \
+ brlock \
+ bytelock \
+ cohort \
+ epoch \
+ fifo \
+ hp \
+ hs \
+ rhs \
+ ht \
+ pflock \
+ pr \
+ queue \
+ ring \
+ rwlock \
+ swlock \
+ sequence \
+ spinlock \
+ stack \
+ tflock
+
+.PHONY: all clean check
+
+all:
+ $(MAKE) -C ./ck_array/validate all
+ $(MAKE) -C ./ck_cohort/validate all
+ $(MAKE) -C ./ck_cohort/benchmark all
+ $(MAKE) -C ./ck_bitmap/validate all
+ $(MAKE) -C ./ck_backoff/validate all
+ $(MAKE) -C ./ck_queue/validate all
+ $(MAKE) -C ./ck_brlock/validate all
+ $(MAKE) -C ./ck_ht/validate all
+ $(MAKE) -C ./ck_ht/benchmark all
+ $(MAKE) -C ./ck_brlock/benchmark all
+ $(MAKE) -C ./ck_spinlock/validate all
+ $(MAKE) -C ./ck_spinlock/benchmark all
+ $(MAKE) -C ./ck_fifo/validate all
+ $(MAKE) -C ./ck_fifo/benchmark all
+ $(MAKE) -C ./ck_pr/validate all
+ $(MAKE) -C ./ck_pr/benchmark all
+ $(MAKE) -C ./ck_hs/benchmark all
+ $(MAKE) -C ./ck_hs/validate all
+ $(MAKE) -C ./ck_rhs/benchmark all
+ $(MAKE) -C ./ck_rhs/validate all
+ $(MAKE) -C ./ck_barrier/validate all
+ $(MAKE) -C ./ck_barrier/benchmark all
+ $(MAKE) -C ./ck_bytelock/validate all
+ $(MAKE) -C ./ck_bytelock/benchmark all
+ $(MAKE) -C ./ck_epoch/validate all
+ $(MAKE) -C ./ck_rwcohort/validate all
+ $(MAKE) -C ./ck_rwcohort/benchmark all
+ $(MAKE) -C ./ck_sequence/validate all
+ $(MAKE) -C ./ck_sequence/benchmark all
+ $(MAKE) -C ./ck_stack/validate all
+ $(MAKE) -C ./ck_stack/benchmark all
+ $(MAKE) -C ./ck_ring/validate all
+ $(MAKE) -C ./ck_ring/benchmark all
+ $(MAKE) -C ./ck_rwlock/validate all
+ $(MAKE) -C ./ck_rwlock/benchmark all
+ $(MAKE) -C ./ck_tflock/validate all
+ $(MAKE) -C ./ck_tflock/benchmark all
+ $(MAKE) -C ./ck_swlock/validate all
+ $(MAKE) -C ./ck_swlock/benchmark all
+ $(MAKE) -C ./ck_pflock/validate all
+ $(MAKE) -C ./ck_pflock/benchmark all
+ $(MAKE) -C ./ck_hp/validate all
+ $(MAKE) -C ./ck_hp/benchmark all
+
+clean:
+ $(MAKE) -C ./ck_array/validate clean
+ $(MAKE) -C ./ck_pflock/validate clean
+ $(MAKE) -C ./ck_pflock/benchmark clean
+ $(MAKE) -C ./ck_tflock/validate clean
+ $(MAKE) -C ./ck_tflock/benchmark clean
+ $(MAKE) -C ./ck_rwcohort/validate clean
+ $(MAKE) -C ./ck_rwcohort/benchmark clean
+ $(MAKE) -C ./ck_backoff/validate clean
+ $(MAKE) -C ./ck_bitmap/validate clean
+ $(MAKE) -C ./ck_queue/validate clean
+ $(MAKE) -C ./ck_cohort/validate clean
+ $(MAKE) -C ./ck_cohort/benchmark clean
+ $(MAKE) -C ./ck_brlock/validate clean
+ $(MAKE) -C ./ck_ht/validate clean
+ $(MAKE) -C ./ck_ht/benchmark clean
+ $(MAKE) -C ./ck_hs/validate clean
+ $(MAKE) -C ./ck_hs/benchmark clean
+ $(MAKE) -C ./ck_rhs/validate clean
+ $(MAKE) -C ./ck_rhs/benchmark clean
+ $(MAKE) -C ./ck_brlock/benchmark clean
+ $(MAKE) -C ./ck_spinlock/validate clean
+ $(MAKE) -C ./ck_spinlock/benchmark clean
+ $(MAKE) -C ./ck_fifo/validate clean
+ $(MAKE) -C ./ck_fifo/benchmark clean
+ $(MAKE) -C ./ck_pr/validate clean
+ $(MAKE) -C ./ck_pr/benchmark clean
+ $(MAKE) -C ./ck_barrier/validate clean
+ $(MAKE) -C ./ck_barrier/benchmark clean
+ $(MAKE) -C ./ck_bytelock/validate clean
+ $(MAKE) -C ./ck_bytelock/benchmark clean
+ $(MAKE) -C ./ck_epoch/validate clean
+ $(MAKE) -C ./ck_sequence/validate clean
+ $(MAKE) -C ./ck_sequence/benchmark clean
+ $(MAKE) -C ./ck_stack/validate clean
+ $(MAKE) -C ./ck_stack/benchmark clean
+ $(MAKE) -C ./ck_ring/validate clean
+ $(MAKE) -C ./ck_ring/benchmark clean
+ $(MAKE) -C ./ck_rwlock/validate clean
+ $(MAKE) -C ./ck_rwlock/benchmark clean
+ $(MAKE) -C ./ck_swlock/validate clean
+ $(MAKE) -C ./ck_swlock/benchmark clean
+ $(MAKE) -C ./ck_hp/validate clean
+ $(MAKE) -C ./ck_hp/benchmark clean
+
+check: all
+ rc=0; \
+ for d in $(DIR) ; do \
+ echo "----[ Testing $$d...."; \
+ $(MAKE) -C ./ck_$$d/validate check || rc=1; \
+ echo; \
+ done; \
+ exit $$rc
+
diff --git a/regressions/Makefile.unsupported b/regressions/Makefile.unsupported
new file mode 100644
index 0000000..90aa877
--- /dev/null
+++ b/regressions/Makefile.unsupported
@@ -0,0 +1,9 @@
+.PHONY: all clean check
+
+all:
+ @echo Regressions are currently unsupported for out-of-source builds
+
+clean: all
+
+check: all
+
diff --git a/regressions/ck_array/validate/Makefile b/regressions/ck_array/validate/Makefile
new file mode 100644
index 0000000..3c48167
--- /dev/null
+++ b/regressions/ck_array/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=serial
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_array.h ../../../src/ck_array.c
+ $(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_array.c
+
+check: all
+ ./serial
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE -ggdb
diff --git a/regressions/ck_array/validate/serial.c b/regressions/ck_array/validate/serial.c
new file mode 100644
index 0000000..b6d7b56
--- /dev/null
+++ b/regressions/ck_array/validate/serial.c
@@ -0,0 +1,178 @@
+#include <ck_array.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../../common.h"
+
+#ifndef ITERATION
+#define ITERATION 128
+#endif
+
+static void
+my_free(void *p, size_t m, bool d)
+{
+
+ (void)m;
+ (void)d;
+
+ free(p);
+ return;
+}
+
+static void *
+my_malloc(size_t b)
+{
+
+ return malloc(b);
+}
+
+static void *
+my_realloc(void *r, size_t a, size_t b, bool d)
+{
+
+ (void)a;
+ (void)d;
+
+ return realloc(r, b);
+}
+
+int
+main(void)
+{
+ void *r;
+ uintptr_t i;
+ ck_array_t array;
+ ck_array_iterator_t iterator;
+ struct ck_malloc m = {
+ .malloc = my_malloc,
+ .free = NULL,
+ .realloc = my_realloc
+ };
+
+ if (ck_array_init(&array, CK_ARRAY_MODE_SPMC, &m, 4) == true)
+ ck_error("ck_array_init with NULL free succeeded\n");
+
+ m.free = my_free;
+ if (ck_array_init(&array, CK_ARRAY_MODE_SPMC, &m, 4) == false)
+ ck_error("ck_array_init\n");
+
+ for (i = 0; i < ITERATION; i++) {
+ if (ck_array_put(&array, (void *)i) == false)
+ ck_error("ck_error_put\n");
+
+ if (ck_array_remove(&array, (void *)i) == false)
+ ck_error("ck_error_remove after put\n");
+ }
+
+ i = 0; CK_ARRAY_FOREACH(&array, &iterator, &r) i++;
+ if (i != 0)
+ ck_error("Non-empty array after put -> remove workload.\n");
+
+ ck_array_commit(&array);
+
+ i = 0; CK_ARRAY_FOREACH(&array, &iterator, &r) i++;
+ if (i != 0)
+ ck_error("Non-empty array after put -> remove -> commit workload.\n");
+
+ for (i = 0; i < ITERATION; i++) {
+ if (ck_array_put(&array, (void *)i) == false)
+ ck_error("ck_error_put\n");
+ }
+
+ i = 0; CK_ARRAY_FOREACH(&array, &iterator, &r) i++;
+ if (i != 0)
+ ck_error("Non-empty array after put workload.\n");
+
+ for (i = 0; i < ITERATION; i++) {
+ if (ck_array_remove(&array, (void *)i) == false)
+ ck_error("ck_error_remove after put\n");
+ }
+
+ i = 0; CK_ARRAY_FOREACH(&array, &iterator, &r) i++;
+ if (i != 0)
+ ck_error("Non-empty array after put -> remove workload.\n");
+
+ ck_array_commit(&array);
+
+ i = 0; CK_ARRAY_FOREACH(&array, &iterator, &r) i++;
+ if (i != 0)
+ ck_error("Non-empty array after put -> remove -> commit workload.\n");
+
+ for (i = 0; i < ITERATION; i++) {
+ if (ck_array_put(&array, (void *)i) == false)
+ ck_error("ck_error_put\n");
+ }
+
+ ck_array_commit(&array);
+
+ i = 0;
+ CK_ARRAY_FOREACH(&array, &iterator, &r) {
+ i++;
+ }
+
+ if (i != ITERATION)
+ ck_error("Incorrect item count in iteration\n");
+
+ ck_array_remove(&array, (void *)(uintptr_t)0);
+ ck_array_remove(&array, (void *)(uintptr_t)1);
+ ck_array_commit(&array);
+ i = 0; CK_ARRAY_FOREACH(&array, &iterator, &r) i++;
+ if (i != ITERATION - 2 || ck_array_length(&array) != ITERATION - 2)
+ ck_error("Incorrect item count in iteration after remove\n");
+
+ if (ck_array_put_unique(&array, (void *)UINTPTR_MAX) != 0)
+ ck_error("Unique value put failed.\n");
+
+ if (ck_array_put_unique(&array, (void *)(uintptr_t)4) != 1)
+ ck_error("put of 4 not detected as non-unique.\n");
+
+ if (ck_array_put_unique(&array, (void *)UINTPTR_MAX) != 1)
+ ck_error("put of UINTPTR_MAX not detected as non-unique.\n");
+
+ ck_array_commit(&array);
+ i = 0;
+ CK_ARRAY_FOREACH(&array, &iterator, &r) {
+ i++;
+ }
+ if (i != ITERATION - 1 || ck_array_length(&array) != ITERATION - 1)
+ ck_error("Incorrect item count in iteration after unique put\n");
+
+ if (ck_array_initialized(&array) == false)
+ ck_error("Error, expected array to be initialized.\n");
+
+ for (i = 0; i < ITERATION * 4; i++) {
+ ck_array_remove(&array, (void *)i);
+ }
+
+ for (i = 0; i < ITERATION * 16; i++) {
+ ck_array_put(&array, (void *)i);
+ }
+
+ ck_array_commit(&array);
+
+ for (i = 0; i < ITERATION * 128; i++) {
+ ck_array_put(&array, (void *)i);
+ if (ck_array_put_unique(&array, (void *)i) != 1)
+ ck_error("put_unique for non-unique value should fail.\n");
+ }
+
+ for (i = 0; i < ITERATION * 64; i++) {
+ bool f = ck_array_remove(&array, (void *)i);
+
+ if (f == false && i < ITERATION * 144)
+ ck_error("Remove failed for existing entry.\n");
+
+ if (f == true && i > ITERATION * 144)
+ ck_error("Remove succeeded for non-existing entry.\n");
+ }
+
+ ck_array_commit(&array);
+ ck_array_deinit(&array, false);
+
+ if (ck_array_initialized(&array) == true)
+ ck_error("Error, expected array to be uninitialized.\n");
+
+ return 0;
+}
+
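The regression above deliberately exercises edge cases; the underlying single-writer idiom it validates is much smaller. A minimal sketch of that put/commit pattern, assuming the same struct ck_malloc callback signatures used in serial.c (the shim and registry names are illustrative):

#include <ck_array.h>
#include <stdlib.h>

/* Allocator shims matching the struct ck_malloc callback signatures. */
static void *
shim_malloc(size_t b)
{

	return malloc(b);
}

static void *
shim_realloc(void *p, size_t a, size_t b, bool d)
{

	(void)a;
	(void)d;
	return realloc(p, b);
}

static void
shim_free(void *p, size_t b, bool d)
{

	(void)b;
	(void)d;
	free(p);
	return;
}

static struct ck_malloc allocator = {
	.malloc = shim_malloc,
	.realloc = shim_realloc,
	.free = shim_free
};

static ck_array_t registry;

static bool
registry_setup(void)
{

	/* SPMC mode: a single writer, any number of concurrent readers. */
	return ck_array_init(&registry, CK_ARRAY_MODE_SPMC, &allocator, 8);
}

static bool
registry_add(void *p)
{

	if (ck_array_put(&registry, p) == false)
		return false;

	/* Readers only observe the entry once it has been committed. */
	return ck_array_commit(&registry);
}

static unsigned int
registry_count(void)
{
	ck_array_iterator_t it;
	unsigned int n = 0;
	void *cursor;

	CK_ARRAY_FOREACH(&registry, &it, &cursor)
		n++;

	return n;
}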
diff --git a/regressions/ck_backoff/validate/Makefile b/regressions/ck_backoff/validate/Makefile
new file mode 100644
index 0000000..39e6d4f
--- /dev/null
+++ b/regressions/ck_backoff/validate/Makefile
@@ -0,0 +1,15 @@
+.PHONY: check clean
+
+all: validate
+
+validate: validate.c ../../../include/ck_backoff.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate
+
+clean:
+ rm -rf validate *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_backoff/validate/validate.c b/regressions/ck_backoff/validate/validate.c
new file mode 100644
index 0000000..137d48e
--- /dev/null
+++ b/regressions/ck_backoff/validate/validate.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <ck_backoff.h>
+#include "../../common.h"
+
+int
+main(void)
+{
+ ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
+ const ck_backoff_t ceiling = CK_BACKOFF_CEILING + 1;
+ unsigned int i = 0;
+
+ fprintf(stderr, "Ceiling is: %u (%#x)\n", CK_BACKOFF_CEILING, CK_BACKOFF_CEILING);
+
+ for (;;) {
+ ck_backoff_t previous = backoff;
+ ck_backoff_eb(&backoff);
+
+ printf("EB %u\n", backoff);
+ if (previous == ceiling) {
+ if (backoff != ceiling)
+ ck_error("[C] GB: expected %u, got %u\n", ceiling, backoff);
+
+ if (i++ >= 1)
+ break;
+ } else if (previous != backoff >> 1) {
+ ck_error("[N] GB: expected %u (%u), got %u\n", previous << 1, previous, backoff);
+ }
+ }
+
+ return 0;
+}
+
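The test above only checks that ck_backoff_eb roughly doubles the stored delay on each call and saturates at CK_BACKOFF_CEILING. In practice the primitive sits inside a contended retry loop; a minimal sketch under that assumption (the owner word and function names are illustrative, not part of the library):

#include <ck_backoff.h>
#include <ck_pr.h>

static unsigned int owner;

static void
owner_acquire(unsigned int self)
{
	ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;

	/*
	 * self must be non-zero. Every failed attempt spins for the current
	 * delay and then doubles it, up to CK_BACKOFF_CEILING.
	 */
	while (ck_pr_cas_uint(&owner, 0, self) == false)
		ck_backoff_eb(&backoff);

	ck_pr_fence_lock();
	return;
}

static void
owner_release(void)
{

	ck_pr_fence_unlock();
	ck_pr_store_uint(&owner, 0);
	return;
}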
diff --git a/regressions/ck_barrier/benchmark/Makefile b/regressions/ck_barrier/benchmark/Makefile
new file mode 100644
index 0000000..ea973d2
--- /dev/null
+++ b/regressions/ck_barrier/benchmark/Makefile
@@ -0,0 +1,14 @@
+.PHONY: clean distribution
+
+OBJECTS=throughput
+
+all: $(OBJECTS)
+
+throughput: throughput.c ../../../include/ck_barrier.h ../../../src/ck_barrier_centralized.c
+ $(CC) $(CFLAGS) -o throughput throughput.c ../../../src/ck_barrier_centralized.c
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_barrier/benchmark/throughput.c b/regressions/ck_barrier/benchmark/throughput.c
new file mode 100644
index 0000000..1a1c013
--- /dev/null
+++ b/regressions/ck_barrier/benchmark/throughput.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <pthread.h>
+#include <unistd.h>
+#include <ck_stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <ck_pr.h>
+#include <ck_barrier.h>
+
+#include "../../common.h"
+
+#if defined(CK_F_PR_INC_64) && defined(CK_F_PR_LOAD_64)
+static int done = 0;
+static struct affinity a;
+static int nthr;
+static int tid;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+struct counter {
+ uint64_t value;
+} CK_CC_CACHELINE;
+struct counter *counters;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ ck_barrier_centralized_state_t state = CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+ int id;
+
+ id = ck_pr_faa_int(&tid, 1);
+ aff_iterate(&a);
+
+ while (ck_pr_load_int(&done) == 0) {
+ ck_barrier_centralized(&barrier, &state, nthr);
+ ck_pr_inc_64(&counters[id].value);
+ ck_barrier_centralized(&barrier, &state, nthr);
+ ck_pr_inc_64(&counters[id].value);
+ ck_barrier_centralized(&barrier, &state, nthr);
+ ck_pr_inc_64(&counters[id].value);
+ ck_barrier_centralized(&barrier, &state, nthr);
+ ck_pr_inc_64(&counters[id].value);
+ ck_barrier_centralized(&barrier, &state, nthr);
+ ck_pr_inc_64(&counters[id].value);
+ ck_barrier_centralized(&barrier, &state, nthr);
+ ck_pr_inc_64(&counters[id].value);
+ ck_barrier_centralized(&barrier, &state, nthr);
+ ck_pr_inc_64(&counters[id].value);
+ ck_barrier_centralized(&barrier, &state, nthr);
+ ck_pr_inc_64(&counters[id].value);
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ uint64_t count;
+ int i;
+
+ if (argc != 3) {
+ ck_error("Correct usage: <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ counters = calloc(sizeof(struct counter), nthr);
+ if (counters == NULL) {
+ ck_error("ERROR: Could not allocate counters\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ fprintf(stderr, "Creating threads (barrier)...");
+ for (i = 0; i < nthr; ++i) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ common_sleep(10);
+
+ count = 0;
+ ck_pr_store_int(&done, 1);
+ for (i = 0; i < nthr; ++i)
+ count += ck_pr_load_64(&counters[i].value);
+ printf("%d %16" PRIu64 "\n", nthr, count);
+
+ return (0);
+}
+#else
+int
+main(void)
+{
+
+ fputs("Unsupported.", stderr);
+ return 0;
+}
+#endif
+
diff --git a/regressions/ck_barrier/validate/Makefile b/regressions/ck_barrier/validate/Makefile
new file mode 100644
index 0000000..f31a1a6
--- /dev/null
+++ b/regressions/ck_barrier/validate/Makefile
@@ -0,0 +1,34 @@
+.PHONY: check clean distribution
+
+OBJECTS=barrier_centralized barrier_combining barrier_dissemination barrier_tournament barrier_mcs
+
+all: $(OBJECTS)
+
+barrier_centralized: barrier_centralized.c ../../../include/ck_barrier.h ../../../src/ck_barrier_centralized.c
+ $(CC) $(CFLAGS) -o barrier_centralized barrier_centralized.c ../../../src/ck_barrier_centralized.c
+
+barrier_combining: barrier_combining.c ../../../include/ck_barrier.h ../../../src/ck_barrier_combining.c
+ $(CC) $(CFLAGS) -o barrier_combining barrier_combining.c ../../../src/ck_barrier_combining.c
+
+barrier_dissemination: barrier_dissemination.c ../../../include/ck_barrier.h ../../../src/ck_barrier_dissemination.c
+ $(CC) $(CFLAGS) -o barrier_dissemination barrier_dissemination.c ../../../src/ck_barrier_dissemination.c
+
+barrier_tournament: barrier_tournament.c ../../../include/ck_barrier.h ../../../src/ck_barrier_tournament.c
+ $(CC) $(CFLAGS) -o barrier_tournament barrier_tournament.c ../../../src/ck_barrier_tournament.c
+
+barrier_mcs: barrier_mcs.c ../../../include/ck_barrier.h ../../../src/ck_barrier_mcs.c
+ $(CC) $(CFLAGS) -o barrier_mcs barrier_mcs.c ../../../src/ck_barrier_mcs.c
+
+check: all
+ rc=0; \
+ for d in $(OBJECTS) ; do \
+ echo $$d; \
+ ./$$d $(CORES) 1 1 || rc=1; \
+ done; \
+ exit $$rc
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_barrier/validate/barrier_centralized.c b/regressions/ck_barrier/validate/barrier_centralized.c
new file mode 100644
index 0000000..551913a
--- /dev/null
+++ b/regressions/ck_barrier/validate/barrier_centralized.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_barrier.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 5000000
+#endif
+
+#ifndef ENTRIES
+#define ENTRIES 512
+#endif
+
+static struct affinity a;
+static int nthr;
+static int counters[ENTRIES];
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static int barrier_wait;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ ck_barrier_centralized_state_t state = CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+ int j, counter;
+ int i = 0;
+
+ aff_iterate(&a);
+
+ ck_pr_inc_int(&barrier_wait);
+ while (ck_pr_load_int(&barrier_wait) != nthr)
+ ck_pr_stall();
+
+ for (j = 0; j < ITERATE; j++) {
+ i = j++ & (ENTRIES - 1);
+ ck_pr_inc_int(&counters[i]);
+ ck_barrier_centralized(&barrier, &state, nthr);
+ counter = ck_pr_load_int(&counters[i]);
+ if (counter != nthr * (j / ENTRIES + 1)) {
+ ck_error("FAILED [%d:%d]: %d != %d\n", i, j - 1, counter, nthr);
+ }
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ int i;
+
+ if (argc < 3) {
+ ck_error("Usage: correct <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ fprintf(stderr, "Creating threads (barrier)...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+
+ return (0);
+}
+
diff --git a/regressions/ck_barrier/validate/barrier_combining.c b/regressions/ck_barrier/validate/barrier_combining.c
new file mode 100644
index 0000000..98fa0cf
--- /dev/null
+++ b/regressions/ck_barrier/validate/barrier_combining.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_barrier.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 5000000
+#endif
+
+#ifndef ENTRIES
+#define ENTRIES 512
+#endif
+
+static struct affinity a;
+static int nthr;
+static int ngroups;
+static int counters[ENTRIES];
+static ck_barrier_combining_t barrier;
+static int barrier_wait;
+
+static void *
+thread(void *group)
+{
+ ck_barrier_combining_state_t state = CK_BARRIER_COMBINING_STATE_INITIALIZER;
+ int j, counter;
+ int i = 0;
+
+ aff_iterate(&a);
+
+ ck_pr_inc_int(&barrier_wait);
+ while (ck_pr_load_int(&barrier_wait) != (nthr * ngroups))
+ ck_pr_stall();
+
+ for (j = 0; j < ITERATE; j++) {
+ i = j++ & (ENTRIES - 1);
+ ck_pr_inc_int(&counters[i]);
+ ck_barrier_combining(&barrier, group, &state);
+ counter = ck_pr_load_int(&counters[i]);
+ if (counter != nthr * ngroups * (j / ENTRIES + 1)) {
+ ck_error("FAILED [%d:%d]: %d != %d\n", i, j - 1, counter, nthr * ngroups);
+ }
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ ck_barrier_combining_group_t *groupings;
+ ck_barrier_combining_group_t *init_root;
+ int i;
+
+ init_root = malloc(sizeof(ck_barrier_combining_group_t));
+ if (init_root == NULL) {
+ ck_error("ERROR: Could not allocate initial barrier structure\n");
+ }
+ ck_barrier_combining_init(&barrier, init_root);
+
+ if (argc < 4) {
+ ck_error("Usage: correct <total groups> <threads per group> <affinity delta>\n");
+ }
+
+ ngroups = atoi(argv[1]);
+ if (ngroups <= 0) {
+ ck_error("ERROR: Number of groups must be greater than 0\n");
+ }
+
+ nthr = atoi(argv[2]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ groupings = malloc(sizeof(ck_barrier_combining_group_t) * ngroups);
+ if (groupings == NULL) {
+ ck_error("Could not allocate thread barrier grouping structures\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr * ngroups);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[3]);
+
+ for (i = 0; i < ngroups; i++)
+ ck_barrier_combining_group_init(&barrier, groupings + i, nthr);
+
+ fprintf(stderr, "Creating threads (barrier)...");
+ for (i = 0; i < (nthr * ngroups); i++) {
+ if (pthread_create(&threads[i], NULL, thread, groupings + (i % ngroups))) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < (nthr * ngroups); i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return (0);
+}
+
diff --git a/regressions/ck_barrier/validate/barrier_dissemination.c b/regressions/ck_barrier/validate/barrier_dissemination.c
new file mode 100644
index 0000000..e8acc10
--- /dev/null
+++ b/regressions/ck_barrier/validate/barrier_dissemination.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_barrier.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 5000000
+#endif
+
+#ifndef ENTRIES
+#define ENTRIES 512
+#endif
+
+static struct affinity a;
+static int nthr;
+static int counters[ENTRIES];
+static int barrier_wait;
+
+static void *
+thread(void *b)
+{
+ ck_barrier_dissemination_t *barrier = b;
+ ck_barrier_dissemination_state_t state;
+ int j, k, counter;
+ int i = 0;
+
+ aff_iterate(&a);
+ ck_barrier_dissemination_subscribe(barrier, &state);
+
+ ck_pr_inc_int(&barrier_wait);
+ while (ck_pr_load_int(&barrier_wait) != nthr)
+ ck_pr_stall();
+
+ for (j = 0, k = 0; j < ITERATE; j++, k++) {
+ i = j++ & (ENTRIES - 1);
+ ck_pr_inc_int(&counters[i]);
+ ck_barrier_dissemination(barrier, &state);
+ counter = ck_pr_load_int(&counters[i]);
+ if (counter != nthr * (j / ENTRIES + 1)) {
+ ck_error("FAILED [%d:%d]: %d != %d\n", i, j - 1, counter, nthr);
+ }
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ ck_barrier_dissemination_t *barrier;
+ ck_barrier_dissemination_flag_t **barrier_internal;
+ pthread_t *threads;
+ int i, size;
+
+ if (argc < 3) {
+ ck_error("Usage: correct <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ barrier = malloc(sizeof(ck_barrier_dissemination_t) * nthr);
+ if (barrier == NULL) {
+ ck_error("ERROR: Could not allocate barrier structures\n");
+ }
+
+ barrier_internal = malloc(sizeof(ck_barrier_dissemination_flag_t *) * nthr);
+ if (barrier_internal == NULL) {
+ ck_error("ERROR: Could not allocate barrier structures\n");
+ }
+
+ size = ck_barrier_dissemination_size(nthr);
+ for (i = 0; i < nthr; ++i) {
+ barrier_internal[i] = malloc(sizeof(ck_barrier_dissemination_flag_t) * size);
+ if (barrier_internal[i] == NULL) {
+ ck_error("ERROR: Could not allocate barrier structures\n");
+ }
+ }
+ ck_barrier_dissemination_init(barrier, barrier_internal, nthr);
+
+ fprintf(stderr, "Creating threads (barrier)...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, barrier)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+
+ return (0);
+}
+
diff --git a/regressions/ck_barrier/validate/barrier_mcs.c b/regressions/ck_barrier/validate/barrier_mcs.c
new file mode 100644
index 0000000..c2e3f2b
--- /dev/null
+++ b/regressions/ck_barrier/validate/barrier_mcs.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_barrier.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 5000000
+#endif
+
+#ifndef ENTRIES
+#define ENTRIES 512
+#endif
+
+static struct affinity a;
+static int nthr;
+static int counters[ENTRIES];
+static int barrier_wait;
+
+static void *
+thread(void *b)
+{
+ ck_barrier_mcs_t *barrier = b;
+ ck_barrier_mcs_state_t state;
+ int j, counter;
+ int i = 0;
+
+ aff_iterate(&a);
+
+ ck_barrier_mcs_subscribe(barrier, &state);
+
+ ck_pr_inc_int(&barrier_wait);
+ while (ck_pr_load_int(&barrier_wait) != nthr)
+ ck_pr_stall();
+
+ for (j = 0; j < ITERATE; j++) {
+ i = j++ & (ENTRIES - 1);
+ ck_pr_inc_int(&counters[i]);
+ ck_barrier_mcs(barrier, &state);
+ counter = ck_pr_load_int(&counters[i]);
+ if (counter != nthr * (j / ENTRIES + 1)) {
+ ck_error("FAILED [%d:%d]: %d != %d\n", i, j - 1, counter, nthr);
+ }
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ ck_barrier_mcs_t *barrier;
+ int i;
+
+ if (argc < 3) {
+ ck_error("Usage: correct <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ barrier = malloc(sizeof(ck_barrier_mcs_t) * nthr);
+ if (barrier == NULL) {
+ ck_error("ERROR: Could not allocate barrier structures\n");
+ }
+ ck_barrier_mcs_init(barrier, nthr);
+
+ a.delta = atoi(argv[2]);
+
+ fprintf(stderr, "Creating threads (barrier)...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, barrier)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+
+ return (0);
+}
+
diff --git a/regressions/ck_barrier/validate/barrier_tournament.c b/regressions/ck_barrier/validate/barrier_tournament.c
new file mode 100644
index 0000000..f51dab8
--- /dev/null
+++ b/regressions/ck_barrier/validate/barrier_tournament.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_barrier.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 5000000
+#endif
+
+#ifndef ENTRIES
+#define ENTRIES 512
+#endif
+
+static struct affinity a;
+static int nthr;
+static int counters[ENTRIES];
+static int barrier_wait;
+static ck_barrier_tournament_t barrier;
+
+static void *
+thread(CK_CC_UNUSED void *unused)
+{
+ ck_barrier_tournament_state_t state;
+ int j, counter;
+ int i = 0;
+
+ aff_iterate(&a);
+ ck_barrier_tournament_subscribe(&barrier, &state);
+
+ ck_pr_inc_int(&barrier_wait);
+ while (ck_pr_load_int(&barrier_wait) != nthr)
+ ck_pr_stall();
+
+ for (j = 0; j < ITERATE; j++) {
+ i = j++ & (ENTRIES - 1);
+ ck_pr_inc_int(&counters[i]);
+ ck_barrier_tournament(&barrier, &state);
+ counter = ck_pr_load_int(&counters[i]);
+ if (counter != nthr * (j / ENTRIES + 1)) {
+ ck_error("FAILED [%d:%d]: %d != %d\n", i, j - 1, counter, nthr);
+ }
+ }
+
+ ck_pr_inc_int(&barrier_wait);
+ while (ck_pr_load_int(&barrier_wait) != nthr * 2)
+ ck_pr_stall();
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ ck_barrier_tournament_round_t **rounds;
+ int i;
+ unsigned int size;
+
+ if (argc < 3) {
+ ck_error("Usage: correct <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+ a.delta = atoi(argv[2]);
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ rounds = malloc(sizeof(ck_barrier_tournament_round_t *) * nthr);
+ if (rounds == NULL) {
+ ck_error("ERROR: Could not allocate barrier structures\n");
+ }
+
+ size = ck_barrier_tournament_size(nthr);
+ for (i = 0; i < nthr; ++i) {
+ rounds[i] = malloc(sizeof(ck_barrier_tournament_round_t) * size);
+ if (rounds[i] == NULL) {
+ ck_error("ERROR: Could not allocate barrier structures\n");
+ }
+ }
+
+ ck_barrier_tournament_init(&barrier, rounds, nthr);
+
+ fprintf(stderr, "Creating threads (barrier)...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return (0);
+}
+
diff --git a/regressions/ck_bitmap/validate/Makefile b/regressions/ck_bitmap/validate/Makefile
new file mode 100644
index 0000000..85e13c8
--- /dev/null
+++ b/regressions/ck_bitmap/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean
+
+OBJECTS=serial
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_bitmap.h
+ $(CC) $(CFLAGS) -o serial serial.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+check: all
+ ./serial
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_bitmap/validate/serial.c b/regressions/ck_bitmap/validate/serial.c
new file mode 100644
index 0000000..ba52588
--- /dev/null
+++ b/regressions/ck_bitmap/validate/serial.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * Copyright 2012-2014 AppNexus, Inc.
+ * Copyright 2012 Shreyas Prasad.
+ * Copyright 2014 Paul Khuong.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_bitmap.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "../../common.h"
+
+#ifndef STATIC_LENGTH
+#define STATIC_LENGTH 256
+#endif
+
+static unsigned int length = 256;
+static ck_bitmap_t *g_bits;
+
+static void
+check_iteration(ck_bitmap_t *bits, unsigned int len, bool initial)
+{
+ ck_bitmap_iterator_t iter;
+ unsigned int i = 0, j;
+
+ len += 1;
+ if (initial == true) {
+ if (bits == g_bits)
+ len = length;
+ else
+ len = STATIC_LENGTH;
+ }
+
+ ck_bitmap_iterator_init(&iter, bits);
+ for (j = 0; ck_bitmap_next(bits, &iter, &i) == true; j++) {
+ if (i == j)
+ continue;
+
+ ck_error("[4] ERROR: Expected bit %u, got bit %u\n", j, i);
+ }
+
+ if (j != len) {
+ ck_error("[5] ERROR: Expected length %u, got length %u\n", len, j);
+ }
+
+ return;
+}
+
+static void
+test(ck_bitmap_t *bits, unsigned int n_length, bool initial)
+{
+ bool r;
+ unsigned int i;
+ CK_BITMAP_INSTANCE(8) u;
+
+ CK_BITMAP_INIT(&u, 8, false);
+ CK_BITMAP_SET(&u, 1);
+ CK_BITMAP_SET(&u, 4);
+
+ for (i = 0; i < n_length; i++) {
+ if (ck_bitmap_test(bits, i) == !initial) {
+ ck_error("[0] ERROR [%u]: Expected %u got %u\n", i,
+ initial, !initial);
+ }
+ }
+
+ for (i = 0; i < n_length; i++) {
+ ck_bitmap_set(bits, i);
+ if (ck_bitmap_test(bits, i) == false) {
+ ck_error("[1] ERROR: Expected bit to be set: %u\n", i);
+ }
+
+ ck_bitmap_reset(bits, i);
+ if (ck_bitmap_test(bits, i) == true) {
+ ck_error("[2] ERROR: Expected bit to be cleared: %u\n", i);
+ }
+
+ r = ck_bitmap_bts(bits, i);
+ if (r == true) {
+ ck_error("[3] ERROR: Expected bit to be cleared before 1st bts: %u\n", i);
+ }
+ if (ck_bitmap_test(bits, i) == false) {
+ ck_error("[4] ERROR: Expected bit to be set: %u\n", i);
+ }
+ r = ck_bitmap_bts(bits, i);
+ if (r == false) {
+ ck_error("[5] ERROR: Expected bit to be set before 2nd bts: %u\n", i);
+ }
+ if (ck_bitmap_test(bits, i) == false) {
+ ck_error("[6] ERROR: Expected bit to be set: %u\n", i);
+ }
+
+ ck_bitmap_reset(bits, i);
+ if (ck_bitmap_test(bits, i) == true) {
+ ck_error("[7] ERROR: Expected bit to be cleared: %u\n", i);
+ }
+
+ ck_bitmap_set(bits, i);
+ if (ck_bitmap_test(bits, i) == false) {
+ ck_error("[8] ERROR: Expected bit to be set: %u\n", i);
+ }
+
+ check_iteration(bits, i, initial);
+ }
+
+ for (i = 0; i < n_length; i++) {
+ if (ck_bitmap_test(bits, i) == false) {
+ ck_error("[9] ERROR: Expected bit to be set: %u\n", i);
+ }
+ }
+
+ ck_bitmap_clear(bits);
+
+ for (i = 0; i < n_length; i++) {
+ if (ck_bitmap_test(bits, i) == true) {
+ ck_error("[10] ERROR: Expected bit to be reset: %u\n", i);
+ }
+ }
+
+ ck_bitmap_union(bits, CK_BITMAP(&u));
+ if (ck_bitmap_test(bits, 1) == false ||
+ ck_bitmap_test(bits, 4) == false) {
+ ck_error("ERROR: Expected union semantics.\n");
+ }
+
+ return;
+}
+
+static void
+test_init(bool init)
+{
+ ck_bitmap_t *bitmap;
+ size_t bytes;
+ unsigned int i;
+
+ bytes = ck_bitmap_size(length);
+ bitmap = malloc(bytes);
+ memset(bitmap, random(), bytes);
+
+ ck_bitmap_init(bitmap, length, init);
+
+ if (ck_bitmap_bits(bitmap) != length) {
+ ck_error("ERROR: Expected length %u got %u\n",
+ length, ck_bitmap_bits(bitmap));
+ }
+
+ for (i = 0; i < length; i++) {
+ if (ck_bitmap_test(bitmap, i) != init) {
+ ck_error("ERROR: Expected bit %i at index %u, got %i\n",
+ (int)init, i, (int)(!init));
+ }
+ }
+
+ free(bitmap);
+}
+
+static ck_bitmap_t *
+random_init(void)
+{
+ ck_bitmap_t *bitmap;
+ unsigned int i;
+
+ bitmap = malloc(ck_bitmap_size(length));
+ ck_bitmap_init(bitmap, length, false);
+
+ for (i = 0; i < length; i++) {
+ if (random() & 1) {
+ ck_bitmap_set(bitmap, i);
+ }
+ }
+
+ return bitmap;
+}
+
+static ck_bitmap_t *
+copy(const ck_bitmap_t *src)
+{
+ ck_bitmap_t *bitmap;
+ size_t bytes = ck_bitmap_size(ck_bitmap_bits(src));
+
+ bitmap = malloc(bytes);
+ memcpy(bitmap, src, bytes);
+ return bitmap;
+}
+
+static void
+test_counts(const ck_bitmap_t *x, const ck_bitmap_t *y)
+{
+ unsigned int count = 0;
+ unsigned int count_intersect = 0;
+ unsigned int i;
+
+ for (i = 0; i <= length * 2; i++) {
+ unsigned actual_limit = i;
+ unsigned int r;
+ bool check;
+
+ if (actual_limit > ck_bitmap_bits(x))
+ actual_limit = ck_bitmap_bits(x);
+
+ check = ck_bitmap_empty(x, i);
+ if (check != (count == 0)) {
+ ck_error("ck_bitmap_empty(%u): got %i expected %i\n",
+ i, (int)check, (int)(count == 0));
+ }
+
+ check = ck_bitmap_full(x, i);
+ if (check != (count == actual_limit)) {
+ ck_error("ck_bitmap_full(%u): got %i expected %i\n",
+ i, (int)check, (int)(count == i));
+ }
+
+ r = ck_bitmap_count(x, i);
+ if (r != count) {
+ ck_error("ck_bitmap_count(%u): got %u expected %u\n",
+ i, r, count);
+ }
+
+ r = ck_bitmap_count_intersect(x, y, i);
+ if (r != count_intersect) {
+ ck_error("ck_bitmap_count_intersect(%u): got %u expected %u\n",
+ i, r, count_intersect);
+ }
+
+ if (i < length) {
+ count += ck_bitmap_test(x, i);
+ count_intersect += ck_bitmap_test(x, i) & ck_bitmap_test(y, i);
+ }
+ }
+}
+
+static void
+random_test(unsigned int seed)
+{
+ ck_bitmap_t *x, *x_copy, *y;
+ unsigned int i;
+
+ srandom(seed);
+
+ test_init(false);
+ test_init(true);
+
+ x = random_init();
+ y = random_init();
+
+#define TEST(routine, expected) do { \
+ x_copy = copy(x); \
+ routine(x_copy, y); \
+ for (i = 0; i < length; i++) { \
+ bool xi = ck_bitmap_test(x, i); \
+ bool yi = ck_bitmap_test(y, i); \
+ bool ri = ck_bitmap_test(x_copy, i); \
+ bool wanted = expected(xi, yi); \
+ \
+ if (ri != wanted) { \
+ ck_error("In " #routine " at %u: " \
+ "got %i expected %i\n", \
+ i, ri, wanted); \
+ } \
+ } \
+ free(x_copy); \
+ } while (0)
+
+#define OR(x, y) (x | y)
+#define AND(x, y) (x & y)
+#define ANDC2(x, y) (x & (~y))
+
+ TEST(ck_bitmap_union, OR);
+ TEST(ck_bitmap_intersection, AND);
+ TEST(ck_bitmap_intersection_negate, ANDC2);
+
+#undef ANDC2
+#undef AND
+#undef OR
+#undef TEST
+
+ test_counts(x, y);
+
+ for (i = 0; i < 4; i++) {
+ ck_bitmap_init(x, length, i & 1);
+ ck_bitmap_init(y, length, i >> 1);
+ test_counts(x, y);
+ }
+
+ free(y);
+ free(x);
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int bytes, base;
+ size_t i, j;
+
+ if (argc >= 2) {
+ length = atoi(argv[1]);
+ }
+
+ base = ck_bitmap_base(length);
+ bytes = ck_bitmap_size(length);
+ fprintf(stderr, "Configuration: %u bytes\n",
+ bytes);
+
+ g_bits = malloc(bytes);
+ memset(g_bits->map, 0xFF, base);
+ ck_bitmap_init(g_bits, length, false);
+ test(g_bits, length, false);
+
+ memset(g_bits->map, 0x00, base);
+ ck_bitmap_init(g_bits, length, true);
+ test(g_bits, length, true);
+
+ ck_bitmap_test(g_bits, length - 1);
+
+ CK_BITMAP_INSTANCE(STATIC_LENGTH) sb;
+ fprintf(stderr, "Static configuration: %zu bytes\n",
+ sizeof(sb));
+ memset(CK_BITMAP_BUFFER(&sb), 0xFF, ck_bitmap_base(STATIC_LENGTH));
+ CK_BITMAP_INIT(&sb, STATIC_LENGTH, false);
+ test(CK_BITMAP(&sb), STATIC_LENGTH, false);
+ memset(CK_BITMAP_BUFFER(&sb), 0x00, ck_bitmap_base(STATIC_LENGTH));
+ CK_BITMAP_INIT(&sb, STATIC_LENGTH, true);
+ test(CK_BITMAP(&sb), STATIC_LENGTH, true);
+
+ CK_BITMAP_CLEAR(&sb);
+ if (CK_BITMAP_TEST(&sb, 1) == true) {
+ ck_error("ERROR: Expected bit to be reset.\n");
+ }
+
+ CK_BITMAP_SET(&sb, 1);
+ if (CK_BITMAP_TEST(&sb, 1) == false) {
+ ck_error("ERROR: Expected bit to be set.\n");
+ }
+
+ CK_BITMAP_RESET(&sb, 1);
+ if (CK_BITMAP_TEST(&sb, 1) == true) {
+ ck_error("ERROR: Expected bit to be reset.\n");
+ }
+
+ for (i = 0; i < 4 * sizeof(unsigned int) * CHAR_BIT; i++) {
+ length = i;
+ for (j = 0; j < 10; j++) {
+ random_test(i * 10 + j);
+ }
+ }
+
+ return 0;
+}
diff --git a/regressions/ck_brlock/benchmark/Makefile b/regressions/ck_brlock/benchmark/Makefile
new file mode 100644
index 0000000..cd12e7c
--- /dev/null
+++ b/regressions/ck_brlock/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: clean distribution
+
+OBJECTS=latency throughput
+
+all: $(OBJECTS)
+
+latency: latency.c ../../../include/ck_brlock.h
+ $(CC) $(CFLAGS) -o latency latency.c
+
+throughput: throughput.c ../../../include/ck_brlock.h
+ $(CC) $(CFLAGS) -o throughput throughput.c
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_brlock/benchmark/latency.c b/regressions/ck_brlock/benchmark/latency.c
new file mode 100644
index 0000000..4db8e26
--- /dev/null
+++ b/regressions/ck_brlock/benchmark/latency.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_brlock.h>
+#include <ck_rwlock.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+int
+main(void)
+{
+ uint64_t s_b, e_b, i;
+ ck_brlock_t brlock = CK_BRLOCK_INITIALIZER;
+ ck_brlock_reader_t r[8];
+ ck_rwlock_t naive;
+
+ for (i = 0; i < sizeof(r) / sizeof(*r); i++)
+ ck_brlock_read_register(&brlock, &r[i]);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_brlock_write_lock(&brlock);
+ ck_brlock_write_unlock(&brlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_brlock_write_lock(&brlock);
+ ck_brlock_write_unlock(&brlock);
+ }
+ e_b = rdtsc();
+ printf("WRITE: brlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ ck_rwlock_init(&naive);
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_write_lock(&naive);
+ ck_rwlock_write_unlock(&naive);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_write_lock(&naive);
+ ck_rwlock_write_unlock(&naive);
+ }
+ e_b = rdtsc();
+ printf("WRITE: naive %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_brlock_read_lock(&brlock, &r[0]);
+ ck_brlock_read_unlock(&r[0]);
+ }
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_brlock_read_lock(&brlock, &r[0]);
+ ck_brlock_read_unlock(&r[0]);
+ }
+ e_b = rdtsc();
+ printf("READ: brlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_read_lock(&naive);
+ ck_rwlock_read_unlock(&naive);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_read_lock(&naive);
+ ck_rwlock_read_unlock(&naive);
+ }
+ e_b = rdtsc();
+ printf("READ: naive %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ return (0);
+}
+
diff --git a/regressions/ck_brlock/benchmark/throughput.c b/regressions/ck_brlock/benchmark/throughput.c
new file mode 100644
index 0000000..27ed803
--- /dev/null
+++ b/regressions/ck_brlock/benchmark/throughput.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_brlock.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+static int barrier;
+static int threads;
+static unsigned int flag CK_CC_CACHELINE;
+static ck_brlock_t brlock = CK_BRLOCK_INITIALIZER;
+static struct affinity affinity;
+
+static void *
+thread_brlock(void *pun)
+{
+ uint64_t s_b, e_b, a, i;
+ ck_brlock_reader_t r;
+ uint64_t *value = pun;
+
+ if (aff_iterate(&affinity) != 0) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_brlock_read_register(&brlock, &r);
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads)
+ ck_pr_stall();
+
+ for (i = 1, a = 0;; i++) {
+ s_b = rdtsc();
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ e_b = rdtsc();
+
+ a += (e_b - s_b) >> 4;
+
+ if (ck_pr_load_uint(&flag) == 1)
+ break;
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads * 2)
+ ck_pr_stall();
+
+ *value = (a / i);
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int t;
+ pthread_t *p;
+ uint64_t *latency;
+
+ if (argc != 3) {
+ ck_error("Usage: throughput <delta> <threads>\n");
+ }
+
+ threads = atoi(argv[2]);
+ if (threads <= 0) {
+ ck_error("ERROR: Threads must be a value > 0.\n");
+ }
+
+ p = malloc(sizeof(pthread_t) * threads);
+ if (p == NULL) {
+ ck_error("ERROR: Failed to initialize thread.\n");
+ }
+
+ latency = malloc(sizeof(uint64_t) * threads);
+ if (latency == NULL) {
+ ck_error("ERROR: Failed to create latency buffer.\n");
+ }
+
+ affinity.delta = atoi(argv[1]);
+ affinity.request = 0;
+
+ fprintf(stderr, "Creating threads (brlock)...");
+ for (t = 0; t < threads; t++) {
+ if (pthread_create(&p[t], NULL, thread_brlock, latency + t) != 0) {
+ ck_error("ERROR: Could not create thread %d\n", t);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ common_sleep(10);
+ ck_pr_store_uint(&flag, 1);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (t = 0; t < threads; t++)
+ pthread_join(p[t], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (t = 1; t <= threads; t++)
+ printf("%10u %20" PRIu64 "\n", t, latency[t - 1]);
+
+ return (0);
+}
+
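For readers unfamiliar with the API, here is a minimal single-threaded sketch of the ck_brlock calls the two benchmarks above exercise: per-thread reader registration, read-side acquisition, and write-side acquisition. It is illustrative only and not part of the patch.

/*
 * Illustrative ck_brlock usage sketch.  Each reader thread registers its own
 * ck_brlock_reader_t once and reuses it for every read-side acquisition;
 * writers need no per-thread state.
 */
#include <ck_brlock.h>

static ck_brlock_t lock = CK_BRLOCK_INITIALIZER;

int
main(void)
{
	ck_brlock_reader_t me;

	/* One-time, per-thread reader registration. */
	ck_brlock_read_register(&lock, &me);

	/* Read-side critical section. */
	ck_brlock_read_lock(&lock, &me);
	/* ... read shared state ... */
	ck_brlock_read_unlock(&me);

	/* Write-side critical section. */
	ck_brlock_write_lock(&lock);
	/* ... mutate shared state ... */
	ck_brlock_write_unlock(&lock);

	ck_brlock_read_unregister(&lock, &me);
	return 0;
}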
diff --git a/regressions/ck_brlock/validate/Makefile b/regressions/ck_brlock/validate/Makefile
new file mode 100644
index 0000000..3a49c43
--- /dev/null
+++ b/regressions/ck_brlock/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=validate
+
+all: $(OBJECTS)
+
+validate: validate.c ../../../include/ck_brlock.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate $(CORES) 1
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_brlock/validate/validate.c b/regressions/ck_brlock/validate/validate.c
new file mode 100644
index 0000000..20f285a
--- /dev/null
+++ b/regressions/ck_brlock/validate/validate.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_brlock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+static struct affinity a;
+static unsigned int locked = 0;
+static int nthr;
+static ck_brlock_t lock = CK_BRLOCK_INITIALIZER;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ ck_brlock_reader_t r;
+ int i = ITERATE;
+ unsigned int l;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_brlock_read_register(&lock, &r);
+
+ while (i--) {
+ ck_brlock_write_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+ ck_error("ERROR [WR:%d]: %u != 2\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_brlock_write_unlock(&lock);
+
+ ck_brlock_read_lock(&lock, &r);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_brlock_read_unlock(&r);
+ }
+
+ ck_brlock_read_unregister(&lock, &r);
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ int i;
+
+ if (argc != 3) {
+ ck_error("Usage: validate <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ fprintf(stderr, "Creating threads (mutual exclusion)...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return (0);
+}
+
diff --git a/regressions/ck_bytelock/benchmark/Makefile b/regressions/ck_bytelock/benchmark/Makefile
new file mode 100644
index 0000000..c819099
--- /dev/null
+++ b/regressions/ck_bytelock/benchmark/Makefile
@@ -0,0 +1,14 @@
+.PHONY: clean distribution
+
+OBJECTS=latency
+
+all: $(OBJECTS)
+
+latency: latency.c
+ $(CC) $(CFLAGS) -o latency latency.c
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_bytelock/benchmark/latency.c b/regressions/ck_bytelock/benchmark/latency.c
new file mode 100644
index 0000000..be30165
--- /dev/null
+++ b/regressions/ck_bytelock/benchmark/latency.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_bytelock.h>
+#include <ck_rwlock.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+int
+main(void)
+{
+ uint64_t s_b, e_b, i;
+ ck_bytelock_t bytelock = CK_BYTELOCK_INITIALIZER;
+ ck_rwlock_t naive;
+
+ for (i = 0; i < STEPS; i++) {
+ ck_bytelock_write_lock(&bytelock, 1);
+ ck_bytelock_write_unlock(&bytelock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_bytelock_write_lock(&bytelock, 1);
+ ck_bytelock_write_unlock(&bytelock);
+ }
+ e_b = rdtsc();
+ printf("WRITE: bytelock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ ck_rwlock_init(&naive);
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_write_lock(&naive);
+ ck_rwlock_write_unlock(&naive);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_write_lock(&naive);
+ ck_rwlock_write_unlock(&naive);
+ }
+ e_b = rdtsc();
+ printf("WRITE: naive %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_bytelock_read_lock(&bytelock, 1);
+ ck_bytelock_read_unlock(&bytelock, 1);
+ }
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_bytelock_read_lock(&bytelock, 1);
+ ck_bytelock_read_unlock(&bytelock, 1);
+ }
+ e_b = rdtsc();
+ printf("READ: bytelock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_read_lock(&naive);
+ ck_rwlock_read_unlock(&naive);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_read_lock(&naive);
+ ck_rwlock_read_unlock(&naive);
+ }
+ e_b = rdtsc();
+ printf("READ: naive %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ return (0);
+}
+
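A corresponding sketch for ck_bytelock, whose readers are identified by a non-zero slot index passed to the lock operations rather than by a registered reader object, as the benchmark above and the validate test below both show. Illustrative only, not part of the patch.

/*
 * Illustrative ck_bytelock usage sketch.  The slot index identifies the
 * calling thread; slots beyond the inline reader array fall back to a slower
 * path, which the validate test exercises deliberately.
 */
#include <ck_bytelock.h>

static ck_bytelock_t lock = CK_BYTELOCK_INITIALIZER;

int
main(void)
{
	const unsigned int slot = 1;	/* Per-thread, non-zero slot index. */

	ck_bytelock_read_lock(&lock, slot);
	/* ... read shared state ... */
	ck_bytelock_read_unlock(&lock, slot);

	ck_bytelock_write_lock(&lock, slot);
	/* ... mutate shared state ... */
	ck_bytelock_write_unlock(&lock);

	return 0;
}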
diff --git a/regressions/ck_bytelock/validate/Makefile b/regressions/ck_bytelock/validate/Makefile
new file mode 100644
index 0000000..2a890e0
--- /dev/null
+++ b/regressions/ck_bytelock/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=validate
+
+all: $(OBJECTS)
+
+validate: validate.c ../../../include/ck_bytelock.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate $(CORES) 1
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_bytelock/validate/validate.c b/regressions/ck_bytelock/validate/validate.c
new file mode 100644
index 0000000..c164ce4
--- /dev/null
+++ b/regressions/ck_bytelock/validate/validate.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_bytelock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 5000000
+#endif
+
+struct block {
+ unsigned int tid;
+};
+
+static struct affinity a;
+static unsigned int locked = 0;
+static int nthr;
+static ck_bytelock_t lock CK_CC_CACHELINE = CK_BYTELOCK_INITIALIZER;
+
+static void *
+thread(void *null)
+{
+ struct block *context = null;
+ int i = ITERATE;
+ unsigned int l;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ if (context->tid == (unsigned int)nthr - 1)
+ context->tid = sizeof(lock.readers) + 1;
+
+ while (i--) {
+ ck_bytelock_write_lock(&lock, context->tid);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+ ck_error("ERROR [WR:%d]: %u != 2\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_bytelock_write_unlock(&lock);
+
+ ck_bytelock_read_lock(&lock, context->tid);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_bytelock_read_unlock(&lock, context->tid);
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ struct block *context;
+ int i;
+
+ if (argc != 3) {
+ ck_error("Usage: correct <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ context = malloc(sizeof(struct block) * nthr);
+ if (context == NULL) {
+ ck_error("ERROR: Could not allocate thread contexts\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ fprintf(stderr, "Creating threads (mutual exclusion)...");
+ for (i = 0; i < nthr; i++) {
+ context[i].tid = i + 1;
+ if (pthread_create(&threads[i], NULL, thread, context + i)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return (0);
+}
+
diff --git a/regressions/ck_cohort/benchmark/Makefile b/regressions/ck_cohort/benchmark/Makefile
new file mode 100644
index 0000000..6af18b9
--- /dev/null
+++ b/regressions/ck_cohort/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: all clean
+
+OBJECTS=ck_cohort.THROUGHPUT ck_cohort.LATENCY
+
+all: $(OBJECTS)
+
+ck_cohort.THROUGHPUT: throughput.c
+ $(CC) $(CFLAGS) -o ck_cohort.THROUGHPUT throughput.c -lm
+
+ck_cohort.LATENCY: ck_cohort.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_cohort.LATENCY ck_cohort.c
+
+clean:
+ rm -rf *.dSYM *.exe $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE -lm
diff --git a/regressions/ck_cohort/benchmark/ck_cohort.c b/regressions/ck_cohort/benchmark/ck_cohort.c
new file mode 100644
index 0000000..954b616
--- /dev/null
+++ b/regressions/ck_cohort/benchmark/ck_cohort.c
@@ -0,0 +1,8 @@
+#include "../ck_cohort.h"
+
+#include <ck_cohort.h>
+#ifdef THROUGHPUT
+#include "../../ck_spinlock/benchmark/throughput.h"
+#elif defined(LATENCY)
+#include "../../ck_spinlock/benchmark/latency.h"
+#endif
diff --git a/regressions/ck_cohort/benchmark/throughput.c b/regressions/ck_cohort/benchmark/throughput.c
new file mode 100644
index 0000000..7c4776d
--- /dev/null
+++ b/regressions/ck_cohort/benchmark/throughput.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * Copyright 2013 Brendon Scheinman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_cohort.h>
+#include <ck_md.h>
+#include <ck_spinlock.h>
+
+#include "../../common.h"
+
+#define max(x, y) (((x) > (y)) ? (x) : (y))
+
+static struct affinity a;
+static unsigned int ready;
+
+struct counters {
+ uint64_t value;
+} CK_CC_CACHELINE;
+
+static struct counters *count;
+static uint64_t nthr;
+static unsigned int n_cohorts;
+static unsigned int barrier;
+static int critical CK_CC_CACHELINE;
+
+static void
+ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+
+ (void)context;
+ ck_spinlock_fas_lock(lock);
+ return;
+}
+
+static void
+ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+
+ (void)context;
+ ck_spinlock_fas_unlock(lock);
+ return;
+}
+
+static bool
+ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+
+ (void)context;
+ return ck_spinlock_fas_locked(lock);
+}
+
+CK_COHORT_PROTOTYPE(basic,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context)
+
+struct cohort_record {
+ CK_COHORT_INSTANCE(basic) cohort;
+} CK_CC_CACHELINE;
+static struct cohort_record *cohorts;
+
+static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER;
+
+struct block {
+ unsigned int tid;
+};
+
+static void *
+fairness(void *null)
+{
+ struct block *context = null;
+ unsigned int i = context->tid;
+ volatile int j;
+ long int base;
+ unsigned int core;
+ CK_COHORT_INSTANCE(basic) *cohort;
+
+
+ if (aff_iterate_core(&a, &core)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ cohort = &((cohorts + (core / (int)(a.delta)) % n_cohorts)->cohort);
+
+ while (ck_pr_load_uint(&ready) == 0);
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) != nthr);
+
+ while (ck_pr_load_uint(&ready)) {
+ CK_COHORT_LOCK(basic, cohort, NULL, NULL);
+
+ count[i].value++;
+ if (critical) {
+ base = common_lrand48() % critical;
+ for (j = 0; j < base; j++);
+ }
+
+ CK_COHORT_UNLOCK(basic, cohort, NULL, NULL);
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ uint64_t v, d;
+ unsigned int i;
+ pthread_t *threads;
+ struct block *context;
+ ck_spinlock_t *local_lock;
+
+ if (argc != 5) {
+ ck_error("Usage: ck_cohort <number of cohorts> <threads per cohort> "
+ "<affinity delta> <critical section>\n");
+ }
+
+ n_cohorts = atoi(argv[1]);
+ if (n_cohorts <= 0) {
+ ck_error("ERROR: Number of cohorts must be greater than 0\n");
+ }
+
+ nthr = n_cohorts * atoi(argv[2]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ critical = atoi(argv[4]);
+ if (critical < 0) {
+ ck_error("ERROR: critical section cannot be negative\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ cohorts = malloc(sizeof(struct cohort_record) * n_cohorts);
+ if (cohorts == NULL) {
+ ck_error("ERROR: Could not allocate cohort structures\n");
+ }
+
+ context = malloc(sizeof(struct block) * nthr);
+ if (context == NULL) {
+ ck_error("ERROR: Could not allocate thread contexts\n");
+ }
+
+ a.delta = atoi(argv[2]);
+ a.request = 0;
+
+ count = malloc(sizeof(*count) * nthr);
+ if (count == NULL) {
+ ck_error("ERROR: Could not create acquisition buffer\n");
+ }
+ memset(count, 0, sizeof(*count) * nthr);
+
+ fprintf(stderr, "Creating cohorts...");
+ for (i = 0 ; i < n_cohorts ; i++) {
+ local_lock = malloc(max(CK_MD_CACHELINE, sizeof(ck_spinlock_t)));
+ if (local_lock == NULL) {
+ ck_error("ERROR: Could not allocate local lock\n");
+ }
+ CK_COHORT_INIT(basic, &((cohorts + i)->cohort), &global_lock, local_lock,
+ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
+ local_lock = NULL;
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Creating threads (fairness)...");
+ for (i = 0; i < nthr; i++) {
+ context[i].tid = i;
+ if (pthread_create(&threads[i], NULL, fairness, context + i)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ ck_pr_store_uint(&ready, 1);
+ common_sleep(10);
+ ck_pr_store_uint(&ready, 0);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (i = 0, v = 0; i < nthr; i++) {
+ printf("%d %15" PRIu64 "\n", i, count[i].value);
+ v += count[i].value;
+ }
+
+ printf("\n# total : %15" PRIu64 "\n", v);
+ printf("# throughput : %15" PRIu64 " a/s\n", (v /= nthr) / 10);
+
+ for (i = 0, d = 0; i < nthr; i++)
+ d += (count[i].value - v) * (count[i].value - v);
+
+ printf("# average : %15" PRIu64 "\n", v);
+ printf("# deviation : %.2f (%.2f%%)\n\n", sqrt(d / nthr), (sqrt(d / nthr) / v) * 100.00);
+
+ return 0;
+}
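A minimal single-threaded sketch of the cohort API the benchmark above drives: CK_COHORT_PROTOTYPE generates the operations from context-taking lock/unlock/locked callbacks, CK_COHORT_INIT binds an instance to a global and a local lock, and CK_COHORT_LOCK/CK_COHORT_UNLOCK accept optional per-level context pointers. Illustrative only, not part of the patch.

/*
 * Illustrative ck_cohort usage sketch.  A cohort pairs a global lock with a
 * local (per-group) lock; here both levels use a fetch-and-set spinlock, as
 * in the regression header below.
 */
#include <stdbool.h>

#include <ck_cohort.h>
#include <ck_spinlock.h>

static void
fas_lock(ck_spinlock_fas_t *lock, void *context)
{

	(void)context;
	ck_spinlock_fas_lock(lock);
	return;
}

static void
fas_unlock(ck_spinlock_fas_t *lock, void *context)
{

	(void)context;
	ck_spinlock_fas_unlock(lock);
	return;
}

static bool
fas_locked(ck_spinlock_fas_t *lock, void *context)
{

	(void)context;
	return ck_spinlock_fas_locked(lock);
}

/* Same callback triple for the global and the local lock level. */
CK_COHORT_PROTOTYPE(example,
    fas_lock, fas_unlock, fas_locked,
    fas_lock, fas_unlock, fas_locked)

static ck_spinlock_fas_t global_lock = CK_SPINLOCK_FAS_INITIALIZER;
static ck_spinlock_fas_t local_lock = CK_SPINLOCK_FAS_INITIALIZER;
static CK_COHORT_INSTANCE(example) cohort;

int
main(void)
{

	CK_COHORT_INIT(example, &cohort, &global_lock, &local_lock,
	    CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);

	CK_COHORT_LOCK(example, &cohort, NULL, NULL);
	/* ... critical section ... */
	CK_COHORT_UNLOCK(example, &cohort, NULL, NULL);

	return 0;
}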
diff --git a/regressions/ck_cohort/ck_cohort.h b/regressions/ck_cohort/ck_cohort.h
new file mode 100644
index 0000000..b0d7f0a
--- /dev/null
+++ b/regressions/ck_cohort/ck_cohort.h
@@ -0,0 +1,35 @@
+#define LOCK_NAME "ck_cohort"
+#define LOCK_DEFINE \
+ static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER; \
+ static ck_spinlock_fas_t local_fas_lock = CK_SPINLOCK_FAS_INITIALIZER; \
+ static void \
+ ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context) \
+ { \
+ (void)context; \
+ ck_spinlock_fas_lock(lock); \
+ } \
+ \
+ static void \
+ ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context) \
+ { \
+ (void)context; \
+ ck_spinlock_fas_unlock(lock); \
+ } \
+ \
+ static bool \
+ ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) \
+ { \
+ (void)context; \
+ return ck_spinlock_fas_locked(lock); \
+ } \
+ CK_COHORT_PROTOTYPE(fas_fas, \
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, \
+ ck_spinlock_fas_locked_with_context, ck_spinlock_fas_lock_with_context, \
+ ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context) \
+ static CK_COHORT_INSTANCE(fas_fas) CK_CC_CACHELINE cohort = CK_COHORT_INITIALIZER
+
+
+#define LOCK_INIT CK_COHORT_INIT(fas_fas, &cohort, &global_fas_lock, &local_fas_lock, \
+ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT)
+#define LOCK CK_COHORT_LOCK(fas_fas, &cohort, NULL, NULL)
+#define UNLOCK CK_COHORT_UNLOCK(fas_fas, &cohort, NULL, NULL)
diff --git a/regressions/ck_cohort/validate/Makefile b/regressions/ck_cohort/validate/Makefile
new file mode 100644
index 0000000..145af3a
--- /dev/null
+++ b/regressions/ck_cohort/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=validate
+
+all: $(OBJECTS)
+
+validate: validate.c ../../../include/ck_cohort.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate `expr $(CORES) / 2` 2 1
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_cohort/validate/validate.c b/regressions/ck_cohort/validate/validate.c
new file mode 100644
index 0000000..cffbf77
--- /dev/null
+++ b/regressions/ck_cohort/validate/validate.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * Copyright 2013 Brendon Scheinman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <ck_pr.h>
+#include <ck_cohort.h>
+#include <ck_spinlock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+static struct affinity a;
+static unsigned int locked;
+static int nthr;
+static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER;
+
+static void
+ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_fas_lock(lock);
+}
+
+static void
+ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_fas_unlock(lock);
+}
+
+static bool
+ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ return ck_spinlock_fas_locked(lock);
+}
+
+static bool
+ck_spinlock_fas_trylock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ return ck_spinlock_fas_trylock(lock);
+}
+
+CK_COHORT_TRYLOCK_PROTOTYPE(fas_fas,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context,
+ ck_spinlock_fas_locked_with_context, ck_spinlock_fas_trylock_with_context,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context,
+ ck_spinlock_fas_locked_with_context, ck_spinlock_fas_trylock_with_context)
+static CK_COHORT_INSTANCE(fas_fas) *cohorts;
+static int n_cohorts;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ int i = ITERATE;
+ unsigned int l;
+ unsigned int core;
+ CK_COHORT_INSTANCE(fas_fas) *cohort;
+
+ if (aff_iterate_core(&a, &core)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ cohort = cohorts + (core / (int)(a.delta)) % n_cohorts;
+
+ while (i--) {
+
+ if (i & 1) {
+ CK_COHORT_LOCK(fas_fas, cohort, NULL, NULL);
+ } else {
+ while (CK_COHORT_TRYLOCK(fas_fas, cohort, NULL, NULL, NULL) == false) {
+ ck_pr_stall();
+ }
+ }
+
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+ ck_error("ERROR [WR:%d]: %u != 2\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_COHORT_UNLOCK(fas_fas, cohort, NULL, NULL);
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ int threads_per_cohort;
+ ck_spinlock_fas_t *local_lock;
+ int i;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <number of cohorts> <threads per cohort> <affinity delta>\n");
+ }
+
+ n_cohorts = atoi(argv[1]);
+ if (n_cohorts <= 0) {
+ fprintf(stderr, "setting number of cohorts per thread to 1\n");
+ n_cohorts = 1;
+ }
+
+ threads_per_cohort = atoi(argv[2]);
+ if (threads_per_cohort <= 0) {
+ ck_error("ERROR: Threads per cohort must be greater than 0\n");
+ }
+
+ nthr = n_cohorts * threads_per_cohort;
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[3]);
+
+ fprintf(stderr, "Creating cohorts...");
+ cohorts = malloc(sizeof(CK_COHORT_INSTANCE(fas_fas)) * n_cohorts);
+ for (i = 0 ; i < n_cohorts ; i++) {
+ local_lock = malloc(sizeof(ck_spinlock_fas_t));
+ CK_COHORT_INIT(fas_fas, cohorts + i, &global_fas_lock, local_lock,
+ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Creating threads...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return (0);
+}
+
diff --git a/regressions/ck_epoch/validate/Makefile b/regressions/ck_epoch/validate/Makefile
new file mode 100644
index 0000000..446c008
--- /dev/null
+++ b/regressions/ck_epoch/validate/Makefile
@@ -0,0 +1,42 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_stack ck_epoch_synchronize ck_epoch_poll ck_epoch_call \
+ ck_epoch_section ck_epoch_section_2 torture
+HALF=`expr $(CORES) / 2`
+
+all: $(OBJECTS)
+
+check: all
+ ./ck_stack $(CORES) 1
+ ./ck_epoch_synchronize $(HALF) $(HALF) 1
+ ./ck_epoch_poll $(CORES) 1 1
+ ./ck_epoch_section
+ ./ck_epoch_section_2 $(HALF) $(HALF) 1
+ ./torture $(HALF) $(HALF) 1
+
+ck_epoch_synchronize: ck_epoch_synchronize.c ../../../include/ck_stack.h ../../../include/ck_epoch.h ../../../src/ck_epoch.c
+ $(CC) $(CFLAGS) -o ck_epoch_synchronize ck_epoch_synchronize.c ../../../src/ck_epoch.c
+
+ck_epoch_poll: ck_epoch_poll.c ../../../include/ck_stack.h ../../../include/ck_epoch.h ../../../src/ck_epoch.c
+ $(CC) $(CFLAGS) -o ck_epoch_poll ck_epoch_poll.c ../../../src/ck_epoch.c
+
+torture: torture.c ../../../include/ck_epoch.h ../../../src/ck_epoch.c
+ $(CC) $(CFLAGS) -o torture torture.c ../../../src/ck_epoch.c
+
+ck_epoch_section: ck_epoch_section.c ../../../include/ck_epoch.h ../../../src/ck_epoch.c
+ $(CC) $(CFLAGS) -o ck_epoch_section ck_epoch_section.c ../../../src/ck_epoch.c
+
+ck_epoch_section_2: ck_epoch_section_2.c ../../../include/ck_epoch.h ../../../src/ck_epoch.c
+ $(CC) $(CFLAGS) -o ck_epoch_section_2 ck_epoch_section_2.c ../../../src/ck_epoch.c
+
+ck_epoch_call: ck_epoch_call.c ../../../include/ck_stack.h ../../../include/ck_epoch.h ../../../src/ck_epoch.c
+ $(CC) $(CFLAGS) -o ck_epoch_call ck_epoch_call.c ../../../src/ck_epoch.c
+
+ck_stack: ck_stack.c ../../../include/ck_stack.h ../../../include/ck_epoch.h ../../../src/ck_epoch.c
+ $(CC) $(CFLAGS) -o ck_stack ck_stack.c ../../../src/ck_epoch.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_epoch/validate/ck_epoch_call.c b/regressions/ck_epoch/validate/ck_epoch_call.c
new file mode 100644
index 0000000..29e0df8
--- /dev/null
+++ b/regressions/ck_epoch/validate/ck_epoch_call.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <ck_epoch.h>
+
+#include "../../common.h"
+
+static ck_epoch_t epoch;
+static unsigned int counter;
+static ck_epoch_record_t record[2];
+
+static void
+cb(ck_epoch_entry_t *p)
+{
+
+ if (counter == 0)
+ ck_epoch_call(&record[1], p, cb);
+
+ printf("Counter value: %u -> %u\n",
+ counter, counter + 1);
+ counter++;
+ return;
+}
+
+int
+main(void)
+{
+ ck_epoch_entry_t entry;
+
+ ck_epoch_register(&epoch, &record[0]);
+ ck_epoch_register(&epoch, &record[1]);
+
+ ck_epoch_call(&record[1], &entry, cb);
+ ck_epoch_barrier(&record[1]);
+ ck_epoch_barrier(&record[1]);
+ if (counter != 2)
+ ck_error("Expected counter value 2, read %u.\n", counter);
+
+ return 0;
+}
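A minimal sketch of the deferred-reclamation pattern the epoch tests exercise: defer a destructor with ck_epoch_call() and force it to run with ck_epoch_barrier(). CK_EPOCH_CONTAINER recovers the enclosing object from its embedded ck_epoch_entry_t, as in the stack tests below. Illustrative only, not part of the patch.

/*
 * Illustrative ck_epoch deferred-free sketch: the destructor is deferred
 * until no epoch-protected section can still observe the object, and
 * ck_epoch_barrier() blocks until all deferred callbacks have dispatched.
 */
#include <stdlib.h>

#include <ck_epoch.h>

struct object {
	int value;
	ck_epoch_entry_t epoch_entry;
};
CK_EPOCH_CONTAINER(struct object, epoch_entry, object_container)

static ck_epoch_t epoch;

static void
object_destroy(ck_epoch_entry_t *e)
{

	free(object_container(e));
	return;
}

int
main(void)
{
	ck_epoch_record_t record;
	struct object *o = malloc(sizeof *o);

	if (o == NULL)
		return 1;

	ck_epoch_init(&epoch);
	ck_epoch_register(&epoch, &record);

	/* Defer destruction until no reader can still hold a reference. */
	ck_epoch_call(&record, &o->epoch_entry, object_destroy);

	/* Wait until all deferred callbacks on this record have executed. */
	ck_epoch_barrier(&record);
	return 0;
}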
diff --git a/regressions/ck_epoch/validate/ck_epoch_poll.c b/regressions/ck_epoch/validate/ck_epoch_poll.c
new file mode 100644
index 0000000..aec6dd0
--- /dev/null
+++ b/regressions/ck_epoch/validate/ck_epoch_poll.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+#include <ck_epoch.h>
+#include <ck_stack.h>
+
+#include "../../common.h"
+
+static unsigned int n_rd;
+static unsigned int n_wr;
+static unsigned int n_threads;
+static unsigned int barrier;
+static unsigned int e_barrier;
+static unsigned int readers;
+static unsigned int writers;
+
+#ifndef PAIRS_S
+#define PAIRS_S 100000
+#endif
+
+#ifndef ITERATE_S
+#define ITERATE_S 20
+#endif
+
+struct node {
+ unsigned int value;
+ ck_stack_entry_t stack_entry;
+ ck_epoch_entry_t epoch_entry;
+};
+static ck_stack_t stack = CK_STACK_INITIALIZER;
+static ck_epoch_t stack_epoch;
+CK_STACK_CONTAINER(struct node, stack_entry, stack_container)
+CK_EPOCH_CONTAINER(struct node, epoch_entry, epoch_container)
+static struct affinity a;
+static const char animate[] = "-/|\\";
+
+static void
+destructor(ck_epoch_entry_t *p)
+{
+ struct node *e = epoch_container(p);
+
+ free(e);
+ return;
+}
+
+static void *
+read_thread(void *unused CK_CC_UNUSED)
+{
+ unsigned int j;
+ ck_epoch_record_t record CK_CC_CACHELINE;
+ ck_stack_entry_t *cursor, *n;
+
+ ck_epoch_register(&stack_epoch, &record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ while (CK_STACK_ISEMPTY(&stack) == true) {
+ if (ck_pr_load_uint(&readers) != 0)
+ break;
+
+ ck_pr_stall();
+ }
+
+ j = 0;
+ for (;;) {
+ ck_epoch_begin(&record, NULL);
+ CK_STACK_FOREACH(&stack, cursor) {
+ if (cursor == NULL)
+ continue;
+
+ n = CK_STACK_NEXT(cursor);
+ j += ck_pr_load_ptr(&n) != NULL;
+ }
+ ck_epoch_end(&record, NULL);
+
+ if (j != 0 && ck_pr_load_uint(&readers) == 0)
+ ck_pr_store_uint(&readers, 1);
+
+ if (CK_STACK_ISEMPTY(&stack) == true &&
+ ck_pr_load_uint(&e_barrier) != 0)
+ break;
+ }
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < n_threads);
+
+ fprintf(stderr, "[R] Observed entries: %u\n", j);
+ return (NULL);
+}
+
+static void *
+write_thread(void *unused CK_CC_UNUSED)
+{
+ struct node **entry, *e;
+ unsigned int i, j, tid;
+ ck_epoch_record_t record;
+ ck_stack_entry_t *s;
+
+ ck_epoch_register(&stack_epoch, &record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_uint(&writers, 1);
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ entry = malloc(sizeof(struct node *) * PAIRS_S);
+ if (entry == NULL) {
+ ck_error("Failed allocation.\n");
+ }
+
+ for (j = 0; j < ITERATE_S; j++) {
+ for (i = 0; i < PAIRS_S; i++) {
+ entry[i] = malloc(sizeof(struct node));
+ if (entry[i] == NULL) {
+ ck_error("Failed individual allocation\n");
+ }
+ }
+
+ for (i = 0; i < PAIRS_S; i++) {
+ ck_stack_push_upmc(&stack, &entry[i]->stack_entry);
+ }
+
+ while (ck_pr_load_uint(&readers) == 0)
+ ck_pr_stall();
+
+ if (tid == 0) {
+ fprintf(stderr, "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b[W] %2.2f: %c",
+ (double)j / ITERATE_S, animate[i % strlen(animate)]);
+ }
+
+ for (i = 0; i < PAIRS_S; i++) {
+ ck_epoch_begin(&record, NULL);
+ s = ck_stack_pop_upmc(&stack);
+ e = stack_container(s);
+ ck_epoch_end(&record, NULL);
+
+ ck_epoch_call(&record, &e->epoch_entry, destructor);
+ ck_epoch_poll(&record);
+ }
+ }
+
+ ck_epoch_barrier(&record);
+
+ if (tid == 0) {
+ fprintf(stderr, "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b[W] Peak: %u (%2.2f%%)\n Reclamations: %lu\n\n",
+ record.n_peak,
+ (double)record.n_peak / ((double)PAIRS_S * ITERATE_S) * 100,
+ record.n_dispatch);
+ }
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < n_threads);
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int i;
+ pthread_t *threads;
+
+ if (argc != 4) {
+ ck_error("Usage: stack <#readers> <#writers> <affinity delta>\n");
+ }
+
+ n_rd = atoi(argv[1]);
+ n_wr = atoi(argv[2]);
+ n_threads = n_wr + n_rd;
+
+ a.delta = atoi(argv[3]);
+ a.request = 0;
+
+ threads = malloc(sizeof(pthread_t) * n_threads);
+ ck_epoch_init(&stack_epoch);
+
+ for (i = 0; i < n_rd; i++)
+ pthread_create(threads + i, NULL, read_thread, NULL);
+
+ do {
+ pthread_create(threads + i, NULL, write_thread, NULL);
+ } while (++i < n_wr + n_rd);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ return (0);
+}
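The test above combines both sides of the protocol: readers bracket traversals with ck_epoch_begin()/ck_epoch_end(), while writers that must not block use ck_epoch_poll() to dispatch whatever deferred callbacks are already safe. A compact sketch follows, with shared_traverse() and retire_one() left as hypothetical stand-ins; it is illustrative only and not part of the patch.

/*
 * Illustrative sketch of the read-side and non-blocking write-side epoch
 * protocol exercised by ck_epoch_poll.c above.
 */
#include <ck_epoch.h>

static ck_epoch_t epoch;

static void
reader(ck_epoch_record_t *record)
{

	ck_epoch_begin(record, NULL);
	/* shared_traverse(); -- nothing observed here is reclaimed yet. */
	ck_epoch_end(record, NULL);
	return;
}

static void
writer(ck_epoch_record_t *record)
{

	/* retire_one(record); -- ck_epoch_call() on an unlinked object. */

	/*
	 * Non-blocking: dispatches any callbacks whose grace period has
	 * already elapsed and returns immediately otherwise.
	 */
	ck_epoch_poll(record);
	return;
}

int
main(void)
{
	ck_epoch_record_t record;

	ck_epoch_init(&epoch);
	ck_epoch_register(&epoch, &record);
	reader(&record);
	writer(&record);
	return 0;
}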
diff --git a/regressions/ck_epoch/validate/ck_epoch_section.c b/regressions/ck_epoch/validate/ck_epoch_section.c
new file mode 100644
index 0000000..12bcca1
--- /dev/null
+++ b/regressions/ck_epoch/validate/ck_epoch_section.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2015 John Esmet.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <ck_epoch.h>
+
+#include "../../common.h"
+
+static ck_epoch_t epc;
+static ck_epoch_record_t record, record2;
+static unsigned int cleanup_calls;
+
+static void
+setup_test(void)
+{
+
+ ck_epoch_init(&epc);
+ ck_epoch_register(&epc, &record);
+ ck_epoch_register(&epc, &record2);
+ cleanup_calls = 0;
+
+ return;
+}
+
+static void
+teardown_test(void)
+{
+
+ memset(&epc, 0, sizeof(ck_epoch_t));
+ ck_epoch_unregister(&record);
+ memset(&record, 0, sizeof(ck_epoch_record_t));
+ memset(&record2, 0, sizeof(ck_epoch_record_t));
+ cleanup_calls = 0;
+
+ return;
+}
+
+static void
+cleanup(ck_epoch_entry_t *e)
+{
+ (void) e;
+
+ cleanup_calls++;
+
+ return;
+}
+
+static void
+test_simple_read_section(void)
+{
+ ck_epoch_entry_t entry;
+ ck_epoch_section_t section;
+
+ memset(&entry, 0, sizeof(ck_epoch_entry_t));
+ setup_test();
+
+ ck_epoch_begin(&record, &section);
+ ck_epoch_call(&record, &entry, cleanup);
+ assert(cleanup_calls == 0);
+ ck_epoch_end(&record, &section);
+ ck_epoch_barrier(&record);
+ assert(cleanup_calls == 1);
+
+ teardown_test();
+ return;
+}
+
+static void
+test_nested_read_section(void)
+{
+ ck_epoch_entry_t entry1, entry2;
+ ck_epoch_section_t section1, section2;
+
+ memset(&entry1, 0, sizeof(ck_epoch_entry_t));
+ memset(&entry2, 0, sizeof(ck_epoch_entry_t));
+ setup_test();
+
+ ck_epoch_begin(&record, &section1);
+ ck_epoch_call(&record, &entry1, cleanup);
+ assert(cleanup_calls == 0);
+
+ ck_epoch_begin(&record, &section2);
+ ck_epoch_call(&record, &entry2, cleanup);
+ assert(cleanup_calls == 0);
+
+ ck_epoch_end(&record, &section2);
+ assert(cleanup_calls == 0);
+
+ ck_epoch_end(&record, &section1);
+ assert(cleanup_calls == 0);
+
+ ck_epoch_barrier(&record);
+ assert(cleanup_calls == 2);
+
+ teardown_test();
+ return;
+}
+
+struct obj {
+ ck_epoch_entry_t entry;
+ unsigned int destroyed;
+};
+
+static void *
+barrier_work(void *arg)
+{
+ unsigned int *run;
+
+ run = (unsigned int *)arg;
+ while (ck_pr_load_uint(run) != 0) {
+ /*
+ * Need to use record2, as record is local
+ * to the test thread.
+ */
+ ck_epoch_barrier(&record2);
+ usleep(5 * 1000);
+ }
+
+ return NULL;
+}
+
+static void *
+reader_work(void *arg)
+{
+ ck_epoch_record_t local_record;
+ ck_epoch_section_t section;
+ struct obj *o;
+
+ ck_epoch_register(&epc, &local_record);
+
+ o = (struct obj *)arg;
+
+ /*
+ * Begin a read section. The calling thread has an open read section,
+ * so the object should not be destroyed for the lifetime of this
+ * thread.
+ */
+ ck_epoch_begin(&local_record, &section);
+ usleep((common_rand() % 100) * 1000);
+ assert(ck_pr_load_uint(&o->destroyed) == 0);
+ ck_epoch_end(&local_record, &section);
+
+ ck_epoch_unregister(&local_record);
+
+ return NULL;
+}
+
+static void
+obj_destroy(ck_epoch_entry_t *e)
+{
+ struct obj *o;
+
+ o = (struct obj *)e;
+ ck_pr_fas_uint(&o->destroyed, 1);
+
+ return;
+}
+
+static void
+test_single_reader_with_barrier_thread(void)
+{
+ const int num_sections = 10;
+ struct obj o;
+ unsigned int run;
+ pthread_t thread;
+ ck_epoch_section_t sections[num_sections];
+ int shuffled[num_sections];
+
+ run = 1;
+ memset(&o, 0, sizeof(struct obj));
+ common_srand(time(NULL));
+ setup_test();
+
+ if (pthread_create(&thread, NULL, barrier_work, &run) != 0) {
+ abort();
+ }
+
+ /* Start a bunch of sections. */
+ for (int i = 0; i < num_sections; i++) {
+ ck_epoch_begin(&record, &sections[i]);
+ shuffled[i] = i;
+ if (i == num_sections / 2) {
+ usleep(1 * 1000);
+ }
+ }
+
+ /* Generate a shuffle. */
+ for (int i = num_sections - 1; i >= 0; i--) {
+ int k = common_rand() % (i + 1);
+ int tmp = shuffled[k];
+ shuffled[k] = shuffled[i];
+ shuffled[i] = tmp;
+ }
+
+ ck_epoch_call(&record, &o.entry, obj_destroy);
+
+ /* Close the sections in shuffle-order. */
+ for (int i = 0; i < num_sections; i++) {
+ ck_epoch_end(&record, &sections[shuffled[i]]);
+ if (i != num_sections - 1) {
+ assert(ck_pr_load_uint(&o.destroyed) == 0);
+ usleep(3 * 1000);
+ }
+ }
+
+ ck_pr_store_uint(&run, 0);
+ if (pthread_join(thread, NULL) != 0) {
+ abort();
+ }
+
+ ck_epoch_barrier(&record);
+ assert(ck_pr_load_uint(&o.destroyed) == 1);
+
+ teardown_test();
+
+ return;
+}
+
+static void
+test_multiple_readers_with_barrier_thread(void)
+{
+ const int num_readers = 10;
+ struct obj o;
+ unsigned int run;
+ ck_epoch_section_t section;
+ pthread_t threads[num_readers + 1];
+
+ run = 1;
+ memset(&o, 0, sizeof(struct obj));
+ memset(&section, 0, sizeof(ck_epoch_section_t));
+ common_srand(time(NULL));
+ setup_test();
+
+ /* Create a thread to call barrier() while we create reader threads.
+ * Each barrier will attempt to move the global epoch forward so
+ * it will make the read section code coverage more interesting. */
+ if (pthread_create(&threads[num_readers], NULL,
+ barrier_work, &run) != 0) {
+ abort();
+ }
+
+ ck_epoch_begin(&record, &section);
+ ck_epoch_call(&record, &o.entry, obj_destroy);
+
+ for (int i = 0; i < num_readers; i++) {
+ if (pthread_create(&threads[i], NULL, reader_work, &o) != 0) {
+ abort();
+ }
+ }
+
+ ck_epoch_end(&record, &section);
+
+ ck_pr_store_uint(&run, 0);
+ if (pthread_join(threads[num_readers], NULL) != 0) {
+ abort();
+ }
+
+ /* After the barrier, the object should be destroyed and readers
+ * should return. */
+ for (int i = 0; i < num_readers; i++) {
+ if (pthread_join(threads[i], NULL) != 0) {
+ abort();
+ }
+ }
+
+ teardown_test();
+ return;
+}
+
+int
+main(void)
+{
+
+ test_simple_read_section();
+ test_nested_read_section();
+ test_single_reader_with_barrier_thread();
+ test_multiple_readers_with_barrier_thread();
+
+ return 0;
+}
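Passing an explicit ck_epoch_section_t to ck_epoch_begin()/ck_epoch_end() lets read-side sections nest and, as the shuffle test above verifies, close in a different order than they were opened. A minimal sketch, illustrative only and not part of the patch:

/*
 * Illustrative sketch of explicit, nested epoch sections.
 */
#include <ck_epoch.h>

static ck_epoch_t epoch;

int
main(void)
{
	ck_epoch_record_t record;
	ck_epoch_section_t outer, inner;

	ck_epoch_init(&epoch);
	ck_epoch_register(&epoch, &record);

	ck_epoch_begin(&record, &outer);
	ck_epoch_begin(&record, &inner);	/* Nested read-side section. */
	/* ... protected reads ... */
	ck_epoch_end(&record, &inner);
	ck_epoch_end(&record, &outer);

	ck_epoch_unregister(&record);
	return 0;
}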
diff --git a/regressions/ck_epoch/validate/ck_epoch_section_2.c b/regressions/ck_epoch/validate/ck_epoch_section_2.c
new file mode 100644
index 0000000..aed3661
--- /dev/null
+++ b/regressions/ck_epoch/validate/ck_epoch_section_2.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+#include <ck_epoch.h>
+#include <ck_stack.h>
+
+#include "../../common.h"
+
+static unsigned int n_rd;
+static unsigned int n_wr;
+static unsigned int n_threads;
+static unsigned int barrier;
+static unsigned int leave;
+
+#ifndef PAIRS_S
+#define PAIRS_S 10000
+#endif
+
+#ifndef CK_EPOCH_T_DEPTH
+#define CK_EPOCH_T_DEPTH 8
+#endif
+
+static ck_epoch_t epoch;
+static struct affinity a;
+
+static void *
+read_thread(void *unused CK_CC_UNUSED)
+{
+ ck_epoch_record_t *record;
+ unsigned long long i = 0;
+
+ record = malloc(sizeof *record);
+ assert(record != NULL);
+ ck_epoch_register(&epoch, record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ for (;;) {
+ ck_epoch_section_t section[2];
+ ck_epoch_section_t junk[CK_EPOCH_T_DEPTH];
+ unsigned int j;
+
+ ck_epoch_begin(record, &section[0]);
+
+ for (j = 0; j < CK_EPOCH_T_DEPTH; j++)
+ ck_epoch_begin(record, &junk[j]);
+ for (j = 0; j < CK_EPOCH_T_DEPTH; j++)
+ ck_epoch_end(record, &junk[j]);
+
+ if (i > 0)
+ ck_epoch_end(record, &section[1]);
+
+ /* Wait for the next synchronize operation. */
+ while ((ck_pr_load_uint(&epoch.epoch) & 1) ==
+ section[0].bucket) {
+ i++;
+
+ if (!(i % 10000000)) {
+ fprintf(stderr, "%u %u %u\n",
+ ck_pr_load_uint(&epoch.epoch),
+ section[0].bucket, record->epoch);
+ }
+
+ while ((ck_pr_load_uint(&epoch.epoch) & 1) ==
+ section[0].bucket) {
+ if (ck_pr_load_uint(&leave) == 1)
+ break;
+
+ ck_pr_stall();
+ }
+ }
+
+ ck_epoch_begin(record, &section[1]);
+
+ assert(section[0].bucket != section[1].bucket);
+ ck_epoch_end(record, &section[0]);
+
+ assert(ck_pr_load_uint(&record->active) > 0);
+
+ if (ck_pr_load_uint(&leave) == 1) {
+ ck_epoch_end(record, &section[1]);
+ break;
+ }
+
+ i++;
+ }
+
+ return NULL;
+}
+
+static void *
+write_thread(void *unused CK_CC_UNUSED)
+{
+ ck_epoch_record_t record;
+ unsigned long iterations = 0;
+
+ ck_epoch_register(&epoch, &record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ for (;;) {
+ if (!(iterations % 1048575))
+ fprintf(stderr, ".");
+
+ ck_epoch_synchronize(&record);
+ iterations++;
+
+ if (ck_pr_load_uint(&leave) == 1)
+ break;
+ }
+
+ fprintf(stderr, "%lu iterations\n", iterations);
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int i;
+ pthread_t *threads;
+
+ if (argc != 4) {
+ ck_error("Usage: stack <#readers> <#writers> <affinity delta>\n");
+ }
+
+ n_rd = atoi(argv[1]);
+ n_wr = atoi(argv[2]);
+ n_threads = n_wr + n_rd;
+
+ a.delta = atoi(argv[3]);
+ a.request = 0;
+
+ threads = malloc(sizeof(pthread_t) * n_threads);
+ ck_epoch_init(&epoch);
+
+ for (i = 0; i < n_rd; i++)
+ pthread_create(threads + i, NULL, read_thread, NULL);
+
+ do {
+ pthread_create(threads + i, NULL, write_thread, NULL);
+ } while (++i < n_wr + n_rd);
+
+ common_sleep(10);
+ ck_pr_store_uint(&leave, 1);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ return (0);
+}
diff --git a/regressions/ck_epoch/validate/ck_epoch_synchronize.c b/regressions/ck_epoch/validate/ck_epoch_synchronize.c
new file mode 100644
index 0000000..a03a4f7
--- /dev/null
+++ b/regressions/ck_epoch/validate/ck_epoch_synchronize.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+#include <ck_epoch.h>
+#include <ck_stack.h>
+
+#include "../../common.h"
+
+static unsigned int n_rd;
+static unsigned int n_wr;
+static unsigned int n_threads;
+static unsigned int barrier;
+static unsigned int e_barrier;
+static unsigned int readers;
+static unsigned int writers;
+
+#ifndef PAIRS_S
+#define PAIRS_S 10000
+#endif
+
+#ifndef ITERATE_S
+#define ITERATE_S 20
+#endif
+
+struct node {
+ unsigned int value;
+ ck_stack_entry_t stack_entry;
+ ck_epoch_entry_t epoch_entry;
+};
+static ck_stack_t stack = CK_STACK_INITIALIZER;
+static ck_epoch_t stack_epoch;
+CK_STACK_CONTAINER(struct node, stack_entry, stack_container)
+CK_EPOCH_CONTAINER(struct node, epoch_entry, epoch_container)
+static struct affinity a;
+static const char animate[] = "-/|\\";
+
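+/*
+ * epoch_container() (generated by CK_EPOCH_CONTAINER above) maps the embedded
+ * ck_epoch_entry_t back to its enclosing struct node so the node can be freed
+ * once it is safe to do so.
+ */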
+static void
+destructor(ck_epoch_entry_t *p)
+{
+ struct node *e = epoch_container(p);
+
+ free(e);
+ return;
+}
+
+static void *
+read_thread(void *unused CK_CC_UNUSED)
+{
+ unsigned int j;
+ ck_epoch_record_t record CK_CC_CACHELINE;
+ ck_stack_entry_t *cursor;
+ ck_stack_entry_t *n;
+ unsigned int i;
+
+ ck_epoch_register(&stack_epoch, &record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ while (CK_STACK_ISEMPTY(&stack) == true) {
+ if (ck_pr_load_uint(&readers) != 0)
+ break;
+
+ ck_pr_stall();
+ }
+
+ j = 0;
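+	/*
+	 * Traverse the stack inside an epoch-protected section; j counts
+	 * observed successor pointers and each pass is bounded (i > 4098) so
+	 * a reader never spins on a stack that writers keep refilling.
+	 */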
+ for (;;) {
+ i = 0;
+
+ ck_epoch_begin(&record, NULL);
+ CK_STACK_FOREACH(&stack, cursor) {
+ if (cursor == NULL)
+ continue;
+
+ n = CK_STACK_NEXT(cursor);
+ j += ck_pr_load_ptr(&n) != NULL;
+
+ if (i++ > 4098)
+ break;
+ }
+ ck_epoch_end(&record, NULL);
+
+ if (j != 0 && ck_pr_load_uint(&readers) == 0)
+ ck_pr_store_uint(&readers, 1);
+
+ if (CK_STACK_ISEMPTY(&stack) == true &&
+ ck_pr_load_uint(&e_barrier) != 0)
+ break;
+ }
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < n_threads);
+
+ fprintf(stderr, "[R] Observed entries: %u\n", j);
+ return (NULL);
+}
+
+static void *
+write_thread(void *unused CK_CC_UNUSED)
+{
+ struct node **entry, *e;
+ unsigned int i, j, tid;
+ ck_epoch_record_t record;
+ ck_stack_entry_t *s;
+
+ ck_epoch_register(&stack_epoch, &record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_uint(&writers, 1);
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ entry = malloc(sizeof(struct node *) * PAIRS_S);
+ if (entry == NULL) {
+ ck_error("Failed allocation.\n");
+ }
+
+ for (j = 0; j < ITERATE_S; j++) {
+ for (i = 0; i < PAIRS_S; i++) {
+ entry[i] = malloc(sizeof(struct node));
+			if (entry[i] == NULL) {
+ ck_error("Failed individual allocation\n");
+ }
+ }
+
+ for (i = 0; i < PAIRS_S; i++) {
+ ck_stack_push_upmc(&stack, &entry[i]->stack_entry);
+ }
+
+ while (ck_pr_load_uint(&readers) == 0)
+ ck_pr_stall();
+
+ for (i = 0; i < PAIRS_S; i++) {
+ ck_epoch_begin(&record, NULL);
+ s = ck_stack_pop_upmc(&stack);
+ e = stack_container(s);
+ ck_epoch_end(&record, NULL);
+
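+			/*
+			 * Alternate between deferred reclamation
+			 * (synchronize + reclaim + call) and blocking
+			 * reclamation (barrier + immediate destruction).
+			 */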
+ if (i & 1) {
+ ck_epoch_synchronize(&record);
+ ck_epoch_reclaim(&record);
+ ck_epoch_call(&record, &e->epoch_entry, destructor);
+ } else {
+ ck_epoch_barrier(&record);
+ destructor(&e->epoch_entry);
+ }
+
+ if (tid == 0 && (i % 16384) == 0) {
+ fprintf(stderr, "[W] %2.2f: %c\n",
+ (double)j / ITERATE_S, animate[i % strlen(animate)]);
+ }
+ }
+ }
+
+ ck_epoch_synchronize(&record);
+
+ if (tid == 0) {
+ fprintf(stderr, "[W] Peak: %u (%2.2f%%)\n Reclamations: %lu\n\n",
+ record.n_peak,
+ (double)record.n_peak / ((double)PAIRS_S * ITERATE_S) * 100,
+ record.n_dispatch);
+ }
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < n_threads);
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int i;
+ pthread_t *threads;
+
+ if (argc != 4) {
+		ck_error("Usage: ck_epoch_synchronize <#readers> <#writers> <affinity delta>\n");
+ }
+
+ n_rd = atoi(argv[1]);
+ n_wr = atoi(argv[2]);
+ n_threads = n_wr + n_rd;
+
+ a.delta = atoi(argv[3]);
+ a.request = 0;
+
+ threads = malloc(sizeof(pthread_t) * n_threads);
+ ck_epoch_init(&stack_epoch);
+
+ for (i = 0; i < n_rd; i++)
+ pthread_create(threads + i, NULL, read_thread, NULL);
+
+ do {
+ pthread_create(threads + i, NULL, write_thread, NULL);
+ } while (++i < n_wr + n_rd);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ return (0);
+}
diff --git a/regressions/ck_epoch/validate/ck_stack.c b/regressions/ck_epoch/validate/ck_stack.c
new file mode 100644
index 0000000..fc50228
--- /dev/null
+++ b/regressions/ck_epoch/validate/ck_stack.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <ck_epoch.h>
+#include <ck_stack.h>
+
+#include "../../common.h"
+
+static unsigned int n_threads;
+static unsigned int barrier;
+static unsigned int e_barrier;
+
+#ifndef PAIRS
+#define PAIRS 5000000
+#endif
+
+struct node {
+ unsigned int value;
+ ck_epoch_entry_t epoch_entry;
+ ck_stack_entry_t stack_entry;
+};
+static ck_stack_t stack = {NULL, NULL};
+static ck_epoch_t stack_epoch;
+CK_STACK_CONTAINER(struct node, stack_entry, stack_container)
+CK_EPOCH_CONTAINER(struct node, epoch_entry, epoch_container)
+static struct affinity a;
+
+static void
+destructor(ck_epoch_entry_t *p)
+{
+ struct node *e = epoch_container(p);
+
+ free(e);
+ return;
+}
+
+static void *
+thread(void *unused CK_CC_UNUSED)
+{
+ struct node **entry, *e;
+ ck_epoch_record_t record;
+ ck_stack_entry_t *s;
+ unsigned long smr = 0;
+ unsigned int i;
+
+ ck_epoch_register(&stack_epoch, &record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ entry = malloc(sizeof(struct node *) * PAIRS);
+ if (entry == NULL) {
+ ck_error("Failed allocation.\n");
+ }
+
+ for (i = 0; i < PAIRS; i++) {
+ entry[i] = malloc(sizeof(struct node));
+		if (entry[i] == NULL) {
+ ck_error("Failed individual allocation\n");
+ }
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
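+	/*
+	 * Each iteration pushes and pops one node inside a protected section,
+	 * defers its destruction with ck_epoch_call(), and counts how often
+	 * ck_epoch_poll() could not advance (reported as deferrals).
+	 */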
+ for (i = 0; i < PAIRS; i++) {
+ ck_epoch_begin(&record, NULL);
+ ck_stack_push_upmc(&stack, &entry[i]->stack_entry);
+ s = ck_stack_pop_upmc(&stack);
+ ck_epoch_end(&record, NULL);
+
+ e = stack_container(s);
+ ck_epoch_call(&record, &e->epoch_entry, destructor);
+ smr += ck_epoch_poll(&record) == false;
+ }
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < n_threads);
+
+ fprintf(stderr, "Deferrals: %lu (%2.2f)\n", smr, (double)smr / PAIRS);
+ fprintf(stderr, "Peak: %u (%2.2f%%), %u pending\nReclamations: %lu\n\n",
+ record.n_peak,
+ (double)record.n_peak / PAIRS * 100,
+ record.n_pending,
+ record.n_dispatch);
+
+ ck_epoch_barrier(&record);
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < (n_threads << 1));
+
+ if (record.n_pending != 0) {
+ ck_error("ERROR: %u pending, expecting none.\n",
+ record.n_pending);
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int i;
+ pthread_t *threads;
+
+ if (argc != 3) {
+ ck_error("Usage: stack <threads> <affinity delta>\n");
+ }
+
+ n_threads = atoi(argv[1]);
+ a.delta = atoi(argv[2]);
+ a.request = 0;
+
+ threads = malloc(sizeof(pthread_t) * n_threads);
+
+ ck_epoch_init(&stack_epoch);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_create(threads + i, NULL, thread, NULL);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ return (0);
+}
diff --git a/regressions/ck_epoch/validate/torture.c b/regressions/ck_epoch/validate/torture.c
new file mode 100644
index 0000000..ce3c049
--- /dev/null
+++ b/regressions/ck_epoch/validate/torture.c
@@ -0,0 +1,234 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+#include <ck_epoch.h>
+#include <ck_stack.h>
+
+#include "../../common.h"
+
+static unsigned int n_rd;
+static unsigned int n_wr;
+static unsigned int n_threads;
+static unsigned int barrier;
+static unsigned int leave;
+static unsigned int first;
+
+struct {
+ unsigned int value;
+} valid CK_CC_CACHELINE = { 1 };
+
+struct {
+ unsigned int value;
+} invalid CK_CC_CACHELINE;
+
+#ifndef PAIRS_S
+#define PAIRS_S 10000
+#endif
+
+#ifndef CK_EPOCH_T_DEPTH
+#define CK_EPOCH_T_DEPTH 8
+#endif
+
+static ck_epoch_t epoch;
+static struct affinity a;
+
+static void
+test(struct ck_epoch_record *record)
+{
+ unsigned int j[3];
+ unsigned int b, c;
+ const unsigned int r = 100;
+ size_t i;
+
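+	/*
+	 * Inside a protected section a reader must never observe
+	 * invalid.value > valid.value: the writer only bumps invalid.value
+	 * after a grace period has elapsed for the matching valid.value store.
+	 */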
+ for (i = 0; i < 8; i++) {
+ ck_epoch_begin(record, NULL);
+ c = ck_pr_load_uint(&invalid.value);
+ ck_pr_fence_load();
+ b = ck_pr_load_uint(&valid.value);
+ ck_test(c > b, "Invalid value: %u > %u\n", c, b);
+ ck_epoch_end(record, NULL);
+ }
+
+ ck_epoch_begin(record, NULL);
+
+ /* This implies no early load of epoch occurs. */
+ j[0] = record->epoch;
+
+ /* We should observe up to one epoch migration. */
+ do {
+ ck_pr_fence_load();
+ j[1] = ck_pr_load_uint(&epoch.epoch);
+
+ if (ck_pr_load_uint(&leave) == 1) {
+ ck_epoch_end(record, NULL);
+ return;
+ }
+ } while (j[1] == j[0]);
+
+	/* No more epoch migrations should occur. */
+ for (i = 0; i < r; i++) {
+ ck_pr_fence_strict_load();
+ j[2] = ck_pr_load_uint(&epoch.epoch);
+
+ ck_test(j[2] != j[1], "Inconsistency detected: %u %u %u\n",
+ j[0], j[1], j[2]);
+ }
+
+ ck_epoch_end(record, NULL);
+ return;
+}
+
+static void *
+read_thread(void *unused CK_CC_UNUSED)
+{
+ ck_epoch_record_t *record;
+
+ record = malloc(sizeof *record);
+ assert(record != NULL);
+ ck_epoch_register(&epoch, record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ do {
+ test(record);
+ test(record);
+ test(record);
+ test(record);
+ } while (ck_pr_load_uint(&leave) == 0);
+
+ ck_pr_dec_uint(&n_rd);
+
+ return NULL;
+}
+
+static void *
+write_thread(void *unused CK_CC_UNUSED)
+{
+ ck_epoch_record_t *record;
+ unsigned long iterations = 0;
+ bool c = ck_pr_faa_uint(&first, 1);
+
+ record = malloc(sizeof *record);
+ assert(record != NULL);
+ ck_epoch_register(&epoch, record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ do {
+		/*
+		 * A thread should never observe invalid.value > valid.value
+		 * inside a protected section; only
+		 * invalid.value <= valid.value is valid.
+		 */
+ if (!c) ck_pr_store_uint(&valid.value, 1);
+ ck_epoch_synchronize(record);
+ if (!c) ck_pr_store_uint(&invalid.value, 1);
+
+ ck_pr_fence_store();
+ if (!c) ck_pr_store_uint(&valid.value, 2);
+ ck_epoch_synchronize(record);
+ if (!c) ck_pr_store_uint(&invalid.value, 2);
+
+ ck_pr_fence_store();
+ if (!c) ck_pr_store_uint(&valid.value, 3);
+ ck_epoch_synchronize(record);
+ if (!c) ck_pr_store_uint(&invalid.value, 3);
+
+ ck_pr_fence_store();
+ if (!c) ck_pr_store_uint(&valid.value, 4);
+ ck_epoch_synchronize(record);
+ if (!c) ck_pr_store_uint(&invalid.value, 4);
+
+ ck_epoch_synchronize(record);
+ if (!c) ck_pr_store_uint(&invalid.value, 0);
+ ck_epoch_synchronize(record);
+
+ iterations += 4;
+ } while (ck_pr_load_uint(&leave) == 0 &&
+ ck_pr_load_uint(&n_rd) > 0);
+
+ fprintf(stderr, "%lu iterations\n", iterations);
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int i;
+ pthread_t *threads;
+
+ if (argc != 4) {
+		ck_error("Usage: torture <#readers> <#writers> <affinity delta>\n");
+ }
+
+ n_rd = atoi(argv[1]);
+ n_wr = atoi(argv[2]);
+ n_threads = n_wr + n_rd;
+
+ a.delta = atoi(argv[3]);
+ a.request = 0;
+
+ threads = malloc(sizeof(pthread_t) * n_threads);
+ ck_epoch_init(&epoch);
+
+ for (i = 0; i < n_rd; i++)
+ pthread_create(threads + i, NULL, read_thread, NULL);
+
+ do {
+ pthread_create(threads + i, NULL, write_thread, NULL);
+ } while (++i < n_wr + n_rd);
+
+ common_sleep(30);
+ ck_pr_store_uint(&leave, 1);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ return 0;
+}
diff --git a/regressions/ck_fifo/benchmark/Makefile b/regressions/ck_fifo/benchmark/Makefile
new file mode 100644
index 0000000..6e2df2a
--- /dev/null
+++ b/regressions/ck_fifo/benchmark/Makefile
@@ -0,0 +1,14 @@
+.PHONY: clean distribution
+
+OBJECTS=latency
+
+all: $(OBJECTS)
+
+latency: latency.c
+ $(CC) $(CFLAGS) -o latency latency.c
+
+clean:
+ rm -rf *~ *.o *.dSYM *.exe $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_fifo/benchmark/latency.c b/regressions/ck_fifo/benchmark/latency.c
new file mode 100644
index 0000000..267452f
--- /dev/null
+++ b/regressions/ck_fifo/benchmark/latency.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_fifo.h>
+#include <ck_spinlock.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#ifndef ENTRIES
+#define ENTRIES 4096
+#endif
+
+#ifndef STEPS
+#define STEPS 40000
+#endif
+
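+/*
+ * Single-threaded latency benchmark: each figure is the average number of
+ * rdtsc ticks per operation over STEPS rounds of ENTRIES operations,
+ * comparing spinlock-protected SPSC queue operations against the native
+ * ck_fifo_spsc and ck_fifo_mpmc fast paths.
+ */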
+int
+main(void)
+{
+ ck_spinlock_fas_t mutex = CK_SPINLOCK_FAS_INITIALIZER;
+ void *r;
+ uint64_t s, e, a;
+ unsigned int i;
+ unsigned int j;
+
+#if defined(CK_F_FIFO_SPSC)
+ ck_fifo_spsc_t spsc_fifo;
+ ck_fifo_spsc_entry_t spsc_entry[ENTRIES];
+ ck_fifo_spsc_entry_t spsc_stub;
+#endif
+
+#if defined(CK_F_FIFO_MPMC)
+ ck_fifo_mpmc_t mpmc_fifo;
+ ck_fifo_mpmc_entry_t mpmc_entry[ENTRIES];
+ ck_fifo_mpmc_entry_t mpmc_stub;
+ ck_fifo_mpmc_entry_t *garbage;
+#endif
+
+#ifdef CK_F_FIFO_SPSC
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_fifo_spsc_init(&spsc_fifo, &spsc_stub);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++) {
+ ck_spinlock_fas_lock(&mutex);
+ ck_fifo_spsc_enqueue(&spsc_fifo, spsc_entry + j, NULL);
+ ck_spinlock_fas_unlock(&mutex);
+ }
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf(" spinlock_enqueue: %16" PRIu64 "\n", a / STEPS / (sizeof(spsc_entry) / sizeof(*spsc_entry)));
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_fifo_spsc_init(&spsc_fifo, &spsc_stub);
+ for (j = 0; j < ENTRIES; j++)
+ ck_fifo_spsc_enqueue(&spsc_fifo, spsc_entry + j, NULL);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++) {
+ ck_spinlock_fas_lock(&mutex);
+ ck_fifo_spsc_dequeue(&spsc_fifo, &r);
+ ck_spinlock_fas_unlock(&mutex);
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ printf(" spinlock_dequeue: %16" PRIu64 "\n", a / STEPS / (sizeof(spsc_entry) / sizeof(*spsc_entry)));
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_fifo_spsc_init(&spsc_fifo, &spsc_stub);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_fifo_spsc_enqueue(&spsc_fifo, spsc_entry + j, NULL);
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf("ck_fifo_spsc_enqueue: %16" PRIu64 "\n", a / STEPS / (sizeof(spsc_entry) / sizeof(*spsc_entry)));
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_fifo_spsc_init(&spsc_fifo, &spsc_stub);
+ for (j = 0; j < ENTRIES; j++)
+ ck_fifo_spsc_enqueue(&spsc_fifo, spsc_entry + j, NULL);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_fifo_spsc_dequeue(&spsc_fifo, &r);
+ e = rdtsc();
+ a += e - s;
+ }
+ printf("ck_fifo_spsc_dequeue: %16" PRIu64 "\n", a / STEPS / (sizeof(spsc_entry) / sizeof(*spsc_entry)));
+#endif
+
+#ifdef CK_F_FIFO_MPMC
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_fifo_mpmc_init(&mpmc_fifo, &mpmc_stub);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_fifo_mpmc_enqueue(&mpmc_fifo, mpmc_entry + j, NULL);
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf("ck_fifo_mpmc_enqueue: %16" PRIu64 "\n", a / STEPS / (sizeof(mpmc_entry) / sizeof(*mpmc_entry)));
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_fifo_mpmc_init(&mpmc_fifo, &mpmc_stub);
+ for (j = 0; j < ENTRIES; j++)
+ ck_fifo_mpmc_enqueue(&mpmc_fifo, mpmc_entry + j, NULL);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_fifo_mpmc_dequeue(&mpmc_fifo, &r, &garbage);
+ e = rdtsc();
+ a += e - s;
+ }
+ printf("ck_fifo_mpmc_dequeue: %16" PRIu64 "\n", a / STEPS / (sizeof(mpmc_entry) / sizeof(*mpmc_entry)));
+#endif
+
+ return 0;
+}
diff --git a/regressions/ck_fifo/validate/Makefile b/regressions/ck_fifo/validate/Makefile
new file mode 100644
index 0000000..6bfc696
--- /dev/null
+++ b/regressions/ck_fifo/validate/Makefile
@@ -0,0 +1,29 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_fifo_spsc ck_fifo_mpmc ck_fifo_spsc_iterator ck_fifo_mpmc_iterator
+
+all: $(OBJECTS)
+
+check: all
+ ./ck_fifo_spsc $(CORES) 1 64000
+ ./ck_fifo_mpmc $(CORES) 1 16000
+ ./ck_fifo_spsc_iterator
+ ./ck_fifo_mpmc_iterator
+
+ck_fifo_spsc: ck_fifo_spsc.c ../../../include/ck_fifo.h
+ $(CC) $(CFLAGS) -o ck_fifo_spsc ck_fifo_spsc.c
+
+ck_fifo_mpmc: ck_fifo_mpmc.c ../../../include/ck_fifo.h
+ $(CC) $(CFLAGS) -o ck_fifo_mpmc ck_fifo_mpmc.c
+
+ck_fifo_spsc_iterator: ck_fifo_spsc_iterator.c ../../../include/ck_fifo.h
+ $(CC) $(CFLAGS) -o ck_fifo_spsc_iterator ck_fifo_spsc_iterator.c
+
+ck_fifo_mpmc_iterator: ck_fifo_mpmc_iterator.c ../../../include/ck_fifo.h
+ $(CC) $(CFLAGS) -o ck_fifo_mpmc_iterator ck_fifo_mpmc_iterator.c
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_fifo/validate/ck_fifo_mpmc.c b/regressions/ck_fifo/validate/ck_fifo_mpmc.c
new file mode 100644
index 0000000..89eb2f4
--- /dev/null
+++ b/regressions/ck_fifo/validate/ck_fifo_mpmc.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <ck_fifo.h>
+
+#include "../../common.h"
+
+#ifdef CK_F_FIFO_MPMC
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+};
+
+struct entry {
+ int tid;
+ int value;
+};
+
+static int nthr;
+
+#ifdef CK_F_FIFO_MPMC
+static ck_fifo_mpmc_t fifo CK_CC_CACHELINE;
+#endif
+
+static struct affinity a;
+static int size;
+static unsigned int barrier;
+
+static void *
+test(void *c)
+{
+#ifdef CK_F_FIFO_MPMC
+ struct context *context = c;
+ struct entry *entry;
+ ck_fifo_mpmc_entry_t *fifo_entry, *garbage;
+ int i, j;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < (unsigned int)nthr);
+
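+	/*
+	 * Phase one exercises the blocking enqueue/dequeue pair; phase two
+	 * below exercises the tryenqueue/trydequeue variants.
+	 */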
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ fifo_entry = malloc(sizeof(ck_fifo_mpmc_entry_t));
+ entry = malloc(sizeof(struct entry));
+ entry->tid = context->tid;
+ ck_fifo_mpmc_enqueue(&fifo, fifo_entry, entry);
+ if (ck_fifo_mpmc_dequeue(&fifo, &entry, &garbage) == false) {
+ ck_error("ERROR [%u] Queue should never be empty.\n", context->tid);
+ }
+
+ if (entry->tid < 0 || entry->tid >= nthr) {
+ ck_error("ERROR [%u] Incorrect value in entry.\n", entry->tid);
+ }
+ }
+ }
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ fifo_entry = malloc(sizeof(ck_fifo_mpmc_entry_t));
+ entry = malloc(sizeof(struct entry));
+ entry->tid = context->tid;
+ while (ck_fifo_mpmc_tryenqueue(&fifo, fifo_entry, entry) == false)
+ ck_pr_stall();
+
+ while (ck_fifo_mpmc_trydequeue(&fifo, &entry, &garbage) == false)
+ ck_pr_stall();
+
+ if (entry->tid < 0 || entry->tid >= nthr) {
+ ck_error("ERROR [%u] Incorrect value in entry when using try interface.\n", entry->tid);
+ }
+ }
+ }
+#endif
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ struct context *context;
+ ck_fifo_mpmc_entry_t *garbage;
+ pthread_t *thread;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size > 0);
+
+ context = malloc(sizeof(*context) * nthr);
+ assert(context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
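+	/*
+	 * Initialize, immediately deinitialize to verify that the stub node
+	 * is handed back, then re-initialize the queue for the actual test.
+	 */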
+ ck_fifo_mpmc_init(&fifo, malloc(sizeof(ck_fifo_mpmc_entry_t)));
+ ck_fifo_mpmc_deinit(&fifo, &garbage);
+ if (garbage == NULL)
+ ck_error("ERROR: Expected non-NULL stub node on deinit.\n");
+ free(garbage);
+ ck_fifo_mpmc_init(&fifo, malloc(sizeof(ck_fifo_mpmc_entry_t)));
+
+ for (i = 0; i < nthr; i++) {
+ context[i].tid = i;
+ r = pthread_create(thread + i, NULL, test, context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ return (0);
+}
+#else
+int
+main(void)
+{
+ fprintf(stderr, "Unsupported.\n");
+ return 0;
+}
+#endif
+
diff --git a/regressions/ck_fifo/validate/ck_fifo_mpmc_iterator.c b/regressions/ck_fifo/validate/ck_fifo_mpmc_iterator.c
new file mode 100644
index 0000000..5ac8175
--- /dev/null
+++ b/regressions/ck_fifo/validate/ck_fifo_mpmc_iterator.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_fifo.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#ifdef CK_F_FIFO_MPMC
+struct example {
+ int x;
+};
+
+static ck_fifo_mpmc_t mpmc_fifo;
+
+int
+main(void)
+{
+ int i, length = 3;
+ struct example *examples;
+ ck_fifo_mpmc_entry_t *stub, *entries, *entry, *next;
+
+ stub = malloc(sizeof(ck_fifo_mpmc_entry_t));
+ if (stub == NULL)
+ exit(EXIT_FAILURE);
+
+ ck_fifo_mpmc_init(&mpmc_fifo, stub);
+
+ entries = malloc(sizeof(ck_fifo_mpmc_entry_t) * length);
+ if (entries == NULL)
+ exit(EXIT_FAILURE);
+
+ examples = malloc(sizeof(struct example) * length);
+ /* Need these for this unit test. */
+ if (examples == NULL)
+ exit(EXIT_FAILURE);
+
+ for (i = 0; i < length; ++i) {
+ examples[i].x = i;
+ ck_fifo_mpmc_enqueue(&mpmc_fifo, entries + i, examples + i);
+ }
+
+ puts("Testing CK_FIFO_MPMC_FOREACH.");
+ CK_FIFO_MPMC_FOREACH(&mpmc_fifo, entry) {
+ printf("Next value in fifo: %d\n", ((struct example *)entry->value)->x);
+ }
+
+ puts("Testing CK_FIFO_MPMC_FOREACH_SAFE.");
+ CK_FIFO_MPMC_FOREACH_SAFE(&mpmc_fifo, entry, next) {
+ if (entry->next.pointer != next)
+ exit(EXIT_FAILURE);
+ printf("Next value in fifo: %d\n", ((struct example *)entry->value)->x);
+ }
+
+ free(examples);
+ free(entries);
+ free(stub);
+
+ return (0);
+}
+#else
+int
+main(void)
+{
+ return (0);
+}
+#endif
diff --git a/regressions/ck_fifo/validate/ck_fifo_spsc.c b/regressions/ck_fifo/validate/ck_fifo_spsc.c
new file mode 100644
index 0000000..3d6c38c
--- /dev/null
+++ b/regressions/ck_fifo/validate/ck_fifo_spsc.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+
+#include <ck_fifo.h>
+
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+};
+
+struct entry {
+ int tid;
+ int value;
+};
+
+static int nthr;
+static ck_fifo_spsc_t *fifo;
+static struct affinity a;
+static int size;
+static unsigned int barrier;
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ ck_fifo_spsc_entry_t *fifo_entry;
+ int i, j;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+#ifdef DEBUG
+	fprintf(stderr, "%p %u: %u -> %u\n", (void *)(fifo + context->tid), context->tid, context->previous, context->tid);
+#endif
+
+ if (context->tid == 0) {
+ struct entry *entries;
+
+ entries = malloc(sizeof(struct entry) * size);
+ assert(entries != NULL);
+
+ for (i = 0; i < size; i++) {
+ entries[i].value = i;
+ entries[i].tid = 0;
+
+ fifo_entry = malloc(sizeof(ck_fifo_spsc_entry_t));
+ ck_fifo_spsc_enqueue(fifo + context->tid, fifo_entry, entries + i);
+ }
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < (unsigned int)nthr);
+
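+	/*
+	 * Threads form a ring: each thread dequeues from its predecessor's
+	 * queue, validates the entry, and re-enqueues it onto its own queue,
+	 * recycling SPSC nodes where possible.
+	 */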
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ while (ck_fifo_spsc_dequeue(fifo + context->previous, &entry) == false);
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("T [%u:%p] %u != %u\n",
+ context->tid, (void *)entry, entry->tid, context->previous);
+ }
+
+ if (entry->value != j) {
+ ck_error("V [%u:%p] %u != %u\n",
+ context->tid, (void *)entry, entry->value, j);
+ }
+
+ entry->tid = context->tid;
+ fifo_entry = ck_fifo_spsc_recycle(fifo + context->tid);
+ if (fifo_entry == NULL)
+ fifo_entry = malloc(sizeof(ck_fifo_spsc_entry_t));
+
+ ck_fifo_spsc_enqueue(fifo + context->tid, fifo_entry, entry);
+ }
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ struct context *context;
+ pthread_t *thread;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size > 0);
+
+ fifo = malloc(sizeof(ck_fifo_spsc_t) * nthr);
+ assert(fifo);
+
+ context = malloc(sizeof(*context) * nthr);
+ assert(context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
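+	/*
+	 * Wire the threads into a ring (previous/next) and verify that
+	 * ck_fifo_spsc_deinit() returns the stub node before re-initializing
+	 * each queue for the test.
+	 */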
+ for (i = 0; i < nthr; i++) {
+ ck_fifo_spsc_entry_t *garbage;
+
+ context[i].tid = i;
+ if (i == 0) {
+ context[i].previous = nthr - 1;
+ context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ context[i].next = 0;
+ context[i].previous = i - 1;
+ } else {
+ context[i].next = i + 1;
+ context[i].previous = i - 1;
+ }
+
+ ck_fifo_spsc_init(fifo + i, malloc(sizeof(ck_fifo_spsc_entry_t)));
+ ck_fifo_spsc_deinit(fifo + i, &garbage);
+ if (garbage == NULL)
+ ck_error("ERROR: Expected non-NULL stub node on deinit.\n");
+
+ free(garbage);
+ ck_fifo_spsc_init(fifo + i, malloc(sizeof(ck_fifo_spsc_entry_t)));
+ r = pthread_create(thread + i, NULL, test, context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ return (0);
+}
+
diff --git a/regressions/ck_fifo/validate/ck_fifo_spsc_iterator.c b/regressions/ck_fifo/validate/ck_fifo_spsc_iterator.c
new file mode 100644
index 0000000..97804de
--- /dev/null
+++ b/regressions/ck_fifo/validate/ck_fifo_spsc_iterator.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_fifo.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+struct example {
+ int x;
+};
+
+static ck_fifo_spsc_t spsc_fifo;
+
+int
+main(void)
+{
+ int i, length = 3;
+ struct example *examples;
+ ck_fifo_spsc_entry_t *stub, *entries, *entry, *next;
+
+ stub = malloc(sizeof(ck_fifo_spsc_entry_t));
+ if (stub == NULL)
+ exit(EXIT_FAILURE);
+
+ ck_fifo_spsc_init(&spsc_fifo, stub);
+
+ entries = malloc(sizeof(ck_fifo_spsc_entry_t) * length);
+ if (entries == NULL)
+ exit(EXIT_FAILURE);
+
+ examples = malloc(sizeof(struct example) * length);
+ /* Need these for this unit test. */
+ if (examples == NULL)
+ exit(EXIT_FAILURE);
+
+ for (i = 0; i < length; ++i) {
+ examples[i].x = i;
+ ck_fifo_spsc_enqueue(&spsc_fifo, entries + i, examples + i);
+ }
+
+ puts("Testing CK_FIFO_SPSC_FOREACH.");
+ CK_FIFO_SPSC_FOREACH(&spsc_fifo, entry) {
+ printf("Next value in fifo: %d\n", ((struct example *)entry->value)->x);
+ }
+
+ puts("Testing CK_FIFO_SPSC_FOREACH_SAFE.");
+ CK_FIFO_SPSC_FOREACH_SAFE(&spsc_fifo, entry, next) {
+ if (entry->next != next)
+ exit(EXIT_FAILURE);
+ printf("Next value in fifo: %d\n", ((struct example *)entry->value)->x);
+ }
+
+ free(examples);
+ free(entries);
+ free(stub);
+
+ return (0);
+}
+
diff --git a/regressions/ck_hp/benchmark/Makefile b/regressions/ck_hp/benchmark/Makefile
new file mode 100644
index 0000000..2025ea9
--- /dev/null
+++ b/regressions/ck_hp/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: clean distribution
+
+OBJECTS=fifo_latency stack_latency
+
+all: $(OBJECTS)
+
+fifo_latency: fifo_latency.c
+ $(CC) $(CFLAGS) -o fifo_latency ../../../src/ck_hp.c fifo_latency.c
+
+stack_latency: stack_latency.c
+ $(CC) $(CFLAGS) -o stack_latency ../../../src/ck_hp.c stack_latency.c
+
+clean:
+ rm -rf *~ *.o *.dSYM *.exe $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_hp/benchmark/fifo_latency.c b/regressions/ck_hp/benchmark/fifo_latency.c
new file mode 100644
index 0000000..77ee2a7
--- /dev/null
+++ b/regressions/ck_hp/benchmark/fifo_latency.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_hp.h>
+#include <ck_hp_fifo.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../../common.h"
+
+#ifndef ENTRIES
+#define ENTRIES 4096
+#endif
+
+#ifndef STEPS
+#define STEPS 40000
+#endif
+
+static ck_hp_fifo_t fifo;
+static ck_hp_t fifo_hp;
+
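+/*
+ * Single-threaded latency benchmark for the hazard-pointer FIFO: averages
+ * rdtsc ticks per enqueue/dequeue over STEPS rounds of ENTRIES operations.
+ */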
+int
+main(void)
+{
+ void *r;
+ uint64_t s, e, a;
+ unsigned int i;
+ unsigned int j;
+ ck_hp_fifo_entry_t hp_entry[ENTRIES];
+ ck_hp_fifo_entry_t hp_stub;
+ ck_hp_record_t record;
+
+ ck_hp_init(&fifo_hp, CK_HP_FIFO_SLOTS_COUNT, 1000000, NULL);
+
+ r = malloc(CK_HP_FIFO_SLOTS_SIZE);
+ if (r == NULL) {
+ ck_error("ERROR: Failed to allocate slots.\n");
+ }
+ ck_hp_register(&fifo_hp, &record, r);
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_hp_fifo_init(&fifo, &hp_stub);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_hp_fifo_enqueue_mpmc(&record, &fifo, hp_entry + j, NULL);
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf("ck_hp_fifo_enqueue_mpmc: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_hp_fifo_init(&fifo, &hp_stub);
+ for (j = 0; j < ENTRIES; j++)
+ ck_hp_fifo_enqueue_mpmc(&record, &fifo, hp_entry + j, NULL);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_hp_fifo_dequeue_mpmc(&record, &fifo, &r);
+ e = rdtsc();
+ a += e - s;
+ }
+ printf("ck_hp_fifo_dequeue_mpmc: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+
+ return 0;
+}
diff --git a/regressions/ck_hp/benchmark/stack_latency.c b/regressions/ck_hp/benchmark/stack_latency.c
new file mode 100644
index 0000000..c336de6
--- /dev/null
+++ b/regressions/ck_hp/benchmark/stack_latency.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_hp.h>
+#include <ck_hp_stack.h>
+#include <ck_stack.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../../common.h"
+
+#ifndef ENTRIES
+#define ENTRIES 4096
+#endif
+
+#ifndef STEPS
+#define STEPS 40000
+#endif
+
+static ck_stack_t stack;
+static ck_hp_t stack_hp;
+
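+/*
+ * Single-threaded latency benchmark for the hazard-pointer stack, measured
+ * the same way as the FIFO benchmark above.
+ */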
+int
+main(void)
+{
+ ck_hp_record_t record;
+ ck_stack_entry_t entry[ENTRIES];
+ uint64_t s, e, a;
+ unsigned int i;
+ unsigned int j;
+ void *r;
+
+ ck_hp_init(&stack_hp, CK_HP_STACK_SLOTS_COUNT, 1000000, NULL);
+ r = malloc(CK_HP_STACK_SLOTS_SIZE);
+ if (r == NULL) {
+ ck_error("ERROR: Failed to allocate slots.\n");
+ }
+ ck_hp_register(&stack_hp, &record, (void *)r);
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_hp_stack_push_mpmc(&stack, entry + j);
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf("ck_hp_stack_push_mpmc: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ for (j = 0; j < ENTRIES; j++)
+ ck_hp_stack_push_mpmc(&stack, entry + j);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++) {
+ r = ck_hp_stack_pop_mpmc(&record, &stack);
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ printf(" ck_hp_stack_pop_mpmc: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+
+ return 0;
+}
diff --git a/regressions/ck_hp/validate/Makefile b/regressions/ck_hp/validate/Makefile
new file mode 100644
index 0000000..476b34f
--- /dev/null
+++ b/regressions/ck_hp/validate/Makefile
@@ -0,0 +1,33 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_hp_stack nbds_haz_test serial ck_hp_fifo ck_hp_fifo_donner
+
+all: $(OBJECTS)
+
+check: all
+ ./serial
+ ./ck_hp_stack $(CORES) 100 1
+ ./ck_hp_fifo $(CORES) 1 16384 100
+ ./nbds_haz_test $(CORES) 15 1
+ ./ck_hp_fifo_donner $(CORES) 16384
+
+ck_hp_stack: ../../../src/ck_hp.c ck_hp_stack.c ../../../include/ck_hp_stack.h
+ $(CC) $(CFLAGS) ../../../src/ck_hp.c -o ck_hp_stack ck_hp_stack.c
+
+ck_hp_fifo: ../../../src/ck_hp.c ck_hp_fifo.c ../../../include/ck_hp_fifo.h
+ $(CC) $(CFLAGS) ../../../src/ck_hp.c -o ck_hp_fifo ck_hp_fifo.c
+
+ck_hp_fifo_donner: ../../../src/ck_hp.c ck_hp_fifo_donner.c ../../../include/ck_hp_fifo.h
+ $(CC) $(CFLAGS) ../../../src/ck_hp.c -o ck_hp_fifo_donner ck_hp_fifo_donner.c
+
+serial: ../../../src/ck_hp.c serial.c ../../../include/ck_hp_stack.h
+ $(CC) $(CFLAGS) ../../../src/ck_hp.c -o serial serial.c
+
+nbds_haz_test: ../../../src/ck_hp.c nbds_haz_test.c
+ $(CC) $(CFLAGS) ../../../src/ck_hp.c -o nbds_haz_test nbds_haz_test.c
+
+clean:
+ rm -rf *~ *.o *.dSYM *.exe $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_hp/validate/ck_hp_fifo.c b/regressions/ck_hp/validate/ck_hp_fifo.c
new file mode 100644
index 0000000..4454283
--- /dev/null
+++ b/regressions/ck_hp/validate/ck_hp_fifo.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <ck_hp_fifo.h>
+
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+};
+
+struct entry {
+ int tid;
+ int value;
+};
+
+static ck_hp_fifo_t fifo;
+static ck_hp_t fifo_hp;
+static int nthr;
+
+static struct affinity a;
+static int size;
+static unsigned int barrier;
+static unsigned int e_barrier;
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ ck_hp_fifo_entry_t *fifo_entry;
+ ck_hp_record_t record;
+ int i, j;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_hp_register(&fifo_hp, &record, malloc(sizeof(void *) * 2));
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < (unsigned int)nthr);
+
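+	/*
+	 * Phase one exercises the blocking MPMC enqueue/dequeue pair with
+	 * hazard-pointer protected reclamation via ck_hp_free(); phase two
+	 * below exercises the tryenqueue/trydequeue variants.
+	 */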
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ fifo_entry = malloc(sizeof(ck_hp_fifo_entry_t));
+ entry = malloc(sizeof(struct entry));
+ entry->tid = context->tid;
+ ck_hp_fifo_enqueue_mpmc(&record, &fifo, fifo_entry, entry);
+
+ ck_pr_barrier();
+
+ fifo_entry = ck_hp_fifo_dequeue_mpmc(&record, &fifo, &entry);
+ if (fifo_entry == NULL) {
+ ck_error("ERROR [%u] Queue should never be empty.\n", context->tid);
+ }
+
+ ck_pr_barrier();
+
+ if (entry->tid < 0 || entry->tid >= nthr) {
+ ck_error("ERROR [%u] Incorrect value in entry.\n", entry->tid);
+ }
+
+ ck_hp_free(&record, &fifo_entry->hazard, fifo_entry, fifo_entry);
+ }
+ }
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ fifo_entry = malloc(sizeof(ck_hp_fifo_entry_t));
+ entry = malloc(sizeof(struct entry));
+ entry->tid = context->tid;
+
+ while (ck_hp_fifo_tryenqueue_mpmc(&record, &fifo, fifo_entry, entry) == false)
+ ck_pr_stall();
+
+ while (fifo_entry = ck_hp_fifo_trydequeue_mpmc(&record, &fifo, &entry), fifo_entry == NULL)
+ ck_pr_stall();
+
+ if (entry->tid < 0 || entry->tid >= nthr) {
+ ck_error("ERROR [%u] Incorrect value in entry.\n", entry->tid);
+ }
+
+ ck_hp_free(&record, &fifo_entry->hazard, fifo_entry, fifo_entry);
+ }
+ }
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < (unsigned int)nthr);
+
+ return (NULL);
+}
+
+static void
+destructor(void *p)
+{
+
+ free(p);
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ struct context *context;
+ pthread_t *thread;
+ int threshold;
+
+ if (argc != 5) {
+ ck_error("Usage: validate <threads> <affinity delta> <size> <threshold>\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size > 0);
+
+ threshold = atoi(argv[4]);
+ assert(threshold > 0);
+
+ context = malloc(sizeof(*context) * nthr);
+ assert(context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
+ ck_hp_init(&fifo_hp, 2, threshold, destructor);
+ ck_hp_fifo_init(&fifo, malloc(sizeof(ck_hp_fifo_entry_t)));
+
+ ck_hp_fifo_entry_t *entry;
+ ck_hp_fifo_deinit(&fifo, &entry);
+
+ if (entry == NULL)
+ ck_error("ERROR: Expected non-NULL stub node.\n");
+
+ free(entry);
+ ck_hp_fifo_init(&fifo, malloc(sizeof(ck_hp_fifo_entry_t)));
+
+ for (i = 0; i < nthr; i++) {
+ context[i].tid = i;
+ r = pthread_create(thread + i, NULL, test, context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ return (0);
+}
+
diff --git a/regressions/ck_hp/validate/ck_hp_fifo_donner.c b/regressions/ck_hp/validate/ck_hp_fifo_donner.c
new file mode 100644
index 0000000..1b52a37
--- /dev/null
+++ b/regressions/ck_hp/validate/ck_hp_fifo_donner.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2012 Hendrik Donner
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_hp.h>
+#include <ck_hp_fifo.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <sys/time.h>
+#include <assert.h>
+#include "../../common.h"
+
+/* FIFO queue */
+static ck_hp_fifo_t fifo;
+
+/* Hazard pointer global */
+static ck_hp_t fifo_hp;
+
+/* thread local element count */
+static unsigned long *count;
+
+static unsigned long thread_count;
+
+static unsigned int start_barrier;
+static unsigned int end_barrier;
+
+/* destructor for FIFO queue */
+static void
+destructor(void *p)
+{
+
+ free(p);
+ return;
+}
+
+/* entry struct for FIFO queue entries */
+struct entry {
+ unsigned long value;
+};
+
+/* function for thread */
+static void *
+queue_50_50(void *elements)
+{
+ struct entry *entry;
+ ck_hp_fifo_entry_t *fifo_entry;
+ ck_hp_record_t *record;
+ void *slots;
+ unsigned long j, element_count = *(unsigned long *)elements;
+ unsigned int seed;
+
+ record = malloc(sizeof(ck_hp_record_t));
+ assert(record);
+
+ slots = malloc(CK_HP_FIFO_SLOTS_SIZE);
+ assert(slots);
+
+	/* Fixed seed; the per-thread seed (pthread_self()) is commented out below. */
+ seed = 1337; /*(unsigned int) pthread_self(); */
+
+ /*
+ * This subscribes the thread to the fifo_hp state using the thread-owned
+ * record.
+ * FIFO queue needs 2 hazard pointers.
+ */
+ ck_hp_register(&fifo_hp, record, slots);
+
+ /* start barrier */
+ ck_pr_inc_uint(&start_barrier);
+ while (ck_pr_load_uint(&start_barrier) < thread_count + 1)
+ ck_pr_stall();
+
+ /* 50/50 enqueue-dequeue */
+	for (j = 0; j < element_count; j++) {
+		/* rand_r with thread-local state should be thread safe. */
+		if (50 < (1 + (int)(100.0 * common_rand_r(&seed) / (RAND_MAX + 1.0)))) {
+ /* This is the container for the enqueued data. */
+ fifo_entry = malloc(sizeof(ck_hp_fifo_entry_t));
+
+ if (fifo_entry == NULL) {
+ exit(EXIT_FAILURE);
+ }
+
+ /* This is the data. */
+ entry = malloc(sizeof(struct entry));
+ if (entry != NULL) {
+ entry->value = j;
+ }
+
+ /*
+ * Enqueue the value of the pointer entry into FIFO queue using the
+ * container fifo_entry.
+ */
+ ck_hp_fifo_enqueue_mpmc(record, &fifo, fifo_entry, entry);
+ } else {
+ /*
+ * ck_hp_fifo_dequeue_mpmc will return a pointer to the first unused node and store
+ * the value of the first pointer in the FIFO queue in entry.
+ */
+ fifo_entry = ck_hp_fifo_dequeue_mpmc(record, &fifo, &entry);
+ if (fifo_entry != NULL) {
+ /*
+ * Safely reclaim memory associated with fifo_entry.
+ * This inserts garbage into a local list. Once the list (plist) reaches
+ * a length of 100, ck_hp_free will attempt to reclaim all references
+ * to objects on the list.
+ */
+ ck_hp_free(record, &fifo_entry->hazard, fifo_entry, fifo_entry);
+ }
+ }
+ }
+
+ /* end barrier */
+ ck_pr_inc_uint(&end_barrier);
+ while (ck_pr_load_uint(&end_barrier) < thread_count + 1)
+ ck_pr_stall();
+
+ return NULL;
+}
+
+int
+main(int argc, char** argv)
+{
+ ck_hp_fifo_entry_t *stub;
+ unsigned long element_count, i;
+ pthread_t *thr;
+
+ if (argc != 3) {
+ ck_error("Usage: cktest <thread_count> <element_count>\n");
+ }
+
+ /* Get element count from argument */
+ element_count = atoi(argv[2]);
+
+	/* Get thread count from argument */
+ thread_count = atoi(argv[1]);
+
+ /* pthread handles */
+ thr = malloc(sizeof(pthread_t) * thread_count);
+
+ /* array for local operation count */
+	count = malloc(sizeof(unsigned long) * thread_count);
+
+ /*
+ * Initialize global hazard pointer safe memory reclamation to execute free()
+ * when a fifo_entry is safe to be deleted.
+ * Hazard pointer scan routine will be called when the thread local intern plist's
+ * size exceed 100 entries.
+ */
+
+ /* FIFO queue needs 2 hazard pointers */
+ ck_hp_init(&fifo_hp, CK_HP_FIFO_SLOTS_COUNT, 100, destructor);
+
+ /* The FIFO requires one stub entry on initialization. */
+ stub = malloc(sizeof(ck_hp_fifo_entry_t));
+
+ /* Behavior is undefined if stub is NULL. */
+ if (stub == NULL) {
+ exit(EXIT_FAILURE);
+ }
+
+ /* This is called once to initialize the fifo. */
+ ck_hp_fifo_init(&fifo, stub);
+
+ /* Create threads */
+ for (i = 0; i < thread_count; i++) {
+ count[i] = (element_count + i) / thread_count;
+ if (pthread_create(&thr[i], NULL, queue_50_50, (void *) &count[i]) != 0) {
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ /* start barrier */
+ ck_pr_inc_uint(&start_barrier);
+ while (ck_pr_load_uint(&start_barrier) < thread_count + 1);
+
+ /* end barrier */
+ ck_pr_inc_uint(&end_barrier);
+ while (ck_pr_load_uint(&end_barrier) < thread_count + 1);
+
+ /* Join threads */
+ for (i = 0; i < thread_count; i++)
+ pthread_join(thr[i], NULL);
+
+ return 0;
+}
+
diff --git a/regressions/ck_hp/validate/ck_hp_stack.c b/regressions/ck_hp/validate/ck_hp_stack.c
new file mode 100644
index 0000000..ad9b927
--- /dev/null
+++ b/regressions/ck_hp/validate/ck_hp_stack.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <ck_hp.h>
+#include <ck_stack.h>
+#include <ck_hp_stack.h>
+
+#include "../../common.h"
+
+static unsigned int threshold;
+static unsigned int n_threads;
+static unsigned int barrier;
+static unsigned int e_barrier;
+
+#ifndef PAIRS
+#define PAIRS 5000000
+#endif
+
+struct node {
+ unsigned int value;
+ ck_hp_hazard_t hazard;
+ ck_stack_entry_t stack_entry;
+};
+static ck_stack_t stack = {NULL, NULL};
+static ck_hp_t stack_hp;
+CK_STACK_CONTAINER(struct node, stack_entry, stack_container)
+static struct affinity a;
+
+static void *
+thread(void *unused CK_CC_UNUSED)
+{
+ struct node **entry, *e;
+ unsigned int i;
+ ck_hp_record_t record;
+ void **pointers;
+ ck_stack_entry_t *s;
+
+ unused = NULL;
+ pointers = malloc(sizeof(void *));
+ ck_hp_register(&stack_hp, &record, pointers);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ entry = malloc(sizeof(struct node *) * PAIRS);
+ if (entry == NULL) {
+ ck_error("Failed allocation.\n");
+ }
+
+ for (i = 0; i < PAIRS; i++) {
+ entry[i] = malloc(sizeof(struct node));
+		if (entry[i] == NULL) {
+ ck_error("Failed individual allocation\n");
+ }
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads)
+ ck_pr_stall();
+
+ for (i = 0; i < PAIRS; i++) {
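+		/* Push a node, pop one back under hazard-pointer protection, then retire it. */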
+ ck_hp_stack_push_mpmc(&stack, &entry[i]->stack_entry);
+ s = ck_hp_stack_pop_mpmc(&record, &stack);
+ e = stack_container(s);
+ ck_hp_free(&record, &e->hazard, e, s);
+ }
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < n_threads)
+ ck_pr_stall();
+
+ fprintf(stderr, "Peak: %u (%2.2f%%)\nReclamations: %" PRIu64 "\n\n",
+ record.n_peak,
+ (double)record.n_peak / PAIRS * 100,
+ record.n_reclamations);
+
+ ck_hp_clear(&record);
+ ck_hp_purge(&record);
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < (n_threads << 1));
+
+ if (record.n_pending != 0) {
+ ck_error("ERROR: %u pending, expecting none.\n",
+ record.n_pending);
+ }
+
+ return (NULL);
+}
+
+static void
+destructor(void *p)
+{
+
+ free(p);
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int i;
+ pthread_t *threads;
+
+ if (argc != 4) {
+ ck_error("Usage: stack <threads> <threshold> <delta>\n");
+ }
+
+ n_threads = atoi(argv[1]);
+ threshold = atoi(argv[2]);
+ a.delta = atoi(argv[3]);
+ a.request = 0;
+
+ threads = malloc(sizeof(pthread_t) * n_threads);
+
+ ck_hp_init(&stack_hp, 1, threshold, destructor);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_create(threads + i, NULL, thread, NULL);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ return (0);
+}
diff --git a/regressions/ck_hp/validate/nbds_haz_test.c b/regressions/ck_hp/validate/nbds_haz_test.c
new file mode 100644
index 0000000..9b85e76
--- /dev/null
+++ b/regressions/ck_hp/validate/nbds_haz_test.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * This is a unit test modeled on the corresponding test from John Dybnis's
+ * nbds package.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <ck_hp.h>
+
+#include "../../common.h"
+
+#define STACK_CONTAINER(T, M, N) CK_CC_CONTAINER(stack_entry_t, T, M, N)
+
+struct stack_entry {
+ struct stack_entry *next;
+} CK_CC_ALIGN(8);
+typedef struct stack_entry stack_entry_t;
+
+struct stack {
+ struct stack_entry *head;
+ char *generation;
+} CK_CC_PACKED CK_CC_ALIGN(16);
+typedef struct stack hp_stack_t;
+
+static unsigned int threshold;
+static unsigned int n_threads;
+static unsigned int barrier;
+static unsigned int e_barrier;
+static unsigned int global_tid;
+static unsigned int pops;
+static unsigned int pushs;
+
+#ifndef PAIRS
+#define PAIRS 1000000
+#endif
+
+struct node {
+ unsigned int value;
+ ck_hp_hazard_t hazard;
+ stack_entry_t stack_entry;
+};
+hp_stack_t stack = {NULL, NULL};
+ck_hp_t stack_hp;
+
+STACK_CONTAINER(struct node, stack_entry, stack_container)
+static struct affinity a;
+
+/*
+ * Stack producer operation safe for multiple unique producers and multiple consumers.
+ */
+CK_CC_INLINE static void
+stack_push_mpmc(struct stack *target, struct stack_entry *entry)
+{
+ struct stack_entry *lstack;
+ ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
+
+ lstack = ck_pr_load_ptr(&target->head);
+ ck_pr_store_ptr(&entry->next, lstack);
+ ck_pr_fence_store();
+
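+	/* On CAS failure, lstack holds the new head; relink and retry with exponential backoff. */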
+ while (ck_pr_cas_ptr_value(&target->head, lstack, entry, &lstack) == false) {
+ ck_pr_store_ptr(&entry->next, lstack);
+ ck_pr_fence_store();
+ ck_backoff_eb(&backoff);
+ }
+
+ return;
+}
+
+/*
+ * Stack consumer operation safe for multiple unique producers and multiple consumers.
+ */
+CK_CC_INLINE static struct stack_entry *
+stack_pop_mpmc(ck_hp_record_t *record, struct stack *target)
+{
+ struct stack_entry *entry;
+ ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
+
+ do {
+ entry = ck_pr_load_ptr(&target->head);
+ if (entry == NULL)
+ return (NULL);
+
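+		/* Publish the hazard pointer, then re-check that head is unchanged before dereferencing. */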
+ ck_hp_set_fence(record, 0, entry);
+ } while (entry != ck_pr_load_ptr(&target->head));
+
+ while (ck_pr_cas_ptr_value(&target->head, entry, entry->next, &entry) == false) {
+ if (ck_pr_load_ptr(&entry) == NULL)
+ break;
+
+ ck_hp_set_fence(record, 0, entry);
+ if (entry != ck_pr_load_ptr(&target->head))
+ continue;
+
+ ck_backoff_eb(&backoff);
+ }
+
+ return (entry);
+}
+
+static void *
+thread(void *unused CK_CC_UNUSED)
+{
+ struct node *entry, *e;
+ unsigned int i;
+ ck_hp_record_t record;
+ void **pointers;
+ stack_entry_t *s;
+ unsigned int tid = ck_pr_faa_uint(&global_tid, 1) + 1;
+ unsigned int r = (unsigned int)(tid + 1) * 0x5bd1e995;
+
+ unused = NULL;
+ pointers = malloc(sizeof(void *));
+ ck_hp_register(&stack_hp, &record, pointers);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads)
+ ck_pr_stall();
+
+ for (i = 0; i < PAIRS; i++) {
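+		/* Advance a per-thread xorshift PRNG to choose between a push and a pop. */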
+ r ^= r << 6; r ^= r >> 21; r ^= r << 7;
+
+ if (r & 0x1000) {
+ entry = malloc(sizeof(struct node));
+ assert(entry);
+ stack_push_mpmc(&stack, &entry->stack_entry);
+ ck_pr_inc_uint(&pushs);
+ } else {
+ s = stack_pop_mpmc(&record, &stack);
+ if (s == NULL)
+ continue;
+
+ e = stack_container(s);
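+			/* Retire the node; the destructor runs once no hazard pointer references it. */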
+ ck_hp_free(&record, &e->hazard, e, s);
+ ck_pr_inc_uint(&pops);
+ }
+ }
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < n_threads);
+
+ return (NULL);
+}
+
+static void
+destructor(void *p)
+{
+ free(p);
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int i;
+ pthread_t *threads;
+
+ if (argc != 4) {
+ ck_error("Usage: stack <threads> <threshold> <delta>\n");
+ }
+
+ n_threads = atoi(argv[1]);
+ threshold = atoi(argv[2]);
+ a.delta = atoi(argv[3]);
+ a.request = 0;
+
+ threads = malloc(sizeof(pthread_t) * n_threads);
+
+ ck_hp_init(&stack_hp, 1, threshold, destructor);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_create(threads + i, NULL, thread, NULL);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ fprintf(stderr, "Push: %u\nPop: %u\n", pushs, pops);
+ return (0);
+}
diff --git a/regressions/ck_hp/validate/serial.c b/regressions/ck_hp/validate/serial.c
new file mode 100644
index 0000000..fd31581
--- /dev/null
+++ b/regressions/ck_hp/validate/serial.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <ck_hp.h>
+
+#include "../../common.h"
+
+struct entry {
+ unsigned int value;
+ ck_hp_hazard_t hazard;
+};
+
+static void
+destructor(void *pointer)
+{
+
+ fprintf(stderr, "Free %p\n", pointer);
+ free(pointer);
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ ck_hp_t state;
+ ck_hp_record_t record[2];
+ void **pointers;
+ struct entry *entry, *other;
+
+ (void)argc;
+ (void)argv;
+
+ ck_hp_init(&state, 1, 1, destructor);
+
+ pointers = malloc(sizeof(void *));
+ if (pointers == NULL) {
+ ck_error("ERROR: Failed to allocate slot.\n");
+ }
+ ck_hp_register(&state, &record[0], pointers);
+ ck_hp_reclaim(&record[0]);
+
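+	/*
+	 * Protect an entry through slot 0, retire it while it is still protected
+	 * (so reclamation must be deferred), then clear the slot so that a later
+	 * ck_hp_reclaim can actually free it.
+	 */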
+ entry = malloc(sizeof *entry);
+ ck_hp_set(&record[0], 0, entry);
+ ck_hp_reclaim(&record[0]);
+ ck_hp_free(&record[0], &entry->hazard, entry, entry);
+ ck_hp_reclaim(&record[0]);
+ ck_hp_set(&record[0], 0, NULL);
+ ck_hp_reclaim(&record[0]);
+
+ entry = malloc(sizeof *entry);
+ ck_hp_set(&record[0], 0, entry);
+ ck_hp_reclaim(&record[0]);
+ ck_hp_free(&record[0], &entry->hazard, entry, entry);
+ ck_hp_reclaim(&record[0]);
+ ck_hp_set(&record[0], 0, NULL);
+ ck_hp_reclaim(&record[0]);
+
+ pointers = malloc(sizeof(void *));
+ if (pointers == NULL) {
+ ck_error("ERROR: Failed to allocate slot.\n");
+ }
+ ck_hp_register(&state, &record[1], pointers);
+ ck_hp_reclaim(&record[1]);
+
+ entry = malloc(sizeof *entry);
+ ck_hp_set(&record[1], 0, entry);
+ ck_hp_reclaim(&record[1]);
+ ck_hp_free(&record[1], &entry->hazard, entry, entry);
+ ck_hp_reclaim(&record[1]);
+ ck_hp_set(&record[1], 0, NULL);
+ ck_hp_reclaim(&record[1]);
+
+ printf("Allocating entry and freeing in other HP record...\n");
+ entry = malloc(sizeof *entry);
+ entry->value = 42;
+ ck_hp_set(&record[0], 0, entry);
+ ck_hp_free(&record[1], &entry->hazard, entry, entry);
+ ck_pr_store_uint(&entry->value, 1);
+
+ other = malloc(sizeof *other);
+ other->value = 24;
+ ck_hp_set(&record[1], 0, other);
+ ck_hp_free(&record[0], &other->hazard, other, other);
+ ck_pr_store_uint(&other->value, 32);
+ ck_hp_set(&record[0], 0, NULL);
+ ck_hp_reclaim(&record[1]);
+ ck_hp_set(&record[1], 0, NULL);
+ ck_hp_reclaim(&record[0]);
+ ck_hp_reclaim(&record[1]);
+
+ return 0;
+}
diff --git a/regressions/ck_hs/benchmark/Makefile b/regressions/ck_hs/benchmark/Makefile
new file mode 100644
index 0000000..23b6745
--- /dev/null
+++ b/regressions/ck_hs/benchmark/Makefile
@@ -0,0 +1,23 @@
+.PHONY: clean distribution
+
+OBJECTS=serial parallel_bytestring parallel_bytestring.delete apply
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_hs.h ../../../src/ck_hs.c
+ $(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_hs.c
+
+apply: apply.c ../../../include/ck_hs.h ../../../src/ck_hs.c
+ $(CC) $(CFLAGS) -o apply apply.c ../../../src/ck_hs.c
+
+parallel_bytestring: parallel_bytestring.c ../../../include/ck_hs.h ../../../src/ck_hs.c ../../../src/ck_epoch.c
+ $(CC) $(PTHREAD_CFLAGS) $(CFLAGS) -o parallel_bytestring parallel_bytestring.c ../../../src/ck_hs.c ../../../src/ck_epoch.c
+
+parallel_bytestring.delete: parallel_bytestring.c ../../../include/ck_hs.h ../../../src/ck_hs.c ../../../src/ck_epoch.c
+ $(CC) $(PTHREAD_CFLAGS) $(CFLAGS) -DHS_DELETE -o parallel_bytestring.delete parallel_bytestring.c ../../../src/ck_hs.c ../../../src/ck_epoch.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_hs/benchmark/apply.c b/regressions/ck_hs/benchmark/apply.c
new file mode 100644
index 0000000..ca4a3da
--- /dev/null
+++ b/regressions/ck_hs/benchmark/apply.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright 2014 Samy Al Bahra.
+ * Copyright 2014 Backtrace I/O, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_hs.h>
+
+#include <assert.h>
+#include <ck_malloc.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include "../../common.h"
+#include "../../../src/ck_ht_hash.h"
+
+static ck_hs_t hs;
+static char **keys;
+static size_t keys_length = 0;
+static size_t keys_capacity = 128;
+static unsigned long global_seed;
+
+static void *
+hs_malloc(size_t r)
+{
+
+ return malloc(r);
+}
+
+static void
+hs_free(void *p, size_t b, bool r)
+{
+
+ (void)b;
+ (void)r;
+
+ free(p);
+
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = hs_malloc,
+ .free = hs_free
+};
+
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+ const char *c = object;
+ unsigned long h;
+
+ h = (unsigned long)MurmurHash64A(c, strlen(c), seed);
+ return h;
+}
+
+static bool
+hs_compare(const void *previous, const void *compare)
+{
+
+ return strcmp(previous, compare) == 0;
+}
+
+static void
+set_destroy(void)
+{
+
+ ck_hs_destroy(&hs);
+ return;
+}
+
+static void
+set_init(unsigned int size, unsigned int mode)
+{
+
+ if (ck_hs_init(&hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC | mode, hs_hash, hs_compare,
+ &my_allocator, size, global_seed) == false) {
+ perror("ck_hs_init");
+ exit(EXIT_FAILURE);
+ }
+
+ return;
+}
+
+static size_t
+set_count(void)
+{
+
+ return ck_hs_count(&hs);
+}
+
+static bool
+set_reset(void)
+{
+
+ return ck_hs_reset(&hs);
+}
+
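+/*
+ * ck_hs_apply invokes the callback with the object currently stored for the
+ * key (or NULL if it is absent); the callback's return value replaces the
+ * stored object (a NULL return removes it), so returning the closure
+ * unchanged acts as an insert-or-keep.
+ */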
+static void *
+test_apply(void *key, void *closure)
+{
+
+ (void)key;
+
+ return closure;
+}
+
+static void
+run_test(const char *file, size_t r, unsigned int size, unsigned int mode)
+{
+ FILE *fp;
+ char buffer[512];
+ size_t i, j;
+ unsigned int d = 0;
+ uint64_t s, e, a, gp, agp;
+ struct ck_hs_stat st;
+ char **t;
+
+ keys = malloc(sizeof(char *) * keys_capacity);
+ assert(keys != NULL);
+
+ fp = fopen(file, "r");
+ assert(fp != NULL);
+
+ while (fgets(buffer, sizeof(buffer), fp) != NULL) {
+ buffer[strlen(buffer) - 1] = '\0';
+ keys[keys_length++] = strdup(buffer);
+ assert(keys[keys_length - 1] != NULL);
+
+ if (keys_length == keys_capacity) {
+ t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
+ assert(t != NULL);
+ keys = t;
+ }
+ }
+
+ t = realloc(keys, sizeof(char *) * keys_length);
+ assert(t != NULL);
+ keys = t;
+
+ set_init(size, mode);
+ for (i = 0; i < keys_length; i++) {
+ unsigned long h = CK_HS_HASH(&hs, hs_hash, keys[i]);
+
+ if (ck_hs_get(&hs, h, keys[i]) == false) {
+ if (ck_hs_put(&hs, h, keys[i]) == false)
+				ck_error("ERROR: Failed to put during get-to-put workload.\n");
+ } else {
+ d++;
+ }
+ }
+ ck_hs_stat(&hs, &st);
+
+ fprintf(stderr, "# %zu entries stored, %u duplicates, %u probe.\n",
+ set_count(), d, st.probe_maximum);
+
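+	/* Time the conventional lookup-then-insert path. */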
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (set_reset() == false)
+ ck_error("ERROR: Failed to reset hash table.\n");
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ unsigned long h = CK_HS_HASH(&hs, hs_hash, keys[i]);
+
+ if (ck_hs_get(&hs, h, keys[i]) == false &&
+ ck_hs_put(&hs, h, keys[i]) == false) {
+				ck_error("ERROR: Failed to put during get-to-put workload.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ gp = a / (r * keys_length);
+
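+	/* Time the same workload expressed as a single ck_hs_apply per key. */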
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (set_reset() == false)
+ ck_error("ERROR: Failed to reset hash table.\n");
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ unsigned long h = CK_HS_HASH(&hs, hs_hash, keys[i]);
+
+ if (ck_hs_apply(&hs, h, keys[i], test_apply, (void *)keys[i]) == false)
+ ck_error("ERROR: Failed in apply workload.\n");
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ agp = a / (r * keys_length);
+
+ fclose(fp);
+
+ for (i = 0; i < keys_length; i++) {
+ free(keys[i]);
+ }
+
+ printf("Get to put: %" PRIu64 " ticks\n", gp);
+ printf(" Apply: %" PRIu64 " ticks\n", agp);
+
+ free(keys);
+ keys_length = 0;
+ set_destroy();
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int r, size;
+
+ common_srand48((long int)time(NULL));
+ if (argc < 2) {
+ ck_error("Usage: ck_hs <dictionary> [<repetitions> <initial size>]\n");
+ }
+
+ r = 16;
+ if (argc >= 3)
+ r = atoi(argv[2]);
+
+ size = 8;
+ if (argc >= 4)
+ size = atoi(argv[3]);
+
+ global_seed = common_lrand48();
+ run_test(argv[1], r, size, 0);
+
+ printf("\n==============================================\n"
+ "Delete mode\n"
+ "==============================================\n");
+ run_test(argv[1], r, size, CK_HS_MODE_DELETE);
+ return 0;
+}
+
diff --git a/regressions/ck_hs/benchmark/parallel_bytestring.c b/regressions/ck_hs/benchmark/parallel_bytestring.c
new file mode 100644
index 0000000..6d38379
--- /dev/null
+++ b/regressions/ck_hs/benchmark/parallel_bytestring.c
@@ -0,0 +1,602 @@
+/*
+ * Copyright 2012 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "../../common.h"
+#include <ck_hs.h>
+#include "../../../src/ck_ht_hash.h"
+#include <assert.h>
+#include <ck_epoch.h>
+#include <ck_malloc.h>
+#include <ck_pr.h>
+#include <ck_spinlock.h>
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+static ck_hs_t hs CK_CC_CACHELINE;
+static char **keys;
+static size_t keys_length = 0;
+static size_t keys_capacity = 128;
+static ck_epoch_t epoch_hs;
+static ck_epoch_record_t epoch_wr;
+static int n_threads;
+static bool next_stage;
+
+enum state {
+ HS_STATE_STOP = 0,
+ HS_STATE_GET,
+ HS_STATE_STRICT_REPLACEMENT,
+ HS_STATE_DELETION,
+ HS_STATE_REPLACEMENT,
+ HS_STATE_COUNT
+};
+
+static ck_spinlock_t mtx = CK_SPINLOCK_INITIALIZER;
+static struct affinity affinerator = AFFINITY_INITIALIZER;
+static uint64_t accumulator[HS_STATE_COUNT];
+static int barrier[HS_STATE_COUNT];
+static int state;
+
+struct hs_epoch {
+ ck_epoch_entry_t epoch_entry;
+};
+
+COMMON_ALARM_DECLARE_GLOBAL(hs_alarm, alarm_event, next_stage)
+
+static void
+alarm_handler(int s)
+{
+
+ (void)s;
+ next_stage = true;
+ return;
+}
+
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+ const char *c = object;
+ unsigned long h;
+
+ h = (unsigned long)MurmurHash64A(c, strlen(c), seed);
+ return h;
+}
+
+static bool
+hs_compare(const void *previous, const void *compare)
+{
+
+ return strcmp(previous, compare) == 0;
+}
+
+static void
+hs_destroy(ck_epoch_entry_t *e)
+{
+
+ free(e);
+ return;
+}
+
+static void *
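+/*
+ * Each allocation is prefixed with an epoch entry so that hs_free can defer
+ * destruction through ck_epoch_call whenever safe memory reclamation is
+ * required.
+ */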
+hs_malloc(size_t r)
+{
+ ck_epoch_entry_t *b;
+
+ b = malloc(sizeof(*b) + r);
+ return b + 1;
+}
+
+static void
+hs_free(void *p, size_t b, bool r)
+{
+ struct hs_epoch *e = p;
+
+ (void)b;
+
+ if (r == true) {
+ /* Destruction requires safe memory reclamation. */
+ ck_epoch_call(&epoch_wr, &(--e)->epoch_entry, hs_destroy);
+ } else {
+ free(--e);
+ }
+
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = hs_malloc,
+ .free = hs_free
+};
+
+static void
+set_init(void)
+{
+ unsigned int mode = CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC;
+
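+	/* The .delete build variant defines HS_DELETE to enable CK_HS_MODE_DELETE for workloads that include deletions. */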
+#ifdef HS_DELETE
+ mode |= CK_HS_MODE_DELETE;
+#endif
+
+ ck_epoch_init(&epoch_hs);
+ ck_epoch_register(&epoch_hs, &epoch_wr);
+ common_srand48((long int)time(NULL));
+ if (ck_hs_init(&hs, mode, hs_hash, hs_compare, &my_allocator, 65536, common_lrand48()) == false) {
+ perror("ck_hs_init");
+ exit(EXIT_FAILURE);
+ }
+
+ return;
+}
+
+static bool
+set_remove(const char *value)
+{
+ unsigned long h;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ return (bool)ck_hs_remove(&hs, h, value);
+}
+
+static bool
+set_replace(const char *value)
+{
+ unsigned long h;
+ void *previous;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ return ck_hs_set(&hs, h, value, &previous);
+}
+
+static bool
+set_swap(const char *value)
+{
+ unsigned long h;
+ void *previous;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ return ck_hs_fas(&hs, h, value, &previous);
+}
+
+static void *
+set_get(const char *value)
+{
+ unsigned long h;
+ void *v;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ v = ck_hs_get(&hs, h, value);
+ return v;
+}
+
+static bool
+set_insert(const char *value)
+{
+ unsigned long h;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ return ck_hs_put(&hs, h, value);
+}
+
+static size_t
+set_count(void)
+{
+
+ return ck_hs_count(&hs);
+}
+
+static bool
+set_reset(void)
+{
+
+ return ck_hs_reset(&hs);
+}
+
+static void *
+reader(void *unused)
+{
+ size_t i;
+ ck_epoch_record_t epoch_record;
+ int state_previous = HS_STATE_STOP;
+ int n_state = 0;
+ uint64_t s, j, a;
+
+ (void)unused;
+ if (aff_iterate(&affinerator) != 0)
+ perror("WARNING: Failed to affine thread");
+
+ s = j = a = 0;
+ ck_epoch_register(&epoch_hs, &epoch_record);
+ for (;;) {
+ j++;
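+		/* Perform the whole pass over the key set inside an epoch-protected read-side section. */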
+ ck_epoch_begin(&epoch_record, NULL);
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ char *r;
+
+ r = set_get(keys[i]);
+ if (r == NULL) {
+ if (n_state == HS_STATE_STRICT_REPLACEMENT) {
+ ck_error("ERROR: Did not find during replacement: %s\n", keys[i]);
+ }
+
+ continue;
+ }
+
+ if (strcmp(r, keys[i]) == 0)
+ continue;
+
+ ck_error("ERROR: Found invalid value: [%s] but expected [%s]\n", (char *)r, keys[i]);
+ }
+ a += rdtsc() - s;
+ ck_epoch_end(&epoch_record, NULL);
+
+ n_state = ck_pr_load_int(&state);
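+		/*
+		 * On a stage transition, publish this reader's per-operation average
+		 * and wait on the stage barrier until the writer advances the benchmark.
+		 */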
+ if (n_state != state_previous) {
+ ck_spinlock_lock(&mtx);
+ accumulator[state_previous] += a / (j * keys_length);
+ ck_spinlock_unlock(&mtx);
+
+ ck_pr_inc_int(&barrier[state_previous]);
+ while (ck_pr_load_int(&barrier[state_previous]) != n_threads + 1)
+ ck_pr_stall();
+
+ state_previous = n_state;
+ s = j = a = 0;
+ }
+ }
+
+ return NULL;
+}
+
+static uint64_t
+acc(size_t i)
+{
+ uint64_t r;
+
+ ck_spinlock_lock(&mtx);
+ r = accumulator[i];
+ ck_spinlock_unlock(&mtx);
+
+ return r;
+}
+
+int
+main(int argc, char *argv[])
+{
+ FILE *fp;
+ char buffer[512];
+ size_t i, j, r;
+ unsigned int d = 0;
+ uint64_t s, e, a, repeated;
+ char **t;
+ pthread_t *readers;
+ double p_r, p_d;
+
+ COMMON_ALARM_DECLARE_LOCAL(hs_alarm, alarm_event)
+
+ r = 20;
+ s = 8;
+ p_d = 0.5;
+ p_r = 0.5;
+ n_threads = CORES - 1;
+
+ if (argc < 2) {
+ ck_error("Usage: parallel <dictionary> [<interval length> <initial size> <readers>\n"
+ " <probability of replacement> <probability of deletion> <epoch threshold>]\n");
+ }
+
+ if (argc >= 3)
+ r = atoi(argv[2]);
+
+ if (argc >= 4)
+ s = (uint64_t)atoi(argv[3]);
+
+ if (argc >= 5) {
+ n_threads = atoi(argv[4]);
+ if (n_threads < 1) {
+ ck_error("ERROR: Number of readers must be >= 1.\n");
+ }
+ }
+
+ if (argc >= 6) {
+ p_r = atof(argv[5]) / 100.00;
+		if (p_r < 0 || p_r > 1) {
+ ck_error("ERROR: Probability of replacement must be >= 0 and <= 100.\n");
+ }
+ }
+
+ if (argc >= 7) {
+ p_d = atof(argv[6]) / 100.00;
+		if (p_d < 0 || p_d > 1) {
+ ck_error("ERROR: Probability of deletion must be >= 0 and <= 100.\n");
+ }
+ }
+
+ COMMON_ALARM_INIT(hs_alarm, alarm_event, r)
+
+ affinerator.delta = 1;
+ readers = malloc(sizeof(pthread_t) * n_threads);
+ assert(readers != NULL);
+
+ keys = malloc(sizeof(char *) * keys_capacity);
+ assert(keys != NULL);
+
+ fp = fopen(argv[1], "r");
+ assert(fp != NULL);
+
+ while (fgets(buffer, sizeof(buffer), fp) != NULL) {
+ buffer[strlen(buffer) - 1] = '\0';
+ keys[keys_length++] = strdup(buffer);
+ assert(keys[keys_length - 1] != NULL);
+
+ if (keys_length == keys_capacity) {
+ t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
+ assert(t != NULL);
+ keys = t;
+ }
+ }
+
+ t = realloc(keys, sizeof(char *) * keys_length);
+ assert(t != NULL);
+ keys = t;
+
+ set_init();
+
+ for (i = 0; i < (size_t)n_threads; i++) {
+ if (pthread_create(&readers[i], NULL, reader, NULL) != 0) {
+ ck_error("ERROR: Failed to create thread %zu.\n", i);
+ }
+ }
+
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+
+ fprintf(stderr, " [S] %d readers, 1 writer.\n", n_threads);
+ fprintf(stderr, " [S] %zu entries stored and %u duplicates.\n\n",
+ set_count(), d);
+
+ fprintf(stderr, " ,- BASIC TEST\n");
+ fprintf(stderr, " | Executing SMR test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (set_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing replacement test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing get test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (set_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ a = 0;
+ fprintf(stderr, " | Executing removal test...");
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing negative look-up test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ set_get("\x50\x03\x04\x05\x06\x10");
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ ck_epoch_record_t epoch_temporary = epoch_wr;
+ ck_epoch_synchronize(&epoch_wr);
+
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
+ "%u pending, %u peak, %lu reclamations\n\n",
+ epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
+ epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
+
+ fprintf(stderr, " ,- READER CONCURRENCY\n");
+ fprintf(stderr, " | Executing reader test...");
+
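+	/*
+	 * Writer-driven state machine: advance the stage, wait for every reader
+	 * to pass the previous stage's barrier, then run the writer-side workload
+	 * until the alarm signals the next stage.
+	 */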
+ ck_pr_store_int(&state, HS_STATE_GET);
+ while (ck_pr_load_int(&barrier[HS_STATE_STOP]) != n_threads)
+ ck_pr_stall();
+ ck_pr_inc_int(&barrier[HS_STATE_STOP]);
+ common_sleep(r);
+ ck_pr_store_int(&state, HS_STATE_STRICT_REPLACEMENT);
+ while (ck_pr_load_int(&barrier[HS_STATE_GET]) != n_threads)
+ ck_pr_stall();
+
+ fprintf(stderr, "done (reader = %" PRIu64 " ticks)\n",
+ acc(HS_STATE_GET) / n_threads);
+
+ fprintf(stderr, " | Executing strict replacement test...");
+
+ a = repeated = 0;
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ ck_pr_inc_int(&barrier[HS_STATE_GET]);
+ for (;;) {
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (i & 1) {
+ set_replace(keys[i]);
+ } else {
+ set_swap(keys[i]);
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+
+ ck_pr_store_int(&state, HS_STATE_DELETION);
+ while (ck_pr_load_int(&barrier[HS_STATE_STRICT_REPLACEMENT]) != n_threads)
+ ck_pr_stall();
+ set_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), acc(HS_STATE_STRICT_REPLACEMENT) / n_threads);
+
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ fprintf(stderr, " | Executing deletion test (%.2f)...", p_d * 100);
+ a = repeated = 0;
+ ck_pr_inc_int(&barrier[HS_STATE_STRICT_REPLACEMENT]);
+ for (;;) {
+ double delete;
+
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ set_insert(keys[i]);
+ if (p_d != 0.0) {
+ delete = common_drand48();
+ if (delete <= p_d)
+ set_remove(keys[i]);
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+ ck_pr_store_int(&state, HS_STATE_REPLACEMENT);
+ while (ck_pr_load_int(&barrier[HS_STATE_DELETION]) != n_threads)
+ ck_pr_stall();
+
+ set_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), acc(HS_STATE_DELETION) / n_threads);
+
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ fprintf(stderr, " | Executing replacement test (%.2f)...", p_r * 100);
+ a = repeated = 0;
+ ck_pr_inc_int(&barrier[HS_STATE_DELETION]);
+ for (;;) {
+ double delete, replace;
+
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ set_insert(keys[i]);
+ if (p_d != 0.0) {
+ delete = common_drand48();
+ if (delete <= p_d)
+ set_remove(keys[i]);
+ } else {
+ delete = 0.0;
+ }
+
+ if (p_r != 0.0) {
+ replace = common_drand48();
+ if (replace <= p_r) {
+ if ((i & 1) || (delete <= p_d)) {
+ set_replace(keys[i]);
+ } else {
+ set_swap(keys[i]);
+ }
+ }
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+ ck_pr_store_int(&state, HS_STATE_STOP);
+ while (ck_pr_load_int(&barrier[HS_STATE_REPLACEMENT]) != n_threads)
+ ck_pr_stall();
+ set_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), acc(HS_STATE_REPLACEMENT) / n_threads);
+
+ ck_pr_inc_int(&barrier[HS_STATE_REPLACEMENT]);
+ epoch_temporary = epoch_wr;
+ ck_epoch_synchronize(&epoch_wr);
+
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
+ "%u pending, %u peak, %lu reclamations\n\n",
+ epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
+ epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
+ return 0;
+}
+
diff --git a/regressions/ck_hs/benchmark/serial.c b/regressions/ck_hs/benchmark/serial.c
new file mode 100644
index 0000000..ac4caff
--- /dev/null
+++ b/regressions/ck_hs/benchmark/serial.c
@@ -0,0 +1,517 @@
+/*
+ * Copyright 2012 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_hs.h>
+
+#include <assert.h>
+#include <ck_malloc.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include "../../common.h"
+#include "../../../src/ck_ht_hash.h"
+
+static ck_hs_t hs;
+static char **keys;
+static size_t keys_length = 0;
+static size_t keys_capacity = 128;
+static unsigned long global_seed;
+
+static void *
+hs_malloc(size_t r)
+{
+
+ return malloc(r);
+}
+
+static void
+hs_free(void *p, size_t b, bool r)
+{
+
+ (void)b;
+ (void)r;
+
+ free(p);
+
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = hs_malloc,
+ .free = hs_free
+};
+
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+ const char *c = object;
+ unsigned long h;
+
+ h = (unsigned long)MurmurHash64A(c, strlen(c), seed);
+ return h;
+}
+
+static bool
+hs_compare(const void *previous, const void *compare)
+{
+
+ return strcmp(previous, compare) == 0;
+}
+
+static void
+set_destroy(void)
+{
+
+ ck_hs_destroy(&hs);
+ return;
+}
+
+static void
+set_init(unsigned int size, unsigned int mode)
+{
+
+ if (ck_hs_init(&hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC | mode, hs_hash, hs_compare,
+ &my_allocator, size, global_seed) == false) {
+ perror("ck_hs_init");
+ exit(EXIT_FAILURE);
+ }
+
+ return;
+}
+
+static bool
+set_remove(const char *value)
+{
+ unsigned long h;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ return ck_hs_remove(&hs, h, value) != NULL;
+}
+
+static bool
+set_swap(const char *value)
+{
+ unsigned long h;
+ void *previous;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ return ck_hs_fas(&hs, h, value, &previous);
+}
+
+static bool
+set_replace(const char *value)
+{
+ unsigned long h;
+ void *previous;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ ck_hs_set(&hs, h, value, &previous);
+ return previous == value;
+}
+
+static void *
+set_get(const char *value)
+{
+ unsigned long h;
+ void *v;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ v = ck_hs_get(&hs, h, value);
+ return v;
+}
+
+static bool
+set_insert(const char *value)
+{
+ unsigned long h;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ return ck_hs_put(&hs, h, value);
+}
+
+static bool
+set_insert_unique(const char *value)
+{
+ unsigned long h;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ return ck_hs_put_unique(&hs, h, value);
+}
+
+static size_t
+set_count(void)
+{
+
+ return ck_hs_count(&hs);
+}
+
+static bool
+set_reset(void)
+{
+
+ return ck_hs_reset(&hs);
+}
+
+static void
+set_gc(void)
+{
+
+ ck_hs_gc(&hs, 0, 0);
+ return;
+}
+
+static void
+set_rebuild(void)
+{
+
+ ck_hs_rebuild(&hs);
+ return;
+}
+
+static void
+keys_shuffle(char **k)
+{
+ size_t i, j;
+ char *t;
+
+ for (i = keys_length; i > 1; i--) {
+ j = rand() % (i - 1);
+
+ if (j != i - 1) {
+ t = k[i - 1];
+ k[i - 1] = k[j];
+ k[j] = t;
+ }
+ }
+
+ return;
+}
+
+static void
+run_test(const char *file, size_t r, unsigned int size, unsigned int mode)
+{
+ FILE *fp;
+ char buffer[512];
+ size_t i, j;
+ unsigned int d = 0;
+ uint64_t s, e, a, ri, si, ai, sr, rg, sg, ag, sd, ng, ss, sts, su, sgc, sb;
+ struct ck_hs_stat st;
+ char **t;
+
+ keys = malloc(sizeof(char *) * keys_capacity);
+ assert(keys != NULL);
+
+ fp = fopen(file, "r");
+ assert(fp != NULL);
+
+ while (fgets(buffer, sizeof(buffer), fp) != NULL) {
+ buffer[strlen(buffer) - 1] = '\0';
+ keys[keys_length++] = strdup(buffer);
+ assert(keys[keys_length - 1] != NULL);
+
+ if (keys_length == keys_capacity) {
+ t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
+ assert(t != NULL);
+ keys = t;
+ }
+ }
+
+ t = realloc(keys, sizeof(char *) * keys_length);
+ assert(t != NULL);
+ keys = t;
+
+ set_init(size, mode);
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+ ck_hs_stat(&hs, &st);
+
+ fprintf(stderr, "# %zu entries stored, %u duplicates, %u probe.\n",
+ set_count(), d, st.probe_maximum);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (set_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = keys_length; i > 0; i--)
+ d += set_insert(keys[i - 1]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ ri = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (set_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ si = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ keys_shuffle(keys);
+
+ if (set_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ ai = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_swap(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ ss = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ sr = a / (r * keys_length);
+
+ set_reset();
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = keys_length; i > 0; i--) {
+ if (set_get(keys[i - 1]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ rg = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (set_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ sg = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ keys_shuffle(keys);
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (set_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ ag = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+ }
+ sd = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ set_get("\x50\x03\x04\x05\x06\x10");
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ ng = a / (r * keys_length);
+
+ set_reset();
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+ }
+ sts = a / (r * keys_length);
+
+ set_reset();
+
+ /* Prune duplicates. */
+ for (i = 0; i < keys_length; i++) {
+ if (set_insert(keys[i]) == true)
+ continue;
+
+ free(keys[i]);
+ keys[i] = keys[--keys_length];
+ }
+
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_insert_unique(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+ }
+ su = a / (r * keys_length);
+
+ for (i = 0; i < keys_length; i++)
+ set_insert_unique(keys[i]);
+
+ for (i = 0; i < keys_length / 2; i++)
+ set_remove(keys[i]);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ set_gc();
+ e = rdtsc();
+ a += e - s;
+ }
+ sgc = a / r;
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ set_rebuild();
+ e = rdtsc();
+ a += e - s;
+ }
+ sb = a / r;
+
+ printf("%zu "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 "\n",
+ keys_length, ri, si, ai, ss, sr, rg, sg, ag, sd, ng, sts, su, sgc, sb);
+
+ fclose(fp);
+
+ for (i = 0; i < keys_length; i++) {
+ free(keys[i]);
+ }
+
+ free(keys);
+ keys_length = 0;
+ set_destroy();
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int r, size;
+
+ common_srand48((long int)time(NULL));
+ if (argc < 2) {
+ ck_error("Usage: ck_hs <dictionary> [<repetitions> <initial size>]\n");
+ }
+
+ r = 16;
+ if (argc >= 3)
+ r = atoi(argv[2]);
+
+ size = 8;
+ if (argc >= 4)
+ size = atoi(argv[3]);
+
+ global_seed = common_lrand48();
+ run_test(argv[1], r, size, 0);
+ run_test(argv[1], r, size, CK_HS_MODE_DELETE);
+ fprintf(stderr, "# reverse_insertion serial_insertion random_insertion serial_swap "
+ "serial_replace reverse_get serial_get random_get serial_remove negative_get tombstone "
+ "set_unique gc rebuild\n\n");
+
+ return 0;
+}
+
diff --git a/regressions/ck_hs/validate/Makefile b/regressions/ck_hs/validate/Makefile
new file mode 100644
index 0000000..a96e652
--- /dev/null
+++ b/regressions/ck_hs/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=serial
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_hs.h ../../../src/ck_hs.c
+ $(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_hs.c
+
+check: all
+ ./serial
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_hs/validate/serial.c b/regressions/ck_hs/validate/serial.c
new file mode 100644
index 0000000..a16fc82
--- /dev/null
+++ b/regressions/ck_hs/validate/serial.c
@@ -0,0 +1,315 @@
+/*
+ * Copyright 2012 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_hs.h>
+
+#include <assert.h>
+#include <ck_malloc.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "../../common.h"
+
+static void *
+hs_malloc(size_t r)
+{
+
+ return malloc(r);
+}
+
+static void
+hs_free(void *p, size_t b, bool r)
+{
+
+ (void)b;
+ (void)r;
+ free(p);
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = hs_malloc,
+ .free = hs_free
+};
+
+const char *test[] = { "Samy", "Al", "Bahra", "dances", "in", "the", "wind.", "Once",
+ "upon", "a", "time", "his", "gypsy", "ate", "one", "itsy",
+ "bitsy", "spider.", "What", "goes", "up", "must",
+ "come", "down.", "What", "is", "down", "stays",
+ "down.", "A", "B", "C", "D", "E", "F", "G", "H",
+ "I", "J", "K", "L", "M", "N", "O", "P", "Q" };
+
+const char *negative = "negative";
+
+/* Purposefully crappy hash function. */
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+ const char *c = object;
+ unsigned long h;
+
+ (void)seed;
+ h = c[0];
+ return h;
+}
+
+static bool
+hs_compare(const void *previous, const void *compare)
+{
+
+ return strcmp(previous, compare) == 0;
+}
+
+static void *
+test_ip(void *key, void *closure)
+{
+ const char *a = key;
+ const char *b = closure;
+
+ if (strcmp(a, b) != 0)
+ ck_error("Mismatch: %s != %s\n", a, b);
+
+ return closure;
+}
+
+static void *
+test_negative(void *key, void *closure)
+{
+
+ (void)closure;
+ if (key != NULL)
+ ck_error("ERROR: Apply callback expects NULL argument instead of [%s]\n", key);
+
+ return NULL;
+}
+
+static void *
+test_unique(void *key, void *closure)
+{
+
+ if (key != NULL)
+ ck_error("ERROR: Apply callback expects NULL argument instead of [%s]\n", key);
+
+ return closure;
+}
+
+static void *
+test_remove(void *key, void *closure)
+{
+
+ (void)key;
+ (void)closure;
+
+ return NULL;
+}
+
+static void
+run_test(unsigned int is, unsigned int ad)
+{
+ ck_hs_t hs[16];
+ const size_t size = sizeof(hs) / sizeof(*hs);
+ size_t i, j;
+ const char *blob = "#blobs";
+ unsigned long h;
+
+ if (ck_hs_init(&hs[0], CK_HS_MODE_SPMC | CK_HS_MODE_OBJECT | ad, hs_hash, hs_compare, &my_allocator, is, 6602834) == false)
+ ck_error("ck_hs_init\n");
+
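+	/*
+	 * Each pass exercises insertion, removal, replacement and apply against
+	 * hs[j], then migrates the set into hs[j + 1] with ck_hs_move before
+	 * garbage-collecting and rebuilding it.
+	 */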
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ h = test[i][0];
+ if (ck_hs_get(&hs[j], h, test[i]) != NULL) {
+ continue;
+ }
+
+ if (i & 1) {
+ if (ck_hs_put_unique(&hs[j], h, test[i]) == false)
+ ck_error("ERROR [%zu]: Failed to insert unique (%s)\n", j, test[i]);
+ } else if (ck_hs_apply(&hs[j], h, test[i], test_unique, (void *)(uintptr_t)test[i]) == false) {
+ ck_error("ERROR: Failed to apply for insertion.\n");
+ }
+
+ if (i & 1) {
+ if (ck_hs_remove(&hs[j], h, test[i]) == false)
+ ck_error("ERROR [%zu]: Failed to remove unique (%s)\n", j, test[i]);
+ } else if (ck_hs_apply(&hs[j], h, test[i], test_remove, NULL) == false) {
+ ck_error("ERROR: Failed to remove apply.\n");
+ }
+
+ if (ck_hs_apply(&hs[j], h, test[i], test_negative, (char *)(uintptr_t)test[i]) == false)
+ ck_error("ERROR: Failed to apply.\n");
+
+ break;
+ }
+
+ if (ck_hs_gc(&hs[j], 0, 0) == false)
+ ck_error("ERROR: Failed to GC empty set.\n");
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ h = test[i][0];
+ ck_hs_put(&hs[j], h, test[i]);
+ if (ck_hs_put(&hs[j], h, test[i]) == true) {
+ ck_error("ERROR [%u] [1]: put must fail on collision (%s).\n", is, test[i]);
+ }
+ if (ck_hs_get(&hs[j], h, test[i]) == NULL) {
+ ck_error("ERROR [%u]: get must not fail after put\n", is);
+ }
+ }
+
+ /* Test grow semantics. */
+ ck_hs_grow(&hs[j], 128);
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ h = test[i][0];
+ if (ck_hs_put(&hs[j], h, test[i]) == true) {
+ ck_error("ERROR [%u] [2]: put must fail on collision.\n", is);
+ }
+
+ if (ck_hs_get(&hs[j], h, test[i]) == NULL) {
+ ck_error("ERROR [%u]: get must not fail\n", is);
+ }
+ }
+
+ h = blob[0];
+ if (ck_hs_get(&hs[j], h, blob) == NULL) {
+ if (j > 0)
+ ck_error("ERROR [%u]: Blob must always exist after first.\n", is);
+
+ if (ck_hs_put(&hs[j], h, blob) == false) {
+ ck_error("ERROR [%u]: A unique blob put failed.\n", is);
+ }
+ } else {
+ if (ck_hs_put(&hs[j], h, blob) == true) {
+ ck_error("ERROR [%u]: Duplicate blob put succeeded.\n", is);
+ }
+ }
+
+ /* Grow set and check get semantics. */
+ ck_hs_grow(&hs[j], 512);
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ h = test[i][0];
+ if (ck_hs_get(&hs[j], h, test[i]) == NULL) {
+ ck_error("ERROR [%u]: get must not fail\n", is);
+ }
+ }
+
+ /* Delete and check negative membership. */
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ void *r;
+
+ h = test[i][0];
+ if (ck_hs_get(&hs[j], h, test[i]) == NULL)
+ continue;
+
+ if (r = ck_hs_remove(&hs[j], h, test[i]), r == NULL) {
+ ck_error("ERROR [%u]: remove must not fail\n", is);
+ }
+
+ if (strcmp(r, test[i]) != 0) {
+				ck_error("ERROR [%u]: Removed incorrect node (%s != %s)\n", is, (char *)r, test[i]);
+ }
+ }
+
+ /* Test replacement semantics. */
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ void *r;
+ bool d;
+
+ h = test[i][0];
+ d = ck_hs_get(&hs[j], h, test[i]) != NULL;
+ if (ck_hs_set(&hs[j], h, test[i], &r) == false) {
+ ck_error("ERROR [%u]: Failed to set\n", is);
+ }
+
+ /* Expected replacement. */
+ if (d == true && (r == NULL || strcmp(r, test[i]) != 0)) {
+ ck_error("ERROR [%u]: Incorrect previous value: %s != %s\n",
+ is, test[i], (char *)r);
+ }
+
+ /* Replacement should succeed. */
+ if (ck_hs_fas(&hs[j], h, test[i], &r) == false)
+ ck_error("ERROR [%u]: ck_hs_fas must succeed.\n", is);
+
+ if (strcmp(r, test[i]) != 0) {
+ ck_error("ERROR [%u]: Incorrect replaced value: %s != %s\n",
+ is, test[i], (char *)r);
+ }
+
+ if (ck_hs_fas(&hs[j], h, negative, &r) == true)
+ ck_error("ERROR [%u]: Replacement of negative should fail.\n", is);
+
+ if (ck_hs_set(&hs[j], h, test[i], &r) == false) {
+ ck_error("ERROR [%u]: Failed to set [1]\n", is);
+ }
+
+ if (strcmp(r, test[i]) != 0) {
+ ck_error("ERROR [%u]: Invalid &hs[j]: %s != %s\n", is, test[i], (char *)r);
+ }
+
+ /* Attempt in-place mutation. */
+ if (ck_hs_apply(&hs[j], h, test[i], test_ip, (void *)(uintptr_t)test[i]) == false)
+ ck_error("ERROR [%u]: Failed to apply: %s != %s\n", is, (char *)r, test[i]);
+
+ d = ck_hs_get(&hs[j], h, test[i]) != NULL;
+ if (d == false)
+ ck_error("ERROR [%u]: Expected [%s] to exist.\n", is, test[i]);
+ }
+
+ if (j == size - 1)
+ break;
+
+ if (ck_hs_move(&hs[j + 1], &hs[j], hs_hash, hs_compare, &my_allocator) == false)
+ ck_error("Failed to move hash table");
+
+ if (j & 1) {
+ ck_hs_gc(&hs[j + 1], 0, 0);
+ } else {
+ ck_hs_gc(&hs[j + 1], 26, 26);
+ }
+
+ if (ck_hs_rebuild(&hs[j + 1]) == false)
+ ck_error("Failed to rebuild");
+ }
+
+ return;
+}
+
+int
+main(void)
+{
+ unsigned int k;
+
+ for (k = 16; k <= 64; k <<= 1) {
+ run_test(k, 0);
+ run_test(k, CK_HS_MODE_DELETE);
+ break;
+ }
+
+ return 0;
+}
+
diff --git a/regressions/ck_ht/benchmark/Makefile b/regressions/ck_ht/benchmark/Makefile
new file mode 100644
index 0000000..fa31274
--- /dev/null
+++ b/regressions/ck_ht/benchmark/Makefile
@@ -0,0 +1,27 @@
+.PHONY: clean distribution
+
+OBJECTS=serial serial.delete parallel_bytestring parallel_bytestring.delete parallel_direct
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_ht.h ../../../src/ck_ht.c
+ $(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_ht.c
+
+serial.delete: serial.c ../../../include/ck_ht.h ../../../src/ck_ht.c
+ $(CC) $(CFLAGS) -DHT_DELETE -o serial.delete serial.c ../../../src/ck_ht.c
+
+parallel_bytestring.delete: parallel_bytestring.c ../../../include/ck_ht.h ../../../src/ck_ht.c ../../../src/ck_epoch.c
+ $(CC) $(PTHREAD_CFLAGS) $(CFLAGS) -DHT_DELETE -o parallel_bytestring.delete parallel_bytestring.c ../../../src/ck_ht.c ../../../src/ck_epoch.c
+
+parallel_bytestring: parallel_bytestring.c ../../../include/ck_ht.h ../../../src/ck_ht.c ../../../src/ck_epoch.c
+ $(CC) $(PTHREAD_CFLAGS) $(CFLAGS) -o parallel_bytestring parallel_bytestring.c ../../../src/ck_ht.c ../../../src/ck_epoch.c
+
+parallel_direct: parallel_direct.c ../../../include/ck_ht.h ../../../src/ck_ht.c ../../../src/ck_epoch.c
+ $(CC) $(PTHREAD_CFLAGS) $(CFLAGS) -o parallel_direct parallel_direct.c ../../../src/ck_ht.c ../../../src/ck_epoch.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
+
diff --git a/regressions/ck_ht/benchmark/parallel_bytestring.c b/regressions/ck_ht/benchmark/parallel_bytestring.c
new file mode 100644
index 0000000..f3d3854
--- /dev/null
+++ b/regressions/ck_ht/benchmark/parallel_bytestring.c
@@ -0,0 +1,559 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_ht.h>
+
+
+#include <assert.h>
+#include <ck_epoch.h>
+#include <ck_malloc.h>
+#include <ck_pr.h>
+#include <ck_spinlock.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+static ck_ht_t ht CK_CC_CACHELINE;
+static char **keys;
+static size_t keys_length = 0;
+static size_t keys_capacity = 128;
+static ck_epoch_t epoch_ht;
+static ck_epoch_record_t epoch_wr;
+static int n_threads;
+static bool next_stage;
+
+enum state {
+ HT_STATE_STOP = 0,
+ HT_STATE_GET,
+ HT_STATE_STRICT_REPLACEMENT,
+ HT_STATE_DELETION,
+ HT_STATE_REPLACEMENT,
+ HT_STATE_COUNT
+};
+
+static struct affinity affinerator = AFFINITY_INITIALIZER;
+static uint64_t accumulator[HT_STATE_COUNT];
+static ck_spinlock_t accumulator_mutex = CK_SPINLOCK_INITIALIZER;
+static int barrier[HT_STATE_COUNT];
+static int state;
+
+struct ht_epoch {
+ ck_epoch_entry_t epoch_entry;
+};
+
+COMMON_ALARM_DECLARE_GLOBAL(ht_alarm, alarm_event, next_stage)
+
+static void
+alarm_handler(int s)
+{
+
+ (void)s;
+ next_stage = true;
+ return;
+}
+
+static void
+ht_destroy(ck_epoch_entry_t *e)
+{
+
+ free(e);
+ return;
+}
+
+static void *
+ht_malloc(size_t r)
+{
+ ck_epoch_entry_t *b;
+
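+	/* Reserve room for an epoch entry header in front of the region handed back to ck_ht; ht_free() recovers it by stepping back one entry. */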
+ b = malloc(sizeof(*b) + r);
+ return b + 1;
+}
+
+static void
+ht_free(void *p, size_t b, bool r)
+{
+ struct ht_epoch *e = p;
+
+ (void)b;
+
+ if (r == true) {
+ /* Destruction requires safe memory reclamation. */
+ ck_epoch_call(&epoch_wr, &(--e)->epoch_entry, ht_destroy);
+ } else {
+ free(--e);
+ }
+
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = ht_malloc,
+ .free = ht_free
+};
+
+static void
+table_init(void)
+{
+ unsigned int mode = CK_HT_MODE_BYTESTRING;
+
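+/* An HT_DELETE build adds CK_HT_WORKLOAD_DELETE, marking the workload as delete-heavy. */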
+#ifdef HT_DELETE
+ mode |= CK_HT_WORKLOAD_DELETE;
+#endif
+
+ ck_epoch_init(&epoch_ht);
+ ck_epoch_register(&epoch_ht, &epoch_wr);
+ common_srand48((long int)time(NULL));
+ if (ck_ht_init(&ht, mode, NULL, &my_allocator, 8, common_lrand48()) == false) {
+ perror("ck_ht_init");
+ exit(EXIT_FAILURE);
+ }
+
+ return;
+}
+
+static bool
+table_remove(const char *value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ size_t l = strlen(value);
+
+ ck_ht_hash(&h, &ht, value, l);
+ ck_ht_entry_key_set(&entry, value, l);
+ return ck_ht_remove_spmc(&ht, h, &entry);
+}
+
+static bool
+table_replace(const char *value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ size_t l = strlen(value);
+
+ ck_ht_hash(&h, &ht, value, l);
+ ck_ht_entry_set(&entry, h, value, l, "REPLACED");
+ return ck_ht_set_spmc(&ht, h, &entry);
+}
+
+static void *
+table_get(const char *value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ size_t l = strlen(value);
+
+ ck_ht_hash(&h, &ht, value, l);
+ ck_ht_entry_key_set(&entry, value, l);
+ if (ck_ht_get_spmc(&ht, h, &entry) == true)
+ return ck_ht_entry_value(&entry);
+
+ return NULL;
+}
+
+static bool
+table_insert(const char *value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ size_t l = strlen(value);
+
+ ck_ht_hash(&h, &ht, value, l);
+ ck_ht_entry_set(&entry, h, value, l, value);
+ return ck_ht_put_spmc(&ht, h, &entry);
+}
+
+static size_t
+table_count(void)
+{
+
+ return ck_ht_count(&ht);
+}
+
+static bool
+table_reset(void)
+{
+
+ return ck_ht_reset_spmc(&ht);
+}
+
+static void *
+reader(void *unused)
+{
+ size_t i;
+ ck_epoch_record_t epoch_record;
+ int state_previous = HT_STATE_STOP;
+ int n_state;
+ uint64_t s, j, a;
+
+ (void)unused;
+ if (aff_iterate(&affinerator) != 0)
+ perror("WARNING: Failed to affine thread");
+
+ s = j = a = 0;
+ ck_epoch_register(&epoch_ht, &epoch_record);
+ for (;;) {
+ j++;
+ ck_epoch_begin(&epoch_record, NULL);
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ char *r;
+
+ r = table_get(keys[i]);
+ if (r == NULL)
+ continue;
+
+ if (strcmp(r, "REPLACED") == 0)
+ continue;
+
+ if (strcmp(r, keys[i]) == 0)
+ continue;
+
+ ck_error("ERROR: Found invalid value: [%s] but expected [%s]\n", r, keys[i]);
+ }
+ a += rdtsc() - s;
+ ck_epoch_end(&epoch_record, NULL);
+
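+		/* If the writer advanced the stage, publish this stage's per-key average and rendezvous with the writer (n_threads + 1) at the barrier. */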
+ n_state = ck_pr_load_int(&state);
+ if (n_state != state_previous) {
+ ck_spinlock_lock(&accumulator_mutex);
+ accumulator[state_previous] += a / (j * keys_length);
+ ck_spinlock_unlock(&accumulator_mutex);
+ ck_pr_inc_int(&barrier[state_previous]);
+ while (ck_pr_load_int(&barrier[state_previous]) != n_threads + 1)
+ ck_pr_stall();
+
+ state_previous = n_state;
+ s = j = a = 0;
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ FILE *fp;
+ char buffer[512];
+ size_t i, j, r;
+ unsigned int d = 0;
+ uint64_t s, e, a, repeated;
+ char **t;
+ pthread_t *readers;
+ double p_r, p_d;
+
+ COMMON_ALARM_DECLARE_LOCAL(ht_alarm, alarm_event)
+
+ r = 20;
+ s = 8;
+ p_d = 0.5;
+ p_r = 0.5;
+ n_threads = CORES - 1;
+
+ if (argc < 2) {
+ ck_error("Usage: parallel <dictionary> [<interval length> <initial size> <readers>\n"
+ " <probability of replacement> <probability of deletion> <epoch threshold>]\n");
+ }
+
+ if (argc >= 3)
+ r = atoi(argv[2]);
+
+ if (argc >= 4)
+ s = (uint64_t)atoi(argv[3]);
+
+ if (argc >= 5) {
+ n_threads = atoi(argv[4]);
+ if (n_threads < 1) {
+ ck_error("ERROR: Number of readers must be >= 1.\n");
+ }
+ }
+
+ if (argc >= 6) {
+ p_r = atof(argv[5]) / 100.00;
+ if (p_r < 0) {
+ ck_error("ERROR: Probability of replacement must be >= 0 and <= 100.\n");
+ }
+ }
+
+ if (argc >= 7) {
+ p_d = atof(argv[6]) / 100.00;
+ if (p_d < 0) {
+ ck_error("ERROR: Probability of deletion must be >= 0 and <= 100.\n");
+ }
+ }
+
+ COMMON_ALARM_INIT(ht_alarm, alarm_event, r)
+
+ affinerator.delta = 1;
+ readers = malloc(sizeof(pthread_t) * n_threads);
+ assert(readers != NULL);
+
+ keys = malloc(sizeof(char *) * keys_capacity);
+ assert(keys != NULL);
+
+ fp = fopen(argv[1], "r");
+ assert(fp != NULL);
+
+ while (fgets(buffer, sizeof(buffer), fp) != NULL) {
+ buffer[strlen(buffer) - 1] = '\0';
+ keys[keys_length++] = strdup(buffer);
+ assert(keys[keys_length - 1] != NULL);
+
+ if (keys_length == keys_capacity) {
+ t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
+ assert(t != NULL);
+ keys = t;
+ }
+ }
+
+ t = realloc(keys, sizeof(char *) * keys_length);
+ assert(t != NULL);
+ keys = t;
+
+ table_init();
+
+ for (i = 0; i < (size_t)n_threads; i++) {
+ if (pthread_create(&readers[i], NULL, reader, NULL) != 0) {
+ ck_error("ERROR: Failed to create thread %zu.\n", i);
+ }
+ }
+
+ for (i = 0; i < keys_length; i++)
+ d += table_insert(keys[i]) == false;
+
+ fprintf(stderr, " [S] %d readers, 1 writer.\n", n_threads);
+ fprintf(stderr, " [S] %zu entries stored and %u duplicates.\n\n",
+ table_count(), d);
+
+ fprintf(stderr, " ,- BASIC TEST\n");
+ fprintf(stderr, " | Executing SMR test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (table_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += table_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing replacement test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ table_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing get test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (table_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ a = 0;
+ fprintf(stderr, " | Executing removal test...");
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ table_remove(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ table_insert(keys[i]);
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing negative look-up test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ table_get("\x50\x03\x04\x05\x06\x10");
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
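+	/* Snapshot the writer's epoch record before synchronizing so the summary below contrasts pre- and post-reclamation counters. */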
+ ck_epoch_record_t epoch_temporary = epoch_wr;
+ ck_epoch_synchronize(&epoch_wr);
+
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
+ "%u pending, %u peak, %lu reclamations\n\n",
+ epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
+ epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
+
+ fprintf(stderr, " ,- READER CONCURRENCY\n");
+ fprintf(stderr, " | Executing reader test...");
+
+ ck_pr_store_int(&state, HT_STATE_GET);
+ while (ck_pr_load_int(&barrier[HT_STATE_STOP]) != n_threads)
+ ck_pr_stall();
+ ck_pr_inc_int(&barrier[HT_STATE_STOP]);
+ common_sleep(r);
+ ck_pr_store_int(&state, HT_STATE_STRICT_REPLACEMENT);
+ while (ck_pr_load_int(&barrier[HT_STATE_GET]) != n_threads)
+ ck_pr_stall();
+ fprintf(stderr, "done (reader = %" PRIu64 " ticks)\n",
+ accumulator[HT_STATE_GET] / n_threads);
+
+ fprintf(stderr, " | Executing strict replacement test...");
+
+ a = repeated = 0;
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ ck_pr_inc_int(&barrier[HT_STATE_GET]);
+ for (;;) {
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ table_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+
+ ck_pr_store_int(&state, HT_STATE_DELETION);
+ while (ck_pr_load_int(&barrier[HT_STATE_STRICT_REPLACEMENT]) != n_threads)
+ ck_pr_stall();
+ table_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), accumulator[HT_STATE_STRICT_REPLACEMENT] / n_threads);
+
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ fprintf(stderr, " | Executing deletion test (%.2f)...", p_d * 100);
+ a = repeated = 0;
+ ck_pr_inc_int(&barrier[HT_STATE_STRICT_REPLACEMENT]);
+ for (;;) {
+ double delete;
+
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ table_insert(keys[i]);
+ if (p_d != 0.0) {
+ delete = common_drand48();
+ if (delete <= p_d)
+ table_remove(keys[i]);
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+ ck_pr_store_int(&state, HT_STATE_REPLACEMENT);
+ while (ck_pr_load_int(&barrier[HT_STATE_DELETION]) != n_threads)
+ ck_pr_stall();
+
+ table_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), accumulator[HT_STATE_DELETION] / n_threads);
+
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ fprintf(stderr, " | Executing replacement test (%.2f)...", p_r * 100);
+ a = repeated = 0;
+ ck_pr_inc_int(&barrier[HT_STATE_DELETION]);
+ for (;;) {
+ double replace, delete;
+
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ table_insert(keys[i]);
+ if (p_d != 0.0) {
+ delete = common_drand48();
+ if (delete <= p_d)
+ table_remove(keys[i]);
+ }
+ if (p_r != 0.0) {
+ replace = common_drand48();
+ if (replace <= p_r)
+ table_replace(keys[i]);
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+ ck_pr_store_int(&state, HT_STATE_STOP);
+ while (ck_pr_load_int(&barrier[HT_STATE_REPLACEMENT]) != n_threads)
+ ck_pr_stall();
+ table_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), accumulator[HT_STATE_REPLACEMENT] / n_threads);
+
+ ck_pr_inc_int(&barrier[HT_STATE_REPLACEMENT]);
+ epoch_temporary = epoch_wr;
+ ck_epoch_synchronize(&epoch_wr);
+
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
+ "%u pending, %u peak, %lu reclamations\n\n",
+ epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
+ epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
+ return 0;
+}
diff --git a/regressions/ck_ht/benchmark/parallel_direct.c b/regressions/ck_ht/benchmark/parallel_direct.c
new file mode 100644
index 0000000..195bb25
--- /dev/null
+++ b/regressions/ck_ht/benchmark/parallel_direct.c
@@ -0,0 +1,545 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_ht.h>
+
+#include <assert.h>
+#include <ck_epoch.h>
+#include <ck_malloc.h>
+#include <ck_pr.h>
+#include <ck_spinlock.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+static ck_ht_t ht CK_CC_CACHELINE;
+static uintptr_t *keys;
+static size_t keys_length = 0;
+static ck_epoch_t epoch_ht;
+static ck_epoch_record_t epoch_wr;
+static int n_threads;
+static bool next_stage;
+
+enum state {
+ HT_STATE_STOP = 0,
+ HT_STATE_GET,
+ HT_STATE_STRICT_REPLACEMENT,
+ HT_STATE_DELETION,
+ HT_STATE_REPLACEMENT,
+ HT_STATE_COUNT
+};
+
+static struct affinity affinerator = AFFINITY_INITIALIZER;
+static uint64_t accumulator[HT_STATE_COUNT];
+static ck_spinlock_t accumulator_mutex = CK_SPINLOCK_INITIALIZER;
+static int barrier[HT_STATE_COUNT];
+static int state;
+
+struct ht_epoch {
+ ck_epoch_entry_t epoch_entry;
+};
+
+COMMON_ALARM_DECLARE_GLOBAL(ht_alarm, alarm_event, next_stage)
+
+static void
+alarm_handler(int s)
+{
+
+ (void)s;
+ next_stage = true;
+ return;
+}
+
+static void
+ht_destroy(ck_epoch_entry_t *e)
+{
+
+ free(e);
+ return;
+}
+
+static void *
+ht_malloc(size_t r)
+{
+ ck_epoch_entry_t *b;
+
+ b = malloc(sizeof(*b) + r);
+ return b + 1;
+}
+
+static void
+ht_free(void *p, size_t b, bool r)
+{
+ struct ht_epoch *e = p;
+
+ (void)b;
+
+ if (r == true) {
+ /* Destruction requires safe memory reclamation. */
+ ck_epoch_call(&epoch_wr, &(--e)->epoch_entry, ht_destroy);
+ } else {
+ free(--e);
+ }
+
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = ht_malloc,
+ .free = ht_free
+};
+
+static void
+hash_function(ck_ht_hash_t *h, const void *key, size_t key_length, uint64_t seed)
+{
+ const uintptr_t *value = key;
+
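+	/* Direct-mode keys are already machine words, so the key value serves as its own hash. */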
+ (void)key_length;
+ (void)seed;
+ h->value = *value;
+ return;
+}
+
+static void
+table_init(void)
+{
+
+ ck_epoch_init(&epoch_ht);
+ ck_epoch_register(&epoch_ht, &epoch_wr);
+ common_srand48((long int)time(NULL));
+ if (ck_ht_init(&ht, CK_HT_MODE_DIRECT, hash_function, &my_allocator, 8, common_lrand48()) == false) {
+ perror("ck_ht_init");
+ exit(EXIT_FAILURE);
+ }
+
+ return;
+}
+
+static bool
+table_remove(uintptr_t value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+
+ ck_ht_hash_direct(&h, &ht, value);
+ ck_ht_entry_key_set_direct(&entry, value);
+ return ck_ht_remove_spmc(&ht, h, &entry);
+}
+
+static bool
+table_replace(uintptr_t value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+
+ ck_ht_hash_direct(&h, &ht, value);
+ ck_ht_entry_set_direct(&entry, h, value, 6605241);
+ return ck_ht_set_spmc(&ht, h, &entry);
+}
+
+static uintptr_t
+table_get(uintptr_t value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+
+ ck_ht_hash_direct(&h, &ht, value);
+ ck_ht_entry_key_set_direct(&entry, value);
+ if (ck_ht_get_spmc(&ht, h, &entry) == true)
+ return ck_ht_entry_value_direct(&entry);
+
+ return 0;
+}
+
+static bool
+table_insert(uintptr_t value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+
+ ck_ht_hash_direct(&h, &ht, value);
+ ck_ht_entry_set_direct(&entry, h, value, value);
+ return ck_ht_put_spmc(&ht, h, &entry);
+}
+
+static size_t
+table_count(void)
+{
+
+ return ck_ht_count(&ht);
+}
+
+static bool
+table_reset(void)
+{
+
+ return ck_ht_reset_spmc(&ht);
+}
+
+static void *
+ht_reader(void *unused)
+{
+ size_t i;
+ ck_epoch_record_t epoch_record;
+ int state_previous = HT_STATE_STOP;
+ int n_state;
+ uint64_t s, j, a;
+
+ (void)unused;
+ if (aff_iterate(&affinerator) != 0)
+ perror("WARNING: Failed to affine thread");
+
+ s = j = a = 0;
+ ck_epoch_register(&epoch_ht, &epoch_record);
+ for (;;) {
+ j++;
+ ck_epoch_begin(&epoch_record, NULL);
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ uintptr_t r;
+
+ r = table_get(keys[i]);
+ if (r == 0)
+ continue;
+
+ if (r == 6605241)
+ continue;
+
+ if (r == keys[i])
+ continue;
+
+ ck_error("ERROR: Found invalid value: [%ju]\n",
+ (uintmax_t)r);
+ }
+ a += rdtsc() - s;
+ ck_epoch_end(&epoch_record, NULL);
+
+ n_state = ck_pr_load_int(&state);
+ if (n_state != state_previous) {
+ ck_spinlock_lock(&accumulator_mutex);
+ accumulator[state_previous] += a / (j * keys_length);
+ ck_spinlock_unlock(&accumulator_mutex);
+ ck_pr_inc_int(&barrier[state_previous]);
+ while (ck_pr_load_int(&barrier[state_previous]) != n_threads + 1)
+ ck_pr_stall();
+
+ state_previous = n_state;
+ s = j = a = 0;
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ size_t i, j, r;
+ unsigned int d = 0;
+ uint64_t s, e, a, repeated;
+ pthread_t *readers;
+ double p_r, p_d;
+
+ COMMON_ALARM_DECLARE_LOCAL(ht_alarm, alarm_event)
+
+ r = 20;
+ s = 8;
+ p_d = 0.5;
+ p_r = 0.5;
+ n_threads = CORES - 1;
+
+ if (argc < 2) {
+ fprintf(stderr, "Usage: parallel <#entries> [<interval length> <initial size> <readers>\n"
+ " <probability of replacement> <probability of deletion> <epoch threshold>]\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (argc >= 3)
+ r = atoi(argv[2]);
+
+ if (argc >= 4)
+ s = (uint64_t)atoi(argv[3]);
+
+ if (argc >= 5) {
+ n_threads = atoi(argv[4]);
+ if (n_threads < 1) {
+ ck_error("ERROR: Number of readers must be >= 1.\n");
+ }
+ }
+
+ if (argc >= 6) {
+ p_r = atof(argv[5]) / 100.00;
+ if (p_r < 0) {
+ ck_error("ERROR: Probability of replacement must be >= 0 and <= 100.\n");
+ }
+ }
+
+ if (argc >= 7) {
+ p_d = atof(argv[6]) / 100.00;
+ if (p_d < 0) {
+ ck_error("ERROR: Probability of deletion must be >= 0 and <= 100.\n");
+ }
+ }
+
+ COMMON_ALARM_INIT(ht_alarm, alarm_event, r)
+
+ affinerator.delta = 1;
+ readers = malloc(sizeof(pthread_t) * n_threads);
+ assert(readers != NULL);
+
+ keys_length = (size_t)atoi(argv[1]);
+ keys = malloc(sizeof(uintptr_t) * keys_length);
+ assert(keys != NULL);
+
+ table_init();
+
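+	/* The value 2 is reserved for the negative look-up test below, so no stored key may equal it. */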
+ for (i = 0; i < keys_length; i++) {
+ keys[i] = (uintptr_t)common_lrand48();
+ while (keys[i] == 2)
+ keys[i] = (uintptr_t)common_lrand48();
+ }
+
+ for (i = 0; i < (size_t)n_threads; i++) {
+ if (pthread_create(&readers[i], NULL, ht_reader, NULL) != 0) {
+ ck_error("ERROR: Failed to create thread %zu.\n", i);
+ }
+ }
+
+ for (i = 0; i < keys_length; i++)
+ d += table_insert(keys[i]) == false;
+
+ fprintf(stderr, " [S] %zu entries stored and %u duplicates.\n\n",
+ table_count(), d);
+
+ fprintf(stderr, " ,- BASIC TEST\n");
+ fprintf(stderr, " | Executing SMR test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (table_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += table_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing replacement test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ table_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing get test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (table_get(keys[i]) == 0) {
+ ck_error("ERROR: Unexpected 0 value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ a = 0;
+ fprintf(stderr, " | Executing removal test...");
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ table_remove(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ table_insert(keys[i]);
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing negative look-up test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ table_get(2);
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ ck_epoch_record_t epoch_temporary = epoch_wr;
+ ck_epoch_synchronize(&epoch_wr);
+
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
+ "%u pending, %u peak, %lu reclamations\n\n",
+ epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
+ epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
+
+ fprintf(stderr, " ,- READER CONCURRENCY\n");
+ fprintf(stderr, " | Executing reader test...");
+
+ ck_pr_store_int(&state, HT_STATE_GET);
+ while (ck_pr_load_int(&barrier[HT_STATE_STOP]) != n_threads)
+ ck_pr_stall();
+ ck_pr_inc_int(&barrier[HT_STATE_STOP]);
+ common_sleep(r);
+ ck_pr_store_int(&state, HT_STATE_STRICT_REPLACEMENT);
+ while (ck_pr_load_int(&barrier[HT_STATE_GET]) != n_threads)
+ ck_pr_stall();
+ fprintf(stderr, "done (reader = %" PRIu64 " ticks)\n",
+ accumulator[HT_STATE_GET] / n_threads);
+
+ fprintf(stderr, " | Executing strict replacement test...");
+
+ a = repeated = 0;
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ ck_pr_inc_int(&barrier[HT_STATE_GET]);
+ for (;;) {
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ table_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+
+ ck_pr_store_int(&state, HT_STATE_DELETION);
+ while (ck_pr_load_int(&barrier[HT_STATE_STRICT_REPLACEMENT]) != n_threads)
+ ck_pr_stall();
+ table_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), accumulator[HT_STATE_STRICT_REPLACEMENT] / n_threads);
+
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ fprintf(stderr, " | Executing deletion test (%.2f)...", p_d * 100);
+ a = repeated = 0;
+ ck_pr_inc_int(&barrier[HT_STATE_STRICT_REPLACEMENT]);
+ for (;;) {
+ double delete;
+
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ table_insert(keys[i]);
+ if (p_d != 0.0) {
+ delete = common_drand48();
+ if (delete <= p_d)
+ table_remove(keys[i]);
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+ ck_pr_store_int(&state, HT_STATE_REPLACEMENT);
+ while (ck_pr_load_int(&barrier[HT_STATE_DELETION]) != n_threads)
+ ck_pr_stall();
+
+ table_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), accumulator[HT_STATE_DELETION] / n_threads);
+
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ fprintf(stderr, " | Executing replacement test (%.2f)...", p_r * 100);
+ a = repeated = 0;
+ ck_pr_inc_int(&barrier[HT_STATE_DELETION]);
+ for (;;) {
+ double replace, delete;
+
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ table_insert(keys[i]);
+ if (p_d != 0.0) {
+ delete = common_drand48();
+ if (delete <= p_d)
+ table_remove(keys[i]);
+ }
+ if (p_r != 0.0) {
+ replace = common_drand48();
+ if (replace <= p_r)
+ table_replace(keys[i]);
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+ ck_pr_store_int(&state, HT_STATE_STOP);
+ while (ck_pr_load_int(&barrier[HT_STATE_REPLACEMENT]) != n_threads)
+ ck_pr_stall();
+ table_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), accumulator[HT_STATE_REPLACEMENT] / n_threads);
+
+ ck_pr_inc_int(&barrier[HT_STATE_REPLACEMENT]);
+ epoch_temporary = epoch_wr;
+ ck_epoch_synchronize(&epoch_wr);
+
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
+ "%u pending, %u peak, %lu reclamations\n\n",
+ epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
+ epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
+ return 0;
+}
diff --git a/regressions/ck_ht/benchmark/serial.c b/regressions/ck_ht/benchmark/serial.c
new file mode 100644
index 0000000..0daa45c
--- /dev/null
+++ b/regressions/ck_ht/benchmark/serial.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_ht.h>
+
+#include <assert.h>
+#include <ck_malloc.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include "../../common.h"
+
+static ck_ht_t ht;
+static char **keys;
+static size_t keys_length = 0;
+static size_t keys_capacity = 128;
+
+static void *
+ht_malloc(size_t r)
+{
+
+ return malloc(r);
+}
+
+static void
+ht_free(void *p, size_t b, bool r)
+{
+
+ (void)b;
+ (void)r;
+
+ free(p);
+
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = ht_malloc,
+ .free = ht_free
+};
+
+static void
+table_init(void)
+{
+ unsigned int mode = CK_HT_MODE_BYTESTRING;
+
+#ifdef HT_DELETE
+ mode |= CK_HT_WORKLOAD_DELETE;
+#endif
+
+ common_srand48((long int)time(NULL));
+ if (ck_ht_init(&ht, mode, NULL, &my_allocator, 8, common_lrand48()) == false) {
+ perror("ck_ht_init");
+ exit(EXIT_FAILURE);
+ }
+
+ return;
+}
+
+static bool
+table_remove(const char *value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ size_t l = strlen(value);
+
+ ck_ht_hash(&h, &ht, value, l);
+ ck_ht_entry_key_set(&entry, value, l);
+ return ck_ht_remove_spmc(&ht, h, &entry);
+}
+
+static bool
+table_replace(const char *value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ size_t l = strlen(value);
+
+ ck_ht_hash(&h, &ht, value, l);
+ ck_ht_entry_set(&entry, h, value, l, "REPLACED");
+ return ck_ht_set_spmc(&ht, h, &entry);
+}
+
+static void *
+table_get(const char *value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ size_t l = strlen(value);
+ void *v = NULL;
+
+ ck_ht_hash(&h, &ht, value, l);
+ ck_ht_entry_key_set(&entry, value, l);
+
+ if (ck_ht_get_spmc(&ht, h, &entry) == true) {
+ v = ck_ht_entry_value(&entry);
+ }
+ return v;
+}
+
+static bool
+table_insert(const char *value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ size_t l = strlen(value);
+
+ ck_ht_hash(&h, &ht, value, l);
+ ck_ht_entry_set(&entry, h, value, l, "VALUE");
+ return ck_ht_put_spmc(&ht, h, &entry);
+}
+
+static size_t
+table_count(void)
+{
+
+ return ck_ht_count(&ht);
+}
+
+static bool
+table_gc(void)
+{
+
+ return ck_ht_gc(&ht, 0, common_lrand48());
+}
+
+static bool
+table_reset(void)
+{
+
+ return ck_ht_reset_spmc(&ht);
+}
+
+static void
+keys_shuffle(char **k)
+{
+ size_t i, j;
+ char *t;
+
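+	/* Shuffle the key array in place so the "random" passes do not benefit from insertion order. */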
+ for (i = keys_length; i > 1; i--) {
+ j = rand() % (i - 1);
+
+ if (j != i - 1) {
+ t = k[i - 1];
+ k[i - 1] = k[j];
+ k[j] = t;
+ }
+ }
+
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ FILE *fp;
+ char buffer[512];
+ size_t i, j, r;
+ unsigned int d = 0;
+ uint64_t s, e, a, ri, si, ai, sr, rg, sg, ag, sd, ng, gg;
+ char **t;
+ struct ck_ht_stat st;
+
+ r = 20;
+ s = 8;
+ srand(time(NULL));
+
+ if (argc < 2) {
+ ck_error("Usage: ck_ht <dictionary> [<repetitions> <initial size>]\n");
+ }
+
+ if (argc >= 3)
+ r = atoi(argv[2]);
+
+ if (argc >= 4)
+ s = (uint64_t)atoi(argv[3]);
+
+ keys = malloc(sizeof(char *) * keys_capacity);
+ assert(keys != NULL);
+
+ fp = fopen(argv[1], "r");
+ assert(fp != NULL);
+
+ while (fgets(buffer, sizeof(buffer), fp) != NULL) {
+ buffer[strlen(buffer) - 1] = '\0';
+ keys[keys_length++] = strdup(buffer);
+ assert(keys[keys_length - 1] != NULL);
+
+ if (keys_length == keys_capacity) {
+ t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
+ assert(t != NULL);
+ keys = t;
+ }
+ }
+
+ t = realloc(keys, sizeof(char *) * keys_length);
+ assert(t != NULL);
+ keys = t;
+
+ table_init();
+
+ for (i = 0; i < keys_length; i++)
+ d += table_insert(keys[i]) == false;
+ ck_ht_stat(&ht, &st);
+
+ fprintf(stderr, "# %zu entries stored, %u duplicates, %" PRIu64 " probe.\n",
+ table_count(), d, st.probe_maximum);
+
+ fprintf(stderr, "# reverse_insertion serial_insertion random_insertion serial_replace reverse_get serial_get random_get serial_remove negative_get garbage_collect\n\n");
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (table_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = keys_length; i > 0; i--)
+ d += table_insert(keys[i - 1]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ ri = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (table_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += table_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ si = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ keys_shuffle(keys);
+
+ if (table_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += table_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ ai = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ table_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ sr = a / (r * keys_length);
+
+ table_reset();
+ for (i = 0; i < keys_length; i++)
+ table_insert(keys[i]);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = keys_length; i > 0; i--) {
+ if (table_get(keys[i - 1]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ rg = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (table_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ sg = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ keys_shuffle(keys);
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (table_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ ag = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ table_remove(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ table_insert(keys[i]);
+ }
+ sd = a / (r * keys_length);
+
+ for (i = 0; i < keys_length / 2; i++)
+ table_remove(keys[i]);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ table_gc();
+ e = rdtsc();
+ a += e - s;
+ }
+ gg = a / r;
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ table_get("\x50\x03\x04\x05\x06\x10");
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ ng = a / (r * keys_length);
+
+ printf("%zu "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 "\n",
+ keys_length, ri, si, ai, sr, rg, sg, ag, sd, ng, gg);
+
+ return 0;
+}
diff --git a/regressions/ck_ht/validate/Makefile b/regressions/ck_ht/validate/Makefile
new file mode 100644
index 0000000..cb5682c
--- /dev/null
+++ b/regressions/ck_ht/validate/Makefile
@@ -0,0 +1,21 @@
+.PHONY: check clean distribution
+
+OBJECTS=serial serial.delete
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_ht.h ../../../src/ck_ht.c
+ $(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_ht.c
+
+serial.delete: serial.c ../../../include/ck_ht.h ../../../src/ck_ht.c
+ $(CC) $(CFLAGS) -DHT_DELETE -o serial.delete serial.c ../../../src/ck_ht.c
+
+check: all
+ ./serial
+ ./serial.delete
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_ht/validate/serial.c b/regressions/ck_ht/validate/serial.c
new file mode 100644
index 0000000..9a85c2f
--- /dev/null
+++ b/regressions/ck_ht/validate/serial.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_ht.h>
+
+#include <assert.h>
+#include <ck_malloc.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "../../common.h"
+#include "../../../src/ck_ht_hash.h"
+
+static size_t hash_times_called = 0;
+
+static void *
+ht_malloc(size_t r)
+{
+
+ return malloc(r);
+}
+
+static void
+ht_free(void *p, size_t b, bool r)
+{
+
+ (void)b;
+ (void)r;
+ free(p);
+ return;
+}
+
+static void
+ht_hash_wrapper(struct ck_ht_hash *h,
+ const void *key,
+ size_t length,
+ uint64_t seed)
+{
+ hash_times_called++;
+
+ h->value = (unsigned long)MurmurHash64A(key, length, seed);
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = ht_malloc,
+ .free = ht_free
+};
+
+const char *test[] = {"Samy", "Al", "Bahra", "dances", "in", "the", "wind.", "Once",
+ "upon", "a", "time", "his", "gypsy", "ate", "one", "itsy",
+ "bitsy", "spider.", "What", "goes", "up", "must",
+ "come", "down.", "What", "is", "down", "stays",
+ "down.", "A", "B", "C", "D", "E", "F", "G", "H",
+ "I", "J", "K", "L", "M", "N", "O"};
+
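+/* Seven of the sixteen direct keys below are repeats; main() expects exactly seven rejected insertions. */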
+static uintptr_t direct[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 1, 2, 3, 4, 5, 9 };
+
+const char *negative = "negative";
+
+int
+main(void)
+{
+ size_t i, l;
+ ck_ht_t ht;
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ ck_ht_iterator_t iterator = CK_HT_ITERATOR_INITIALIZER;
+ ck_ht_entry_t *cursor;
+ unsigned int mode = CK_HT_MODE_BYTESTRING;
+
+#ifdef HT_DELETE
+ mode |= CK_HT_WORKLOAD_DELETE;
+#endif
+
+ if (ck_ht_init(&ht, mode, ht_hash_wrapper, &my_allocator, 2, 6602834) == false) {
+ perror("ck_ht_init");
+ exit(EXIT_FAILURE);
+ }
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ l = strlen(test[i]);
+ ck_ht_hash(&h, &ht, test[i], l);
+ ck_ht_entry_set(&entry, h, test[i], l, test[i]);
+ ck_ht_put_spmc(&ht, h, &entry);
+ }
+
+ l = strlen(test[0]);
+ ck_ht_hash(&h, &ht, test[0], l);
+ ck_ht_entry_set(&entry, h, test[0], l, test[0]);
+ ck_ht_put_spmc(&ht, h, &entry);
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ l = strlen(test[i]);
+ ck_ht_hash(&h, &ht, test[i], l);
+ ck_ht_entry_key_set(&entry, test[i], l);
+ if (ck_ht_get_spmc(&ht, h, &entry) == false) {
+ ck_error("ERROR (put): Failed to find [%s]\n", test[i]);
+ } else {
+ void *k, *v;
+
+ k = ck_ht_entry_key(&entry);
+ v = ck_ht_entry_value(&entry);
+
+ if (strcmp(k, test[i]) || strcmp(v, test[i])) {
+ ck_error("ERROR: Mismatch: (%s, %s) != (%s, %s)\n",
+ (char *)k, (char *)v, test[i], test[i]);
+ }
+ }
+ }
+
+ ck_ht_hash(&h, &ht, negative, strlen(negative));
+ ck_ht_entry_key_set(&entry, negative, strlen(negative));
+ if (ck_ht_get_spmc(&ht, h, &entry) == true) {
+ ck_error("ERROR: Found non-existing entry.\n");
+ }
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ l = strlen(test[i]);
+ ck_ht_hash(&h, &ht, test[i], l);
+ ck_ht_entry_key_set(&entry, test[i], l);
+
+ if (ck_ht_get_spmc(&ht, h, &entry) == false)
+ continue;
+
+ if (ck_ht_remove_spmc(&ht, h, &entry) == false) {
+ ck_error("ERROR: Failed to delete existing entry\n");
+ }
+
+ if (ck_ht_get_spmc(&ht, h, &entry) == true)
+ ck_error("ERROR: Able to find [%s] after delete\n", test[i]);
+
+ ck_ht_entry_set(&entry, h, test[i], l, test[i]);
+ if (ck_ht_put_spmc(&ht, h, &entry) == false)
+ ck_error("ERROR: Failed to insert [%s]\n", test[i]);
+
+ if (ck_ht_remove_spmc(&ht, h, &entry) == false) {
+ ck_error("ERROR: Failed to delete existing entry\n");
+ }
+ }
+
+ ck_ht_reset_spmc(&ht);
+ if (ck_ht_count(&ht) != 0) {
+ ck_error("ERROR: Map was not reset.\n");
+ }
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ l = strlen(test[i]);
+ ck_ht_hash(&h, &ht, test[i], l);
+ ck_ht_entry_set(&entry, h, test[i], l, test[i]);
+ ck_ht_put_spmc(&ht, h, &entry);
+ }
+
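+	/* test[] holds 44 strings, but "What" and "down." each appear twice, leaving 42 unique keys. */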
+ for (i = 0; ck_ht_next(&ht, &iterator, &cursor) == true; i++);
+ if (i != 42) {
+ ck_error("ERROR: Incorrect number of entries in table.\n");
+ }
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ l = strlen(test[i]);
+ ck_ht_hash(&h, &ht, test[i], l);
+ ck_ht_entry_set(&entry, h, test[i], l, test[i]);
+ ck_ht_set_spmc(&ht, h, &entry);
+ }
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ l = strlen(test[i]);
+ ck_ht_hash(&h, &ht, test[i], l);
+ ck_ht_entry_key_set(&entry, test[i], l);
+ if (ck_ht_get_spmc(&ht, h, &entry) == false) {
+ ck_error("ERROR (set): Failed to find [%s]\n", test[i]);
+ } else {
+ void *k, *v;
+
+ k = ck_ht_entry_key(&entry);
+ v = ck_ht_entry_value(&entry);
+
+ if (strcmp(k, test[i]) || strcmp(v, test[i])) {
+ ck_error("ERROR: Mismatch: (%s, %s) != (%s, %s)\n",
+ (char *)k, (char *)v, test[i], test[i]);
+ }
+ }
+ }
+
+ if (ck_ht_gc(&ht, 0, 27) == false) {
+ ck_error("ck_ht_gc\n");
+ }
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ l = strlen(test[i]);
+ ck_ht_hash(&h, &ht, test[i], l);
+ ck_ht_entry_set(&entry, h, test[i], l, "REPLACED");
+ ck_ht_set_spmc(&ht, h, &entry);
+
+ if (strcmp(test[i], "What") == 0)
+ continue;
+
+ if (strcmp(test[i], "down.") == 0)
+ continue;
+
+ if (strcmp(ck_ht_entry_value(&entry), test[i]) != 0) {
+ ck_error("Mismatch detected: %s, expected %s\n",
+ (char *)ck_ht_entry_value(&entry),
+ test[i]);
+ }
+ }
+
+ ck_ht_iterator_init(&iterator);
+ while (ck_ht_next(&ht, &iterator, &cursor) == true) {
+ if (strcmp(ck_ht_entry_value(cursor), "REPLACED") != 0) {
+ ck_error("Mismatch detected: %s, expected REPLACED\n",
+ (char *)ck_ht_entry_value(cursor));
+ }
+ }
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ l = strlen(test[i]);
+ ck_ht_hash(&h, &ht, test[i], l);
+ ck_ht_entry_key_set(&entry, test[i], l);
+
+ if (ck_ht_get_spmc(&ht, h, &entry) == false)
+ continue;
+
+ if (ck_ht_remove_spmc(&ht, h, &entry) == false) {
+ ck_error("ERROR: Failed to delete existing entry\n");
+ }
+
+ if (ck_ht_get_spmc(&ht, h, &entry) == true)
+ ck_error("ERROR: Able to find [%s] after delete\n", test[i]);
+
+ ck_ht_entry_set(&entry, h, test[i], l, test[i]);
+ if (ck_ht_put_spmc(&ht, h, &entry) == false)
+ ck_error("ERROR: Failed to insert [%s]\n", test[i]);
+
+ if (ck_ht_remove_spmc(&ht, h, &entry) == false) {
+ ck_error("ERROR: Failed to delete existing entry\n");
+ }
+ }
+
+ ck_ht_destroy(&ht);
+
+ if (hash_times_called == 0) {
+ ck_error("ERROR: Our hash function was not called!\n");
+ }
+
+ hash_times_called = 0;
+
+ if (ck_ht_init(&ht, CK_HT_MODE_DIRECT, ht_hash_wrapper, &my_allocator, 8, 6602834) == false) {
+ perror("ck_ht_init");
+ exit(EXIT_FAILURE);
+ }
+
+ l = 0;
+ for (i = 0; i < sizeof(direct) / sizeof(*direct); i++) {
+ ck_ht_hash_direct(&h, &ht, direct[i]);
+ ck_ht_entry_set_direct(&entry, h, direct[i], (uintptr_t)test[i]);
+ l += ck_ht_put_spmc(&ht, h, &entry) == false;
+ }
+
+ if (l != 7) {
+ ck_error("ERROR: Got %zu failures rather than 7\n", l);
+ }
+
+ for (i = 0; i < sizeof(direct) / sizeof(*direct); i++) {
+ ck_ht_hash_direct(&h, &ht, direct[i]);
+ ck_ht_entry_set_direct(&entry, h, direct[i], (uintptr_t)"REPLACED");
+ l += ck_ht_set_spmc(&ht, h, &entry) == false;
+ }
+
+ ck_ht_iterator_init(&iterator);
+ while (ck_ht_next(&ht, &iterator, &cursor) == true) {
+ if (strcmp(ck_ht_entry_value(cursor), "REPLACED") != 0) {
+ ck_error("Mismatch detected: %s, expected REPLACED\n",
+ (char *)ck_ht_entry_value(cursor));
+ }
+ }
+
+ ck_ht_destroy(&ht);
+
+ if (hash_times_called == 0) {
+ ck_error("ERROR: Our hash function was not called!\n");
+ }
+
+ return 0;
+}
diff --git a/regressions/ck_pflock/benchmark/Makefile b/regressions/ck_pflock/benchmark/Makefile
new file mode 100644
index 0000000..6f739d9
--- /dev/null
+++ b/regressions/ck_pflock/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: clean distribution
+
+OBJECTS=latency throughput
+
+all: $(OBJECTS)
+
+latency: latency.c ../../../include/ck_pflock.h
+ $(CC) $(CFLAGS) -o latency latency.c
+
+throughput: throughput.c ../../../include/ck_pflock.h
+ $(CC) $(CFLAGS) -o throughput throughput.c
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_pflock/benchmark/latency.c b/regressions/ck_pflock/benchmark/latency.c
new file mode 100644
index 0000000..a28c9dd
--- /dev/null
+++ b/regressions/ck_pflock/benchmark/latency.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2013 John Wittrock.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_pflock.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+int
+main(void)
+{
+ uint64_t s_b, e_b, i;
+ ck_pflock_t pflock = CK_PFLOCK_INITIALIZER;
+
+ for (i = 0; i < STEPS; i++) {
+ ck_pflock_write_lock(&pflock);
+ ck_pflock_write_unlock(&pflock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_pflock_write_lock(&pflock);
+ ck_pflock_write_unlock(&pflock);
+ }
+ e_b = rdtsc();
+ printf("WRITE: pflock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ }
+ e_b = rdtsc();
+ printf("READ: pflock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ return 0;
+}
+
diff --git a/regressions/ck_pflock/benchmark/throughput.c b/regressions/ck_pflock/benchmark/throughput.c
new file mode 100644
index 0000000..429465f
--- /dev/null
+++ b/regressions/ck_pflock/benchmark/throughput.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2013 John Wittrock.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_pflock.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+static int barrier;
+static int threads;
+static unsigned int flag CK_CC_CACHELINE;
+static ck_pflock_t pflock = CK_PFLOCK_INITIALIZER;
+static struct affinity affinity;
+
+static void *
+thread_pflock(void *pun)
+{
+ uint64_t s_b, e_b, a, i;
+ uint64_t *value = pun;
+
+ if (aff_iterate(&affinity) != 0) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads)
+ ck_pr_stall();
+
+ for (i = 1, a = 0;; i++) {
+ s_b = rdtsc();
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ e_b = rdtsc();
+
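+		/* Sixteen lock/unlock pairs were timed above, so shift right by four for the per-pair cost. */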
+ a += (e_b - s_b) >> 4;
+
+ if (ck_pr_load_uint(&flag) == 1)
+ break;
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads * 2)
+ ck_pr_stall();
+
+ *value = (a / i);
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int t;
+ pthread_t *p;
+ uint64_t *latency;
+
+ if (argc != 3) {
+ ck_error("Usage: throughput <delta> <threads>\n");
+ }
+
+ threads = atoi(argv[2]);
+ if (threads <= 0) {
+ ck_error("ERROR: Threads must be a value > 0.\n");
+ }
+
+ p = malloc(sizeof(pthread_t) * threads);
+ if (p == NULL) {
+ ck_error("ERROR: Failed to initialize thread.\n");
+ }
+
+ latency = malloc(sizeof(uint64_t) * threads);
+ if (latency == NULL) {
+ ck_error("ERROR: Failed to create latency buffer.\n");
+ }
+
+ affinity.delta = atoi(argv[1]);
+ affinity.request = 0;
+
+ fprintf(stderr, "Creating threads (pflock)...");
+ for (t = 0; t < threads; t++) {
+ if (pthread_create(&p[t], NULL, thread_pflock, latency + t) != 0) {
+ ck_error("ERROR: Could not create thread %d\n", t);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ common_sleep(10);
+ ck_pr_store_uint(&flag, 1);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (t = 0; t < threads; t++)
+ pthread_join(p[t], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (t = 1; t <= threads; t++)
+		printf("%10d %20" PRIu64 "\n", t, latency[t - 1]);
+
+ return 0;
+}
+
diff --git a/regressions/ck_pflock/validate/Makefile b/regressions/ck_pflock/validate/Makefile
new file mode 100644
index 0000000..eea9d02
--- /dev/null
+++ b/regressions/ck_pflock/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=validate
+
+all: $(OBJECTS)
+
+validate: validate.c ../../../include/ck_pflock.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate $(CORES) 1
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_pflock/validate/validate.c b/regressions/ck_pflock/validate/validate.c
new file mode 100644
index 0000000..2551755
--- /dev/null
+++ b/regressions/ck_pflock/validate/validate.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra, John Wittrock.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_pflock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+static struct affinity a;
+static unsigned int locked;
+static int nthr;
+static ck_pflock_t lock = CK_PFLOCK_INITIALIZER;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ int i = ITERATE;
+ unsigned int l;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ ck_pflock_write_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_pflock_write_unlock(&lock);
+
+ ck_pflock_read_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_pflock_read_unlock(&lock);
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ int i;
+
+ if (argc != 3) {
+ ck_error("Usage: validate <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ fprintf(stderr, "Creating threads (mutual exclusion)...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return 0;
+}
+
diff --git a/regressions/ck_pr/benchmark/Makefile b/regressions/ck_pr/benchmark/Makefile
new file mode 100644
index 0000000..55183d8
--- /dev/null
+++ b/regressions/ck_pr/benchmark/Makefile
@@ -0,0 +1,31 @@
+.PHONY: clean
+
+all: ck_pr_cas_64 ck_pr_fas_64 ck_pr_cas_64_2 ck_pr_add_64 ck_pr_faa_64 ck_pr_neg_64 fp
+
+fp: fp.c
+ $(CC) $(CFLAGS) -o fp fp.c
+
+ck_pr_cas_64_2: ck_pr_cas_64_2.c
+ $(CC) $(CFLAGS) -o ck_pr_cas_64_2 ck_pr_cas_64_2.c -lm
+
+ck_pr_cas_64: ck_pr_cas_64.c
+ $(CC) $(CFLAGS) -o ck_pr_cas_64 ck_pr_cas_64.c -lm
+
+ck_pr_fas_64: ck_pr_fas_64.c
+ $(CC) $(CFLAGS) -o ck_pr_fas_64 ck_pr_fas_64.c -lm
+
+ck_pr_add_64: ck_pr_add_64.c
+ $(CC) $(CFLAGS) -o ck_pr_add_64 ck_pr_add_64.c -lm
+
+ck_pr_faa_64: ck_pr_faa_64.c
+ $(CC) $(CFLAGS) -o ck_pr_faa_64 ck_pr_faa_64.c -lm
+
+ck_pr_neg_64: ck_pr_neg_64.c
+ $(CC) $(CFLAGS) -o ck_pr_neg_64 ck_pr_neg_64.c -lm
+
+clean:
+ rm -rf ck_pr_cas_64 ck_pr_fas_64 ck_pr_cas_64_2 ck_pr_add_64 \
+ ck_pr_faa_64 ck_pr_neg_64 *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_pr/benchmark/benchmark.h b/regressions/ck_pr/benchmark/benchmark.h
new file mode 100644
index 0000000..f9e4ed2
--- /dev/null
+++ b/regressions/ck_pr/benchmark/benchmark.h
@@ -0,0 +1,130 @@
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+
+/* 8! = 40320, evenly divide 1 .. 8 processor workload. */
+#define WORKLOAD (40320 * 2056)
+
+struct block {
+ unsigned int tid;
+};
+
+static struct affinity a;
+static unsigned int ready;
+static uint64_t *count;
+static uint64_t nthr;
+
+static uint64_t object[2] CK_CC_CACHELINE;
+
+static void *
+fairness(void *null)
+{
+ struct block *context = null;
+ unsigned int i = context->tid;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (ck_pr_load_uint(&ready) == 0);
+ while (ck_pr_load_uint(&ready)) {
+ ATOMIC;
+ ATOMIC;
+ ATOMIC;
+ ATOMIC;
+ ck_pr_store_64(count + i, count[i] + 1);
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ uint64_t v, d;
+ unsigned int i;
+ pthread_t *threads;
+ struct block *context;
+
+ if (argc != 3) {
+ ck_error("Usage: " ATOMIC_STRING " <number of threads> <affinity delta>\n");
+ exit(EXIT_FAILURE);
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ exit(EXIT_FAILURE);
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ exit(EXIT_FAILURE);
+ }
+
+ context = malloc(sizeof(struct block) * nthr);
+ if (context == NULL) {
+ ck_error("ERROR: Could not allocate thread contexts\n");
+ exit(EXIT_FAILURE);
+ }
+
+ a.delta = atoi(argv[2]);
+ a.request = 0;
+
+ count = malloc(sizeof(uint64_t) * nthr);
+ if (count == NULL) {
+ ck_error("ERROR: Could not create acquisition buffer\n");
+ exit(EXIT_FAILURE);
+ }
+ memset(count, 0, sizeof(uint64_t) * nthr);
+
+ fprintf(stderr, "Creating threads (fairness)...");
+ for (i = 0; i < nthr; i++) {
+ context[i].tid = i;
+ if (pthread_create(&threads[i], NULL, fairness, context + i)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ exit(EXIT_FAILURE);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ ck_pr_store_uint(&ready, 1);
+ common_sleep(10);
+ ck_pr_store_uint(&ready, 0);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (i = 0, v = 0; i < nthr; i++) {
+ printf("%d %15" PRIu64 "\n", i, count[i]);
+ v += count[i];
+ }
+
+ printf("\n# total : %15" PRIu64 "\n", v);
+ printf("# throughput : %15" PRIu64 " a/s\n", (v /= nthr) / 10);
+
+ for (i = 0, d = 0; i < nthr; i++)
+ d += (count[i] - v) * (count[i] - v);
+
+ printf("# average : %15" PRIu64 "\n", v);
+ printf("# deviation : %.2f (%.2f%%)\n\n", sqrt(d / nthr), (sqrt(d / nthr) / v) * 100.00);
+
+ return (0);
+}
+
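benchmark.h above reports the total number of acquisitions over a ten second window, the per-thread throughput, and the standard deviation of the per-thread counts; note that the expression (v /= nthr) inside the throughput printf deliberately turns v into the per-thread mean that the deviation loop then reuses. The standalone recap below only mirrors that arithmetic with hypothetical per-thread totals so the reporting is easier to follow; it adds nothing that is not already in the harness.

#include <inttypes.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the reporting logic of benchmark.h: total, per-thread mean and
 * population standard deviation of acquisition counts after a 10s run. */
int
main(void)
{
	uint64_t count[4] = {400, 410, 390, 420};	/* hypothetical per-thread totals */
	uint64_t nthr = 4, total = 0, d = 0, mean;
	unsigned int i;

	for (i = 0; i < nthr; i++)
		total += count[i];

	mean = total / nthr;
	for (i = 0; i < nthr; i++)
		d += (count[i] - mean) * (count[i] - mean);

	printf("# total      : %" PRIu64 "\n", total);
	printf("# throughput : %" PRIu64 " a/s\n", mean / 10);
	printf("# average    : %" PRIu64 "\n", mean);
	printf("# deviation  : %.2f\n", sqrt((double)(d / nthr)));
	return 0;
}

As with the benchmark targets in the Makefile above, sqrt means the program links against -lm.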
diff --git a/regressions/ck_pr/benchmark/ck_pr_add_64.c b/regressions/ck_pr/benchmark/ck_pr_add_64.c
new file mode 100644
index 0000000..9c4d51f
--- /dev/null
+++ b/regressions/ck_pr/benchmark/ck_pr_add_64.c
@@ -0,0 +1,16 @@
+#include <ck_pr.h>
+
+#ifdef CK_F_PR_ADD_64
+#define ATOMIC ck_pr_add_64(object, 1)
+#define ATOMIC_STRING "ck_pr_add_64"
+#include "benchmark.h"
+#else
+#warning Did not find ADD_64 implementation.
+#include <stdlib.h>
+
+int
+main(void)
+{
+ exit(EXIT_FAILURE);
+}
+#endif
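Each benchmark translation unit in this directory follows the same shape: test for the per-operation feature macro, define ATOMIC and ATOMIC_STRING, and include benchmark.h, which supplies main() and the fairness loop over object[]. As a hedged sketch only, this is how a further 64-bit operation could be wired into the harness, assuming (not shown in this patch) that the target exposes ck_pr_sub_64 behind CK_F_PR_SUB_64 and that a matching rule is added to the benchmark Makefile.

#include <ck_pr.h>

#ifdef CK_F_PR_SUB_64
#define ATOMIC ck_pr_sub_64(object, 1)
#define ATOMIC_STRING "ck_pr_sub_64"
#include "benchmark.h"
#else
#warning Did not find SUB_64 implementation.
#include <stdlib.h>

int
main(void)
{
	exit(EXIT_FAILURE);
}
#endif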
diff --git a/regressions/ck_pr/benchmark/ck_pr_cas_64.c b/regressions/ck_pr/benchmark/ck_pr_cas_64.c
new file mode 100644
index 0000000..90dcb64
--- /dev/null
+++ b/regressions/ck_pr/benchmark/ck_pr_cas_64.c
@@ -0,0 +1,16 @@
+#include <ck_pr.h>
+
+#ifdef CK_F_PR_CAS_64
+#define ATOMIC ck_pr_cas_64(object, 1, 1)
+#define ATOMIC_STRING "ck_pr_cas_64"
+#include "benchmark.h"
+#else
+#warning Did not find CAS_64 implementation.
+#include <stdlib.h>
+
+int
+main(void)
+{
+ exit(EXIT_FAILURE);
+}
+#endif
diff --git a/regressions/ck_pr/benchmark/ck_pr_cas_64_2.c b/regressions/ck_pr/benchmark/ck_pr_cas_64_2.c
new file mode 100644
index 0000000..e959b39
--- /dev/null
+++ b/regressions/ck_pr/benchmark/ck_pr_cas_64_2.c
@@ -0,0 +1,17 @@
+#include <ck_pr.h>
+
+#ifdef CK_F_PR_CAS_64_2
+#define ATOMIC { uint64_t z[2] = {1, 2}; ck_pr_cas_64_2(object, z, z); }
+#define ATOMIC_STRING "ck_pr_cas_64_2"
+#include "benchmark.h"
+#else
+#include <stdio.h>
+#include <stdlib.h>
+
+int
+main(void)
+{
+ fprintf(stderr, "Unsupported.\n");
+ return 0;
+}
+#endif
diff --git a/regressions/ck_pr/benchmark/ck_pr_faa_64.c b/regressions/ck_pr/benchmark/ck_pr_faa_64.c
new file mode 100644
index 0000000..9bdc87d
--- /dev/null
+++ b/regressions/ck_pr/benchmark/ck_pr_faa_64.c
@@ -0,0 +1,16 @@
+#include <ck_pr.h>
+
+#ifdef CK_F_PR_FAA_64
+#define ATOMIC ck_pr_faa_64(object, 1)
+#define ATOMIC_STRING "ck_pr_faa_64"
+#include "benchmark.h"
+#else
+#warning Did not find FAA_64 implementation.
+#include <stdlib.h>
+
+int
+main(void)
+{
+ exit(EXIT_FAILURE);
+}
+#endif
diff --git a/regressions/ck_pr/benchmark/ck_pr_fas_64.c b/regressions/ck_pr/benchmark/ck_pr_fas_64.c
new file mode 100644
index 0000000..facd759
--- /dev/null
+++ b/regressions/ck_pr/benchmark/ck_pr_fas_64.c
@@ -0,0 +1,17 @@
+#include <ck_pr.h>
+
+#ifdef CK_F_PR_FAS_64
+#define ATOMIC ck_pr_fas_64(object, 1)
+#define ATOMIC_STRING "ck_pr_fas_64"
+#include "benchmark.h"
+#else
+#warning Did not find FAS_64 implementation.
+#include <stdlib.h>
+
+int
+main(void)
+{
+
+ return 0;
+}
+#endif
diff --git a/regressions/ck_pr/benchmark/ck_pr_neg_64.c b/regressions/ck_pr/benchmark/ck_pr_neg_64.c
new file mode 100644
index 0000000..d4e0ad9
--- /dev/null
+++ b/regressions/ck_pr/benchmark/ck_pr_neg_64.c
@@ -0,0 +1,16 @@
+#include <ck_pr.h>
+
+#ifdef CK_F_PR_NEG_64
+#define ATOMIC ck_pr_neg_64(object)
+#define ATOMIC_STRING "ck_pr_neg_64"
+#include "benchmark.h"
+#else
+#warning Did not find NEG_64 implementation.
+#include <stdlib.h>
+
+int
+main(void)
+{
+ exit(EXIT_FAILURE);
+}
+#endif
diff --git a/regressions/ck_pr/benchmark/fp.c b/regressions/ck_pr/benchmark/fp.c
new file mode 100644
index 0000000..f7aa157
--- /dev/null
+++ b/regressions/ck_pr/benchmark/fp.c
@@ -0,0 +1,66 @@
+#include <stdio.h>
+#include <inttypes.h>
+#include <stdint.h>
+
+#include "../../common.h"
+
+#ifndef IR
+#define IR 3000000
+#endif /* IR */
+
+static int a CK_CC_CACHELINE;
+static int b CK_CC_CACHELINE;
+
+int
+main(void)
+{
+ uint64_t s, e;
+ unsigned int i;
+
+ s = rdtsc();
+ for (i = 0; i < IR; i++) {
+ ck_pr_load_int(&a);
+ ck_pr_fence_strict_load();
+ ck_pr_load_int(&b);
+ }
+ e = rdtsc();
+ printf("[A] fence_load: %" PRIu64 "\n", (e - s) / IR);
+
+ s = rdtsc();
+ for (i = 0; i < IR; i++) {
+ if (ck_pr_load_int(&a) == 0)
+ ck_pr_barrier();
+ ck_pr_fence_strict_lock();
+ ck_pr_load_int(&b);
+ }
+ e = rdtsc();
+ printf("[A] fence_lock: %" PRIu64 "\n", (e - s) / IR);
+
+ s = rdtsc();
+ for (i = 0; i < IR; i++) {
+ ck_pr_store_int(&a, 0);
+ ck_pr_fence_strict_store();
+ ck_pr_store_int(&b, 0);
+ }
+ e = rdtsc();
+ printf("[B] fence_store: %" PRIu64 "\n", (e - s) / IR);
+
+ s = rdtsc();
+ for (i = 0; i < IR; i++) {
+ ck_pr_store_int(&a, 0);
+ ck_pr_fence_strict_memory();
+ ck_pr_load_int(&b);
+ }
+ e = rdtsc();
+ printf("[C] fence_memory: %" PRIu64 "\n", (e - s) / IR);
+
+ s = rdtsc();
+ for (i = 0; i < IR; i++) {
+ ck_pr_store_int(&a, 0);
+ ck_pr_faa_int(&a, 0);
+ ck_pr_load_int(&b);
+ }
+ e = rdtsc();
+ printf("[C] atomic: %" PRIu64 "\n", (e - s) / IR);
+ return 0;
+}
diff --git a/regressions/ck_pr/validate/Makefile b/regressions/ck_pr/validate/Makefile
new file mode 100644
index 0000000..9e4a82d
--- /dev/null
+++ b/regressions/ck_pr/validate/Makefile
@@ -0,0 +1,84 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_pr_cas ck_pr_faa ck_pr_inc ck_pr_dec ck_pr_bts \
+ ck_pr_btr ck_pr_btc ck_pr_load ck_pr_store \
+ ck_pr_and ck_pr_or ck_pr_xor ck_pr_add ck_pr_sub \
+ ck_pr_fas ck_pr_bin ck_pr_btx ck_pr_fax ck_pr_n \
+ ck_pr_unary
+
+all: $(OBJECTS)
+
+check: all
+ for d in $(OBJECTS) ; do \
+ echo $$d; \
+ ./$$d || exit 1; \
+ done;
+
+ck_pr_cas: ck_pr_cas.c
+ $(CC) $(CFLAGS) -o ck_pr_cas ck_pr_cas.c
+
+ck_pr_inc: ck_pr_inc.c
+ $(CC) $(CFLAGS) -o ck_pr_inc ck_pr_inc.c
+
+ck_pr_dec: ck_pr_dec.c
+ $(CC) $(CFLAGS) -o ck_pr_dec ck_pr_dec.c
+
+ck_pr_faa: ck_pr_faa.c
+ $(CC) $(CFLAGS) -o ck_pr_faa ck_pr_faa.c
+
+ck_pr_btc: ck_pr_btc.c
+ $(CC) $(CFLAGS) -o ck_pr_btc ck_pr_btc.c
+
+ck_pr_btr: ck_pr_btr.c
+ $(CC) $(CFLAGS) -o ck_pr_btr ck_pr_btr.c
+
+ck_pr_bts: ck_pr_bts.c
+ $(CC) $(CFLAGS) -o ck_pr_bts ck_pr_bts.c
+
+ck_pr_load: ck_pr_load.c
+ $(CC) $(CFLAGS) -o ck_pr_load ck_pr_load.c
+
+ck_pr_store: ck_pr_store.c
+ $(CC) $(CFLAGS) -o ck_pr_store ck_pr_store.c
+
+ck_pr_and: ck_pr_and.c
+ $(CC) $(CFLAGS) -o ck_pr_and ck_pr_and.c
+
+ck_pr_or: ck_pr_or.c
+ $(CC) $(CFLAGS) -o ck_pr_or ck_pr_or.c
+
+ck_pr_xor: ck_pr_xor.c
+ $(CC) $(CFLAGS) -o ck_pr_xor ck_pr_xor.c
+
+ck_pr_add: ck_pr_add.c
+ $(CC) $(CFLAGS) -o ck_pr_add ck_pr_add.c
+
+ck_pr_sub: ck_pr_sub.c
+ $(CC) $(CFLAGS) -o ck_pr_sub ck_pr_sub.c
+
+ck_pr_fas: ck_pr_fas.c
+ $(CC) $(CFLAGS) -o ck_pr_fas ck_pr_fas.c
+
+ck_tp: ck_tp.c
+ $(CC) $(CFLAGS) -o ck_tp ck_tp.c
+
+ck_pr_bin: ck_pr_bin.c
+ $(CC) $(CFLAGS) -o ck_pr_bin ck_pr_bin.c
+
+ck_pr_btx: ck_pr_btx.c
+ $(CC) $(CFLAGS) -o ck_pr_btx ck_pr_btx.c
+
+ck_pr_fax: ck_pr_fax.c
+ $(CC) $(CFLAGS) -o ck_pr_fax ck_pr_fax.c
+
+ck_pr_n: ck_pr_n.c
+ $(CC) $(CFLAGS) -o ck_pr_n ck_pr_n.c
+
+ck_pr_unary: ck_pr_unary.c
+ $(CC) $(CFLAGS) -o ck_pr_unary ck_pr_unary.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_pr/validate/ck_pr_add.c b/regressions/ck_pr/validate/ck_pr_add.c
new file mode 100644
index 0000000..31f1893
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_add.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define CK_PR_ADD_T(w, v, d) \
+ { \
+ uint##w##_t t = v; \
+ ck_pr_add_##w(&t, d); \
+ if (t != (uint##w##_t)(v + d)) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " (%" PRIu##w ") -> %" PRIu##w "]\n",\
+ (uint##w##_t)v, d, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_ADD_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_add_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = common_rand() % ((uint##w##_t)-1 / 2); \
+ uint##w##_t b = common_rand() % ((uint##w##_t)-1 / 2); \
+ CK_PR_ADD_T(w, a, b); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_ADD_W(m, w) \
+ { \
+ uint##m##_t t = -1, r = -1 & ~(uint##m##_t)(uint##w##_t)-1; \
+ ck_pr_add_##w((uint##w##_t *)(void *)&t, 1); \
+ if (t != r) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, r);\
+ exit(EXIT_FAILURE); \
+ } \
+ t = 0, r = (uint##m##_t)(uint##w##_t)-1; \
+ ck_pr_add_##w((uint##w##_t *)(void *)&t, -1); \
+ if (t != r) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, r);\
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+ /* Other architectures are bi-endian. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_ADD_64
+ if (m == 64) {
+#if defined(CK_F_PR_ADD_32)
+ CK_PR_ADD_W(64, 32);
+#endif
+#if defined(CK_PR_ADD_16)
+ CK_PR_ADD_W(64, 16);
+#endif
+#if defined(CK_PR_ADD_8)
+ CK_PR_ADD_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_ADD_64 */
+
+#ifdef CK_F_PR_ADD_32
+ if (m == 32) {
+#if defined(CK_F_PR_ADD_16)
+ CK_PR_ADD_W(32, 16);
+#endif
+#if defined(CK_PR_ADD_8)
+ CK_PR_ADD_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_ADD_32 */
+
+#if defined(CK_F_PR_ADD_16) && defined(CK_PR_ADD_8)
+ if (m == 16) {
+ CK_PR_ADD_W(16, 8);
+ }
+#endif /* CK_PR_ADD_16 && CK_PR_ADD_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_ADD_64
+ CK_PR_ADD_B(64);
+#endif
+
+#ifdef CK_F_PR_ADD_32
+ CK_PR_ADD_B(32);
+#endif
+
+#ifdef CK_F_PR_ADD_16
+ CK_PR_ADD_B(16);
+#endif
+
+#ifdef CK_F_PR_ADD_8
+ CK_PR_ADD_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_and.c b/regressions/ck_pr/validate/ck_pr_and.c
new file mode 100644
index 0000000..4c569bb
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_and.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define BM(m, w) ((uint##m##_t)-1 << (w))
+
+#define CK_PR_AND_T(w, v, d) \
+ { \
+ uint##w##_t t = v; \
+ ck_pr_and_##w(&t, d); \
+ if (t != (uint##w##_t)(v & d)) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " (%" PRIu##w ") -> %" PRIu##w "]\n",\
+ (uint##w##_t)v, d, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_AND_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_and_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = (uint##w##_t)common_rand(); \
+ uint##w##_t b = (uint##w##_t)common_rand(); \
+ CK_PR_AND_T(w, a, b); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_AND_W(m, w) \
+ { \
+ uint##m##_t t = -1; \
+ ck_pr_and_##w((uint##w##_t *)(void *)&t, 0); \
+ if (t != BM(m, w)) { \
+ printf(" FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, BM(m, w)); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+ /* Other architectures are bi-endian. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_AND_64
+ if (m == 64) {
+#if defined(CK_F_PR_AND_32)
+ CK_PR_AND_W(64, 32);
+#endif
+#if defined(CK_PR_AND_16)
+ CK_PR_AND_W(64, 16);
+#endif
+#if defined(CK_PR_AND_8)
+ CK_PR_AND_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_AND_64 */
+
+#ifdef CK_F_PR_AND_32
+ if (m == 32) {
+#if defined(CK_F_PR_AND_16)
+ CK_PR_AND_W(32, 16);
+#endif
+#if defined(CK_PR_AND_8)
+ CK_PR_AND_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_AND_32 */
+
+#if defined(CK_F_PR_AND_16) && defined(CK_PR_AND_8)
+ if (m == 16) {
+ CK_PR_AND_W(16, 8);
+ }
+#endif /* CK_PR_AND_16 && CK_PR_AND_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_AND_64
+ CK_PR_AND_B(64);
+#endif
+
+#ifdef CK_F_PR_AND_32
+ CK_PR_AND_B(32);
+#endif
+
+#ifdef CK_F_PR_AND_16
+ CK_PR_AND_B(16);
+#endif
+
+#ifdef CK_F_PR_AND_8
+ CK_PR_AND_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_bin.c b/regressions/ck_pr/validate/ck_pr_bin.c
new file mode 100644
index 0000000..31868f4
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_bin.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_pr.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "../../common.h"
+#define REPEAT 2000000
+
+#define TEST_BINARY(K, S, T, P, D) \
+ static void \
+ run_test_##K##_##S(void) \
+ { \
+ int i, r; \
+ T serial_result = 65535; \
+ T ck_result = 65535; \
+ \
+ puts("***TESTING ck_pr_" #K "_" #S "***"); \
+ common_srand((unsigned int)getpid()); \
+ for (i = 0; i < REPEAT; ++i) { \
+ r = common_rand(); \
+ serial_result = serial_result P r; \
+ ck_pr_##K##_##S(&ck_result, r); \
+ } \
+ \
+ printf("Value of operation " #K " on 2000000 " \
+ "random numbers\n\tusing " #P ": %" #D ",\n" \
+ "\tusing ck_pr_"#K"_"#S": %" #D "\n", \
+ serial_result, ck_result); \
+ (serial_result == ck_result) ? puts("SUCCESS.") \
+ : puts("FAILURE."); \
+ \
+ return; \
+ } \
+
+#define GENERATE_TEST(K, P) \
+ TEST_BINARY(K, int, int, P, d) \
+ TEST_BINARY(K, uint, unsigned int, P, u) \
+ static void \
+ run_test_##K(void) \
+ { \
+ run_test_##K##_int(); \
+ run_test_##K##_uint(); \
+ \
+ return; \
+ }
+
+GENERATE_TEST(add, +)
+GENERATE_TEST(sub, -)
+GENERATE_TEST(and, &)
+GENERATE_TEST(or, |)
+GENERATE_TEST(xor, ^)
+
+#undef GENERATE_TEST
+#undef TEST_BINARY
+
+int
+main(void)
+{
+ run_test_add();
+ run_test_sub();
+ run_test_and();
+ run_test_or();
+ run_test_xor();
+
+ return (0);
+}
+
+
diff --git a/regressions/ck_pr/validate/ck_pr_btc.c b/regressions/ck_pr/validate/ck_pr_btc.c
new file mode 100644
index 0000000..0edec98
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_btc.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+/*
+ * Bit selector.
+ */
+#define BM(v, b) (((v) >> (b)) & 1)
+
+#define CK_PR_BTC_T(w, v) \
+ { \
+ unsigned int j; \
+ uint##w##_t r = v; \
+ bool t; \
+ for (j = 0; j < (w); j++) { \
+ t = ck_pr_btc_##w(&r, j); \
+ if ((t && !BM(v, j)) || ((BM(v, j) + BM(r, j)) != 1)) { \
+ printf("FAIL [%" PRIx##w ":%u]\n", r, j); \
+ exit(EXIT_FAILURE); \
+ } \
+ } \
+ }
+
+#define CK_PR_BTC_B(w) \
+ { \
+ uint##w##_t o; \
+ unsigned int i; \
+ printf("ck_pr_btc_" #w ": "); \
+ for (i = 0; i < R_REPEAT; i++) { \
+ o = (uint##w##_t)common_rand(); \
+ CK_PR_BTC_T(w, o); \
+ } \
+ printf(" SUCCESS\n"); \
+ }
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_BTC_64
+ CK_PR_BTC_B(64);
+#endif
+
+#ifdef CK_F_PR_BTC_32
+ CK_PR_BTC_B(32);
+#endif
+
+#ifdef CK_F_PR_BTC_16
+ CK_PR_BTC_B(16);
+#endif
+
+#ifdef CK_F_PR_BTC_8
+ CK_PR_BTC_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_btr.c b/regressions/ck_pr/validate/ck_pr_btr.c
new file mode 100644
index 0000000..91abb30
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_btr.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+/*
+ * Bit selector.
+ */
+#define BM(v, b) (((v) >> (b)) & 1)
+
+#define CK_PR_BTR_T(w, v) \
+ { \
+ unsigned int j; \
+ uint##w##_t r = v, c = v; \
+ bool t; \
+ for (j = 0; j < (w); j++) { \
+			c &= (uint##w##_t)-1 ^ ((uint##w##_t)1 << j);		\
+ t = ck_pr_btr_##w(&r, j); \
+ if ((t && !BM(v, j)) || (r != c)) { \
+ printf("FAIL [%" PRIx##w ":%u != %" PRIx##w ":%u]\n", r, j, c, j); \
+ exit(EXIT_FAILURE); \
+ } \
+ } \
+ }
+
+#define CK_PR_BTR_B(w) \
+ { \
+ uint##w##_t o; \
+ unsigned int i; \
+ printf("ck_pr_btr_" #w ": "); \
+ for (i = 0; i < R_REPEAT; i++) { \
+ o = (uint##w##_t)common_rand(); \
+ CK_PR_BTR_T(w, o); \
+ } \
+ printf(" SUCCESS\n"); \
+ }
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_BTR_64
+ CK_PR_BTR_B(64);
+#endif
+
+#ifdef CK_F_PR_BTR_32
+ CK_PR_BTR_B(32);
+#endif
+
+#ifdef CK_F_PR_BTR_16
+ CK_PR_BTR_B(16);
+#endif
+
+#ifdef CK_F_PR_BTR_8
+ CK_PR_BTR_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_bts.c b/regressions/ck_pr/validate/ck_pr_bts.c
new file mode 100644
index 0000000..1e62165
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_bts.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+/*
+ * Bit selector.
+ */
+#define BM(v, b) (((v) >> (b)) & 1)
+
+#define CK_PR_BTS_T(w, v) \
+ { \
+ unsigned int j; \
+ uint##w##_t r = v, c = v; \
+ bool t; \
+ for (j = 0; j < (w); j++) { \
+ c |= (uint##w##_t)1 << j; \
+ t = ck_pr_bts_##w(&r, j); \
+ if ((t && !BM(v, j)) || (r != c)) { \
+ printf("FAIL [%" PRIx##w ":%u != %" PRIx##w ":%u]\n", r, j, c, j); \
+ exit(EXIT_FAILURE); \
+ } \
+ } \
+ }
+
+#define CK_PR_BTS_B(w) \
+ { \
+ uint##w##_t o; \
+ unsigned int i; \
+ printf("ck_pr_bts_" #w ": "); \
+ for (i = 0; i < R_REPEAT; i++) { \
+ o = (uint##w##_t)common_rand(); \
+ CK_PR_BTS_T(w, o); \
+ } \
+ printf(" SUCCESS\n"); \
+ }
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_BTS_64
+ CK_PR_BTS_B(64);
+#endif
+
+#ifdef CK_F_PR_BTS_32
+ CK_PR_BTS_B(32);
+#endif
+
+#ifdef CK_F_PR_BTS_16
+ CK_PR_BTS_B(16);
+#endif
+
+#ifdef CK_F_PR_BTS_8
+ CK_PR_BTS_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_btx.c b/regressions/ck_pr/validate/ck_pr_btx.c
new file mode 100644
index 0000000..2bb3964
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_btx.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <ck_pr.h>
+
+#include "../../common.h"
+#define REPEAT 2000000
+
+#define TEST_BTX(K, S, M, T, L, P, D, R) \
+ static bool \
+ test_##K##_##S(M *target, int offset) \
+ { \
+ T previous; \
+ const L change = R (0x01 << offset); \
+ \
+ previous = (T)*target; \
+ *target = previous P change; \
+ return ((previous >> offset) & 0x01); \
+ } \
+ static void \
+ run_test_##K##_##S(void) \
+ { \
+ int i, offset, m; \
+ bool serial_t, ck_pr_t; \
+ T x = 65535, y = 65535; \
+ \
+ common_srand((unsigned int)getpid()); \
+ m = sizeof(T) * 8; \
+ \
+ puts("***TESTING ck_pr_"#K"_"#S"***"); \
+ for (i = 0; i < REPEAT; ++i) { \
+ offset = common_rand() % m; \
+ serial_t = test_##K##_##S(&x, offset); \
+ ck_pr_t = ck_pr_##K##_##S(&y, offset); \
+ \
+		if (serial_t != ck_pr_t || x != y) {			\
+ printf("Serial(%"#D") and ck_pr(%"#D")" \
+ #K"_"#S " do not match.\n" \
+ "FAILURE.\n", \
+ serial_t, ck_pr_t); \
+ \
+ return; \
+ } \
+ } \
+ printf("\tserial_"#K"_"#S": %"#D"\n" \
+ "\tck_pr_"#K"_"#S": %"#D"\n" \
+ "SUCCESS.\n", \
+ x, y); \
+ \
+ return; \
+ }
+
+#define TEST_BTX_S(K, S, T, P, D, R) TEST_BTX(K, S, T, T, T, P, D, R)
+
+#define GENERATE_TEST(K, P, R) \
+ TEST_BTX_S(K, int, int, P, d, R) \
+ TEST_BTX_S(K, uint, unsigned int, P, u, R) \
+ static void \
+ run_test_##K(void) \
+ { \
+ run_test_##K##_int(); \
+ run_test_##K##_uint(); \
+ \
+ return; \
+ }
+
+GENERATE_TEST(btc, ^, 0+)
+GENERATE_TEST(btr, &, ~)
+GENERATE_TEST(bts, |, 0+)
+
+#undef GENERATE_TEST
+#undef TEST_BTX_S
+#undef TEST_BTX
+
+int
+main(void)
+{
+ run_test_btc();
+ run_test_btr();
+ run_test_bts();
+
+ return (0);
+}
+
+
diff --git a/regressions/ck_pr/validate/ck_pr_cas.c b/regressions/ck_pr/validate/ck_pr_cas.c
new file mode 100644
index 0000000..132d1e5
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_cas.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define W(w, x) (uint##w##_t)((x) & (uint##w##_t)~0)
+
+#define CK_PR_CAS_T(w, v, c, s) \
+ { \
+ uint##w##_t t = v; \
+ bool r; \
+ r = ck_pr_cas_##w(&t, c, s); \
+ if (((c == v) && (r == false)) || ((c != v) && (r == true)) || \
+ ((r == true) && (W(w, s) != t))) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " (%" PRIu##w " -> %" PRIu##w ")" \
+ " -> %" PRIu##w "]\n", \
+ (uint##w##_t)(v), (uint##w##_t)(c), W(w, s), (uint##w##_t)(t)); \
+ } \
+ }
+
+#define CK_PR_CAS_B(w) \
+ { \
+ unsigned int __ck_i; \
+ printf("ck_pr_cas_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = common_rand() % (uint##w##_t)-1; \
+ CK_PR_CAS_T(w, a, a + 1, (a - 1)); \
+ CK_PR_CAS_T(w, a, a, (a - 1)); \
+ CK_PR_CAS_T(w, a, a + 1, a); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_CAS_W(m, w) \
+ { \
+ uint##m##_t t = -1, r = -1 & ~(uint##m##_t)(uint##w##_t)-1; \
+ ck_pr_cas_##w((uint##w##_t *)(void *)&t, (uint##w##_t)t, 0); \
+ if (t != r) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", \
+ (uint##m##_t)t, (uint##m##_t)r); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+ /* Other architectures are bi-endian. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_CAS_64
+ if (m == 64) {
+#if defined(CK_F_PR_CAS_32)
+ CK_PR_CAS_W(64, 32);
+#endif
+#if defined(CK_PR_CAS_16)
+ CK_PR_CAS_W(64, 16);
+#endif
+#if defined(CK_PR_CAS_8)
+ CK_PR_CAS_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_CAS_64 */
+
+#ifdef CK_F_PR_CAS_32
+ if (m == 32) {
+#if defined(CK_F_PR_CAS_16)
+ CK_PR_CAS_W(32, 16);
+#endif
+#if defined(CK_PR_CAS_8)
+ CK_PR_CAS_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_CAS_32 */
+
+#if defined(CK_F_PR_CAS_16) && defined(CK_PR_CAS_8)
+ if (m == 16) {
+ CK_PR_CAS_W(16, 8);
+ }
+#endif /* CK_PR_CAS_16 && CK_PR_CAS_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_CAS_64
+ CK_PR_CAS_B(64);
+#endif
+
+#ifdef CK_F_PR_CAS_32
+ CK_PR_CAS_B(32);
+#endif
+
+#ifdef CK_F_PR_CAS_16
+ CK_PR_CAS_B(16);
+#endif
+
+#ifdef CK_F_PR_CAS_8
+ CK_PR_CAS_B(8);
+#endif
+
+#ifdef CK_F_PR_CAS_64_VALUE
+ uint64_t a = 0xffffffffaaaaaaaa, b = 0x8888888800000000;
+
+ printf("%" PRIx64 " (%" PRIx64 ") -> ", b, a);
+ ck_pr_cas_64_value(&a, a, b, &b);
+ printf("%" PRIx64 " (%" PRIx64 ")\n", b, a);
+#endif
+
+ return (0);
+}
+
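The CK_F_PR_CAS_64_VALUE block at the end of ck_pr_cas.c only prints the effect of a single ck_pr_cas_64_value call. The usual idiom behind that interface is a compare-and-swap retry loop, sketched below under the assumption that ck_pr_cas_64_value keeps the signature used above (target, compare, set, pointer receiving the witnessed value), returns true on success, and that ck_pr_load_64 and ck_pr_stall are available alongside it; this is illustrative only and not part of the patch.

#include <ck_pr.h>
#include <stdint.h>

#ifdef CK_F_PR_CAS_64_VALUE
static uint64_t shared_counter;

/* Atomically adds delta to shared_counter.  On failure, ck_pr_cas_64_value
 * has already written the witnessed value into snapshot, so no reload of
 * the target is needed before retrying. */
static uint64_t
counter_add(uint64_t delta)
{
	uint64_t snapshot = ck_pr_load_64(&shared_counter);

	while (ck_pr_cas_64_value(&shared_counter, snapshot,
	    snapshot + delta, &snapshot) == false)
		ck_pr_stall();

	return snapshot + delta;
}

int
main(void)
{

	return counter_add(1) == 1 ? 0 : 1;
}
#else
#include <stdlib.h>

int
main(void)
{

	return EXIT_SUCCESS;
}
#endif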
diff --git a/regressions/ck_pr/validate/ck_pr_dec.c b/regressions/ck_pr/validate/ck_pr_dec.c
new file mode 100644
index 0000000..86ce088
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_dec.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define CK_PR_DEC_T(w, v) \
+ { \
+ uint##w##_t t = v; \
+ ck_pr_dec_##w(&t); \
+ if ((t != (uint##w##_t)(v - 1))) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " -> %" PRIu##w "]\n", (uint##w##_t)v, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_DEC_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_dec_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = common_rand() % ((uint##w##_t)-1); \
+ CK_PR_DEC_T(w, a); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_DEC_W(m, w) \
+ { \
+ uint##m##_t t = 0, r = (uint##w##_t)-1; \
+ ck_pr_dec_##w((uint##w##_t *)(void *)&t); \
+ if (t != r) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, r);\
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+ /* Other architectures are bi-endian. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_DEC_64
+ if (m == 64) {
+#if defined(CK_F_PR_DEC_32)
+ CK_PR_DEC_W(64, 32);
+#endif
+#if defined(CK_PR_DEC_16)
+ CK_PR_DEC_W(64, 16);
+#endif
+#if defined(CK_PR_DEC_8)
+ CK_PR_DEC_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_DEC_64 */
+
+#ifdef CK_F_PR_DEC_32
+ if (m == 32) {
+#if defined(CK_F_PR_DEC_16)
+ CK_PR_DEC_W(32, 16);
+#endif
+#if defined(CK_PR_DEC_8)
+ CK_PR_DEC_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_DEC_32 */
+
+#if defined(CK_F_PR_DEC_16) && defined(CK_PR_DEC_8)
+ if (m == 16) {
+ CK_PR_DEC_W(16, 8);
+ }
+#endif /* CK_PR_DEC_16 && CK_PR_DEC_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_DEC_64
+ CK_PR_DEC_B(64);
+#endif
+
+#ifdef CK_F_PR_DEC_32
+ CK_PR_DEC_B(32);
+#endif
+
+#ifdef CK_F_PR_DEC_16
+ CK_PR_DEC_B(16);
+#endif
+
+#ifdef CK_F_PR_DEC_8
+ CK_PR_DEC_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_faa.c b/regressions/ck_pr/validate/ck_pr_faa.c
new file mode 100644
index 0000000..1d10bb9
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_faa.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define CK_PR_FAA_T(w, v, d) \
+ { \
+ uint##w##_t r, t = v; \
+ r = ck_pr_faa_##w(&t, d); \
+ if ((t != (uint##w##_t)(v + d)) || (r != v)) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " (%" PRIu##w ") -> %" PRIu##w \
+ " (%" PRIu##w ")]\n", \
+ (uint##w##_t)v, d, t, r); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_FAA_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_faa_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = common_rand() % ((uint##w##_t)-1 / 2); \
+ uint##w##_t b = common_rand() % ((uint##w##_t)-1 / 2); \
+ CK_PR_FAA_T(w, a, b); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_FAA_W(m, w) \
+ { \
+ uint##m##_t t = -1, r = -1 & ~(uint##m##_t)(uint##w##_t)-1; \
+ ck_pr_faa_##w((uint##w##_t *)(void *)&t, 1); \
+ if (t != r) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, r);\
+ exit(EXIT_FAILURE); \
+ } \
+ t = 0, r = (uint##m##_t)(uint##w##_t)-1; \
+ ck_pr_faa_##w((uint##w##_t *)(void *)&t, -1); \
+ if (t != r) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, r);\
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+ /* Other architectures are bi-endian. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_FAA_64
+ if (m == 64) {
+#if defined(CK_F_PR_FAA_32)
+ CK_PR_FAA_W(64, 32);
+#endif
+#if defined(CK_PR_FAA_16)
+ CK_PR_FAA_W(64, 16);
+#endif
+#if defined(CK_PR_FAA_8)
+ CK_PR_FAA_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_FAA_64 */
+
+#ifdef CK_F_PR_FAA_32
+ if (m == 32) {
+#if defined(CK_F_PR_FAA_16)
+ CK_PR_FAA_W(32, 16);
+#endif
+#if defined(CK_PR_FAA_8)
+ CK_PR_FAA_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_FAA_32 */
+
+#if defined(CK_F_PR_FAA_16) && defined(CK_PR_FAA_8)
+ if (m == 16) {
+ CK_PR_FAA_W(16, 8);
+ }
+#endif /* CK_PR_FAA_16 && CK_PR_FAA_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_FAA_64
+ CK_PR_FAA_B(64);
+#endif
+
+#ifdef CK_F_PR_FAA_32
+ CK_PR_FAA_B(32);
+#endif
+
+#ifdef CK_F_PR_FAA_16
+ CK_PR_FAA_B(16);
+#endif
+
+#ifdef CK_F_PR_FAA_8
+ CK_PR_FAA_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_fas.c b/regressions/ck_pr/validate/ck_pr_fas.c
new file mode 100644
index 0000000..00cef4e
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_fas.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define BM(m, w) ((uint##m##_t)(uint##w##_t)(-1))
+
+#define CK_PR_FAS_T(w, v, d) \
+ { \
+ uint##w##_t r, t = v; \
+ r = ck_pr_fas_##w(&t, d); \
+ if ((t != d) || (r != v)) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " (%" PRIu##w ") -> %" PRIu##w \
+ " (%" PRIu##w ")]\n", \
+ (uint##w##_t)v, d, t, r); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_FAS_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_fas_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = common_rand(); \
+ uint##w##_t b = common_rand(); \
+ CK_PR_FAS_T(w, a, b); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_FAS_W(m, w) \
+ { \
+ uint##m##_t t = 0; \
+ ck_pr_fas_##w((uint##w##_t *)(void *)&t, -1); \
+ if (t != BM(m, w)) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, BM(m, w)); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+ /* Other architectures are bi-endian. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_FAS_64
+ if (m == 64) {
+#if defined(CK_F_PR_FAS_32)
+ CK_PR_FAS_W(64, 32);
+#endif
+#if defined(CK_PR_FAS_16)
+ CK_PR_FAS_W(64, 16);
+#endif
+#if defined(CK_PR_FAS_8)
+ CK_PR_FAS_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_FAS_64 */
+
+#ifdef CK_F_PR_FAS_32
+ if (m == 32) {
+#if defined(CK_F_PR_FAS_16)
+ CK_PR_FAS_W(32, 16);
+#endif
+#if defined(CK_PR_FAS_8)
+ CK_PR_FAS_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_FAS_32 */
+
+#if defined(CK_F_PR_FAS_16) && defined(CK_PR_FAS_8)
+ if (m == 16) {
+ CK_PR_FAS_W(16, 8);
+ }
+#endif /* CK_PR_FAS_16 && CK_PR_FAS_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_FAS_64
+ CK_PR_FAS_B(64);
+#endif
+
+#ifdef CK_F_PR_FAS_32
+ CK_PR_FAS_B(32);
+#endif
+
+#ifdef CK_F_PR_FAS_16
+ CK_PR_FAS_B(16);
+#endif
+
+#ifdef CK_F_PR_FAS_8
+ CK_PR_FAS_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_fax.c b/regressions/ck_pr/validate/ck_pr_fax.c
new file mode 100644
index 0000000..9d8c94f
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_fax.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <ck_pr.h>
+
+#include "../../common.h"
+#define REPEAT 2000000
+
+#define TEST_FAX_FN(S, T, M) \
+ static T \
+ test_faa_##S(M *target, T delta) \
+ { \
+ T previous = (T)*target; \
+ *target = (T)*target + delta; \
+ \
+ return (previous); \
+ } \
+ static T \
+ test_fas_##S(M *target, T update) \
+ { \
+ T previous = *target; \
+ *target = update; \
+ \
+ return (previous); \
+ }
+
+#define TEST_FAX_FN_S(S, T) TEST_FAX_FN(S, T, T)
+
+TEST_FAX_FN_S(int, int)
+TEST_FAX_FN_S(uint, unsigned int)
+
+#undef TEST_FAX_FN_S
+#undef TEST_FAX_FN
+
+#define TEST_FAX(K, S, T, D) \
+ static void \
+ run_test_##K##_##S(void) \
+ { \
+ int i, r; \
+ T x = 0, y = 0, x_b, y_b; \
+ \
+ puts("***TESTING ck_pr_"#K"_"#S"***"); \
+ common_srand((unsigned int)getpid()); \
+ for (i = 0; i < REPEAT; ++i) { \
+ r = common_rand(); \
+ x_b = test_##K##_##S(&x, r); \
+ y_b = ck_pr_##K##_##S(&y, r); \
+ \
+ if (x_b != y_b) { \
+ printf("Serial fetch does not match ck_pr fetch.\n" \
+ "\tSerial: %"#D"\n" \
+ "\tck_pr: %"#D"\n", \
+ x_b, y_b); \
+ \
+ return; \
+ } \
+ } \
+ \
+ printf("Final result:\n" \
+ "\tSerial: %"#D"\n" \
+ "\tck_pr: %"#D"\n", \
+ x, y); \
+ (x == y) ? puts("SUCCESS.") \
+ : puts("FAILURE."); \
+ \
+ return; \
+ } \
+
+
+#define GENERATE_TEST(K) \
+ TEST_FAX(K, int, int, d) \
+ TEST_FAX(K, uint, unsigned int, u) \
+ static void \
+ run_test_##K(void) \
+ { \
+ run_test_##K##_int(); \
+ run_test_##K##_uint(); \
+ }
+
+GENERATE_TEST(faa)
+GENERATE_TEST(fas)
+
+#undef GENERATE_TEST
+#undef TEST_FAX
+
+int
+main(void)
+{
+ run_test_faa();
+ run_test_fas();
+
+ return (0);
+}
+
+
diff --git a/regressions/ck_pr/validate/ck_pr_inc.c b/regressions/ck_pr/validate/ck_pr_inc.c
new file mode 100644
index 0000000..e8524a5
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_inc.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define CK_PR_INC_T(w, v) \
+ { \
+ uint##w##_t t = v; \
+ ck_pr_inc_##w(&t); \
+ if ((t != (uint##w##_t)(v + 1))) { \
+ printf("FAIL [%" PRIu##w " -> %" PRIu##w "]\n", \
+ (uint##w##_t)v, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_INC_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_inc_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = common_rand() % ((uint##w##_t)-1); \
+ CK_PR_INC_T(w, a); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_INC_W(m, w) \
+ { \
+ uint##m##_t t = -1, r = -1 & ~(uint##m##_t)(uint##w##_t)-1; \
+ ck_pr_inc_##w((uint##w##_t *)(void *)&t); \
+ if (t != r) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, r);\
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+ /* Other architectures are bi-endian. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_INC_64
+ if (m == 64) {
+#if defined(CK_F_PR_INC_32)
+ CK_PR_INC_W(64, 32);
+#endif
+#if defined(CK_PR_INC_16)
+ CK_PR_INC_W(64, 16);
+#endif
+#if defined(CK_PR_INC_8)
+ CK_PR_INC_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_INC_64 */
+
+#ifdef CK_F_PR_INC_32
+ if (m == 32) {
+#if defined(CK_F_PR_INC_16)
+ CK_PR_INC_W(32, 16);
+#endif
+#if defined(CK_PR_INC_8)
+ CK_PR_INC_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_INC_32 */
+
+#if defined(CK_F_PR_INC_16) && defined(CK_PR_INC_8)
+ if (m == 16) {
+ CK_PR_INC_W(16, 8);
+ }
+#endif /* CK_PR_INC_16 && CK_PR_INC_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_INC_64
+ CK_PR_INC_B(64);
+#endif
+
+#ifdef CK_F_PR_INC_32
+ CK_PR_INC_B(32);
+#endif
+
+#ifdef CK_F_PR_INC_16
+ CK_PR_INC_B(16);
+#endif
+
+#ifdef CK_F_PR_INC_8
+ CK_PR_INC_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_load.c b/regressions/ck_pr/validate/ck_pr_load.c
new file mode 100644
index 0000000..a15acd0
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_load.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define CK_PR_LOAD_B(w) \
+ { \
+ uint##w##_t t = (uint##w##_t)-1, a = 0; \
+ unsigned int i; \
+ printf("ck_pr_load_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ a = ck_pr_load_##w(&t); \
+ if (a != t) { \
+ printf("FAIL [%#" PRIx##w " != %#" PRIx##w "]\n", a, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ for (i = 0; i < R_REPEAT; i++) { \
+ t = (uint##w##_t)common_rand(); \
+ a = ck_pr_load_##w(&t); \
+ if (a != t) { \
+ printf("FAIL [%#" PRIx##w " != %#" PRIx##w "]\n", a, t);\
+ exit(EXIT_FAILURE); \
+ } \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_LOAD_W(m, w) \
+ { \
+ uint##m##_t f = 0; \
+ uint##w##_t j = (uint##w##_t)-1; \
+ f = ck_pr_load_##w(&j); \
+ if (f != j) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##w "]\n", f, j);\
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+	/* Other architectures may be bi-endian; these width checks assume x86/x86_64 byte order. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_LOAD_64
+ if (m == 64) {
+#if defined(CK_F_PR_LOAD_32)
+ CK_PR_LOAD_W(64, 32);
+#endif
+#if defined(CK_PR_LOAD_16)
+ CK_PR_LOAD_W(64, 16);
+#endif
+#if defined(CK_PR_LOAD_8)
+ CK_PR_LOAD_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_LOAD_64 */
+
+#ifdef CK_F_PR_LOAD_32
+ if (m == 32) {
+#if defined(CK_F_PR_LOAD_16)
+ CK_PR_LOAD_W(32, 16);
+#endif
+#if defined(CK_PR_LOAD_8)
+ CK_PR_LOAD_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_LOAD_32 */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_PR_LOAD_8)
+ if (m == 16)
+ CK_PR_LOAD_W(16, 8);
+#endif /* CK_PR_LOAD_16 && CK_PR_LOAD_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_LOAD_64
+ CK_PR_LOAD_B(64);
+#endif
+
+#ifdef CK_F_PR_LOAD_32
+ CK_PR_LOAD_B(32);
+#endif
+
+#ifdef CK_F_PR_LOAD_16
+ CK_PR_LOAD_B(16);
+#endif
+
+#ifdef CK_F_PR_LOAD_8
+ CK_PR_LOAD_B(8);
+#endif
+
+#if 0
+ uint64_t a[2] = {0, 0}, b[2] = {0x1111111144444444, 0x2222222266666666};
+ printf("%" PRIx64 ":%" PRIx64 " -> ", a[0], a[1]);
+ ck_pr_load_64_2(&b, &a);
+ printf("%" PRIx64 ":%" PRIx64 "\n", a[0], a[1]);
+#endif
+
+ return (0);
+}
+
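
The CK_PR_LOAD_B harness above repeatedly verifies that an atomic load observes exactly the value stored at its target. A minimal standalone sketch of that property, assuming a toolchain with ck_pr.h on the include path and CK_F_PR_LOAD_32 defined, might look like this:

#include <assert.h>
#include <stdint.h>
#include <ck_pr.h>

int
main(void)
{
	uint32_t t = 0xdeadbeef;

	/* A relaxed atomic load must return the value currently stored at t. */
	assert(ck_pr_load_32(&t) == 0xdeadbeef);
	return 0;
}
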
diff --git a/regressions/ck_pr/validate/ck_pr_n.c b/regressions/ck_pr/validate/ck_pr_n.c
new file mode 100644
index 0000000..81e3639
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_n.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <ck_pr.h>
+
+#include "../../common.h"
+#define REPEAT 2000000
+
+#define TEST_N(K, S, T, P, D) \
+ static void \
+ run_test_##K##_##S(void) \
+ { \
+ int i, r; \
+ T x = 0, y = 0; \
+ \
+ puts("***TESTING ck_pr_"#K"_"#S"***"); \
+ common_srand((unsigned int)getpid()); \
+ for (i = 0; i < REPEAT; ++i) { \
+ r = common_rand(); \
+ x += r; \
+ x = P x; \
+ y += r; \
+ ck_pr_##K##_##S(&y); \
+ } \
+ \
+ printf("Value of operation "#K" on 2000000 " \
+ "random numbers\n" \
+ "\tusing "#P": %"#D",\n" \
+ "\tusing ck_pr_"#K"_"#S": %"#D",\n", \
+ x, y); \
+ (x == y) ? puts("SUCCESS.") \
+ : puts("FAILURE."); \
+ \
+ return; \
+ }
+
+#define GENERATE_TEST(K, P) \
+ TEST_N(K, int, int, P, d) \
+ TEST_N(K, uint, unsigned int, P, u) \
+ static void \
+ run_test_##K(void) \
+ { \
+ run_test_##K##_int(); \
+ run_test_##K##_uint(); \
+ \
+ return; \
+ }
+
+GENERATE_TEST(not, ~)
+GENERATE_TEST(neg, -)
+
+#undef GENERATE_TEST
+#undef TEST_N
+
+int
+main(void)
+{
+ run_test_not();
+ run_test_neg();
+
+ return (0);
+}
+
+
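
The GENERATE_TEST macros above compare ck_pr_not/ck_pr_neg against the plain C operators over random inputs. A minimal sketch of the same equivalence, assuming the target provides ck_pr_not_int() and ck_pr_neg_int() as the harness requires:

#include <assert.h>
#include <ck_pr.h>

int
main(void)
{
	int x = 42, y = 42;

	x = ~x;            /* serial one's complement */
	ck_pr_not_int(&y); /* atomic one's complement of the same value */
	assert(x == y);

	x = -x;            /* serial negation */
	ck_pr_neg_int(&y); /* atomic negation */
	assert(x == y);
	return 0;
}
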
diff --git a/regressions/ck_pr/validate/ck_pr_or.c b/regressions/ck_pr/validate/ck_pr_or.c
new file mode 100644
index 0000000..27580c3
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_or.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define BM(m, w) (uint##m##_t)(uint##w##_t)-1
+
+#define CK_PR_OR_T(w, v, d) \
+ { \
+ uint##w##_t t; \
+ ck_pr_or_##w(&t, 1ULL << (w - 1)); \
+ t = v; \
+ ck_pr_or_##w(&t, d); \
+ if (t != (uint##w##_t)(v | d)) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " (%" PRIu##w ") -> %" PRIu##w "]\n",\
+ (uint##w##_t)v, d, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_OR_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_or_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = (uint##w##_t)common_rand(); \
+ uint##w##_t b = (uint##w##_t)common_rand(); \
+ CK_PR_OR_T(w, a, b); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_OR_W(m, w) \
+ { \
+ uint##m##_t t = 0; \
+ ck_pr_or_##w((uint##w##_t *)(void *)&t, -1); \
+ if (t != BM(m, w)) { \
+ printf(" FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, BM(m, w)); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+	/* Other architectures may be bi-endian; these width checks assume x86/x86_64 byte order. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_OR_64
+ if (m == 64) {
+#if defined(CK_F_PR_OR_32)
+ CK_PR_OR_W(64, 32);
+#endif
+#if defined(CK_PR_OR_16)
+ CK_PR_OR_W(64, 16);
+#endif
+#if defined(CK_PR_OR_8)
+ CK_PR_OR_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_OR_64 */
+
+#ifdef CK_F_PR_OR_32
+ if (m == 32) {
+#if defined(CK_F_PR_OR_16)
+ CK_PR_OR_W(32, 16);
+#endif
+#if defined(CK_PR_OR_8)
+ CK_PR_OR_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_OR_32 */
+
+#if defined(CK_F_PR_OR_16) && defined(CK_PR_OR_8)
+ if (m == 16) {
+ CK_PR_OR_W(16, 8);
+ }
+#endif /* CK_PR_OR_16 && CK_PR_OR_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_OR_64
+ CK_PR_OR_B(64);
+#endif
+
+#ifdef CK_F_PR_OR_32
+ CK_PR_OR_B(32);
+#endif
+
+#ifdef CK_F_PR_OR_16
+ CK_PR_OR_B(16);
+#endif
+
+#ifdef CK_F_PR_OR_8
+ CK_PR_OR_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_store.c b/regressions/ck_pr/validate/ck_pr_store.c
new file mode 100644
index 0000000..e4b852b
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_store.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "../../common.h"
+#include <ck_pr.h>
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define CK_PR_STORE_B(w) \
+ { \
+ uint##w##_t t = (uint##w##_t)-1, a = 0, b; \
+ ck_pr_store_##w(&b, 1ULL << (w - 1)); \
+ unsigned int i; \
+ printf("ck_pr_store_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ ck_pr_store_##w(&a, t); \
+ if (a != t) { \
+ printf("FAIL [%#" PRIx##w " != %#" PRIx##w "]\n", a, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ for (i = 0; i < R_REPEAT; i++) { \
+ t = (uint##w##_t)common_rand(); \
+ ck_pr_store_##w(&a, t); \
+ if (a != t) { \
+ printf("FAIL [%#" PRIx##w " != %#" PRIx##w "]\n", a, t);\
+ exit(EXIT_FAILURE); \
+ } \
+ } \
+ rg_width(w); \
+ printf("SUCCESS\n"); \
+ }
+
+#define CK_PR_STORE_W(m, w) \
+ { \
+ uint##m##_t f = 0; \
+ uint##w##_t j = (uint##w##_t)-1; \
+ ck_pr_store_##w((uint##w##_t *)(void *)&f, j); \
+ if (f != j) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##w "]\n", f, j);\
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+	/* Other architectures may be bi-endian; these width checks assume x86/x86_64 byte order. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_STORE_64
+ if (m == 64) {
+#if defined(CK_F_PR_STORE_32)
+ CK_PR_STORE_W(64, 32);
+#endif
+#if defined(CK_PR_STORE_16)
+ CK_PR_STORE_W(64, 16);
+#endif
+#if defined(CK_PR_STORE_8)
+ CK_PR_STORE_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_STORE_64 */
+
+#ifdef CK_F_PR_STORE_32
+ if (m == 32) {
+#if defined(CK_F_PR_STORE_16)
+ CK_PR_STORE_W(32, 16);
+#endif
+#if defined(CK_PR_STORE_8)
+ CK_PR_STORE_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_STORE_32 */
+
+#if defined(CK_F_PR_STORE_16) && defined(CK_PR_STORE_8)
+ if (m == 16)
+ CK_PR_STORE_W(16, 8);
+#endif /* CK_PR_STORE_16 && CK_PR_STORE_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+#if defined(CK_F_PR_STORE_DOUBLE) && defined(CK_F_PR_LOAD_DOUBLE)
+ double d;
+
+ ck_pr_store_double(&d, 0.0);
+ if (ck_pr_load_double(&d) != 0.0) {
+ ck_error("Stored 0 in double, did not find 0.\n");
+ }
+#endif
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_STORE_64
+ CK_PR_STORE_B(64);
+#endif
+
+#ifdef CK_F_PR_STORE_32
+ CK_PR_STORE_B(32);
+#endif
+
+#ifdef CK_F_PR_STORE_16
+ CK_PR_STORE_B(16);
+#endif
+
+#ifdef CK_F_PR_STORE_8
+ CK_PR_STORE_B(8);
+#endif
+
+ return (0);
+}
diff --git a/regressions/ck_pr/validate/ck_pr_sub.c b/regressions/ck_pr/validate/ck_pr_sub.c
new file mode 100644
index 0000000..f515914
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_sub.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define CK_PR_SUB_T(w, v, d) \
+ { \
+ uint##w##_t t = v; \
+ ck_pr_sub_##w(&t, d); \
+ if (t != (uint##w##_t)(v - d)) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " (%" PRIu##w ") -> %" PRIu##w "]\n", \
+ (uint##w##_t)v, d, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_SUB_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_sub_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = common_rand() % ((uint##w##_t)-1 / 2); \
+ uint##w##_t b = common_rand() % ((uint##w##_t)-1 / 2); \
+ CK_PR_SUB_T(w, a, b); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_SUB_W(m, w) \
+ { \
+ uint##m##_t t = 0, r = (uint##m##_t)(uint##w##_t)-1; \
+ ck_pr_sub_##w((uint##w##_t *)(void *)&t, 1); \
+ if (t != r) { \
+ printf(" FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, r); \
+ exit(EXIT_FAILURE); \
+ } \
+ t = 0; \
+ ck_pr_sub_##w((uint##w##_t *)(void *)&t, -1); \
+ if (t != 1) { \
+ printf(" FAIL [%#" PRIx##m " != 1]\n", t); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+	/* Other architectures may be bi-endian; these width checks assume x86/x86_64 byte order. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_SUB_64
+ if (m == 64) {
+#if defined(CK_F_PR_SUB_32)
+ CK_PR_SUB_W(64, 32);
+#endif
+#if defined(CK_PR_SUB_16)
+ CK_PR_SUB_W(64, 16);
+#endif
+#if defined(CK_PR_SUB_8)
+ CK_PR_SUB_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_SUB_64 */
+
+#ifdef CK_F_PR_SUB_32
+ if (m == 32) {
+#if defined(CK_F_PR_SUB_16)
+ CK_PR_SUB_W(32, 16);
+#endif
+#if defined(CK_PR_SUB_8)
+ CK_PR_SUB_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_SUB_32 */
+
+#if defined(CK_F_PR_SUB_16) && defined(CK_PR_SUB_8)
+ if (m == 16) {
+ CK_PR_SUB_W(16, 8);
+ }
+#endif /* CK_PR_SUB_16 && CK_PR_SUB_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_SUB_64
+ CK_PR_SUB_B(64);
+#endif
+
+#ifdef CK_F_PR_SUB_32
+ CK_PR_SUB_B(32);
+#endif
+
+#ifdef CK_F_PR_SUB_16
+ CK_PR_SUB_B(16);
+#endif
+
+#ifdef CK_F_PR_SUB_8
+ CK_PR_SUB_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_unary.c b/regressions/ck_pr/validate/ck_pr_unary.c
new file mode 100644
index 0000000..b2300cd
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_unary.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <ck_pr.h>
+
+#define REPEAT 2000000
+
+#define TEST_UNARY(K, S, M, T, P, D, H) \
+ static void \
+ test_##K##_##S(M *target) \
+ { \
+ *target = *target P 1; \
+ \
+ return; \
+ } \
+ static void \
+ test_##K##_##S##_zero(M *target, bool *zero) \
+ { \
+ *zero = *target == H; \
+ *target = *target P 1; \
+ \
+ return; \
+ } \
+ static void \
+ run_test_##K##_##S(bool use_zero) \
+ { \
+ int i; \
+ T x = 1, y = 1; \
+ bool zero_x = false, zero_y = false; \
+ \
+ use_zero ? puts("***TESTING ck_pr_"#K"_"#S"_zero***") \
+ : puts("***TESTING ck_pr_"#K"_"#S"***"); \
+ for (i = 0; i < REPEAT; ++i) { \
+ if (use_zero) { \
+ test_##K##_##S##_zero(&x, &zero_x); \
+ ck_pr_##K##_##S##_zero(&y, &zero_y); \
+ } \
+ else { \
+ test_##K##_##S(&x); \
+ ck_pr_##K##_##S(&y); \
+ } \
+ \
+ if (x != y || zero_x != zero_y) { \
+ printf("Serial(%"#D") and ck_pr(%"#D")" \
+ #K"_"#S" do not match.\n" \
+ "FAILURE.\n", \
+ x, y); \
+ \
+ return; \
+ } \
+ \
+ if (zero_x) \
+ printf("Variables are zero at iteration %d\n", i); \
+ } \
+ \
+ \
+ printf("\tserial_"#K"_"#S": %"#D"\n" \
+ "\tck_pr_"#K"_"#S": %"#D"\n" \
+ "SUCCESS.\n", \
+ x, y); \
+ \
+ return; \
+ }
+
+#define GENERATE_TEST(K, P, Y, Z) \
+ TEST_UNARY(K, int, int, int, P, d, Y) \
+ TEST_UNARY(K, uint, unsigned int, unsigned int, P, u, Z) \
+ static void \
+ run_test_##K(void) \
+ { \
+ run_test_##K##_int(false); \
+ run_test_##K##_int(true); \
+ run_test_##K##_uint(false); \
+ run_test_##K##_uint(true); \
+ }
+
+GENERATE_TEST(inc, +, -1, UINT_MAX)
+GENERATE_TEST(dec, -, 1, 1)
+
+#undef GENERATE_TEST
+#undef TEST_UNARY
+
+int
+main(void)
+{
+ run_test_inc();
+ run_test_dec();
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_xor.c b/regressions/ck_pr/validate/ck_pr_xor.c
new file mode 100644
index 0000000..4515cc4
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_xor.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define BM(m, w) ((uint##m##_t)-1 << (w))
+
+#define CK_PR_XOR_T(w, v, d) \
+ { \
+ uint##w##_t t = v; \
+ ck_pr_xor_##w(&t, d); \
+ if (t != (uint##w##_t)(v ^ d)) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " (%" PRIu##w ") -> %" PRIu##w "]\n",\
+ (uint##w##_t)v, d, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_XOR_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_xor_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = (uint##w##_t)common_rand(); \
+ uint##w##_t b = (uint##w##_t)common_rand(); \
+ CK_PR_XOR_T(w, a, b); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_XOR_W(m, w) \
+ { \
+ uint##m##_t t = -1; \
+ ck_pr_xor_##w((uint##w##_t *)(void *)&t, -1); \
+ if (t != BM(m, w)) { \
+ printf(" FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, BM(m, w)); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+	/* Other architectures may be bi-endian; these width checks assume x86/x86_64 byte order. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_XOR_64
+ if (m == 64) {
+#if defined(CK_F_PR_XOR_32)
+ CK_PR_XOR_W(64, 32);
+#endif
+#if defined(CK_PR_XOR_16)
+ CK_PR_XOR_W(64, 16);
+#endif
+#if defined(CK_PR_XOR_8)
+ CK_PR_XOR_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_XOR_64 */
+
+#ifdef CK_F_PR_XOR_32
+ if (m == 32) {
+#if defined(CK_F_PR_XOR_16)
+ CK_PR_XOR_W(32, 16);
+#endif
+#if defined(CK_PR_XOR_8)
+ CK_PR_XOR_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_XOR_32 */
+
+#if defined(CK_F_PR_XOR_16) && defined(CK_PR_XOR_8)
+ if (m == 16) {
+ CK_PR_XOR_W(16, 8);
+ }
+#endif /* CK_PR_XOR_16 && CK_PR_XOR_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_XOR_64
+ CK_PR_XOR_B(64);
+#endif
+
+#ifdef CK_F_PR_XOR_32
+ CK_PR_XOR_B(32);
+#endif
+
+#ifdef CK_F_PR_XOR_16
+ CK_PR_XOR_B(16);
+#endif
+
+#ifdef CK_F_PR_XOR_8
+ CK_PR_XOR_B(8);
+#endif
+
+ return (0);
+}
+
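
The CK_PR_*_W width checks used throughout these ck_pr tests apply a narrow operation to the low half of a wider word and verify that the upper bits stay untouched. A condensed sketch of the XOR case, assuming a little-endian x86-64 target with CK_F_PR_XOR_32 available:

#include <assert.h>
#include <stdint.h>
#include <ck_pr.h>

int
main(void)
{
	uint64_t t = UINT64_MAX;

	/* XOR the low 32 bits with all ones; only those bits may change. */
	ck_pr_xor_32((uint32_t *)(void *)&t, UINT32_MAX);
	assert(t == 0xffffffff00000000ULL);
	return 0;
}
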
diff --git a/regressions/ck_queue/validate/Makefile b/regressions/ck_queue/validate/Makefile
new file mode 100644
index 0000000..d6be3dc
--- /dev/null
+++ b/regressions/ck_queue/validate/Makefile
@@ -0,0 +1,26 @@
+.PHONY: check clean distribution
+
+HEADER=../../../include/ck_queue.h
+OBJECTS=ck_list ck_slist ck_stailq
+
+all: $(OBJECTS)
+
+check: all
+ ./ck_list $(CORES) 5
+ ./ck_slist $(CORES) 5
+ ./ck_stailq $(CORES) 1000000
+
+ck_list: $(HEADER) ck_list.c
+ $(CC) $(CFLAGS) -o ck_list ck_list.c
+
+ck_slist: $(HEADER) ck_slist.c
+ $(CC) $(CFLAGS) -o ck_slist ck_slist.c
+
+ck_stailq: $(HEADER) ck_stailq.c
+ $(CC) $(CFLAGS) -o ck_stailq ck_stailq.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_queue/validate/ck_list.c b/regressions/ck_queue/validate/ck_list.c
new file mode 100644
index 0000000..daa48b1
--- /dev/null
+++ b/regressions/ck_queue/validate/ck_list.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <ck_queue.h>
+
+#include "../../common.h"
+
+struct test {
+ int value;
+ CK_LIST_ENTRY(test) list_entry;
+};
+static CK_LIST_HEAD(test_list, test) head = CK_LIST_HEAD_INITIALIZER(head);
+
+static int goal;
+
+static void
+test_foreach(void)
+{
+ struct test *n, *next, *safe;
+ int i, s = 0, j = 0, k = 0;
+
+ for (i = goal; i != 0; i = goal) {
+ s = 0;
+
+ CK_LIST_FOREACH(n, &head, list_entry) {
+ j++;
+ if (s == 0)
+ s = n->value;
+ else
+ s = s - 1;
+
+ if (n->value != s) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, n->value);
+ }
+
+ next = CK_LIST_NEXT(n, list_entry);
+ if (next != NULL && next->value != s - 1) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, next->value);
+ }
+
+ i--;
+ }
+
+ if (i == 0)
+ break;
+
+ s = 0;
+ CK_LIST_FOREACH_SAFE(n, &head, list_entry, safe) {
+ k++;
+
+ if (s == 0)
+ s = n->value;
+ else
+ s = s - 1;
+
+ if (n->value != s) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, n->value);
+ }
+
+ next = CK_LIST_NEXT(n, list_entry);
+ if (next != NULL && next->value != s - 1) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, next->value);
+ }
+
+ i--;
+ }
+
+ if (i == 0 || CK_LIST_EMPTY(&head) == true)
+ break;
+ }
+
+ fprintf(stderr, "(%d, %d) ", j, k);
+ return;
+}
+
+static void *
+execute(void *c)
+{
+
+ (void)c;
+ test_foreach();
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *thread;
+ struct test *n, a, b;
+ struct test_list target;
+ int n_threads, i;
+
+ if (argc != 3) {
+ ck_error("Usage: %s <number of threads> <number of list entries>\n", argv[0]);
+ }
+
+ n_threads = atoi(argv[1]);
+ if (n_threads < 1) {
+ ck_error("ERROR: Number of threads must be >= 1.\n");
+ }
+
+ thread = malloc(sizeof(pthread_t) * n_threads);
+ assert(thread != NULL);
+
+ goal = atoi(argv[2]);
+ if (goal < 4) {
+ ck_error("ERROR: Number of entries must be >= 4.\n");
+ }
+
+ fprintf(stderr, "Beginning serial test...");
+ CK_LIST_INIT(&head);
+
+ for (i = 1; i <= goal; i++) {
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = i;
+ CK_LIST_INSERT_HEAD(&head, n, list_entry);
+ }
+
+ test_foreach();
+
+ for (i = 1; i <= goal; i++) {
+ n = CK_LIST_FIRST(&head);
+ CK_LIST_REMOVE(n, list_entry);
+ free(n);
+ }
+
+ CK_LIST_INSERT_HEAD(&head, &a, list_entry);
+ CK_LIST_INSERT_HEAD(&head, &b, list_entry);
+ CK_LIST_REMOVE(&a, list_entry);
+ if (CK_LIST_FIRST(&head) != &b)
+		ck_error("List is in an invalid state.\n");
+ CK_LIST_REMOVE(&b, list_entry);
+
+ if (CK_LIST_EMPTY(&head) == false) {
+ ck_error("List is not empty after bulk removal.\n");
+ }
+
+ CK_LIST_INSERT_HEAD(&head, &a, list_entry);
+ CK_LIST_INSERT_AFTER(&a, &b, list_entry);
+
+ if (CK_LIST_NEXT(&b, list_entry) != NULL)
+		ck_error("Item inserted after the last entry should not have a next pointer.\n");
+
+ CK_LIST_INIT(&head);
+
+ CK_LIST_INSERT_HEAD(&head, &a, list_entry);
+ CK_LIST_INSERT_BEFORE(&a, &b, list_entry);
+
+ if (CK_LIST_NEXT(&b, list_entry) != &a)
+		ck_error("Item inserted before an entry should point to that entry.\n");
+
+ CK_LIST_INIT(&head);
+ fprintf(stderr, "done (success)\n");
+
+ fprintf(stderr, "Beginning parallel traversal...");
+
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = 1;
+ CK_LIST_INSERT_HEAD(&head, n, list_entry);
+
+ for (i = 0; i < n_threads; i++) {
+ int r = pthread_create(&thread[i], NULL, execute, NULL);
+ assert(r == 0);
+ }
+
+ for (i = 2; i <= goal; i++) {
+ volatile int j;
+
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = i;
+ CK_LIST_INSERT_HEAD(&head, n, list_entry);
+ for (j = 0; j <= 1000; j++);
+ }
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(thread[i], NULL);
+
+ for (i = 0; i < n_threads; i++) {
+ int r = pthread_create(&thread[i], NULL, execute, NULL);
+ assert(r == 0);
+ }
+
+ CK_LIST_MOVE(&target, &head, list_entry);
+
+ for (i = 1; i <= goal; i++) {
+ volatile int j;
+
+ if (CK_LIST_EMPTY(&target) == false) {
+ struct test *r = CK_LIST_FIRST(&target);
+ CK_LIST_REMOVE(r, list_entry);
+ }
+
+ for (j = 0; j <= 1000; j++);
+ }
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, "done (success)\n");
+ return (0);
+}
+
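
For reference, a minimal single-threaded sketch of the CK_LIST macros exercised by the serial portion of the test above (the names here are illustrative and not part of the regression suite):

#include <assert.h>
#include <stdio.h>
#include <ck_queue.h>

struct node {
	int value;
	CK_LIST_ENTRY(node) entry;
};

static CK_LIST_HEAD(node_list, node) list = CK_LIST_HEAD_INITIALIZER(list);

int
main(void)
{
	struct node a = { .value = 1 }, b = { .value = 2 };
	struct node *cursor;

	CK_LIST_INSERT_HEAD(&list, &a, entry);
	CK_LIST_INSERT_AFTER(&a, &b, entry);

	CK_LIST_FOREACH(cursor, &list, entry)
		printf("%d\n", cursor->value);	/* prints 1 then 2 */

	CK_LIST_REMOVE(&b, entry);
	CK_LIST_REMOVE(&a, entry);
	assert(CK_LIST_EMPTY(&list) == true);
	return 0;
}
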
diff --git a/regressions/ck_queue/validate/ck_slist.c b/regressions/ck_queue/validate/ck_slist.c
new file mode 100644
index 0000000..7adf2ef
--- /dev/null
+++ b/regressions/ck_queue/validate/ck_slist.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <ck_queue.h>
+
+#include "../../common.h"
+
+struct test {
+ int value;
+ CK_SLIST_ENTRY(test) list_entry;
+};
+static CK_SLIST_HEAD(test_list, test) head = CK_SLIST_HEAD_INITIALIZER(head);
+
+static int goal;
+
+static void
+test_foreach(void)
+{
+ struct test *n, *next, *safe;
+ int i, s = 0, j = 0, k = 0;
+
+ for (i = goal; i != 0; i = goal) {
+ s = 0;
+
+ CK_SLIST_FOREACH(n, &head, list_entry) {
+ j++;
+ if (s == 0)
+ s = n->value;
+ else
+ s = s - 1;
+
+ if (n->value != s) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, n->value);
+ }
+
+ next = CK_SLIST_NEXT(n, list_entry);
+ if (next != NULL && next->value != s - 1) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, next->value);
+ }
+
+ i--;
+ }
+
+ if (i == 0)
+ break;
+
+ s = 0;
+ CK_SLIST_FOREACH_SAFE(n, &head, list_entry, safe) {
+ k++;
+
+ if (s == 0)
+ s = n->value;
+ else
+ s = s - 1;
+
+ if (n->value != s) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, n->value);
+ }
+
+ next = CK_SLIST_NEXT(n, list_entry);
+ if (next != NULL && next->value != s - 1) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, next->value);
+ }
+
+ i--;
+ }
+
+ if (i == 0 || CK_SLIST_EMPTY(&head) == true)
+ break;
+ }
+
+ fprintf(stderr, "(%d, %d) ", j, k);
+ return;
+}
+
+static void *
+execute(void *c)
+{
+
+ (void)c;
+ test_foreach();
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *thread;
+ struct test *n;
+ struct test_list target;
+ int n_threads, i;
+
+ if (argc != 3) {
+ ck_error("Usage: %s <number of threads> <number of list entries>\n", argv[0]);
+ }
+
+ n_threads = atoi(argv[1]);
+ if (n_threads < 1) {
+ ck_error("ERROR: Number of threads must be >= 1.\n");
+ }
+
+ thread = malloc(sizeof(pthread_t) * n_threads);
+ assert(thread != NULL);
+
+ goal = atoi(argv[2]);
+ if (goal < 4) {
+ ck_error("ERROR: Number of entries must be >= 4.\n");
+ }
+
+ fprintf(stderr, "Beginning serial test...");
+ CK_SLIST_INIT(&head);
+
+ for (i = 1; i <= goal; i++) {
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = i;
+ CK_SLIST_INSERT_HEAD(&head, n, list_entry);
+ }
+
+ test_foreach();
+
+ for (i = 1; i <= goal; i++) {
+ n = CK_SLIST_FIRST(&head);
+ CK_SLIST_REMOVE_HEAD(&head, list_entry);
+ free(n);
+ }
+
+ if (CK_SLIST_EMPTY(&head) == false) {
+ ck_error("List is not empty after bulk removal.\n");
+ }
+
+ fprintf(stderr, "done (success)\n");
+
+ fprintf(stderr, "Beginning parallel traversal...");
+
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = 1;
+ CK_SLIST_INSERT_HEAD(&head, n, list_entry);
+
+ for (i = 0; i < n_threads; i++) {
+ int r = pthread_create(&thread[i], NULL, execute, NULL);
+ assert(r == 0);
+ }
+
+ for (i = 2; i <= goal; i++) {
+ volatile int j;
+
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = i;
+ CK_SLIST_INSERT_HEAD(&head, n, list_entry);
+ for (j = 0; j <= 1000; j++);
+ }
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(thread[i], NULL);
+
+ for (i = 0; i < n_threads; i++) {
+ int r = pthread_create(&thread[i], NULL, execute, NULL);
+ assert(r == 0);
+ }
+
+ CK_SLIST_MOVE(&target, &head, list_entry);
+
+ for (i = 1; i <= goal; i++) {
+ volatile int j;
+
+ if (CK_SLIST_EMPTY(&target) == false)
+ CK_SLIST_REMOVE_HEAD(&target, list_entry);
+
+ for (j = 0; j <= 1000; j++);
+
+ if (CK_SLIST_EMPTY(&target) == false) {
+ struct test *r = CK_SLIST_FIRST(&target);
+ CK_SLIST_REMOVE(&target, r, test, list_entry);
+ }
+ }
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, "done (success)\n");
+ return (0);
+}
+
diff --git a/regressions/ck_queue/validate/ck_stailq.c b/regressions/ck_queue/validate/ck_stailq.c
new file mode 100644
index 0000000..219e93f
--- /dev/null
+++ b/regressions/ck_queue/validate/ck_stailq.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <ck_queue.h>
+#include "../../common.h"
+
+struct test {
+ int value;
+ CK_STAILQ_ENTRY(test) list_entry;
+};
+static CK_STAILQ_HEAD(test_list, test) head = CK_STAILQ_HEAD_INITIALIZER(head);
+
+static int goal;
+
+static void
+test_foreach(void)
+{
+ struct test *n, *next, *safe;
+ int i, s = 0, j = 0, k = 0;
+
+ for (i = goal; i != 0; i = goal) {
+ s = 0;
+
+ CK_STAILQ_FOREACH(n, &head, list_entry) {
+ j++;
+ if (s == 0)
+ s = n->value;
+ else
+ s = s - 1;
+
+ if (n->value != s) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, n->value);
+ }
+
+ next = CK_STAILQ_NEXT(n, list_entry);
+ if (next != NULL && next->value != s - 1) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, next->value);
+ }
+
+ i--;
+ }
+
+ if (i == 0)
+ break;
+
+ s = 0;
+ CK_STAILQ_FOREACH_SAFE(n, &head, list_entry, safe) {
+ k++;
+
+ if (s == 0)
+ s = n->value;
+ else
+ s = s - 1;
+
+ if (n->value != s) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, n->value);
+ }
+
+ next = CK_STAILQ_NEXT(n, list_entry);
+ if (next != NULL && next->value != s - 1) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, next->value);
+ }
+
+ i--;
+ }
+
+ if (i == 0 || CK_STAILQ_EMPTY(&head) == true)
+ break;
+ }
+
+ fprintf(stderr, "(%d, %d) ", j, k);
+ return;
+}
+
+static void *
+execute(void *c)
+{
+
+ (void)c;
+ test_foreach();
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *thread;
+ struct test *n, a, b;
+ struct test_list target;
+ int n_threads, i;
+
+ if (argc != 3) {
+ ck_error("Usage: %s <number of threads> <number of list entries>\n", argv[0]);
+ }
+
+ n_threads = atoi(argv[1]);
+ if (n_threads < 1) {
+ ck_error("ERROR: Number of threads must be >= 1.\n");
+ }
+
+ thread = malloc(sizeof(pthread_t) * n_threads);
+ assert(thread != NULL);
+
+ goal = atoi(argv[2]);
+ if (goal < 4) {
+ ck_error("ERROR: Number of entries must be >= 4.\n");
+ }
+
+ fprintf(stderr, "Beginning serial test...");
+ CK_STAILQ_INIT(&head);
+
+ for (i = 1; i <= goal; i++) {
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = i;
+ CK_STAILQ_INSERT_HEAD(&head, n, list_entry);
+ }
+
+ test_foreach();
+
+ for (i = 1; i <= goal; i++) {
+ n = CK_STAILQ_FIRST(&head);
+ CK_STAILQ_REMOVE(&head, n, test, list_entry);
+ free(n);
+ }
+
+ if (CK_STAILQ_EMPTY(&head) == false) {
+ ck_error("List is not empty after bulk removal.\n");
+ }
+
+ for (i = 1; i <= goal; i++) {
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = goal - i;
+ CK_STAILQ_INSERT_TAIL(&head, n, list_entry);
+ }
+
+ test_foreach();
+
+ for (i = 1; i <= goal; i++) {
+ n = CK_STAILQ_FIRST(&head);
+ CK_STAILQ_REMOVE(&head, n, test, list_entry);
+ free(n);
+ }
+
+ if (CK_STAILQ_EMPTY(&head) == false) {
+ ck_error("List is not empty after bulk removal.\n");
+ }
+
+ CK_STAILQ_INSERT_HEAD(&head, &a, list_entry);
+ CK_STAILQ_INSERT_HEAD(&head, &b, list_entry);
+ CK_STAILQ_REMOVE(&head, &a, test, list_entry);
+ if (CK_STAILQ_FIRST(&head) != &b)
+		ck_error("List is in an invalid state.\n");
+ CK_STAILQ_REMOVE(&head, &b, test, list_entry);
+
+ if (CK_STAILQ_EMPTY(&head) == false) {
+ ck_error("List is not empty after bulk removal.\n");
+ }
+
+ CK_STAILQ_INSERT_HEAD(&head, &a, list_entry);
+ CK_STAILQ_INSERT_AFTER(&head, &a, &b, list_entry);
+
+ if (CK_STAILQ_NEXT(&b, list_entry) != NULL)
+		ck_error("Item inserted after the last entry should not have a next pointer.\n");
+
+ CK_STAILQ_INIT(&head);
+
+ CK_STAILQ_INSERT_HEAD(&head, &a, list_entry);
+ if (CK_STAILQ_NEXT(&a, list_entry) != NULL)
+		ck_error("Item inserted as the only entry should not have a next pointer.\n");
+
+ CK_STAILQ_INIT(&head);
+ fprintf(stderr, "done (success)\n");
+
+ fprintf(stderr, "Beginning parallel traversal...");
+
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = 1;
+ CK_STAILQ_INSERT_HEAD(&head, n, list_entry);
+
+ for (i = 0; i < n_threads; i++) {
+ int r = pthread_create(&thread[i], NULL, execute, NULL);
+ assert(r == 0);
+ }
+
+ for (i = 2; i <= goal; i++) {
+ volatile int j;
+
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = i;
+ CK_STAILQ_INSERT_HEAD(&head, n, list_entry);
+ for (j = 0; j <= 1000; j++);
+ }
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(thread[i], NULL);
+
+ for (i = 0; i < n_threads; i++) {
+ int r = pthread_create(&thread[i], NULL, execute, NULL);
+ assert(r == 0);
+ }
+
+ CK_STAILQ_MOVE(&target, &head, list_entry);
+
+ for (i = 1; i <= goal; i++) {
+ volatile int j;
+
+ if (CK_STAILQ_EMPTY(&target) == false) {
+ struct test *r = CK_STAILQ_FIRST(&target);
+ CK_STAILQ_REMOVE(&target, r, test, list_entry);
+ }
+
+ for (j = 0; j <= 1000; j++);
+ }
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, "done (success)\n");
+ return (0);
+}
+
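
The same shape in miniature for CK_STAILQ, contrasting head and tail insertion the way the serial portion of the test above does (again an illustrative sketch, not part of the suite):

#include <assert.h>
#include <ck_queue.h>

struct node {
	int value;
	CK_STAILQ_ENTRY(node) entry;
};

static CK_STAILQ_HEAD(node_list, node) list = CK_STAILQ_HEAD_INITIALIZER(list);

int
main(void)
{
	struct node a = { .value = 1 }, b = { .value = 2 };

	CK_STAILQ_INSERT_HEAD(&list, &a, entry);
	CK_STAILQ_INSERT_HEAD(&list, &b, entry);
	assert(CK_STAILQ_FIRST(&list) == &b);	/* head insertion reverses order */

	CK_STAILQ_REMOVE(&list, &b, node, entry);
	CK_STAILQ_REMOVE(&list, &a, node, entry);
	assert(CK_STAILQ_EMPTY(&list) == true);

	CK_STAILQ_INSERT_TAIL(&list, &a, entry);
	CK_STAILQ_INSERT_TAIL(&list, &b, entry);
	assert(CK_STAILQ_FIRST(&list) == &a);	/* tail insertion preserves order */
	return 0;
}
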
diff --git a/regressions/ck_rhs/benchmark/Makefile b/regressions/ck_rhs/benchmark/Makefile
new file mode 100644
index 0000000..e997993
--- /dev/null
+++ b/regressions/ck_rhs/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: clean distribution
+
+OBJECTS=serial parallel_bytestring
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_rhs.h ../../../src/ck_rhs.c
+ $(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_rhs.c
+
+parallel_bytestring: parallel_bytestring.c ../../../include/ck_rhs.h ../../../src/ck_rhs.c ../../../src/ck_epoch.c
+ $(CC) $(PTHREAD_CFLAGS) $(CFLAGS) -o parallel_bytestring parallel_bytestring.c ../../../src/ck_rhs.c ../../../src/ck_epoch.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_rhs/benchmark/parallel_bytestring.c b/regressions/ck_rhs/benchmark/parallel_bytestring.c
new file mode 100644
index 0000000..a95d940
--- /dev/null
+++ b/regressions/ck_rhs/benchmark/parallel_bytestring.c
@@ -0,0 +1,599 @@
+/*
+ * Copyright 2012 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "../../common.h"
+#include <ck_rhs.h>
+#include "../../../src/ck_ht_hash.h"
+#include <assert.h>
+#include <ck_epoch.h>
+#include <ck_malloc.h>
+#include <ck_pr.h>
+#include <ck_spinlock.h>
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+static ck_rhs_t hs CK_CC_CACHELINE;
+static char **keys;
+static size_t keys_length = 0;
+static size_t keys_capacity = 128;
+static ck_epoch_t epoch_hs;
+static ck_epoch_record_t epoch_wr;
+static int n_threads;
+static bool next_stage;
+
+enum state {
+ HS_STATE_STOP = 0,
+ HS_STATE_GET,
+ HS_STATE_STRICT_REPLACEMENT,
+ HS_STATE_DELETION,
+ HS_STATE_REPLACEMENT,
+ HS_STATE_COUNT
+};
+
+static ck_spinlock_t mtx = CK_SPINLOCK_INITIALIZER;
+static struct affinity affinerator = AFFINITY_INITIALIZER;
+static uint64_t accumulator[HS_STATE_COUNT];
+static int barrier[HS_STATE_COUNT];
+static int state;
+
+struct hs_epoch {
+ ck_epoch_entry_t epoch_entry;
+};
+
+COMMON_ALARM_DECLARE_GLOBAL(hs_alarm, alarm_event, next_stage)
+
+static void
+alarm_handler(int s)
+{
+
+ (void)s;
+ next_stage = true;
+ return;
+}
+
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+ const char *c = object;
+ unsigned long h;
+
+ h = (unsigned long)MurmurHash64A(c, strlen(c), seed);
+ return h;
+}
+
+static bool
+hs_compare(const void *previous, const void *compare)
+{
+
+ return strcmp(previous, compare) == 0;
+}
+
+static void
+hs_destroy(ck_epoch_entry_t *e)
+{
+
+ free(e);
+ return;
+}
+
+static void *
+hs_malloc(size_t r)
+{
+ ck_epoch_entry_t *b;
+
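+	/*
+	 * Prepend a ck_epoch_entry_t header to each allocation so that
+	 * hs_free() can hand the object to ck_epoch_call() for deferred
+	 * reclamation; callers only ever see the region after the header.
+	 */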
+ b = malloc(sizeof(*b) + r);
+ return b + 1;
+}
+
+static void
+hs_free(void *p, size_t b, bool r)
+{
+ struct hs_epoch *e = p;
+
+ (void)b;
+
+ if (r == true) {
+ /* Destruction requires safe memory reclamation. */
+ ck_epoch_call(&epoch_wr, &(--e)->epoch_entry, hs_destroy);
+ } else {
+ free(--e);
+ }
+
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = hs_malloc,
+ .free = hs_free
+};
+
+static void
+set_init(void)
+{
+ unsigned int mode = CK_RHS_MODE_OBJECT | CK_RHS_MODE_SPMC;
+
+ ck_epoch_init(&epoch_hs);
+ ck_epoch_register(&epoch_hs, &epoch_wr);
+ common_srand48((long int)time(NULL));
+ if (ck_rhs_init(&hs, mode, hs_hash, hs_compare, &my_allocator, 65536, common_lrand48()) == false) {
+ perror("ck_rhs_init");
+ exit(EXIT_FAILURE);
+ }
+
+ return;
+}
+
+static bool
+set_remove(const char *value)
+{
+ unsigned long h;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ return (bool)ck_rhs_remove(&hs, h, value);
+}
+
+static bool
+set_replace(const char *value)
+{
+ unsigned long h;
+ void *previous;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ return ck_rhs_set(&hs, h, value, &previous);
+}
+
+static bool
+set_swap(const char *value)
+{
+ unsigned long h;
+ void *previous;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ return ck_rhs_fas(&hs, h, value, &previous);
+}
+
+static void *
+set_get(const char *value)
+{
+ unsigned long h;
+ void *v;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ v = ck_rhs_get(&hs, h, value);
+ return v;
+}
+
+static bool
+set_insert(const char *value)
+{
+ unsigned long h;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ return ck_rhs_put(&hs, h, value);
+}
+
+static size_t
+set_count(void)
+{
+
+ return ck_rhs_count(&hs);
+}
+
+static bool
+set_reset(void)
+{
+
+ return ck_rhs_reset(&hs);
+}
+
+static void *
+reader(void *unused)
+{
+ size_t i;
+ ck_epoch_record_t epoch_record;
+ int state_previous = HS_STATE_STOP;
+ int n_state = 0;
+ uint64_t s, j, a;
+
+ (void)unused;
+ if (aff_iterate(&affinerator) != 0)
+ perror("WARNING: Failed to affine thread");
+
+ s = j = a = 0;
+ ck_epoch_register(&epoch_hs, &epoch_record);
+ for (;;) {
+ j++;
+ ck_epoch_begin(&epoch_record, NULL);
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ char *r;
+
+ r = set_get(keys[i]);
+ if (r == NULL) {
+ if (n_state == HS_STATE_STRICT_REPLACEMENT) {
+ ck_error("ERROR: Did not find during replacement: %s\n", keys[i]);
+ }
+
+ continue;
+ }
+
+ if (strcmp(r, keys[i]) == 0)
+ continue;
+
+ ck_error("ERROR: Found invalid value: [%s] but expected [%s]\n", (char *)r, keys[i]);
+ }
+ a += rdtsc() - s;
+ ck_epoch_end(&epoch_record, NULL);
+
+ n_state = ck_pr_load_int(&state);
+ if (n_state != state_previous) {
+ ck_spinlock_lock(&mtx);
+ accumulator[state_previous] += a / (j * keys_length);
+ ck_spinlock_unlock(&mtx);
+
+ ck_pr_inc_int(&barrier[state_previous]);
+ while (ck_pr_load_int(&barrier[state_previous]) != n_threads + 1)
+ ck_pr_stall();
+
+ state_previous = n_state;
+ s = j = a = 0;
+ }
+ }
+
+ return NULL;
+}
+
+static uint64_t
+acc(size_t i)
+{
+ uint64_t r;
+
+ ck_spinlock_lock(&mtx);
+ r = accumulator[i];
+ ck_spinlock_unlock(&mtx);
+
+ return r;
+}
+
+int
+main(int argc, char *argv[])
+{
+ FILE *fp;
+ char buffer[512];
+ size_t i, j, r;
+ unsigned int d = 0;
+ uint64_t s, e, a, repeated;
+ char **t;
+ pthread_t *readers;
+ double p_r, p_d;
+
+ COMMON_ALARM_DECLARE_LOCAL(hs_alarm, alarm_event)
+
+ r = 20;
+ s = 8;
+ p_d = 0.5;
+ p_r = 0.5;
+ n_threads = CORES - 1;
+
+ if (argc < 2) {
+ ck_error("Usage: parallel <dictionary> [<interval length> <initial size> <readers>\n"
+ " <probability of replacement> <probability of deletion> <epoch threshold>]\n");
+ }
+
+ if (argc >= 3)
+ r = atoi(argv[2]);
+
+ if (argc >= 4)
+ s = (uint64_t)atoi(argv[3]);
+
+ if (argc >= 5) {
+ n_threads = atoi(argv[4]);
+ if (n_threads < 1) {
+ ck_error("ERROR: Number of readers must be >= 1.\n");
+ }
+ }
+
+ if (argc >= 6) {
+ p_r = atof(argv[5]) / 100.00;
+ if (p_r < 0) {
+ ck_error("ERROR: Probability of replacement must be >= 0 and <= 100.\n");
+ }
+ }
+
+ if (argc >= 7) {
+ p_d = atof(argv[6]) / 100.00;
+ if (p_d < 0) {
+ ck_error("ERROR: Probability of deletion must be >= 0 and <= 100.\n");
+ }
+ }
+
+ COMMON_ALARM_INIT(hs_alarm, alarm_event, r)
+
+ affinerator.delta = 1;
+ readers = malloc(sizeof(pthread_t) * n_threads);
+ assert(readers != NULL);
+
+ keys = malloc(sizeof(char *) * keys_capacity);
+ assert(keys != NULL);
+
+ fp = fopen(argv[1], "r");
+ assert(fp != NULL);
+
+ while (fgets(buffer, sizeof(buffer), fp) != NULL) {
+ buffer[strlen(buffer) - 1] = '\0';
+ keys[keys_length++] = strdup(buffer);
+ assert(keys[keys_length - 1] != NULL);
+
+ if (keys_length == keys_capacity) {
+ t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
+ assert(t != NULL);
+ keys = t;
+ }
+ }
+
+ t = realloc(keys, sizeof(char *) * keys_length);
+ assert(t != NULL);
+ keys = t;
+
+ set_init();
+
+ for (i = 0; i < (size_t)n_threads; i++) {
+ if (pthread_create(&readers[i], NULL, reader, NULL) != 0) {
+ ck_error("ERROR: Failed to create thread %zu.\n", i);
+ }
+ }
+
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+
+ fprintf(stderr, " [S] %d readers, 1 writer.\n", n_threads);
+ fprintf(stderr, " [S] %zu entries stored and %u duplicates.\n\n",
+ set_count(), d);
+
+ fprintf(stderr, " ,- BASIC TEST\n");
+ fprintf(stderr, " | Executing SMR test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (set_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing replacement test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing get test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (set_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ a = 0;
+ fprintf(stderr, " | Executing removal test...");
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing negative look-up test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ set_get("\x50\x03\x04\x05\x06\x10");
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ ck_epoch_record_t epoch_temporary = epoch_wr;
+ ck_epoch_synchronize(&epoch_wr);
+
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
+ "%u pending, %u peak, %lu reclamations\n\n",
+ epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
+ epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
+
+ fprintf(stderr, " ,- READER CONCURRENCY\n");
+ fprintf(stderr, " | Executing reader test...");
+
+ ck_pr_store_int(&state, HS_STATE_GET);
+ while (ck_pr_load_int(&barrier[HS_STATE_STOP]) != n_threads)
+ ck_pr_stall();
+ ck_pr_inc_int(&barrier[HS_STATE_STOP]);
+ common_sleep(r);
+ ck_pr_store_int(&state, HS_STATE_STRICT_REPLACEMENT);
+ while (ck_pr_load_int(&barrier[HS_STATE_GET]) != n_threads)
+ ck_pr_stall();
+
+ fprintf(stderr, "done (reader = %" PRIu64 " ticks)\n",
+ acc(HS_STATE_GET) / n_threads);
+
+ fprintf(stderr, " | Executing strict replacement test...");
+
+ a = repeated = 0;
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ ck_pr_inc_int(&barrier[HS_STATE_GET]);
+ for (;;) {
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (i & 1) {
+ set_replace(keys[i]);
+ } else {
+ set_swap(keys[i]);
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+
+ ck_pr_store_int(&state, HS_STATE_DELETION);
+ while (ck_pr_load_int(&barrier[HS_STATE_STRICT_REPLACEMENT]) != n_threads)
+ ck_pr_stall();
+ set_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), acc(HS_STATE_STRICT_REPLACEMENT) / n_threads);
+
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ fprintf(stderr, " | Executing deletion test (%.2f)...", p_d * 100);
+ a = repeated = 0;
+ ck_pr_inc_int(&barrier[HS_STATE_STRICT_REPLACEMENT]);
+ for (;;) {
+ double delete;
+
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ set_insert(keys[i]);
+ if (p_d != 0.0) {
+ delete = common_drand48();
+ if (delete <= p_d)
+ set_remove(keys[i]);
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+ ck_pr_store_int(&state, HS_STATE_REPLACEMENT);
+ while (ck_pr_load_int(&barrier[HS_STATE_DELETION]) != n_threads)
+ ck_pr_stall();
+
+ set_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), acc(HS_STATE_DELETION) / n_threads);
+
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ fprintf(stderr, " | Executing replacement test (%.2f)...", p_r * 100);
+ a = repeated = 0;
+ ck_pr_inc_int(&barrier[HS_STATE_DELETION]);
+ for (;;) {
+ double delete, replace;
+
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ set_insert(keys[i]);
+ if (p_d != 0.0) {
+ delete = common_drand48();
+ if (delete <= p_d)
+ set_remove(keys[i]);
+ } else {
+ delete = 0.0;
+ }
+
+ if (p_r != 0.0) {
+ replace = common_drand48();
+ if (replace <= p_r) {
+ if ((i & 1) || (delete <= p_d)) {
+ set_replace(keys[i]);
+ } else {
+ set_swap(keys[i]);
+ }
+ }
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+ ck_pr_store_int(&state, HS_STATE_STOP);
+ while (ck_pr_load_int(&barrier[HS_STATE_REPLACEMENT]) != n_threads)
+ ck_pr_stall();
+ set_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), acc(HS_STATE_REPLACEMENT) / n_threads);
+
+ ck_pr_inc_int(&barrier[HS_STATE_REPLACEMENT]);
+ epoch_temporary = epoch_wr;
+ ck_epoch_synchronize(&epoch_wr);
+
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
+ "%u pending, %u peak, %lu reclamations\n\n",
+ epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
+ epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
+ return 0;
+}
+
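
The ck_rhs benchmarks in this directory (parallel_bytestring.c above and serial.c below) drive the hash set through the same handful of calls. A condensed, hedged sketch of that core usage follows; the toy string hash and the capacity of 128 are illustrative only, whereas the benchmarks use MurmurHash64A and larger tables:

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <ck_malloc.h>
#include <ck_rhs.h>

static void *rhs_malloc(size_t n) { return malloc(n); }
static void rhs_free(void *p, size_t n, bool defer) { (void)n; (void)defer; free(p); }
static struct ck_malloc allocator = { .malloc = rhs_malloc, .free = rhs_free };

/* Toy string hash for illustration; the benchmarks use MurmurHash64A from ck_ht_hash.h. */
static unsigned long
rhs_hash(const void *object, unsigned long seed)
{
	const unsigned char *c = object;
	unsigned long h = seed;

	while (*c != '\0')
		h = h * 131 + *c++;
	return h;
}

static bool
rhs_compare(const void *previous, const void *compare)
{
	return strcmp(previous, compare) == 0;
}

int
main(void)
{
	ck_rhs_t set;
	unsigned long h;
	const char *key = "example";

	if (ck_rhs_init(&set, CK_RHS_MODE_OBJECT | CK_RHS_MODE_SPMC,
	    rhs_hash, rhs_compare, &allocator, 128, 0) == false)
		return EXIT_FAILURE;

	h = CK_RHS_HASH(&set, rhs_hash, key);
	ck_rhs_put(&set, h, key);		/* insert */
	assert(ck_rhs_get(&set, h, key) != NULL);
	assert(ck_rhs_count(&set) == 1);
	ck_rhs_destroy(&set);
	return 0;
}
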
diff --git a/regressions/ck_rhs/benchmark/serial.c b/regressions/ck_rhs/benchmark/serial.c
new file mode 100644
index 0000000..18fa892
--- /dev/null
+++ b/regressions/ck_rhs/benchmark/serial.c
@@ -0,0 +1,517 @@
+/*
+ * Copyright 2012 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_rhs.h>
+
+#include <assert.h>
+#include <ck_malloc.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include "../../common.h"
+#include "../../../src/ck_ht_hash.h"
+
+static ck_rhs_t hs;
+static char **keys;
+static size_t keys_length = 0;
+static size_t keys_capacity = 128;
+static unsigned long global_seed;
+
+static void *
+hs_malloc(size_t r)
+{
+
+ return malloc(r);
+}
+
+static void
+hs_free(void *p, size_t b, bool r)
+{
+
+ (void)b;
+ (void)r;
+
+ free(p);
+
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = hs_malloc,
+ .free = hs_free
+};
+
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+ const char *c = object;
+ unsigned long h;
+
+ h = (unsigned long)MurmurHash64A(c, strlen(c), seed);
+ return h;
+}
+
+static bool
+hs_compare(const void *previous, const void *compare)
+{
+
+ return strcmp(previous, compare) == 0;
+}
+
+static void
+set_destroy(void)
+{
+
+ ck_rhs_destroy(&hs);
+ return;
+}
+
+static void
+set_init(unsigned int size, unsigned int mode)
+{
+
+ if (ck_rhs_init(&hs, CK_RHS_MODE_OBJECT | CK_RHS_MODE_SPMC | mode, hs_hash, hs_compare,
+ &my_allocator, size, global_seed) == false) {
+ perror("ck_rhs_init");
+ exit(EXIT_FAILURE);
+ }
+
+ return;
+}
+
+static bool
+set_remove(const char *value)
+{
+ unsigned long h;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ return ck_rhs_remove(&hs, h, value) != NULL;
+}
+
+static bool
+set_swap(const char *value)
+{
+ unsigned long h;
+ void *previous;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ return ck_rhs_fas(&hs, h, value, &previous);
+}
+
+static bool
+set_replace(const char *value)
+{
+ unsigned long h;
+ void *previous;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ ck_rhs_set(&hs, h, value, &previous);
+ return previous != NULL;
+}
+
+static void *
+set_get(const char *value)
+{
+ unsigned long h;
+ void *v;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ v = ck_rhs_get(&hs, h, value);
+ return v;
+}
+
+static bool
+set_insert(const char *value)
+{
+ unsigned long h;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ return ck_rhs_put(&hs, h, value);
+}
+
+static bool
+set_insert_unique(const char *value)
+{
+ unsigned long h;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ return ck_rhs_put_unique(&hs, h, value);
+}
+
+static size_t
+set_count(void)
+{
+
+ return ck_rhs_count(&hs);
+}
+
+static bool
+set_reset(void)
+{
+
+ return ck_rhs_reset(&hs);
+}
+
+static void
+set_gc(void)
+{
+
+ ck_rhs_gc(&hs);
+ return;
+}
+
+static void
+set_rebuild(void)
+{
+
+ ck_rhs_rebuild(&hs);
+ return;
+}
+
+static void
+keys_shuffle(char **k)
+{
+ size_t i, j;
+ char *t;
+
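+	/*
+	 * In-place shuffle of the key array. j is drawn from [0, i - 2], so an
+	 * element never stays in place (a Sattolo-style cycle rather than a
+	 * uniform Fisher-Yates shuffle).
+	 */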
+ for (i = keys_length; i > 1; i--) {
+ j = rand() % (i - 1);
+
+ if (j != i - 1) {
+ t = k[i - 1];
+ k[i - 1] = k[j];
+ k[j] = t;
+ }
+ }
+
+ return;
+}
+
+static void
+run_test(const char *file, size_t r, unsigned int size, unsigned int mode)
+{
+ FILE *fp;
+ char buffer[512];
+ size_t i, j;
+ unsigned int d = 0;
+ uint64_t s, e, a, ri, si, ai, sr, rg, sg, ag, sd, ng, ss, sts, su, sgc, sb;
+ struct ck_rhs_stat st;
+ char **t;
+
+ keys = malloc(sizeof(char *) * keys_capacity);
+ assert(keys != NULL);
+
+ fp = fopen(file, "r");
+ assert(fp != NULL);
+
+ while (fgets(buffer, sizeof(buffer), fp) != NULL) {
+ buffer[strlen(buffer) - 1] = '\0';
+ keys[keys_length++] = strdup(buffer);
+ assert(keys[keys_length - 1] != NULL);
+
+ if (keys_length == keys_capacity) {
+ t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
+ assert(t != NULL);
+ keys = t;
+ }
+ }
+
+ t = realloc(keys, sizeof(char *) * keys_length);
+ assert(t != NULL);
+ keys = t;
+
+ set_init(size, mode);
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+ ck_rhs_stat(&hs, &st);
+
+ fprintf(stderr, "# %zu entries stored, %u duplicates, %u probe.\n",
+ set_count(), d, st.probe_maximum);
+
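+	/* Reverse-order insertion: reset, then reinsert keys from last to first. */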
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (set_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = keys_length; i > 0; i--)
+ d += set_insert(keys[i - 1]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ ri = a / (r * keys_length);
+
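+	/* Serial insertion: reset, then reinsert keys in array order. */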
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (set_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ si = a / (r * keys_length);
+
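+	/* Random insertion: shuffle the keys before each reset-and-reinsert round. */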
+ a = 0;
+ for (j = 0; j < r; j++) {
+ keys_shuffle(keys);
+
+ if (set_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ ai = a / (r * keys_length);
+
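+	/* Swap of existing entries via set_swap() (ck_rhs_fas). */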
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_swap(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ ss = a / (r * keys_length);
+
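+	/* Replacement of existing entries via set_replace() (ck_rhs_set). */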
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ sr = a / (r * keys_length);
+
+ set_reset();
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+
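+	/* Reverse-order lookups against a fully populated table. */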
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = keys_length; i > 0; i--) {
+ if (set_get(keys[i - 1]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ rg = a / (r * keys_length);
+
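+	/* Serial lookups in array order. */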
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (set_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ sg = a / (r * keys_length);
+
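+	/* Random lookups: the key array is reshuffled before every round. */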
+ a = 0;
+ for (j = 0; j < r; j++) {
+ keys_shuffle(keys);
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (set_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ ag = a / (r * keys_length);
+
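+	/* Removal: deletions are timed, reinsertions happen outside the timed region. */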
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+ }
+ sd = a / (r * keys_length);
+
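+	/* Negative lookups: probe for a byte string that should not be in the dictionary. */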
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ set_get("\x50\x03\x04\x05\x06\x10");
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ ng = a / (r * keys_length);
+
+ set_reset();
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+
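+	/* Tombstone pressure: reinsert into a table whose entries were all deleted. */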
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+ }
+ sts = a / (r * keys_length);
+
+ set_reset();
+
+ /* Prune duplicates. */
+ for (i = 0; i < keys_length; i++) {
+ if (set_insert(keys[i]) == true)
+ continue;
+
+ free(keys[i]);
+ keys[i] = keys[--keys_length];
+ }
+
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+
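+	/* Unique insertion via ck_rhs_put_unique() on a duplicate-free key set. */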
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_insert_unique(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+ }
+ su = a / (r * keys_length);
+
+ for (i = 0; i < keys_length; i++)
+ set_insert_unique(keys[i]);
+
+ for (i = 0; i < keys_length / 2; i++)
+ set_remove(keys[i]);
+
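+	/* Garbage collection cost after removing half of the keys. */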
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ set_gc();
+ e = rdtsc();
+ a += e - s;
+ }
+ sgc = a / r;
+
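+	/* Cost of a full ck_rhs_rebuild(). */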
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ set_rebuild();
+ e = rdtsc();
+ a += e - s;
+ }
+ sb = a / r;
+
+ printf("%zu "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 "\n",
+ keys_length, ri, si, ai, ss, sr, rg, sg, ag, sd, ng, sts, su, sgc, sb);
+
+ fclose(fp);
+
+ for (i = 0; i < keys_length; i++) {
+ free(keys[i]);
+ }
+
+ free(keys);
+ keys_length = 0;
+ set_destroy();
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int r, size;
+
+ common_srand48((long int)time(NULL));
+ if (argc < 2) {
+ ck_error("Usage: ck_rhs <dictionary> [<repetitions> <initial size>]\n");
+ }
+
+ r = 16;
+ if (argc >= 3)
+ r = atoi(argv[2]);
+
+ size = 8;
+ if (argc >= 4)
+ size = atoi(argv[3]);
+
+ global_seed = common_lrand48();
+ run_test(argv[1], r, size, 0);
+ run_test(argv[1], r, size, CK_RHS_MODE_READ_MOSTLY);
+ fprintf(stderr, "# reverse_insertion serial_insertion random_insertion serial_swap "
+ "serial_replace reverse_get serial_get random_get serial_remove negative_get tombstone "
+ "set_unique gc rebuild\n\n");
+
+ return 0;
+}
+
diff --git a/regressions/ck_rhs/validate/Makefile b/regressions/ck_rhs/validate/Makefile
new file mode 100644
index 0000000..5987395
--- /dev/null
+++ b/regressions/ck_rhs/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=serial
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_rhs.h ../../../src/ck_rhs.c
+ $(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_rhs.c
+
+check: all
+ ./serial
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_rhs/validate/serial.c b/regressions/ck_rhs/validate/serial.c
new file mode 100644
index 0000000..ef9365f
--- /dev/null
+++ b/regressions/ck_rhs/validate/serial.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2012 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_rhs.h>
+
+#include <assert.h>
+#include <ck_malloc.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "../../common.h"
+
+static void *
+hs_malloc(size_t r)
+{
+
+ return malloc(r);
+}
+
+static void
+hs_free(void *p, size_t b, bool r)
+{
+
+ (void)b;
+ (void)r;
+ free(p);
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = hs_malloc,
+ .free = hs_free
+};
+
+const char *test[] = { "Samy", "Al", "Bahra", "dances", "in", "the", "wind.", "Once",
+ "upon", "a", "time", "his", "gypsy", "ate", "one", "itsy",
+ "bitsy", "spider.", "What", "goes", "up", "must",
+ "come", "down.", "What", "is", "down", "stays",
+ "down.", "A", "B", "C", "D", "E", "F", "G", "H",
+ "I", "J", "K", "L", "M", "N", "O", "P", "Q" };
+
+const char *negative = "negative";
+
+/* Purposefully crappy hash function. */
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+ const char *c = object;
+ unsigned long h;
+
+ (void)seed;
+ h = c[0];
+ return h;
+}
+
+static bool
+hs_compare(const void *previous, const void *compare)
+{
+
+ return strcmp(previous, compare) == 0;
+}
+
+static void *
+test_ip(void *key, void *closure)
+{
+ const char *a = key;
+ const char *b = closure;
+
+ if (strcmp(a, b) != 0)
+ ck_error("Mismatch: %s != %s\n", a, b);
+
+ return closure;
+}
+
+static void *
+test_negative(void *key, void *closure)
+{
+
+ (void)closure;
+ if (key != NULL)
+ ck_error("ERROR: Apply callback expects NULL argument instead of [%s]\n", key);
+
+ return NULL;
+}
+
+static void *
+test_unique(void *key, void *closure)
+{
+
+ if (key != NULL)
+ ck_error("ERROR: Apply callback expects NULL argument instead of [%s]\n", key);
+
+ return closure;
+}
+
+static void *
+test_remove(void *key, void *closure)
+{
+
+ (void)key;
+ (void)closure;
+
+ return NULL;
+}
+
+static void
+run_test(unsigned int is, unsigned int ad)
+{
+ ck_rhs_t hs[16];
+ const size_t size = sizeof(hs) / sizeof(*hs);
+ size_t i, j;
+ const char *blob = "#blobs";
+ unsigned long h;
+
+ if (ck_rhs_init(&hs[0], CK_RHS_MODE_SPMC | CK_RHS_MODE_OBJECT | ad, hs_hash, hs_compare, &my_allocator, is, 6602834) == false)
+ ck_error("ck_rhs_init\n");
+
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ h = test[i][0];
+ if (ck_rhs_get(&hs[j], h, test[i]) != NULL) {
+ continue;
+ }
+
+ if (i & 1) {
+ if (ck_rhs_put_unique(&hs[j], h, test[i]) == false)
+ ck_error("ERROR [%zu]: Failed to insert unique (%s)\n", j, test[i]);
+ } else if (ck_rhs_apply(&hs[j], h, test[i], test_unique,
+ (void *)(uintptr_t)test[i]) == false) {
+ ck_error("ERROR: Failed to apply for insertion.\n");
+ }
+
+ if (i & 1) {
+ if (ck_rhs_remove(&hs[j], h, test[i]) == false)
+ ck_error("ERROR [%zu]: Failed to remove unique (%s)\n", j, test[i]);
+ } else if (ck_rhs_apply(&hs[j], h, test[i], test_remove, NULL) == false) {
+ ck_error("ERROR: Failed to remove apply.\n");
+ }
+
+ if (ck_rhs_apply(&hs[j], h, test[i], test_negative,
+ (void *)(uintptr_t)test[i]) == false)
+ ck_error("ERROR: Failed to apply.\n");
+
+ break;
+ }
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ h = test[i][0];
+ ck_rhs_put(&hs[j], h, test[i]);
+ if (ck_rhs_put(&hs[j], h, test[i]) == true) {
+ ck_error("ERROR [%u] [1]: put must fail on collision (%s).\n", is, test[i]);
+ }
+ if (ck_rhs_get(&hs[j], h, test[i]) == NULL) {
+ ck_error("ERROR [%u]: get must not fail after put\n", is);
+ }
+ }
+
+ /* Test grow semantics. */
+ ck_rhs_grow(&hs[j], 128);
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ h = test[i][0];
+ if (ck_rhs_put(&hs[j], h, test[i]) == true) {
+ ck_error("ERROR [%u] [2]: put must fail on collision.\n", is);
+ }
+
+ if (ck_rhs_get(&hs[j], h, test[i]) == NULL) {
+ ck_error("ERROR [%u]: get must not fail\n", is);
+ }
+ }
+
+ h = blob[0];
+ if (ck_rhs_get(&hs[j], h, blob) == NULL) {
+ if (j > 0)
+ ck_error("ERROR [%u]: Blob must always exist after first.\n", is);
+
+ if (ck_rhs_put(&hs[j], h, blob) == false) {
+ ck_error("ERROR [%u]: A unique blob put failed.\n", is);
+ }
+ } else {
+ if (ck_rhs_put(&hs[j], h, blob) == true) {
+ ck_error("ERROR [%u]: Duplicate blob put succeeded.\n", is);
+ }
+ }
+
+ /* Grow set and check get semantics. */
+ ck_rhs_grow(&hs[j], 512);
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ h = test[i][0];
+ if (ck_rhs_get(&hs[j], h, test[i]) == NULL) {
+ ck_error("ERROR [%u]: get must not fail\n", is);
+ }
+ }
+
+ /* Delete and check negative membership. */
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ void *r;
+
+ h = test[i][0];
+ if (ck_rhs_get(&hs[j], h, test[i]) == NULL)
+ continue;
+
+ if (r = ck_rhs_remove(&hs[j], h, test[i]), r == NULL) {
+ ck_error("ERROR [%u]: remove must not fail\n", is);
+ }
+
+ if (strcmp(r, test[i]) != 0) {
+				ck_error("ERROR [%u]: Removed incorrect node (%s != %s)\n", is, (char *)r, test[i]);
+ }
+ }
+
+ /* Test replacement semantics. */
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ void *r;
+ bool d;
+
+ h = test[i][0];
+ d = ck_rhs_get(&hs[j], h, test[i]) != NULL;
+ if (ck_rhs_set(&hs[j], h, test[i], &r) == false) {
+ ck_error("ERROR [%u]: Failed to set\n", is);
+ }
+
+ /* Expected replacement. */
+ if (d == true && (r == NULL || strcmp(r, test[i]) != 0)) {
+ ck_error("ERROR [%u]: Incorrect previous value: %s != %s\n",
+ is, test[i], (char *)r);
+ }
+
+ /* Replacement should succeed. */
+ if (ck_rhs_fas(&hs[j], h, test[i], &r) == false)
+ ck_error("ERROR [%u]: ck_rhs_fas must succeed.\n", is);
+
+ if (strcmp(r, test[i]) != 0) {
+ ck_error("ERROR [%u]: Incorrect replaced value: %s != %s\n",
+ is, test[i], (char *)r);
+ }
+
+ if (ck_rhs_fas(&hs[j], h, negative, &r) == true)
+ ck_error("ERROR [%u]: Replacement of negative should fail.\n", is);
+
+ if (ck_rhs_set(&hs[j], h, test[i], &r) == false) {
+ ck_error("ERROR [%u]: Failed to set [1]\n", is);
+ }
+
+ if (strcmp(r, test[i]) != 0) {
+				ck_error("ERROR [%u]: Invalid &hs[j]: %s != %s\n", is, (char *)r, test[i]);
+ }
+ /* Attempt in-place mutation. */
+ if (ck_rhs_apply(&hs[j], h, test[i], test_ip,
+ (void *)(uintptr_t)test[i]) == false) {
+ ck_error("ERROR [%u]: Failed to apply: %s != %s\n", is, (char *)r, test[i]);
+ }
+
+ d = ck_rhs_get(&hs[j], h, test[i]) != NULL;
+ if (d == false)
+ ck_error("ERROR [%u]: Expected [%s] to exist.\n", is, test[i]);
+ }
+
+ if (j == size - 1)
+ break;
+
+ if (ck_rhs_move(&hs[j + 1], &hs[j], hs_hash, hs_compare, &my_allocator) == false)
+ ck_error("Failed to move hash table");
+
+ ck_rhs_gc(&hs[j + 1]);
+
+ if (ck_rhs_rebuild(&hs[j + 1]) == false)
+ ck_error("Failed to rebuild");
+ }
+
+ return;
+}
+
+int
+main(void)
+{
+ unsigned int k;
+
+ for (k = 16; k <= 64; k <<= 1) {
+ run_test(k, 0);
+ break;
+ }
+
+ return 0;
+}
+
diff --git a/regressions/ck_ring/benchmark/Makefile b/regressions/ck_ring/benchmark/Makefile
new file mode 100644
index 0000000..4087ed1
--- /dev/null
+++ b/regressions/ck_ring/benchmark/Makefile
@@ -0,0 +1,14 @@
+.PHONY: clean distribution
+
+OBJECTS=latency
+
+all: $(OBJECTS)
+
+latency: latency.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o latency latency.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_ring/benchmark/latency.c b/regressions/ck_ring/benchmark/latency.c
new file mode 100644
index 0000000..657be4d
--- /dev/null
+++ b/regressions/ck_ring/benchmark/latency.c
@@ -0,0 +1,142 @@
+#include <ck_ring.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS (128000)
+#endif
+
+struct entry {
+ int tid;
+ int value;
+};
+
+int
+main(int argc, char *argv[])
+{
+ int i, r, size;
+ uint64_t s, e, e_a, d_a;
+ struct entry entry = {0, 0};
+ ck_ring_buffer_t *buf;
+ ck_ring_t ring;
+
+ if (argc != 2) {
+ ck_error("Usage: latency <size>\n");
+ }
+
+ size = atoi(argv[1]);
+ if (size <= 4 || (size & (size - 1))) {
+ ck_error("ERROR: Size must be a power of 2 greater than 4.\n");
+ }
+
+ buf = malloc(sizeof(ck_ring_buffer_t) * size);
+ if (buf == NULL) {
+ ck_error("ERROR: Failed to allocate buffer\n");
+ }
+
+ ck_ring_init(&ring, size);
+
+ e_a = d_a = s = e = 0;
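+	/*
+	 * Each variant below times four back-to-back operations with rdtsc and
+	 * reports the amortized per-operation enqueue and dequeue cost.
+	 */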
+ for (r = 0; r < ITERATIONS; r++) {
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_enqueue_spsc(&ring, buf, &entry);
+ ck_ring_enqueue_spsc(&ring, buf, &entry);
+ ck_ring_enqueue_spsc(&ring, buf, &entry);
+ ck_ring_enqueue_spsc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ e_a += (e - s) / 4;
+
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_dequeue_spsc(&ring, buf, &entry);
+ ck_ring_dequeue_spsc(&ring, buf, &entry);
+ ck_ring_dequeue_spsc(&ring, buf, &entry);
+ ck_ring_dequeue_spsc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ d_a += (e - s) / 4;
+ }
+
+ printf("spsc %10d %16" PRIu64 " %16" PRIu64 "\n", size, e_a / ITERATIONS, d_a / ITERATIONS);
+
+ e_a = d_a = s = e = 0;
+ for (r = 0; r < ITERATIONS; r++) {
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_enqueue_spmc(&ring, buf, &entry);
+ ck_ring_enqueue_spmc(&ring, buf, &entry);
+ ck_ring_enqueue_spmc(&ring, buf, &entry);
+ ck_ring_enqueue_spmc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ e_a += (e - s) / 4;
+
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_dequeue_spmc(&ring, buf, &entry);
+ ck_ring_dequeue_spmc(&ring, buf, &entry);
+ ck_ring_dequeue_spmc(&ring, buf, &entry);
+ ck_ring_dequeue_spmc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ d_a += (e - s) / 4;
+ }
+
+ printf("spmc %10d %16" PRIu64 " %16" PRIu64 "\n", size, e_a / ITERATIONS, d_a / ITERATIONS);
+
+ ck_ring_init(&ring, size);
+ e_a = d_a = s = e = 0;
+ for (r = 0; r < ITERATIONS; r++) {
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_enqueue_mpsc(&ring, buf, &entry);
+ ck_ring_enqueue_mpsc(&ring, buf, &entry);
+ ck_ring_enqueue_mpsc(&ring, buf, &entry);
+ ck_ring_enqueue_mpsc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ e_a += (e - s) / 4;
+
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_dequeue_mpsc(&ring, buf, &entry);
+ ck_ring_dequeue_mpsc(&ring, buf, &entry);
+ ck_ring_dequeue_mpsc(&ring, buf, &entry);
+ ck_ring_dequeue_mpsc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ d_a += (e - s) / 4;
+ }
+ printf("mpsc %10d %16" PRIu64 " %16" PRIu64 "\n", size, e_a / ITERATIONS, d_a / ITERATIONS);
+ ck_ring_init(&ring, size);
+ e_a = d_a = s = e = 0;
+ for (r = 0; r < ITERATIONS; r++) {
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_enqueue_mpmc(&ring, buf, &entry);
+ ck_ring_enqueue_mpmc(&ring, buf, &entry);
+ ck_ring_enqueue_mpmc(&ring, buf, &entry);
+ ck_ring_enqueue_mpmc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ e_a += (e - s) / 4;
+
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_dequeue_mpmc(&ring, buf, &entry);
+ ck_ring_dequeue_mpmc(&ring, buf, &entry);
+ ck_ring_dequeue_mpmc(&ring, buf, &entry);
+ ck_ring_dequeue_mpmc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ d_a += (e - s) / 4;
+ }
+ printf("mpmc %10d %16" PRIu64 " %16" PRIu64 "\n", size, e_a / ITERATIONS, d_a / ITERATIONS);
+ return (0);
+}
diff --git a/regressions/ck_ring/validate/Makefile b/regressions/ck_ring/validate/Makefile
new file mode 100644
index 0000000..0b68fad
--- /dev/null
+++ b/regressions/ck_ring/validate/Makefile
@@ -0,0 +1,40 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_ring_spsc ck_ring_spmc ck_ring_spmc_template ck_ring_mpmc \
+ ck_ring_mpmc_template
+SIZE=16384
+
+all: $(OBJECTS)
+
+check: all
+ ./ck_ring_spsc $(CORES) 1 $(SIZE)
+ ./ck_ring_spmc $(CORES) 1 $(SIZE)
+ ./ck_ring_spmc_template $(CORES) 1 $(SIZE)
+ ./ck_ring_mpmc $(CORES) 1 $(SIZE)
+ ./ck_ring_mpmc_template $(CORES) 1 $(SIZE)
+
+ck_ring_spsc: ck_ring_spsc.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o ck_ring_spsc ck_ring_spsc.c \
+ ../../../src/ck_barrier_centralized.c
+
+ck_ring_spmc: ck_ring_spmc.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o ck_ring_spmc ck_ring_spmc.c \
+ ../../../src/ck_barrier_centralized.c
+
+ck_ring_mpmc: ck_ring_mpmc.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o ck_ring_mpmc ck_ring_mpmc.c \
+ ../../../src/ck_barrier_centralized.c
+
+ck_ring_mpmc_template: ck_ring_mpmc_template.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o ck_ring_mpmc_template ck_ring_mpmc_template.c \
+ ../../../src/ck_barrier_centralized.c
+
+ck_ring_spmc_template: ck_ring_spmc_template.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o ck_ring_spmc_template ck_ring_spmc_template.c \
+ ../../../src/ck_barrier_centralized.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_ring/validate/ck_ring_mpmc.c b/regressions/ck_ring/validate/ck_ring_mpmc.c
new file mode 100644
index 0000000..66d7f39
--- /dev/null
+++ b/regressions/ck_ring/validate/ck_ring_mpmc.c
@@ -0,0 +1,448 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <ck_barrier.h>
+#include <ck_ring.h>
+#include <ck_spinlock.h>
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+ ck_ring_buffer_t *buffer;
+};
+
+struct entry {
+ unsigned long value_long;
+ unsigned int magic;
+ unsigned int ref;
+ int tid;
+ int value;
+};
+
+static int nthr;
+static ck_ring_t *ring;
+static ck_ring_t ring_mpmc CK_CC_CACHELINE;
+static ck_ring_t ring_mw CK_CC_CACHELINE;
+static struct affinity a;
+static int size;
+static int eb;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static struct context *_context;
+
+static unsigned int global_counter;
+
+static void *
+test_mpmc(void *c)
+{
+ unsigned int observed = 0;
+ unsigned int enqueue = 0;
+ unsigned int seed;
+ int i, k, j, tid;
+ struct context *context = c;
+ ck_ring_buffer_t *buffer;
+ unsigned int *csp;
+
+ csp = malloc(sizeof(*csp) * nthr);
+ assert(csp != NULL);
+
+ memset(csp, 0, sizeof(*csp) * nthr);
+
+ buffer = context->buffer;
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_int(&eb, 1);
+ ck_pr_fence_memory();
+ while (ck_pr_load_int(&eb) != nthr - 1);
+
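+	/*
+	 * Each iteration either consumes a node or, when nothing can be
+	 * dequeued, produces a fresh node tagged with a per-producer counter.
+	 * csp[] records the last counter observed from each producer so that
+	 * per-producer FIFO ordering can be verified.
+	 */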
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ struct entry *o = NULL;
+ int spin;
+
+			/* Try to dequeue a node; if the ring is empty, a new node is produced below. */
+ if (j & 1) {
+ if (ck_ring_dequeue_mpmc(&ring_mw, buffer, &o) == false)
+ o = NULL;
+ } else {
+ if (ck_ring_trydequeue_mpmc(&ring_mw, buffer, &o) == false)
+ o = NULL;
+ }
+
+ if (o == NULL) {
+ o = malloc(sizeof(*o));
+ if (o == NULL)
+ continue;
+
+ o->value_long = (unsigned long)ck_pr_faa_uint(&global_counter, 1) + 1;
+
+ o->magic = 0xdead;
+ o->ref = 0;
+ o->tid = tid;
+
+ if (ck_ring_enqueue_mpmc(&ring_mw, buffer, o) == false) {
+ free(o);
+ } else {
+ enqueue++;
+ }
+
+ continue;
+ }
+
+ observed++;
+
+ if (o->magic != 0xdead) {
+ ck_error("[%p] (%x)\n",
+ (void *)o, o->magic);
+ }
+
+ o->magic = 0xbeef;
+
+ if (csp[o->tid] >= o->value_long)
+				ck_error("queue semantics violated: %lu <= %lu\n",
+				    o->value_long, (unsigned long)csp[o->tid]);
+
+ csp[o->tid] = o->value_long;
+
+ if (ck_pr_faa_uint(&o->ref, 1) != 0) {
+ ck_error("[%p] We dequeued twice.\n", (void *)o);
+ }
+
+ if ((i % 4) == 0) {
+ spin = common_rand_r(&seed) % 16384;
+ for (k = 0; k < spin; k++) {
+ ck_pr_stall();
+ }
+ }
+
+ free(o);
+ }
+ }
+
+ fprintf(stderr, "[%d] dequeue=%u enqueue=%u\n", tid, observed, enqueue);
+ return NULL;
+}
+
+static void *
+test_spmc(void *c)
+{
+ unsigned int observed = 0;
+ unsigned long previous = 0;
+ unsigned int seed;
+ int i, k, j, tid;
+ struct context *context = c;
+ ck_ring_buffer_t *buffer;
+
+ buffer = context->buffer;
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_int(&eb, 1);
+ ck_pr_fence_memory();
+ while (ck_pr_load_int(&eb) != nthr - 1);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ struct entry *o;
+ int spin;
+
+ /* Keep trying until we encounter at least one node. */
+ if (j & 1) {
+ while (ck_ring_dequeue_mpmc(&ring_mpmc, buffer,
+ &o) == false);
+ } else {
+ while (ck_ring_trydequeue_mpmc(&ring_mpmc, buffer,
+ &o) == false);
+ }
+
+ observed++;
+ if (o->value < 0
+ || o->value != o->tid
+ || o->magic != 0xdead
+ || (previous != 0 && previous >= o->value_long)) {
+ ck_error("[0x%p] (%x) (%d, %d) >< (0, %d)\n",
+ (void *)o, o->magic, o->tid, o->value, size);
+ }
+
+ o->magic = 0xbeef;
+ o->value = -31337;
+ o->tid = -31338;
+ previous = o->value_long;
+
+ if (ck_pr_faa_uint(&o->ref, 1) != 0) {
+ ck_error("[%p] We dequeued twice.\n", (void *)o);
+ }
+
+ if ((i % 4) == 0) {
+ spin = common_rand_r(&seed) % 16384;
+ for (k = 0; k < spin; k++) {
+ ck_pr_stall();
+ }
+ }
+
+ free(o);
+ }
+ }
+
+ fprintf(stderr, "[%d] Observed %u\n", tid, observed);
+ return NULL;
+}
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ unsigned int s;
+ int i, j;
+ bool r;
+ ck_ring_buffer_t *buffer = context->buffer;
+ ck_barrier_centralized_state_t sense =
+ CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ if (context->tid == 0) {
+ struct entry *entries;
+
+ entries = malloc(sizeof(struct entry) * size);
+ assert(entries != NULL);
+
+ if (ck_ring_size(ring) != 0) {
+ ck_error("More entries than expected: %u > 0\n",
+ ck_ring_size(ring));
+ }
+
+ for (i = 0; i < size; i++) {
+ entries[i].value = i;
+ entries[i].tid = 0;
+
+ if (true) {
+ r = ck_ring_enqueue_mpmc(ring, buffer,
+ entries + i);
+ } else {
+ r = ck_ring_enqueue_mpmc_size(ring, buffer,
+ entries + i, &s);
+
+ if ((int)s != i) {
+ ck_error("Size is %u, expected %d.\n",
+					    s, i);
+ }
+ }
+
+ assert(r != false);
+ }
+
+ if (ck_ring_size(ring) != (unsigned int)size) {
+			ck_error("Fewer entries than expected: %u < %d\n",
+ ck_ring_size(ring), size);
+ }
+
+ if (ck_ring_capacity(ring) != ck_ring_size(ring) + 1) {
+ ck_error("Capacity less than expected: %u < %u\n",
+ ck_ring_size(ring), ck_ring_capacity(ring));
+ }
+ }
+
+ /*
+ * Wait for all threads. The idea here is to maximize the contention.
+ */
+ ck_barrier_centralized(&barrier, &sense, nthr);
+
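+	/*
+	 * Pipeline: every thread dequeues entries from its predecessor's ring
+	 * and forwards them into its own ring, checking the producer tid and
+	 * the value bounds at each hop.
+	 */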
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ buffer = _context[context->previous].buffer;
+ while (ck_ring_dequeue_mpmc(ring + context->previous,
+ buffer, &entry) == false);
+
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("[%u:%p] %u != %u\n",
+ context->tid, (void *)entry, entry->tid, context->previous);
+ }
+
+ if (entry->value < 0 || entry->value >= size) {
+ ck_error("[%u:%p] %u </> %u\n",
+ context->tid, (void *)entry, entry->tid, context->previous);
+ }
+
+ entry->tid = context->tid;
+ buffer = context->buffer;
+
+ if (true) {
+ r = ck_ring_enqueue_mpmc(ring + context->tid,
+ buffer, entry);
+ } else {
+ r = ck_ring_enqueue_mpmc_size(ring + context->tid,
+ buffer, entry, &s);
+
+ if ((int)s >= size) {
+ ck_error("Size %u out of range of %d\n",
+ s, size);
+ }
+ }
+ assert(r == true);
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ unsigned long l;
+ pthread_t *thread;
+ ck_ring_buffer_t *buffer;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size >= 4 && (size & size - 1) == 0);
+ size -= 1;
+
+ ring = malloc(sizeof(ck_ring_t) * nthr);
+ assert(ring);
+
+ _context = malloc(sizeof(*_context) * nthr);
+ assert(_context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+ fprintf(stderr, "SPSC test:");
+ for (i = 0; i < nthr; i++) {
+ _context[i].tid = i;
+ if (i == 0) {
+ _context[i].previous = nthr - 1;
+ _context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ _context[i].next = 0;
+ _context[i].previous = i - 1;
+ } else {
+ _context[i].next = i + 1;
+ _context[i].previous = i - 1;
+ }
+
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(ck_ring_buffer_t) * (size + 1));
+ _context[i].buffer = buffer;
+ ck_ring_init(ring + i, size + 1);
+ r = pthread_create(thread + i, NULL, test, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, " done\n");
+
+ fprintf(stderr, "SPMC test:\n");
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(void *) * (size + 1));
+ ck_ring_init(&ring_mpmc, size + 1);
+ for (i = 0; i < nthr - 1; i++) {
+ _context[i].buffer = buffer;
+ r = pthread_create(thread + i, NULL, test_spmc, _context + i);
+ assert(r == 0);
+ }
+
+ for (l = 0; l < (unsigned long)size * ITERATIONS * (nthr - 1) ; l++) {
+ struct entry *entry = malloc(sizeof *entry);
+
+ assert(entry != NULL);
+ entry->value_long = l;
+ entry->value = (int)l;
+ entry->tid = (int)l;
+ entry->magic = 0xdead;
+ entry->ref = 0;
+
+ /* Wait until queue is not full. */
+ if (l & 1) {
+ while (ck_ring_enqueue_mpmc(&ring_mpmc,
+ buffer,
+ entry) == false)
+ ck_pr_stall();
+ } else {
+ unsigned int s;
+
+ while (ck_ring_enqueue_mpmc_size(&ring_mpmc,
+ buffer, entry, &s) == false) {
+ ck_pr_stall();
+ }
+
+ if ((int)s >= (size * ITERATIONS * (nthr - 1))) {
+ ck_error("MPMC: Unexpected size of %u\n", s);
+ }
+ }
+ }
+
+ for (i = 0; i < nthr - 1; i++)
+ pthread_join(thread[i], NULL);
+ ck_pr_store_int(&eb, 0);
+ fprintf(stderr, "MPMC test:\n");
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(void *) * (size + 1));
+ ck_ring_init(&ring_mw, size + 1);
+ for (i = 0; i < nthr - 1; i++) {
+ _context[i].buffer = buffer;
+ r = pthread_create(thread + i, NULL, test_mpmc, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr - 1; i++)
+ pthread_join(thread[i], NULL);
+
+ return (0);
+}
diff --git a/regressions/ck_ring/validate/ck_ring_mpmc_template.c b/regressions/ck_ring/validate/ck_ring_mpmc_template.c
new file mode 100644
index 0000000..f076e9a
--- /dev/null
+++ b/regressions/ck_ring/validate/ck_ring_mpmc_template.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <ck_barrier.h>
+#include <ck_ring.h>
+#include <ck_spinlock.h>
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+ struct entry **buffer;
+};
+
+struct entry {
+ unsigned long value_long;
+ unsigned int magic;
+ unsigned int ref;
+ int tid;
+ int value;
+};
+
+CK_RING_PROTOTYPE(entry, entry *)
+
+static int nthr;
+static ck_ring_t *ring;
+static ck_ring_t ring_spmc CK_CC_CACHELINE;
+static struct affinity a;
+static int size;
+static int eb;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static struct context *_context;
+
+static void *
+test_spmc(void *c)
+{
+ unsigned int observed = 0;
+ unsigned long previous = 0;
+ unsigned int seed;
+ int i, k, j, tid;
+ struct context *context = c;
+ struct entry **buffer;
+
+ buffer = context->buffer;
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_int(&eb, 1);
+ ck_pr_fence_memory();
+ while (ck_pr_load_int(&eb) != nthr - 1);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ struct entry *o;
+ int spin;
+
+ /* Keep trying until we encounter at least one node. */
+ if (j & 1) {
+ while (CK_RING_DEQUEUE_MPMC(entry,
+ &ring_spmc, buffer, &o) == false);
+ } else {
+ while (CK_RING_TRYDEQUEUE_MPMC(entry,
+ &ring_spmc, buffer, &o) == false);
+ }
+
+ observed++;
+ if (o->value < 0
+ || o->value != o->tid
+ || o->magic != 0xdead
+ || (previous != 0 && previous >= o->value_long)) {
+ ck_error("[0x%p] (%x) (%d, %d) >< (0, %d)\n",
+ (void *)o, o->magic, o->tid, o->value, size);
+ }
+
+ o->magic = 0xbeef;
+ o->value = -31337;
+ o->tid = -31338;
+ previous = o->value_long;
+
+ if (ck_pr_faa_uint(&o->ref, 1) != 0) {
+ ck_error("[%p] We dequeued twice.\n", (void *)o);
+ }
+
+ if ((i % 4) == 0) {
+ spin = common_rand_r(&seed) % 16384;
+ for (k = 0; k < spin; k++) {
+ ck_pr_stall();
+ }
+ }
+
+ free(o);
+ }
+ }
+
+ fprintf(stderr, "[%d] Observed %u\n", tid, observed);
+ return NULL;
+}
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ unsigned int s;
+ int i, j;
+ bool r;
+ struct entry **buffer = context->buffer;
+ ck_barrier_centralized_state_t sense =
+ CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ if (context->tid == 0) {
+ struct entry **entries;
+
+ entries = malloc(sizeof(struct entry *) * size);
+ assert(entries != NULL);
+
+ if (ck_ring_size(ring) != 0) {
+ ck_error("More entries than expected: %u > 0\n",
+ ck_ring_size(ring));
+ }
+
+ for (i = 0; i < size; i++) {
+ entries[i] = malloc(sizeof(struct entry));
+ assert(entries[i] != NULL);
+
+ entries[i]->value = i;
+ entries[i]->tid = 0;
+
+ if (i & 1) {
+ r = CK_RING_ENQUEUE_MPMC(entry, ring, buffer,
+ &entries[i]);
+ } else {
+ r = CK_RING_ENQUEUE_MPMC_SIZE(entry, ring,
+ buffer, &entries[i], &s);
+
+ if ((int)s != i) {
+ ck_error("Size is %u, expected %d.\n",
+					    s, i);
+ }
+ }
+
+ assert(r != false);
+ }
+
+ if (ck_ring_size(ring) != (unsigned int)size) {
+			ck_error("Fewer entries than expected: %u < %d\n",
+ ck_ring_size(ring), size);
+ }
+
+ if (ck_ring_capacity(ring) != ck_ring_size(ring) + 1) {
+ ck_error("Capacity less than expected: %u < %u\n",
+ ck_ring_size(ring), ck_ring_capacity(ring));
+ }
+ }
+
+ /*
+ * Wait for all threads. The idea here is to maximize the contention.
+ */
+ ck_barrier_centralized(&barrier, &sense, nthr);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ buffer = _context[context->previous].buffer;
+ while (CK_RING_DEQUEUE_MPMC(entry,
+ ring + context->previous,
+ buffer, &entry) == false);
+
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("[%u:%p] %u != %u\n",
+ context->tid, (void *)entry,
+ entry->tid, context->previous);
+ }
+
+ if (entry->value < 0 || entry->value >= size) {
+ ck_error("[%u:%p] %u </> %u\n",
+ context->tid, (void *)entry,
+ entry->tid, context->previous);
+ }
+
+ entry->tid = context->tid;
+ buffer = context->buffer;
+
+ if (i & 1) {
+ r = CK_RING_ENQUEUE_MPMC(entry,
+ ring + context->tid,
+ buffer, &entry);
+ } else {
+ r = CK_RING_ENQUEUE_MPMC_SIZE(entry,
+ ring + context->tid,
+ buffer, &entry, &s);
+
+ if ((int)s >= size) {
+ ck_error("Size %u out of range of %d\n",
+ s, size);
+ }
+ }
+ assert(r == true);
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ unsigned long l;
+ pthread_t *thread;
+ struct entry **buffer;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size >= 4 && (size & size - 1) == 0);
+ size -= 1;
+
+ ring = malloc(sizeof(ck_ring_t) * nthr);
+ assert(ring);
+
+ _context = malloc(sizeof(*_context) * nthr);
+ assert(_context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
+ fprintf(stderr, "SPSC test:");
+ for (i = 0; i < nthr; i++) {
+ _context[i].tid = i;
+ if (i == 0) {
+ _context[i].previous = nthr - 1;
+ _context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ _context[i].next = 0;
+ _context[i].previous = i - 1;
+ } else {
+ _context[i].next = i + 1;
+ _context[i].previous = i - 1;
+ }
+
+ buffer = malloc(sizeof(struct entry *) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(struct entry *) * (size + 1));
+ _context[i].buffer = buffer;
+ ck_ring_init(ring + i, size + 1);
+ r = pthread_create(thread + i, NULL, test, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, " done\n");
+
+ fprintf(stderr, "MPMC test:\n");
+ buffer = malloc(sizeof(struct entry *) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(struct entry *) * (size + 1));
+ ck_ring_init(&ring_spmc, size + 1);
+ for (i = 0; i < nthr - 1; i++) {
+ _context[i].buffer = buffer;
+ r = pthread_create(thread + i, NULL, test_spmc, _context + i);
+ assert(r == 0);
+ }
+
+ for (l = 0; l < (unsigned long)size * ITERATIONS * (nthr - 1) ; l++) {
+ struct entry *entry = malloc(sizeof *entry);
+
+ assert(entry != NULL);
+ entry->value_long = l;
+ entry->value = (int)l;
+ entry->tid = (int)l;
+ entry->magic = 0xdead;
+ entry->ref = 0;
+
+ /* Wait until queue is not full. */
+ if (l & 1) {
+ while (CK_RING_ENQUEUE_MPMC(entry, &ring_spmc,
+ buffer, &entry) == false) {
+ ck_pr_stall();
+ }
+ } else {
+ unsigned int s;
+
+ while (CK_RING_ENQUEUE_MPMC_SIZE(entry, &ring_spmc,
+ buffer, &entry, &s) == false) {
+ ck_pr_stall();
+ }
+
+ if ((int)s >= (size * ITERATIONS * (nthr - 1))) {
+ ck_error("MPMC: Unexpected size of %u\n", s);
+ }
+ }
+ }
+
+ for (i = 0; i < nthr - 1; i++)
+ pthread_join(thread[i], NULL);
+
+ return 0;
+}
diff --git a/regressions/ck_ring/validate/ck_ring_spmc.c b/regressions/ck_ring/validate/ck_ring_spmc.c
new file mode 100644
index 0000000..161c0d8
--- /dev/null
+++ b/regressions/ck_ring/validate/ck_ring_spmc.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <ck_barrier.h>
+#include <ck_ring.h>
+#include <ck_spinlock.h>
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+ ck_ring_buffer_t *buffer;
+};
+
+struct entry {
+ unsigned long value_long;
+ unsigned int magic;
+ unsigned int ref;
+ int tid;
+ int value;
+};
+
+static int nthr;
+static ck_ring_t *ring;
+static ck_ring_t ring_spmc CK_CC_CACHELINE;
+static struct affinity a;
+static int size;
+static int eb;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static struct context *_context;
+
+static void *
+test_spmc(void *c)
+{
+ unsigned int observed = 0;
+ unsigned long previous = 0;
+ unsigned int seed;
+ int i, k, j, tid;
+ struct context *context = c;
+ ck_ring_buffer_t *buffer;
+
+ buffer = context->buffer;
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_int(&eb, 1);
+ ck_pr_fence_memory();
+ while (ck_pr_load_int(&eb) != nthr - 1);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ struct entry *o;
+ int spin;
+
+ /* Keep trying until we encounter at least one node. */
+ if (j & 1) {
+ while (ck_ring_dequeue_spmc(&ring_spmc, buffer,
+ &o) == false);
+ } else {
+ while (ck_ring_trydequeue_spmc(&ring_spmc, buffer,
+ &o) == false);
+ }
+
+ observed++;
+ if (o->value < 0
+ || o->value != o->tid
+ || o->magic != 0xdead
+ || (previous != 0 && previous >= o->value_long)) {
+ ck_error("[0x%p] (%x) (%d, %d) >< (0, %d)\n",
+ (void *)o, o->magic, o->tid, o->value, size);
+ }
+
+ o->magic = 0xbeef;
+ o->value = -31337;
+ o->tid = -31338;
+ previous = o->value_long;
+
+ if (ck_pr_faa_uint(&o->ref, 1) != 0) {
+ ck_error("[%p] We dequeued twice.\n", (void *)o);
+ }
+
+ if ((i % 4) == 0) {
+ spin = common_rand_r(&seed) % 16384;
+ for (k = 0; k < spin; k++) {
+ ck_pr_stall();
+ }
+ }
+
+ free(o);
+ }
+ }
+
+ fprintf(stderr, "[%d] Observed %u\n", tid, observed);
+ return NULL;
+}
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ unsigned int s;
+ int i, j;
+ bool r;
+ ck_ring_buffer_t *buffer = context->buffer;
+ ck_barrier_centralized_state_t sense =
+ CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ if (context->tid == 0) {
+ struct entry *entries;
+
+ entries = malloc(sizeof(struct entry) * size);
+ assert(entries != NULL);
+
+ if (ck_ring_size(ring) != 0) {
+ ck_error("More entries than expected: %u > 0\n",
+ ck_ring_size(ring));
+ }
+
+ for (i = 0; i < size; i++) {
+ entries[i].value = i;
+ entries[i].tid = 0;
+
+ if (i & 1) {
+ r = ck_ring_enqueue_spmc(ring, buffer,
+ entries + i);
+ } else {
+ r = ck_ring_enqueue_spmc_size(ring, buffer,
+ entries + i, &s);
+
+ if ((int)s != i) {
+ ck_error("Size is %u, expected %d.\n",
+					    s, i);
+ }
+ }
+
+ assert(r != false);
+ }
+
+ if (ck_ring_size(ring) != (unsigned int)size) {
+			ck_error("Fewer entries than expected: %u < %d\n",
+ ck_ring_size(ring), size);
+ }
+
+ if (ck_ring_capacity(ring) != ck_ring_size(ring) + 1) {
+ ck_error("Capacity less than expected: %u < %u\n",
+ ck_ring_size(ring), ck_ring_capacity(ring));
+ }
+ }
+
+ /*
+ * Wait for all threads. The idea here is to maximize the contention.
+ */
+ ck_barrier_centralized(&barrier, &sense, nthr);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ buffer = _context[context->previous].buffer;
+ while (ck_ring_dequeue_spmc(ring + context->previous,
+ buffer, &entry) == false);
+
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("[%u:%p] %u != %u\n",
+ context->tid, (void *)entry, entry->tid, context->previous);
+ }
+
+ if (entry->value < 0 || entry->value >= size) {
+ ck_error("[%u:%p] %u </> %u\n",
+ context->tid, (void *)entry, entry->tid, context->previous);
+ }
+
+ entry->tid = context->tid;
+ buffer = context->buffer;
+
+ if (i & 1) {
+ r = ck_ring_enqueue_spmc(ring + context->tid,
+ buffer, entry);
+ } else {
+ r = ck_ring_enqueue_spmc_size(ring + context->tid,
+ buffer, entry, &s);
+
+ if ((int)s >= size) {
+ ck_error("Size %u out of range of %d\n",
+ s, size);
+ }
+ }
+ assert(r == true);
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ unsigned long l;
+ pthread_t *thread;
+ ck_ring_buffer_t *buffer;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size >= 4 && (size & size - 1) == 0);
+ size -= 1;
+
+ ring = malloc(sizeof(ck_ring_t) * nthr);
+ assert(ring);
+
+ _context = malloc(sizeof(*_context) * nthr);
+ assert(_context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
+ fprintf(stderr, "SPSC test:");
+ for (i = 0; i < nthr; i++) {
+ _context[i].tid = i;
+ if (i == 0) {
+ _context[i].previous = nthr - 1;
+ _context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ _context[i].next = 0;
+ _context[i].previous = i - 1;
+ } else {
+ _context[i].next = i + 1;
+ _context[i].previous = i - 1;
+ }
+
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(ck_ring_buffer_t) * (size + 1));
+ _context[i].buffer = buffer;
+ ck_ring_init(ring + i, size + 1);
+ r = pthread_create(thread + i, NULL, test, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, " done\n");
+
+ fprintf(stderr, "SPMC test:\n");
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(void *) * (size + 1));
+ ck_ring_init(&ring_spmc, size + 1);
+ for (i = 0; i < nthr - 1; i++) {
+ _context[i].buffer = buffer;
+ r = pthread_create(thread + i, NULL, test_spmc, _context + i);
+ assert(r == 0);
+ }
+
+ for (l = 0; l < (unsigned long)size * ITERATIONS * (nthr - 1) ; l++) {
+ struct entry *entry = malloc(sizeof *entry);
+
+ assert(entry != NULL);
+ entry->value_long = l;
+ entry->value = (int)l;
+ entry->tid = (int)l;
+ entry->magic = 0xdead;
+ entry->ref = 0;
+
+ /* Wait until queue is not full. */
+ if (l & 1) {
+ while (ck_ring_enqueue_spmc(&ring_spmc,
+ buffer,
+ entry) == false)
+ ck_pr_stall();
+ } else {
+ unsigned int s;
+
+ while (ck_ring_enqueue_spmc_size(&ring_spmc,
+ buffer, entry, &s) == false) {
+ ck_pr_stall();
+ }
+
+ if ((int)s >= (size * ITERATIONS * (nthr - 1))) {
+ ck_error("MPMC: Unexpected size of %u\n", s);
+ }
+ }
+ }
+
+ for (i = 0; i < nthr - 1; i++)
+ pthread_join(thread[i], NULL);
+
+ return (0);
+}
+
diff --git a/regressions/ck_ring/validate/ck_ring_spmc_template.c b/regressions/ck_ring/validate/ck_ring_spmc_template.c
new file mode 100644
index 0000000..bbc75c1
--- /dev/null
+++ b/regressions/ck_ring/validate/ck_ring_spmc_template.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <ck_barrier.h>
+#include <ck_ring.h>
+#include <ck_spinlock.h>
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+ struct entry **buffer;
+};
+
+struct entry {
+ unsigned long value_long;
+ unsigned int magic;
+ unsigned int ref;
+ int tid;
+ int value;
+};
+
+CK_RING_PROTOTYPE(entry, entry *)
+
+static int nthr;
+static ck_ring_t *ring;
+static ck_ring_t ring_spmc CK_CC_CACHELINE;
+static struct affinity a;
+static int size;
+static int eb;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static struct context *_context;
+
+static void *
+test_spmc(void *c)
+{
+ unsigned int observed = 0;
+ unsigned long previous = 0;
+ unsigned int seed;
+ int i, k, j, tid;
+ struct context *context = c;
+ struct entry **buffer;
+
+ buffer = context->buffer;
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_int(&eb, 1);
+ ck_pr_fence_memory();
+ while (ck_pr_load_int(&eb) != nthr - 1);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ struct entry *o;
+ int spin;
+
+ /* Keep trying until we encounter at least one node. */
+ if (j & 1) {
+ while (CK_RING_DEQUEUE_SPMC(entry,
+ &ring_spmc, buffer, &o) == false);
+ } else {
+ while (CK_RING_TRYDEQUEUE_SPMC(entry,
+ &ring_spmc, buffer, &o) == false);
+ }
+
+ observed++;
+ if (o->value < 0
+ || o->value != o->tid
+ || o->magic != 0xdead
+ || (previous != 0 && previous >= o->value_long)) {
+ ck_error("[0x%p] (%x) (%d, %d) >< (0, %d)\n",
+ (void *)o, o->magic, o->tid, o->value, size);
+ }
+
+ o->magic = 0xbeef;
+ o->value = -31337;
+ o->tid = -31338;
+ previous = o->value_long;
+
+ if (ck_pr_faa_uint(&o->ref, 1) != 0) {
+ ck_error("[%p] We dequeued twice.\n", (void *)o);
+ }
+
+ if ((i % 4) == 0) {
+ spin = common_rand_r(&seed) % 16384;
+ for (k = 0; k < spin; k++) {
+ ck_pr_stall();
+ }
+ }
+
+ free(o);
+ }
+ }
+
+ fprintf(stderr, "[%d] Observed %u\n", tid, observed);
+ return NULL;
+}
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ unsigned int s;
+ int i, j;
+ bool r;
+ struct entry **buffer = context->buffer;
+ ck_barrier_centralized_state_t sense =
+ CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ if (context->tid == 0) {
+ struct entry **entries;
+
+ entries = malloc(sizeof(struct entry *) * size);
+ assert(entries != NULL);
+
+ if (ck_ring_size(ring) != 0) {
+ ck_error("More entries than expected: %u > 0\n",
+ ck_ring_size(ring));
+ }
+
+ for (i = 0; i < size; i++) {
+ entries[i] = malloc(sizeof(struct entry));
+ assert(entries[i] != NULL);
+
+ entries[i]->value = i;
+ entries[i]->tid = 0;
+
+ if (i & 1) {
+ r = CK_RING_ENQUEUE_SPMC(entry, ring, buffer,
+ &entries[i]);
+ } else {
+ r = CK_RING_ENQUEUE_SPMC_SIZE(entry, ring,
+ buffer, &entries[i], &s);
+
+ if ((int)s != i) {
+ ck_error("Size is %u, expected %d.\n",
+					    s, i);
+ }
+ }
+
+ assert(r != false);
+ }
+
+ if (ck_ring_size(ring) != (unsigned int)size) {
+			ck_error("Fewer entries than expected: %u < %d\n",
+ ck_ring_size(ring), size);
+ }
+
+ if (ck_ring_capacity(ring) != ck_ring_size(ring) + 1) {
+ ck_error("Capacity less than expected: %u < %u\n",
+ ck_ring_size(ring), ck_ring_capacity(ring));
+ }
+ }
+
+ /*
+ * Wait for all threads. The idea here is to maximize the contention.
+ */
+ ck_barrier_centralized(&barrier, &sense, nthr);
+
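+	/* Relay: drain the previous thread's ring and refill our own so entries circulate around the thread ring. */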
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ buffer = _context[context->previous].buffer;
+ while (CK_RING_DEQUEUE_SPMC(entry,
+ ring + context->previous,
+ buffer, &entry) == false);
+
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("[%u:%p] %u != %u\n",
+ context->tid, (void *)entry,
+ entry->tid, context->previous);
+ }
+
+ if (entry->value < 0 || entry->value >= size) {
+				ck_error("[%u:%p] value %d out of range [0, %d)\n",
+				    context->tid, (void *)entry,
+				    entry->value, size);
+ }
+
+ entry->tid = context->tid;
+ buffer = context->buffer;
+
+ if (i & 1) {
+ r = CK_RING_ENQUEUE_SPMC(entry,
+ ring + context->tid,
+ buffer, &entry);
+ } else {
+ r = CK_RING_ENQUEUE_SPMC_SIZE(entry,
+ ring + context->tid,
+ buffer, &entry, &s);
+
+ if ((int)s >= size) {
+ ck_error("Size %u out of range of %d\n",
+ s, size);
+ }
+ }
+ assert(r == true);
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ unsigned long l;
+ pthread_t *thread;
+ struct entry **buffer;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+	assert(size >= 4 && (size & (size - 1)) == 0);
+ size -= 1;
+
+ ring = malloc(sizeof(ck_ring_t) * nthr);
+ assert(ring);
+
+ _context = malloc(sizeof(*_context) * nthr);
+ assert(_context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
+ fprintf(stderr, "SPSC test:");
+ for (i = 0; i < nthr; i++) {
+ _context[i].tid = i;
+ if (i == 0) {
+ _context[i].previous = nthr - 1;
+ _context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ _context[i].next = 0;
+ _context[i].previous = i - 1;
+ } else {
+ _context[i].next = i + 1;
+ _context[i].previous = i - 1;
+ }
+
+ buffer = malloc(sizeof(struct entry *) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(struct entry *) * (size + 1));
+ _context[i].buffer = buffer;
+ ck_ring_init(ring + i, size + 1);
+ r = pthread_create(thread + i, NULL, test, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, " done\n");
+
+ fprintf(stderr, "SPMC test:\n");
+ buffer = malloc(sizeof(struct entry *) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(struct entry *) * (size + 1));
+ ck_ring_init(&ring_spmc, size + 1);
+ for (i = 0; i < nthr - 1; i++) {
+ _context[i].buffer = buffer;
+ r = pthread_create(thread + i, NULL, test_spmc, _context + i);
+ assert(r == 0);
+ }
+
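+	/* The main thread is the single producer for the SPMC ring; the consumer threads free each dequeued entry. */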
+ for (l = 0; l < (unsigned long)size * ITERATIONS * (nthr - 1) ; l++) {
+ struct entry *entry = malloc(sizeof *entry);
+
+ assert(entry != NULL);
+ entry->value_long = l;
+ entry->value = (int)l;
+ entry->tid = (int)l;
+ entry->magic = 0xdead;
+ entry->ref = 0;
+
+ /* Wait until queue is not full. */
+ if (l & 1) {
+ while (CK_RING_ENQUEUE_SPMC(entry, &ring_spmc,
+ buffer, &entry) == false) {
+ ck_pr_stall();
+ }
+ } else {
+ unsigned int s;
+
+ while (CK_RING_ENQUEUE_SPMC_SIZE(entry, &ring_spmc,
+ buffer, &entry, &s) == false) {
+ ck_pr_stall();
+ }
+
+ if ((int)s >= (size * ITERATIONS * (nthr - 1))) {
+			ck_error("SPMC: Unexpected size of %u\n", s);
+ }
+ }
+ }
+
+ for (i = 0; i < nthr - 1; i++)
+ pthread_join(thread[i], NULL);
+
+ return 0;
+}
+
diff --git a/regressions/ck_ring/validate/ck_ring_spsc.c b/regressions/ck_ring/validate/ck_ring_spsc.c
new file mode 100644
index 0000000..910f7e6
--- /dev/null
+++ b/regressions/ck_ring/validate/ck_ring_spsc.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+
+#include <ck_barrier.h>
+#include <ck_ring.h>
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+ void *buffer;
+};
+
+struct entry {
+ int tid;
+ int value;
+};
+
+static int nthr;
+static ck_ring_t *ring;
+static struct affinity a;
+static int size;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static struct context *_context;
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ unsigned int s;
+ int i, j;
+ bool r;
+ ck_barrier_centralized_state_t sense =
+ CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+ ck_ring_buffer_t *buffer;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ buffer = context->buffer;
+ if (context->tid == 0) {
+ struct entry *entries;
+
+ entries = malloc(sizeof(struct entry) * size);
+ assert(entries != NULL);
+
+ if (ck_ring_size(ring) != 0) {
+ ck_error("More entries than expected: %u > 0\n",
+ ck_ring_size(ring));
+ }
+
+ for (i = 0; i < size; i++) {
+ entries[i].value = i;
+ entries[i].tid = 0;
+
+ if (i & 1) {
+ r = ck_ring_enqueue_spsc(ring, buffer,
+ entries + i);
+ } else {
+ r = ck_ring_enqueue_spsc_size(ring,
+ buffer, entries + i, &s);
+
+ if ((int)s != i) {
+ ck_error("Size is %u, expected %d\n",
+					    s, i);
+ }
+ }
+
+ assert(r != false);
+ }
+
+ if (ck_ring_size(ring) != (unsigned int)size) {
+		ck_error("Fewer entries than expected: %u < %d\n",
+ ck_ring_size(ring), size);
+ }
+
+ if (ck_ring_capacity(ring) != ck_ring_size(ring) + 1) {
+			ck_error("Capacity %u != expected %u\n",
+			    ck_ring_capacity(ring), ck_ring_size(ring) + 1);
+ }
+ }
+
+ ck_barrier_centralized(&barrier, &sense, nthr);
+
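+	/* Relay entries around the thread ring: drain the previous thread's ring, then refill our own. */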
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ buffer = _context[context->previous].buffer;
+ while (ck_ring_dequeue_spsc(ring + context->previous,
+ buffer, &entry) == false);
+
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("[%u:%p] %u != %u\n",
+ context->tid, (void *)entry, entry->tid, context->previous);
+ }
+
+ if (entry->value != j) {
+				ck_error("[%u:%p] %d != %d\n",
+				    context->tid, (void *)entry, entry->value, j);
+ }
+
+ entry->tid = context->tid;
+ buffer = context->buffer;
+ if (i & 1) {
+ r = ck_ring_enqueue_spsc(ring + context->tid,
+ buffer, entry);
+ } else {
+ r = ck_ring_enqueue_spsc_size(ring +
+ context->tid, buffer, entry, &s);
+
+ if ((int)s >= size) {
+					ck_error("Size %u is out of range of %d\n",
+ s, size);
+ }
+ }
+ assert(r == true);
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ ck_ring_buffer_t *buffer;
+ pthread_t *thread;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+	assert(size >= 4 && (size & (size - 1)) == 0);
+ size -= 1;
+
+ ring = malloc(sizeof(ck_ring_t) * nthr);
+ assert(ring);
+
+ _context = malloc(sizeof(*_context) * nthr);
+ assert(_context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
+ for (i = 0; i < nthr; i++) {
+ _context[i].tid = i;
+ if (i == 0) {
+ _context[i].previous = nthr - 1;
+ _context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ _context[i].next = 0;
+ _context[i].previous = i - 1;
+ } else {
+ _context[i].next = i + 1;
+ _context[i].previous = i - 1;
+ }
+
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ _context[i].buffer = buffer;
+ ck_ring_init(ring + i, size + 1);
+ r = pthread_create(thread + i, NULL, test, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ return (0);
+}
diff --git a/regressions/ck_rwcohort/benchmark/Makefile b/regressions/ck_rwcohort/benchmark/Makefile
new file mode 100644
index 0000000..054c85c
--- /dev/null
+++ b/regressions/ck_rwcohort/benchmark/Makefile
@@ -0,0 +1,32 @@
+.PHONY: clean distribution
+
+OBJECTS=ck_neutral.THROUGHPUT ck_neutral.LATENCY \
+	ck_rp.THROUGHPUT ck_rp.LATENCY \
+	ck_wp.THROUGHPUT ck_wp.LATENCY
+
+all: $(OBJECTS)
+
+ck_neutral.THROUGHPUT: ck_neutral.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_neutral.THROUGHPUT ck_neutral.c
+
+ck_neutral.LATENCY: ck_neutral.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_neutral.LATENCY ck_neutral.c
+
+ck_rp.THROUGHPUT: ck_rp.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_rp.THROUGHPUT ck_rp.c
+
+ck_rp.LATENCY: ck_rp.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_rp.LATENCY ck_rp.c
+
+ck_wp.THROUGHPUT: ck_wp.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_wp.THROUGHPUT ck_wp.c
+
+ck_wp.LATENCY: ck_wp.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_wp.LATENCY ck_wp.c
+
+clean:
+ rm -rf *.dSYM *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_rwcohort/benchmark/ck_neutral.c b/regressions/ck_rwcohort/benchmark/ck_neutral.c
new file mode 100644
index 0000000..9fb85db
--- /dev/null
+++ b/regressions/ck_rwcohort/benchmark/ck_neutral.c
@@ -0,0 +1,7 @@
+#include "../ck_neutral.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_rwcohort/benchmark/ck_rp.c b/regressions/ck_rwcohort/benchmark/ck_rp.c
new file mode 100644
index 0000000..798e578
--- /dev/null
+++ b/regressions/ck_rwcohort/benchmark/ck_rp.c
@@ -0,0 +1,7 @@
+#include "../ck_rp.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_rwcohort/benchmark/ck_wp.c b/regressions/ck_rwcohort/benchmark/ck_wp.c
new file mode 100644
index 0000000..07b0cce
--- /dev/null
+++ b/regressions/ck_rwcohort/benchmark/ck_wp.c
@@ -0,0 +1,7 @@
+#include "../ck_wp.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_rwcohort/benchmark/latency.h b/regressions/ck_rwcohort/benchmark/latency.h
new file mode 100644
index 0000000..027a8b2
--- /dev/null
+++ b/regressions/ck_rwcohort/benchmark/latency.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * Copyright 2013 Brendon Scheinman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_rwcohort.h>
+#include <ck_spinlock.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
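+/*
+ * The cohort lock callbacks take an additional context argument, so the plain
+ * FAS spinlock operations are wrapped here to discard it.
+ */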
+static void
+ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_fas_lock(lock);
+}
+
+static void
+ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_fas_unlock(lock);
+}
+
+static bool
+ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ return ck_spinlock_fas_locked(lock);
+}
+
+CK_COHORT_PROTOTYPE(fas_fas,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context)
+LOCK_PROTOTYPE(fas_fas)
+
+int
+main(void)
+{
+ uint64_t s_b, e_b, i;
+ ck_spinlock_fas_t global_lock = CK_SPINLOCK_FAS_INITIALIZER;
+ ck_spinlock_fas_t local_lock = CK_SPINLOCK_FAS_INITIALIZER;
+ CK_COHORT_INSTANCE(fas_fas) cohort = CK_COHORT_INITIALIZER;
+ LOCK_INSTANCE(fas_fas) rw_cohort = LOCK_INITIALIZER;
+
+ CK_COHORT_INIT(fas_fas, &cohort, &global_lock, &local_lock,
+ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
+ LOCK_INIT(fas_fas, &rw_cohort, CK_RWCOHORT_WP_DEFAULT_WAIT_LIMIT);
+
+ for (i = 0; i < STEPS; i++) {
+ WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL);
+ WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL);
+ WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL);
+ }
+ e_b = rdtsc();
+ printf("WRITE: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL);
+ }
+ e_b = rdtsc();
+ printf("READ: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ return (0);
+}
+
diff --git a/regressions/ck_rwcohort/benchmark/throughput.h b/regressions/ck_rwcohort/benchmark/throughput.h
new file mode 100644
index 0000000..2870855
--- /dev/null
+++ b/regressions/ck_rwcohort/benchmark/throughput.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * Copyright 2013 Brendon Scheinman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_cohort.h>
+#include <ck_rwcohort.h>
+#include <ck_spinlock.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#define max(x, y) (((x) > (y)) ? (x) : (y))
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+static unsigned int barrier;
+static unsigned int flag CK_CC_CACHELINE;
+static struct affinity affinity;
+static unsigned int nthr;
+
+static void
+ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+
+ (void)context;
+ ck_spinlock_fas_lock(lock);
+ return;
+}
+
+static void
+ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+
+ (void)context;
+ ck_spinlock_fas_unlock(lock);
+ return;
+}
+
+static bool
+ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+
+ (void)context;
+ return ck_spinlock_fas_locked(lock);
+}
+
+CK_COHORT_PROTOTYPE(fas_fas,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context)
+LOCK_PROTOTYPE(fas_fas)
+
+struct cohort_record {
+ CK_COHORT_INSTANCE(fas_fas) cohort;
+} CK_CC_CACHELINE;
+static struct cohort_record *cohorts;
+
+static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER;
+static LOCK_INSTANCE(fas_fas) rw_cohort = LOCK_INITIALIZER;
+static unsigned int n_cohorts;
+
+struct block {
+ unsigned int tid;
+};
+
+static void *
+thread_rwlock(void *pun)
+{
+ uint64_t s_b, e_b, a, i;
+ uint64_t *value = pun;
+ CK_COHORT_INSTANCE(fas_fas) *cohort;
+ unsigned int core;
+
+ if (aff_iterate_core(&affinity, &core) != 0) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ cohort = &((cohorts + (core / (int)(affinity.delta)) % n_cohorts)->cohort);
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) != nthr)
+ ck_pr_stall();
+
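+	/* Each sample covers 16 unrolled READ_LOCK/READ_UNLOCK pairs; the >> 4 below averages the cost per pair. */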
+ for (i = 1, a = 0;; i++) {
+ s_b = rdtsc();
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ e_b = rdtsc();
+
+ a += (e_b - s_b) >> 4;
+
+ if (ck_pr_load_uint(&flag) == 1)
+ break;
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) != nthr * 2)
+ ck_pr_stall();
+
+ *value = (a / i);
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int i;
+ pthread_t *threads;
+ uint64_t *latency;
+ struct block *context;
+ ck_spinlock_fas_t *local_lock;
+
+ if (argc != 4) {
+ ck_error("Usage: throughput <number of cohorts> <threads per cohort> <affinity delta>\n");
+ }
+
+ n_cohorts = atoi(argv[1]);
+ if (n_cohorts <= 0) {
+ ck_error("ERROR: Number of cohorts must be greater than 0\n");
+ }
+
+ nthr = n_cohorts * atoi(argv[2]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ cohorts = malloc(sizeof(struct cohort_record) * n_cohorts);
+ if (cohorts == NULL) {
+ ck_error("ERROR: Could not allocate cohort structures\n");
+ }
+
+ context = malloc(sizeof(struct block) * nthr);
+ if (context == NULL) {
+ ck_error("ERROR: Could not allocate thread contexts\n");
+ }
+
+ affinity.delta = atoi(argv[3]);
+ affinity.request = 0;
+
+ latency = malloc(sizeof(*latency) * nthr);
+ if (latency == NULL) {
+ ck_error("ERROR: Could not create latency buffer\n");
+ }
+ memset(latency, 0, sizeof(*latency) * nthr);
+
+ fprintf(stderr, "Creating cohorts...");
+ for (i = 0 ; i < n_cohorts ; i++) {
+		local_lock = malloc(max(CK_MD_CACHELINE, sizeof(ck_spinlock_fas_t)));
+		if (local_lock == NULL) {
+			ck_error("ERROR: Could not allocate local lock\n");
+		}
+		ck_spinlock_fas_init(local_lock);
+ CK_COHORT_INIT(fas_fas, &((cohorts + i)->cohort), &global_lock, local_lock,
+ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
+ local_lock = NULL;
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Creating threads (rwlock)...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread_rwlock, latency + i) != 0) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ common_sleep(10);
+ ck_pr_store_uint(&flag, 1);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (i = 1; i <= nthr; i++)
+ printf("%10u %20" PRIu64 "\n", i, latency[i - 1]);
+
+ return (0);
+}
+
diff --git a/regressions/ck_rwcohort/ck_neutral.h b/regressions/ck_rwcohort/ck_neutral.h
new file mode 100644
index 0000000..dbbda9d
--- /dev/null
+++ b/regressions/ck_rwcohort/ck_neutral.h
@@ -0,0 +1,8 @@
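+/*
+ * Map the generic LOCK_* names used by the shared benchmark and validation
+ * sources onto the neutral rwcohort variant; the neutral variant takes no
+ * wait limit, so LOCK_INIT drops its third argument.
+ */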
+#define LOCK_PROTOTYPE CK_RWCOHORT_NEUTRAL_PROTOTYPE
+#define LOCK_INSTANCE CK_RWCOHORT_NEUTRAL_INSTANCE
+#define LOCK_INITIALIZER CK_RWCOHORT_NEUTRAL_INITIALIZER
+#define LOCK_INIT(N, C, W) CK_RWCOHORT_NEUTRAL_INIT(N, C)
+#define READ_LOCK CK_RWCOHORT_NEUTRAL_READ_LOCK
+#define WRITE_LOCK CK_RWCOHORT_NEUTRAL_WRITE_LOCK
+#define READ_UNLOCK CK_RWCOHORT_NEUTRAL_READ_UNLOCK
+#define WRITE_UNLOCK CK_RWCOHORT_NEUTRAL_WRITE_UNLOCK
diff --git a/regressions/ck_rwcohort/ck_rp.h b/regressions/ck_rwcohort/ck_rp.h
new file mode 100644
index 0000000..e20f3d2
--- /dev/null
+++ b/regressions/ck_rwcohort/ck_rp.h
@@ -0,0 +1,8 @@
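+/* Map the generic LOCK_* names onto the reader-preference (RP) rwcohort variant. */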
+#define LOCK_PROTOTYPE CK_RWCOHORT_RP_PROTOTYPE
+#define LOCK_INSTANCE CK_RWCOHORT_RP_INSTANCE
+#define LOCK_INITIALIZER CK_RWCOHORT_RP_INITIALIZER
+#define LOCK_INIT CK_RWCOHORT_RP_INIT
+#define READ_LOCK CK_RWCOHORT_RP_READ_LOCK
+#define READ_UNLOCK CK_RWCOHORT_RP_READ_UNLOCK
+#define WRITE_LOCK CK_RWCOHORT_RP_WRITE_LOCK
+#define WRITE_UNLOCK CK_RWCOHORT_RP_WRITE_UNLOCK
diff --git a/regressions/ck_rwcohort/ck_wp.h b/regressions/ck_rwcohort/ck_wp.h
new file mode 100644
index 0000000..556c7df
--- /dev/null
+++ b/regressions/ck_rwcohort/ck_wp.h
@@ -0,0 +1,8 @@
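+/* Map the generic LOCK_* names onto the writer-preference (WP) rwcohort variant. */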
+#define LOCK_PROTOTYPE CK_RWCOHORT_WP_PROTOTYPE
+#define LOCK_INSTANCE CK_RWCOHORT_WP_INSTANCE
+#define LOCK_INITIALIZER CK_RWCOHORT_WP_INITIALIZER
+#define LOCK_INIT CK_RWCOHORT_WP_INIT
+#define READ_LOCK CK_RWCOHORT_WP_READ_LOCK
+#define WRITE_LOCK CK_RWCOHORT_WP_WRITE_LOCK
+#define READ_UNLOCK CK_RWCOHORT_WP_READ_UNLOCK
+#define WRITE_UNLOCK CK_RWCOHORT_WP_WRITE_UNLOCK
diff --git a/regressions/ck_rwcohort/validate/Makefile b/regressions/ck_rwcohort/validate/Makefile
new file mode 100644
index 0000000..33e3a29
--- /dev/null
+++ b/regressions/ck_rwcohort/validate/Makefile
@@ -0,0 +1,25 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_neutral ck_rp ck_wp
+
+all: $(OBJECTS)
+
+ck_neutral: ck_neutral.c ../../../include/ck_rwcohort.h
+ $(CC) $(CFLAGS) -o ck_neutral ck_neutral.c
+
+ck_rp: ck_rp.c ../../../include/ck_rwcohort.h
+ $(CC) $(CFLAGS) -o ck_rp ck_rp.c
+
+ck_wp: ck_wp.c ../../../include/ck_rwcohort.h
+ $(CC) $(CFLAGS) -o ck_wp ck_wp.c
+
+check: all
+ ./ck_neutral `expr $(CORES) / 2` 2 1
+ ./ck_rp `expr $(CORES) / 2` 2 1
+ ./ck_wp `expr $(CORES) / 2` 2 1
+
+clean:
+ rm -rf *.dSYM *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_rwcohort/validate/ck_neutral.c b/regressions/ck_rwcohort/validate/ck_neutral.c
new file mode 100644
index 0000000..7884dc5
--- /dev/null
+++ b/regressions/ck_rwcohort/validate/ck_neutral.c
@@ -0,0 +1,2 @@
+#include "../ck_neutral.h"
+#include "validate.h"
diff --git a/regressions/ck_rwcohort/validate/ck_rp.c b/regressions/ck_rwcohort/validate/ck_rp.c
new file mode 100644
index 0000000..d63e9d5
--- /dev/null
+++ b/regressions/ck_rwcohort/validate/ck_rp.c
@@ -0,0 +1,2 @@
+#include "../ck_rp.h"
+#include "validate.h"
diff --git a/regressions/ck_rwcohort/validate/ck_wp.c b/regressions/ck_rwcohort/validate/ck_wp.c
new file mode 100644
index 0000000..f89be35
--- /dev/null
+++ b/regressions/ck_rwcohort/validate/ck_wp.c
@@ -0,0 +1,2 @@
+#include "../ck_wp.h"
+#include "validate.h"
diff --git a/regressions/ck_rwcohort/validate/validate.h b/regressions/ck_rwcohort/validate/validate.h
new file mode 100644
index 0000000..8bc9a88
--- /dev/null
+++ b/regressions/ck_rwcohort/validate/validate.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * Copyright 2013 Brendon Scheinman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_rwcohort.h>
+#include <ck_spinlock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+static struct affinity a;
+static unsigned int locked;
+static int nthr;
+static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER;
+
+static void
+ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_fas_lock(lock);
+}
+
+static void
+ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_fas_unlock(lock);
+}
+
+static bool
+ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ return ck_spinlock_fas_locked(lock);
+}
+
+CK_COHORT_PROTOTYPE(fas_fas,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context)
+LOCK_PROTOTYPE(fas_fas)
+
+static CK_COHORT_INSTANCE(fas_fas) *cohorts;
+static LOCK_INSTANCE(fas_fas) rw_cohort = LOCK_INITIALIZER;
+static int n_cohorts;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ int i = ITERATE;
+ unsigned int l;
+ unsigned int core;
+ CK_COHORT_INSTANCE(fas_fas) *cohort;
+
+ if (aff_iterate_core(&a, &core)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
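+	/* Cores are grouped into blocks of a.delta; each block maps onto one cohort (modulo n_cohorts). */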
+ cohort = cohorts + (core / (int)(a.delta)) % n_cohorts;
+
+ while (i--) {
+ WRITE_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ WRITE_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ int threads_per_cohort;
+ ck_spinlock_fas_t *local_lock;
+ int i;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <number of cohorts> <threads per cohort> <affinity delta>\n");
+ }
+
+ n_cohorts = atoi(argv[1]);
+ if (n_cohorts <= 0) {
+ ck_error("ERROR: Number of cohorts must be greater than 0\n");
+ }
+
+ threads_per_cohort = atoi(argv[2]);
+ if (threads_per_cohort <= 0) {
+ ck_error("ERROR: Threads per cohort must be greater than 0\n");
+ }
+
+ nthr = n_cohorts * threads_per_cohort;
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[3]);
+
+ fprintf(stderr, "Creating cohorts...");
+ cohorts = malloc(sizeof(CK_COHORT_INSTANCE(fas_fas)) * n_cohorts);
+ if (cohorts == NULL) {
+ ck_error("ERROR: Could not allocate base cohort structures\n");
+ }
+	for (i = 0 ; i < n_cohorts ; i++) {
+		local_lock = malloc(sizeof(ck_spinlock_fas_t));
+		if (local_lock == NULL) {
+			ck_error("ERROR: Could not allocate local lock\n");
+		}
+		ck_spinlock_fas_init(local_lock);
+		CK_COHORT_INIT(fas_fas, cohorts + i, &global_fas_lock, local_lock,
+		    CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
+	}
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Creating threads...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return (0);
+}
+
diff --git a/regressions/ck_rwlock/benchmark/Makefile b/regressions/ck_rwlock/benchmark/Makefile
new file mode 100644
index 0000000..ed63504
--- /dev/null
+++ b/regressions/ck_rwlock/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: clean distribution
+
+OBJECTS=latency throughput
+
+all: $(OBJECTS)
+
+latency: latency.c ../../../include/ck_rwlock.h ../../../include/ck_elide.h
+ $(CC) $(CFLAGS) -o latency latency.c
+
+throughput: throughput.c ../../../include/ck_rwlock.h ../../../include/ck_elide.h
+ $(CC) $(CFLAGS) -o throughput throughput.c
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_rwlock/benchmark/latency.c b/regressions/ck_rwlock/benchmark/latency.c
new file mode 100644
index 0000000..18213c6
--- /dev/null
+++ b/regressions/ck_rwlock/benchmark/latency.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_rwlock.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#define CK_F_PR_RTM
+
+#ifndef STEPS
+#define STEPS 2000000
+#endif
+
+int
+main(void)
+{
+ uint64_t s_b, e_b, i;
+ ck_rwlock_t rwlock = CK_RWLOCK_INITIALIZER;
+
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_write_lock(&rwlock);
+ ck_rwlock_write_unlock(&rwlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_write_lock(&rwlock);
+ ck_rwlock_write_unlock(&rwlock);
+ }
+ e_b = rdtsc();
+ printf(" WRITE: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+#ifdef CK_F_PR_RTM
+ struct ck_elide_config config = CK_ELIDE_CONFIG_DEFAULT_INITIALIZER;
+ struct ck_elide_stat st = CK_ELIDE_STAT_INITIALIZER;
+
+ for (i = 0; i < STEPS; i++) {
+ CK_ELIDE_LOCK(ck_rwlock_write, &rwlock);
+ CK_ELIDE_UNLOCK(ck_rwlock_write, &rwlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ CK_ELIDE_LOCK(ck_rwlock_write, &rwlock);
+ CK_ELIDE_UNLOCK(ck_rwlock_write, &rwlock);
+ }
+ e_b = rdtsc();
+ printf(" (rtm) WRITE: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ CK_ELIDE_LOCK_ADAPTIVE(ck_rwlock_write, &st, &config, &rwlock);
+ CK_ELIDE_UNLOCK_ADAPTIVE(ck_rwlock_write, &st, &rwlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ CK_ELIDE_LOCK_ADAPTIVE(ck_rwlock_write, &st, &config, &rwlock);
+ CK_ELIDE_UNLOCK_ADAPTIVE(ck_rwlock_write, &st, &rwlock);
+ }
+ e_b = rdtsc();
+ printf(" (rtm-adaptive) WRITE: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+#endif /* CK_F_PR_RTM */
+
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_read_lock(&rwlock);
+ ck_rwlock_read_unlock(&rwlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_read_lock(&rwlock);
+ ck_rwlock_read_unlock(&rwlock);
+ }
+ e_b = rdtsc();
+ printf(" READ: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+#ifdef CK_F_PR_RTM
+ ck_elide_stat_init(&st);
+
+ for (i = 0; i < STEPS; i++) {
+ CK_ELIDE_LOCK(ck_rwlock_read, &rwlock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rwlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ CK_ELIDE_LOCK(ck_rwlock_read, &rwlock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rwlock);
+ }
+ e_b = rdtsc();
+ printf(" (rtm) READ: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ CK_ELIDE_LOCK_ADAPTIVE(ck_rwlock_read, &st, &config, &rwlock);
+ CK_ELIDE_UNLOCK_ADAPTIVE(ck_rwlock_read, &st, &rwlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ CK_ELIDE_LOCK_ADAPTIVE(ck_rwlock_read, &st, &config, &rwlock);
+ CK_ELIDE_UNLOCK_ADAPTIVE(ck_rwlock_read, &st, &rwlock);
+ }
+ e_b = rdtsc();
+ printf(" (rtm-adaptive) READ: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+#endif /* CK_F_PR_RTM */
+
+ return 0;
+}
+
diff --git a/regressions/ck_rwlock/benchmark/throughput.c b/regressions/ck_rwlock/benchmark/throughput.c
new file mode 100644
index 0000000..f57fbd8
--- /dev/null
+++ b/regressions/ck_rwlock/benchmark/throughput.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_rwlock.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+static int barrier;
+static int threads;
+static unsigned int flag CK_CC_CACHELINE;
+static struct {
+ ck_rwlock_t lock;
+} rw CK_CC_CACHELINE = {
+ .lock = CK_RWLOCK_INITIALIZER
+};
+
+static struct affinity affinity;
+
+#ifdef CK_F_PR_RTM
+static void *
+thread_lock_rtm(void *pun)
+{
+ uint64_t s_b, e_b, a, i;
+ uint64_t *value = pun;
+
+ if (aff_iterate(&affinity) != 0) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads)
+ ck_pr_stall();
+
+ for (i = 1, a = 0;; i++) {
+ s_b = rdtsc();
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ e_b = rdtsc();
+
+ a += (e_b - s_b) >> 4;
+
+ if (ck_pr_load_uint(&flag) == 1)
+ break;
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads * 2)
+ ck_pr_stall();
+
+ *value = (a / i);
+ return NULL;
+}
+#endif /* CK_F_PR_RTM */
+
+static void *
+thread_lock(void *pun)
+{
+ uint64_t s_b, e_b, a, i;
+ uint64_t *value = pun;
+
+ if (aff_iterate(&affinity) != 0) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads)
+ ck_pr_stall();
+
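+	/* Each sample covers 16 unrolled read lock/unlock pairs; the >> 4 below averages the cost per pair. */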
+ for (i = 1, a = 0;; i++) {
+ s_b = rdtsc();
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ e_b = rdtsc();
+
+ a += (e_b - s_b) >> 4;
+
+ if (ck_pr_load_uint(&flag) == 1)
+ break;
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads * 2)
+ ck_pr_stall();
+
+ *value = (a / i);
+ return NULL;
+}
+
+static void
+rwlock_test(pthread_t *p, int d, uint64_t *latency, void *(*f)(void *), const char *label)
+{
+ int t;
+
+ ck_pr_store_int(&barrier, 0);
+ ck_pr_store_uint(&flag, 0);
+
+ affinity.delta = d;
+ affinity.request = 0;
+
+ fprintf(stderr, "Creating threads (%s)...", label);
+ for (t = 0; t < threads; t++) {
+ if (pthread_create(&p[t], NULL, f, latency + t) != 0) {
+ ck_error("ERROR: Could not create thread %d\n", t);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ common_sleep(10);
+ ck_pr_store_uint(&flag, 1);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (t = 0; t < threads; t++)
+ pthread_join(p[t], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (t = 1; t <= threads; t++)
+ printf("%10u %20" PRIu64 "\n", t, latency[t - 1]);
+
+ fprintf(stderr, "\n");
+ return;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+ int d;
+ pthread_t *p;
+ uint64_t *latency;
+
+ if (argc != 3) {
+ ck_error("Usage: throughput <delta> <threads>\n");
+ }
+
+ threads = atoi(argv[2]);
+ if (threads <= 0) {
+ ck_error("ERROR: Threads must be a value > 0.\n");
+ }
+
+ p = malloc(sizeof(pthread_t) * threads);
+ if (p == NULL) {
+		ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ latency = malloc(sizeof(uint64_t) * threads);
+ if (latency == NULL) {
+ ck_error("ERROR: Failed to create latency buffer.\n");
+ }
+
+ d = atoi(argv[1]);
+ rwlock_test(p, d, latency, thread_lock, "rwlock");
+
+#ifdef CK_F_PR_RTM
+ rwlock_test(p, d, latency, thread_lock_rtm, "rwlock, rtm");
+#endif /* CK_F_PR_RTM */
+
+ return 0;
+}
+
diff --git a/regressions/ck_rwlock/validate/Makefile b/regressions/ck_rwlock/validate/Makefile
new file mode 100644
index 0000000..2c2116b
--- /dev/null
+++ b/regressions/ck_rwlock/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=validate
+
+all: $(OBJECTS)
+
+validate: validate.c ../../../include/ck_rwlock.h ../../../include/ck_elide.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate $(CORES) 1
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_rwlock/validate/validate.c b/regressions/ck_rwlock/validate/validate.c
new file mode 100644
index 0000000..8a32e08
--- /dev/null
+++ b/regressions/ck_rwlock/validate/validate.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_rwlock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+static struct affinity a;
+static unsigned int locked;
+static unsigned int tid = 2;
+static int nthr;
+static ck_rwlock_t lock = CK_RWLOCK_INITIALIZER;
+static ck_rwlock_recursive_t r_lock = CK_RWLOCK_RECURSIVE_INITIALIZER;
+
+static void *
+thread_recursive(void *null CK_CC_UNUSED)
+{
+ int i = ITERATE;
+ unsigned int l;
+ unsigned int t = ck_pr_faa_uint(&tid, 1);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
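+		/*
+		 * One trylock plus three recursive locks: four write acquisitions,
+		 * matched by the four unlocks below.
+		 */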
+ while (ck_rwlock_recursive_write_trylock(&r_lock, t) == false)
+ ck_pr_stall();
+
+ ck_rwlock_recursive_write_lock(&r_lock, t);
+ ck_rwlock_recursive_write_lock(&r_lock, t);
+ ck_rwlock_recursive_write_lock(&r_lock, t);
+
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_rwlock_recursive_write_unlock(&r_lock);
+ ck_rwlock_recursive_write_unlock(&r_lock);
+ ck_rwlock_recursive_write_unlock(&r_lock);
+ ck_rwlock_recursive_write_unlock(&r_lock);
+
+ ck_rwlock_recursive_read_lock(&r_lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_rwlock_recursive_read_unlock(&r_lock);
+ }
+
+ return (NULL);
+}
+
+#ifdef CK_F_PR_RTM
+static void *
+thread_rtm_adaptive(void *null CK_CC_UNUSED)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+ struct ck_elide_config config = CK_ELIDE_CONFIG_DEFAULT_INITIALIZER;
+ struct ck_elide_stat st = CK_ELIDE_STAT_INITIALIZER;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ CK_ELIDE_LOCK_ADAPTIVE(ck_rwlock_write, &st, &config, &lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_ELIDE_UNLOCK_ADAPTIVE(ck_rwlock_write, &st, &lock);
+
+ CK_ELIDE_LOCK(ck_rwlock_read, &lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &lock);
+ }
+
+ return NULL;
+}
+
+static void *
+thread_rtm_mix(void *null CK_CC_UNUSED)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ if (i & 1) {
+ CK_ELIDE_LOCK(ck_rwlock_write, &lock);
+ } else {
+ ck_rwlock_write_lock(&lock);
+ }
+
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+
+ if (i & 1) {
+ CK_ELIDE_UNLOCK(ck_rwlock_write, &lock);
+ } else {
+ ck_rwlock_write_unlock(&lock);
+ }
+
+ if (i & 1) {
+ CK_ELIDE_LOCK(ck_rwlock_read, &lock);
+ } else {
+ ck_rwlock_read_lock(&lock);
+ }
+
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+
+ if (i & 1) {
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &lock);
+ } else {
+ ck_rwlock_read_unlock(&lock);
+ }
+ }
+
+ return (NULL);
+}
+
+static void *
+thread_rtm(void *null CK_CC_UNUSED)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ CK_ELIDE_LOCK(ck_rwlock_write, &lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_ELIDE_UNLOCK(ck_rwlock_write, &lock);
+
+ CK_ELIDE_LOCK(ck_rwlock_read, &lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &lock);
+ }
+
+ return (NULL);
+}
+#endif /* CK_F_PR_RTM */
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ ck_rwlock_write_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_rwlock_write_unlock(&lock);
+
+ ck_rwlock_read_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_rwlock_read_unlock(&lock);
+ }
+
+ return (NULL);
+}
+
+static void
+rwlock_test(pthread_t *threads, void *(*f)(void *), const char *test)
+{
+ int i;
+
+ fprintf(stderr, "Creating threads (%s)...", test);
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, f, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, ".");
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+
+ if (argc != 3) {
+ ck_error("Usage: validate <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ rwlock_test(threads, thread, "regular");
+#ifdef CK_F_PR_RTM
+ rwlock_test(threads, thread_rtm, "rtm");
+ rwlock_test(threads, thread_rtm_mix, "rtm-mix");
+ rwlock_test(threads, thread_rtm_adaptive, "rtm-adaptive");
+#endif
+ rwlock_test(threads, thread_recursive, "recursive");
+ return 0;
+}
+
diff --git a/regressions/ck_sequence/benchmark/Makefile b/regressions/ck_sequence/benchmark/Makefile
new file mode 100644
index 0000000..5803a4d
--- /dev/null
+++ b/regressions/ck_sequence/benchmark/Makefile
@@ -0,0 +1,18 @@
+.PHONY: clean distribution
+
+OBJECTS=ck_sequence
+
+all: $(OBJECTS)
+
+ck_sequence: ck_sequence.c ../../../include/ck_sequence.h
+ $(CC) $(CFLAGS) -o ck_sequence ck_sequence.c
+
+check: all
+ ./ck_sequence $(CORES) 1
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
+
diff --git a/regressions/ck_sequence/benchmark/ck_sequence.c b/regressions/ck_sequence/benchmark/ck_sequence.c
new file mode 100644
index 0000000..f720c31
--- /dev/null
+++ b/regressions/ck_sequence/benchmark/ck_sequence.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_cc.h>
+#include <ck_sequence.h>
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS (65536 * 64)
+#endif
+
+static ck_sequence_t seqlock CK_CC_CACHELINE = CK_SEQUENCE_INITIALIZER;
+
+int
+main(void)
+{
+ unsigned int i = 0;
+ unsigned int version;
+ uint64_t a, s;
+
+ /* Read-side latency. */
+ a = 0;
+ for (i = 0; i < STEPS / 4; i++) {
+ s = rdtsc();
+ ck_sequence_read_retry(&seqlock, ck_sequence_read_begin(&seqlock));
+ ck_sequence_read_retry(&seqlock, ck_sequence_read_begin(&seqlock));
+ ck_sequence_read_retry(&seqlock, ck_sequence_read_begin(&seqlock));
+ ck_sequence_read_retry(&seqlock, ck_sequence_read_begin(&seqlock));
+ a += rdtsc() - s;
+ }
+ printf("read: %" PRIu64 "\n", a / STEPS);
+
+ a = 0;
+ for (i = 0; i < STEPS / 4; i++) {
+ s = rdtsc();
+ CK_SEQUENCE_READ(&seqlock, &version);
+ CK_SEQUENCE_READ(&seqlock, &version);
+ CK_SEQUENCE_READ(&seqlock, &version);
+ CK_SEQUENCE_READ(&seqlock, &version);
+ a += rdtsc() - s;
+ }
+	printf("READ: %" PRIu64 "\n", a / STEPS);
+
+ /* Write-side latency. */
+ a = 0;
+ for (i = 0; i < STEPS / 4; i++) {
+ s = rdtsc();
+ ck_sequence_write_begin(&seqlock);
+ ck_sequence_write_end(&seqlock);
+ ck_sequence_write_begin(&seqlock);
+ ck_sequence_write_end(&seqlock);
+ ck_sequence_write_begin(&seqlock);
+ ck_sequence_write_end(&seqlock);
+ ck_sequence_write_begin(&seqlock);
+ ck_sequence_write_end(&seqlock);
+ a += rdtsc() - s;
+ }
+ printf("write: %" PRIu64 "\n", a / STEPS);
+
+ return 0;
+}
+
diff --git a/regressions/ck_sequence/validate/Makefile b/regressions/ck_sequence/validate/Makefile
new file mode 100644
index 0000000..bc2e5be
--- /dev/null
+++ b/regressions/ck_sequence/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_sequence
+
+all: $(OBJECTS)
+
+ck_sequence: ck_sequence.c ../../../include/ck_sequence.h
+ $(CC) $(CFLAGS) -o ck_sequence ck_sequence.c
+
+check: all
+ ./ck_sequence $(CORES) 1
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_sequence/validate/ck_sequence.c b/regressions/ck_sequence/validate/ck_sequence.c
new file mode 100644
index 0000000..e0bc700
--- /dev/null
+++ b/regressions/ck_sequence/validate/ck_sequence.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_cc.h>
+#include <ck_sequence.h>
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+struct example {
+ unsigned int a;
+ unsigned int b;
+ unsigned int c;
+};
+
+static struct example global CK_CC_CACHELINE;
+static ck_sequence_t seqlock CK_CC_CACHELINE = CK_SEQUENCE_INITIALIZER;
+static unsigned int barrier;
+static struct affinity affinerator;
+
+static void
+validate(struct example *copy)
+{
+
+ if (copy->b != copy->a + 1000) {
+ ck_error("ERROR: Failed regression: copy->b (%u != %u + %u / %u)\n",
+ copy->b, copy->a, 1000, copy->a + 1000);
+ }
+
+ if (copy->c != copy->a + copy->b) {
+ ck_error("ERROR: Failed regression: copy->c (%u != %u + %u / %u)\n",
+ copy->c, copy->a, copy->b, copy->a + copy->b);
+ }
+
+ return;
+}
+
+static void *
+consumer(void *unused CK_CC_UNUSED)
+{
+ struct example copy;
+ uint32_t version;
+ unsigned int retries = 0;
+ unsigned int i;
+
+ unused = NULL;
+ if (aff_iterate(&affinerator)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (ck_pr_load_uint(&barrier) == 0);
+ for (i = 0; i < STEPS; i++) {
+ /*
+ * Attempt a read of the data structure. If the structure
+ * has been modified between ck_sequence_read_begin and
+ * ck_sequence_read_retry then attempt another read since
+ * the data may be in an inconsistent state.
+ */
+ do {
+ version = ck_sequence_read_begin(&seqlock);
+ copy.a = ck_pr_load_uint(&global.a);
+ copy.b = ck_pr_load_uint(&global.b);
+ copy.c = ck_pr_load_uint(&global.c);
+ retries++;
+ } while (ck_sequence_read_retry(&seqlock, version) == true);
+ validate(&copy);
+
+ CK_SEQUENCE_READ(&seqlock, &version) {
+ copy.a = ck_pr_load_uint(&global.a);
+ copy.b = ck_pr_load_uint(&global.b);
+ copy.c = ck_pr_load_uint(&global.c);
+ retries++;
+ }
+ validate(&copy);
+ }
+
+ fprintf(stderr, "%u retries.\n", retries - STEPS);
+ ck_pr_dec_uint(&barrier);
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ unsigned int counter = 0;
+ bool first = true;
+ int n_threads, i;
+
+ if (argc != 3) {
+ ck_error("Usage: ck_sequence <number of threads> <affinity delta>\n");
+ }
+
+ n_threads = atoi(argv[1]);
+ if (n_threads <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * n_threads);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate memory for threads\n");
+ }
+
+ affinerator.delta = atoi(argv[2]);
+ affinerator.request = 0;
+
+ for (i = 0; i < n_threads; i++) {
+ if (pthread_create(&threads[i], NULL, consumer, NULL)) {
+ ck_error("ERROR: Failed to create thread %d\n", i);
+ }
+ }
+
+ for (;;) {
+ /*
+ * Update the shared data in a non-blocking fashion.
+ * If the data is modified by multiple writers then
+ * ck_sequence_write_begin must be called after acquiring
+ * the associated lock and ck_sequence_write_end must be
+ * called before relinquishing the lock.
+ */
+ ck_sequence_write_begin(&seqlock);
+ global.a = counter++;
+ global.b = global.a + 1000;
+ global.c = global.b + global.a;
+ ck_sequence_write_end(&seqlock);
+
+ if (first == true) {
+ ck_pr_store_uint(&barrier, n_threads);
+ first = false;
+ }
+
+ counter++;
+ if (ck_pr_load_uint(&barrier) == 0)
+ break;
+ }
+
+ printf("%u updates made.\n", counter);
+ return (0);
+}
+
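The comments in this validator spell out the seqlock contract: readers loop until ck_sequence_read_retry reports a consistent snapshot, and a lone writer brackets updates with ck_sequence_write_begin/ck_sequence_write_end (multiple writers would need an external lock). A condensed two-thread sketch of that contract, using only calls that appear above (not part of the patch):

/* Condensed seqlock sketch: one writer, one retrying reader. */
#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_sequence.h>
#include <pthread.h>
#include <stdio.h>

static struct {
	unsigned int a;
	unsigned int b;
} shared = { 0, 1 };	/* invariant: b == a + 1 */
static ck_sequence_t sq = CK_SEQUENCE_INITIALIZER;

static void *
reader(void *arg CK_CC_UNUSED)
{
	unsigned int a, b, version;
	unsigned int i;

	for (i = 0; i < 1000000; i++) {
		/* Retry until the snapshot was not concurrently modified. */
		do {
			version = ck_sequence_read_begin(&sq);
			a = ck_pr_load_uint(&shared.a);
			b = ck_pr_load_uint(&shared.b);
		} while (ck_sequence_read_retry(&sq, version) == true);

		if (b != a + 1)
			fprintf(stderr, "inconsistent snapshot: %u %u\n", a, b);
	}

	return NULL;
}

int
main(void)
{
	pthread_t t;
	unsigned int i;

	if (pthread_create(&t, NULL, reader, NULL) != 0)
		return 1;

	for (i = 0; i < 1000000; i++) {
		/* Single writer: begin/end alone provide the required ordering. */
		ck_sequence_write_begin(&sq);
		shared.a = i;
		shared.b = i + 1;
		ck_sequence_write_end(&sq);
	}

	pthread_join(t, NULL);
	return 0;
}
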
diff --git a/regressions/ck_spinlock/benchmark/Makefile b/regressions/ck_spinlock/benchmark/Makefile
new file mode 100644
index 0000000..ca3e1cf
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/Makefile
@@ -0,0 +1,87 @@
+.PHONY: all clean
+
+OBJECTS=ck_ticket.THROUGHPUT ck_ticket.LATENCY \
+ ck_mcs.THROUGHPUT ck_mcs.LATENCY \
+ ck_dec.THROUGHPUT ck_dec.LATENCY \
+ ck_cas.THROUGHPUT ck_cas.LATENCY \
+ ck_fas.THROUGHPUT ck_fas.LATENCY \
+ ck_clh.THROUGHPUT ck_clh.LATENCY \
+ linux_spinlock.THROUGHPUT linux_spinlock.LATENCY \
+ ck_ticket_pb.THROUGHPUT ck_ticket_pb.LATENCY \
+ ck_anderson.THROUGHPUT ck_anderson.LATENCY \
+ ck_spinlock.THROUGHPUT ck_spinlock.LATENCY \
+ ck_hclh.THROUGHPUT ck_hclh.LATENCY
+
+all: $(OBJECTS)
+
+ck_spinlock.THROUGHPUT: ck_spinlock.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_spinlock.THROUGHPUT ck_spinlock.c -lm
+
+ck_spinlock.LATENCY: ck_spinlock.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_spinlock.LATENCY ck_spinlock.c -lm
+
+ck_ticket.THROUGHPUT: ck_ticket.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_ticket.THROUGHPUT ck_ticket.c -lm
+
+ck_ticket.LATENCY: ck_ticket.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_ticket.LATENCY ck_ticket.c -lm
+
+ck_mcs.THROUGHPUT: ck_mcs.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_mcs.THROUGHPUT ck_mcs.c -lm
+
+ck_mcs.LATENCY: ck_mcs.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_mcs.LATENCY ck_mcs.c -lm
+
+ck_dec.THROUGHPUT: ck_dec.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_dec.THROUGHPUT ck_dec.c -lm
+
+ck_dec.LATENCY: ck_dec.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_dec.LATENCY ck_dec.c -lm
+
+ck_cas.THROUGHPUT: ck_cas.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_cas.THROUGHPUT ck_cas.c -lm
+
+ck_cas.LATENCY: ck_cas.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_cas.LATENCY ck_cas.c -lm
+
+ck_fas.THROUGHPUT: ck_fas.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_fas.THROUGHPUT ck_fas.c -lm
+
+ck_fas.LATENCY: ck_fas.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_fas.LATENCY ck_fas.c -lm
+
+ck_clh.THROUGHPUT: ck_clh.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_clh.THROUGHPUT ck_clh.c -lm
+
+ck_clh.LATENCY: ck_clh.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_clh.LATENCY ck_clh.c -lm
+
+ck_hclh.THROUGHPUT: ck_hclh.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_hclh.THROUGHPUT ck_hclh.c -lm
+
+ck_hclh.LATENCY: ck_hclh.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_hclh.LATENCY ck_hclh.c -lm
+
+linux_spinlock.THROUGHPUT: linux_spinlock.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o linux_spinlock.THROUGHPUT linux_spinlock.c -lm
+
+linux_spinlock.LATENCY: linux_spinlock.c
+ $(CC) -DLATENCY $(CFLAGS) -o linux_spinlock.LATENCY linux_spinlock.c -lm
+
+ck_ticket_pb.THROUGHPUT: ck_ticket_pb.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_ticket_pb.THROUGHPUT ck_ticket_pb.c -lm
+
+ck_ticket_pb.LATENCY: ck_ticket_pb.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_ticket_pb.LATENCY ck_ticket_pb.c -lm
+
+ck_anderson.THROUGHPUT: ck_anderson.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_anderson.THROUGHPUT ck_anderson.c -lm
+
+ck_anderson.LATENCY: ck_anderson.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_anderson.LATENCY ck_anderson.c -lm
+
+clean:
+ rm -rf *.dSYM *.exe $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_spinlock/benchmark/ck_anderson.c b/regressions/ck_spinlock/benchmark/ck_anderson.c
new file mode 100644
index 0000000..2f1aecd
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_anderson.c
@@ -0,0 +1,8 @@
+#include "../ck_anderson.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
+
diff --git a/regressions/ck_spinlock/benchmark/ck_cas.c b/regressions/ck_spinlock/benchmark/ck_cas.c
new file mode 100644
index 0000000..96bd9d8
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_cas.c
@@ -0,0 +1,8 @@
+#include "../ck_cas.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
+
diff --git a/regressions/ck_spinlock/benchmark/ck_clh.c b/regressions/ck_spinlock/benchmark/ck_clh.c
new file mode 100644
index 0000000..da71d5e
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_clh.c
@@ -0,0 +1,7 @@
+#include "../ck_clh.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_spinlock/benchmark/ck_dec.c b/regressions/ck_spinlock/benchmark/ck_dec.c
new file mode 100644
index 0000000..115c116
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_dec.c
@@ -0,0 +1,7 @@
+#include "../ck_dec.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_spinlock/benchmark/ck_fas.c b/regressions/ck_spinlock/benchmark/ck_fas.c
new file mode 100644
index 0000000..c76c964
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_fas.c
@@ -0,0 +1,7 @@
+#include "../ck_fas.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_spinlock/benchmark/ck_hclh.c b/regressions/ck_spinlock/benchmark/ck_hclh.c
new file mode 100644
index 0000000..9ae443e
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_hclh.c
@@ -0,0 +1,7 @@
+#include "../ck_hclh.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_spinlock/benchmark/ck_mcs.c b/regressions/ck_spinlock/benchmark/ck_mcs.c
new file mode 100644
index 0000000..c2e95de
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_mcs.c
@@ -0,0 +1,7 @@
+#include "../ck_mcs.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_spinlock/benchmark/ck_spinlock.c b/regressions/ck_spinlock/benchmark/ck_spinlock.c
new file mode 100644
index 0000000..138541e
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_spinlock.c
@@ -0,0 +1,7 @@
+#include "../ck_spinlock.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_spinlock/benchmark/ck_ticket.c b/regressions/ck_spinlock/benchmark/ck_ticket.c
new file mode 100644
index 0000000..09c9193
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_ticket.c
@@ -0,0 +1,8 @@
+#include "../ck_ticket.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
+
diff --git a/regressions/ck_spinlock/benchmark/ck_ticket_pb.c b/regressions/ck_spinlock/benchmark/ck_ticket_pb.c
new file mode 100644
index 0000000..6122d6a
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_ticket_pb.c
@@ -0,0 +1,7 @@
+#include "../ck_ticket_pb.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_spinlock/benchmark/latency.h b/regressions/ck_spinlock/benchmark/latency.h
new file mode 100644
index 0000000..afadcd2
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/latency.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_bytelock.h>
+#include <ck_spinlock.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 30000000
+#endif
+
+LOCK_DEFINE;
+
+int
+main(void)
+{
+ CK_CC_UNUSED unsigned int nthr = 1;
+
+ #ifdef LOCK_INIT
+ LOCK_INIT;
+ #endif
+
+ #ifdef LOCK_STATE
+ LOCK_STATE;
+ #endif
+
+ uint64_t s_b, e_b, i;
+ CK_CC_UNUSED int core = 0;
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; ++i) {
+ #ifdef LOCK
+ LOCK;
+ UNLOCK;
+ LOCK;
+ UNLOCK;
+ LOCK;
+ UNLOCK;
+ LOCK;
+ UNLOCK;
+ #endif
+ }
+ e_b = rdtsc();
+ printf("%15" PRIu64 "\n", (e_b - s_b) / 4 / STEPS);
+
+ return (0);
+}
+
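latency.h is lock-agnostic: LOCK_DEFINE, LOCK and UNLOCK are supplied by one of the headers under regressions/ck_spinlock/, and the loop above times four lock/unlock pairs per iteration with the rdtsc() helper from the regressions' common.h. As a rough, self-contained equivalent for one concrete lock, the sketch below times ck_spinlock_fas with POSIX clock_gettime instead of rdtsc (an assumption made only to keep the example free of the harness headers; it is not part of the patch):

/* Sketch: uncontended lock/unlock latency for ck_spinlock_fas. */
#include <ck_spinlock.h>
#include <stdio.h>
#include <time.h>

#define STEPS 30000000ULL

static ck_spinlock_fas_t lock = CK_SPINLOCK_FAS_INITIALIZER;

static double
now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1e9 + ts.tv_nsec;
}

int
main(void)
{
	unsigned long long i;
	double start, end;

	start = now_ns();
	for (i = 0; i < STEPS; i++) {
		/* Uncontended acquire/release; contention is exercised by throughput.h. */
		ck_spinlock_fas_lock(&lock);
		ck_spinlock_fas_unlock(&lock);
	}
	end = now_ns();

	printf("%.2f ns per lock/unlock pair\n", (end - start) / STEPS);
	return 0;
}
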
diff --git a/regressions/ck_spinlock/benchmark/linux_spinlock.c b/regressions/ck_spinlock/benchmark/linux_spinlock.c
new file mode 100644
index 0000000..954019b
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/linux_spinlock.c
@@ -0,0 +1,7 @@
+#include "../linux_spinlock.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_spinlock/benchmark/throughput.h b/regressions/ck_spinlock/benchmark/throughput.h
new file mode 100644
index 0000000..7851c50
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/throughput.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright 2008-2012 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_spinlock.h>
+
+#include "../../common.h"
+
+/* 8! = 40320, evenly divide 1 .. 8 processor workload. */
+#define WORKLOAD (40320 * 2056)
+
+#ifndef ITERATE
+#define ITERATE 65536
+#endif
+
+struct block {
+ unsigned int tid;
+};
+
+static struct affinity a;
+static unsigned int ready;
+
+struct counters {
+ uint64_t value;
+} CK_CC_CACHELINE;
+
+static struct counters *count;
+static uint64_t nthr;
+static unsigned int barrier;
+
+int critical __attribute__((aligned(64)));
+
+LOCK_DEFINE;
+
+CK_CC_USED static void
+gen_lock(void)
+{
+ CK_CC_UNUSED int core = 0;
+#ifdef LOCK_STATE
+ LOCK_STATE;
+#endif
+
+#ifdef LOCK
+ LOCK;
+#endif
+}
+
+CK_CC_USED static void
+gen_unlock(void)
+{
+#ifdef LOCK_STATE
+ LOCK_STATE;
+#endif
+
+#ifdef UNLOCK
+ UNLOCK;
+#endif
+}
+
+static void *
+fairness(void *null)
+{
+#ifdef LOCK_STATE
+ LOCK_STATE;
+#endif
+ struct block *context = null;
+ unsigned int i = context->tid;
+ volatile int j;
+ long int base;
+ unsigned int core;
+
+ if (aff_iterate_core(&a, &core)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (ck_pr_load_uint(&ready) == 0);
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) != nthr);
+
+ while (ready) {
+ LOCK;
+
+ count[i].value++;
+ if (critical) {
+ base = common_lrand48() % critical;
+ for (j = 0; j < base; j++);
+ }
+
+ UNLOCK;
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ uint64_t v, d;
+ unsigned int i;
+ pthread_t *threads;
+ struct block *context;
+
+ if (argc != 4) {
+ ck_error("Usage: " LOCK_NAME " <number of threads> <affinity delta> <critical section>\n");
+ exit(EXIT_FAILURE);
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ exit(EXIT_FAILURE);
+ }
+
+#ifdef LOCK_INIT
+ LOCK_INIT;
+#endif
+
+ critical = atoi(argv[3]);
+ if (critical < 0) {
+ ck_error("ERROR: critical section cannot be negative\n");
+ exit(EXIT_FAILURE);
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ exit(EXIT_FAILURE);
+ }
+
+ context = malloc(sizeof(struct block) * nthr);
+ if (context == NULL) {
+ ck_error("ERROR: Could not allocate thread contexts\n");
+ exit(EXIT_FAILURE);
+ }
+
+ a.delta = atoi(argv[2]);
+ a.request = 0;
+
+ count = malloc(sizeof(*count) * nthr);
+ if (count == NULL) {
+ ck_error("ERROR: Could not create acquisition buffer\n");
+ exit(EXIT_FAILURE);
+ }
+ memset(count, 0, sizeof(*count) * nthr);
+
+ fprintf(stderr, "Creating threads (fairness)...");
+ for (i = 0; i < nthr; i++) {
+ context[i].tid = i;
+ if (pthread_create(&threads[i], NULL, fairness, context + i)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ exit(EXIT_FAILURE);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ ck_pr_store_uint(&ready, 1);
+ common_sleep(10);
+ ck_pr_store_uint(&ready, 0);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (i = 0, v = 0; i < nthr; i++) {
+ printf("%d %15" PRIu64 "\n", i, count[i].value);
+ v += count[i].value;
+ }
+
+ printf("\n# total : %15" PRIu64 "\n", v);
+ printf("# throughput : %15" PRIu64 " a/s\n", (v /= nthr) / 10);
+
+ for (i = 0, d = 0; i < nthr; i++)
+ d += (count[i].value - v) * (count[i].value - v);
+
+ printf("# average : %15" PRIu64 "\n", v);
+ printf("# deviation : %.2f (%.2f%%)\n\n", sqrt(d / nthr), (sqrt(d / nthr) / v) * 100.00);
+
+ return (0);
+}
+
diff --git a/regressions/ck_spinlock/ck_anderson.h b/regressions/ck_spinlock/ck_anderson.h
new file mode 100644
index 0000000..7dc8e6e
--- /dev/null
+++ b/regressions/ck_spinlock/ck_anderson.h
@@ -0,0 +1,11 @@
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#define LOCK_NAME "ck_anderson"
+#define LOCK_DEFINE static ck_spinlock_anderson_t lock CK_CC_CACHELINE
+#define LOCK_STATE ck_spinlock_anderson_thread_t *nad = NULL
+#define LOCK ck_spinlock_anderson_lock(&lock, &nad)
+#define UNLOCK ck_spinlock_anderson_unlock(&lock, nad)
+#define LOCK_INIT ck_spinlock_anderson_init(&lock, malloc(MAX(64,sizeof(ck_spinlock_anderson_thread_t)) * nthr), nthr)
+#define LOCKED ck_spinlock_anderson_locked(&lock)
+
+#define NO_LOCAL
+
diff --git a/regressions/ck_spinlock/ck_cas.h b/regressions/ck_spinlock/ck_cas.h
new file mode 100644
index 0000000..bd4ae13
--- /dev/null
+++ b/regressions/ck_spinlock/ck_cas.h
@@ -0,0 +1,6 @@
+#define LOCK_NAME "ck_cas"
+#define LOCK_DEFINE static ck_spinlock_cas_t CK_CC_CACHELINE lock = CK_SPINLOCK_CAS_INITIALIZER
+#define LOCK ck_spinlock_cas_lock_eb(&lock)
+#define UNLOCK ck_spinlock_cas_unlock(&lock)
+#define LOCKED ck_spinlock_cas_locked(&lock)
+
diff --git a/regressions/ck_spinlock/ck_clh.h b/regressions/ck_spinlock/ck_clh.h
new file mode 100644
index 0000000..df7e49f
--- /dev/null
+++ b/regressions/ck_spinlock/ck_clh.h
@@ -0,0 +1,9 @@
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#define LOCK_NAME "ck_clh"
+#define LOCK_DEFINE static ck_spinlock_clh_t CK_CC_CACHELINE *lock = NULL
+#define LOCK_STATE ck_spinlock_clh_t *na = malloc(MAX(sizeof(ck_spinlock_clh_t), 64))
+#define LOCK ck_spinlock_clh_lock(&lock, na)
+#define UNLOCK ck_spinlock_clh_unlock(&na)
+#define LOCK_INIT ck_spinlock_clh_init(&lock, malloc(MAX(sizeof(ck_spinlock_clh_t), 64)))
+#define LOCKED ck_spinlock_clh_locked(&lock)
+
diff --git a/regressions/ck_spinlock/ck_dec.h b/regressions/ck_spinlock/ck_dec.h
new file mode 100644
index 0000000..c21a390
--- /dev/null
+++ b/regressions/ck_spinlock/ck_dec.h
@@ -0,0 +1,6 @@
+#define LOCK_NAME "ck_dec"
+#define LOCK_DEFINE static ck_spinlock_dec_t CK_CC_CACHELINE lock = CK_SPINLOCK_DEC_INITIALIZER
+#define LOCK ck_spinlock_dec_lock_eb(&lock)
+#define UNLOCK ck_spinlock_dec_unlock(&lock)
+#define LOCKED ck_spinlock_dec_locked(&lock)
+
diff --git a/regressions/ck_spinlock/ck_fas.h b/regressions/ck_spinlock/ck_fas.h
new file mode 100644
index 0000000..e244746
--- /dev/null
+++ b/regressions/ck_spinlock/ck_fas.h
@@ -0,0 +1,6 @@
+#define LOCK_NAME "ck_fas"
+#define LOCK_DEFINE static ck_spinlock_fas_t CK_CC_CACHELINE lock = CK_SPINLOCK_FAS_INITIALIZER
+#define LOCK ck_spinlock_fas_lock_eb(&lock)
+#define UNLOCK ck_spinlock_fas_unlock(&lock)
+#define LOCKED ck_spinlock_fas_locked(&lock)
+
diff --git a/regressions/ck_spinlock/ck_hclh.h b/regressions/ck_spinlock/ck_hclh.h
new file mode 100644
index 0000000..eb2e6eb
--- /dev/null
+++ b/regressions/ck_spinlock/ck_hclh.h
@@ -0,0 +1,16 @@
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#define LOCK_NAME "ck_clh"
+#define LOCK_DEFINE static ck_spinlock_hclh_t CK_CC_CACHELINE *glob_lock; \
+ static ck_spinlock_hclh_t CK_CC_CACHELINE *local_lock[CORES / 2]
+#define LOCK_STATE ck_spinlock_hclh_t *na = malloc(MAX(sizeof(ck_spinlock_hclh_t), 64))
+#define LOCK ck_spinlock_hclh_lock(&glob_lock, &local_lock[(core % CORES) / 2], na)
+#define UNLOCK ck_spinlock_hclh_unlock(&na)
+#define LOCK_INIT do { \
+ int _i; \
+ ck_spinlock_hclh_init(&glob_lock, malloc(MAX(sizeof(ck_spinlock_hclh_t), 64)), -1); \
+ for (_i = 0; _i < CORES / 2; _i++) { \
+ ck_spinlock_hclh_init(&local_lock[_i], malloc(MAX(sizeof(ck_spinlock_hclh_t), 64)), _i); } \
+} while (0)
+
+#define LOCKED ck_spinlock_hclh_locked(&glob_lock)
+
diff --git a/regressions/ck_spinlock/ck_mcs.h b/regressions/ck_spinlock/ck_mcs.h
new file mode 100644
index 0000000..dd127df
--- /dev/null
+++ b/regressions/ck_spinlock/ck_mcs.h
@@ -0,0 +1,7 @@
+#define LOCK_NAME "ck_mcs"
+#define LOCK_DEFINE static ck_spinlock_mcs_t CK_CC_CACHELINE lock = NULL
+#define LOCK_STATE ck_spinlock_mcs_context_t node CK_CC_CACHELINE;
+#define LOCK ck_spinlock_mcs_lock(&lock, &node)
+#define UNLOCK ck_spinlock_mcs_unlock(&lock, &node)
+#define LOCKED ck_spinlock_mcs_locked(&lock)
+
diff --git a/regressions/ck_spinlock/ck_spinlock.h b/regressions/ck_spinlock/ck_spinlock.h
new file mode 100644
index 0000000..938e1ce
--- /dev/null
+++ b/regressions/ck_spinlock/ck_spinlock.h
@@ -0,0 +1,6 @@
+#define LOCK_NAME "ck_spinlock"
+#define LOCK_DEFINE static ck_spinlock_t CK_CC_CACHELINE lock = CK_SPINLOCK_INITIALIZER
+#define LOCK ck_spinlock_lock_eb(&lock)
+#define UNLOCK ck_spinlock_unlock(&lock)
+#define LOCKED ck_spinlock_locked(&lock)
+
diff --git a/regressions/ck_spinlock/ck_ticket.h b/regressions/ck_spinlock/ck_ticket.h
new file mode 100644
index 0000000..39054a6
--- /dev/null
+++ b/regressions/ck_spinlock/ck_ticket.h
@@ -0,0 +1,11 @@
+#include <ck_spinlock.h>
+
+#define LOCK_NAME "ck_ticket"
+#define LOCK_DEFINE static ck_spinlock_ticket_t CK_CC_CACHELINE lock = CK_SPINLOCK_TICKET_INITIALIZER
+#define LOCK ck_spinlock_ticket_lock(&lock)
+#define UNLOCK ck_spinlock_ticket_unlock(&lock)
+#ifdef CK_F_SPINLOCK_TICKET_TRYLOCK
+#define TRYLOCK ck_spinlock_ticket_trylock(&lock)
+#endif
+#define LOCKED ck_spinlock_ticket_locked(&lock)
+
diff --git a/regressions/ck_spinlock/ck_ticket_pb.h b/regressions/ck_spinlock/ck_ticket_pb.h
new file mode 100644
index 0000000..b8a7a84
--- /dev/null
+++ b/regressions/ck_spinlock/ck_ticket_pb.h
@@ -0,0 +1,6 @@
+#define LOCK_NAME "ck_ticket_pb"
+#define LOCK_DEFINE static ck_spinlock_ticket_t CK_CC_CACHELINE lock = CK_SPINLOCK_TICKET_INITIALIZER
+#define LOCK ck_spinlock_ticket_lock_pb(&lock, 0)
+#define UNLOCK ck_spinlock_ticket_unlock(&lock)
+#define LOCKED ck_spinlock_ticket_locked(&lock)
+
diff --git a/regressions/ck_spinlock/linux_spinlock.h b/regressions/ck_spinlock/linux_spinlock.h
new file mode 100644
index 0000000..5fe1f3e
--- /dev/null
+++ b/regressions/ck_spinlock/linux_spinlock.h
@@ -0,0 +1,39 @@
+#include <ck_cc.h>
+
+CK_CC_INLINE static void
+spin_lock(volatile unsigned int *lock)
+{
+#ifdef __x86_64__
+ __asm__ __volatile__(
+ "\n1:\t"
+ "lock ; decl %0\n\t"
+ "jns 2f\n"
+ "3:\n"
+ "rep;nop\n\t"
+ "cmpl $0,%0\n\t"
+ "jle 3b\n\t"
+ "jmp 1b\n"
+ "2:\t" : "=m" (*lock) : : "memory");
+#else
+ *lock = 1;
+#endif
+
+ return;
+}
+
+CK_CC_INLINE static void
+spin_unlock(volatile unsigned int *lock)
+{
+#ifdef __x86_64__
+ __asm__ __volatile__("movl $1,%0" :"=m" (*lock) :: "memory");
+#else
+ *lock = 0;
+ return;
+#endif
+}
+
+#define LOCK_NAME "linux_spinlock"
+#define LOCK_DEFINE volatile unsigned int lock = 1
+#define LOCK spin_lock(&lock)
+#define UNLOCK spin_unlock(&lock)
+
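Every header in regressions/ck_spinlock/ implements the same small macro contract so that validate.h, latency.h and throughput.h can be compiled against any lock: LOCK_NAME, LOCK_DEFINE, LOCK and UNLOCK are mandatory, while LOCK_STATE, LOCK_INIT, LOCKED, TRYLOCK and NO_LOCAL are optional and guarded with #ifdef in the harnesses. Purely as an illustration of that contract, a hypothetical adapter wrapping a plain POSIX mutex could look like this (it is not part of the suite):

/* Hypothetical pthread_mutex adapter; shown only to illustrate the macro contract. */
#include <pthread.h>

#define LOCK_NAME "pthread_mutex"
#define LOCK_DEFINE static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER
#define LOCK pthread_mutex_lock(&lock)
#define UNLOCK pthread_mutex_unlock(&lock)
/* TRYLOCK must evaluate to true once the lock is held. */
#define TRYLOCK (pthread_mutex_trylock(&lock) == 0)
/* LOCKED is omitted: pthreads has no portable "is locked" query. */
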
diff --git a/regressions/ck_spinlock/validate/Makefile b/regressions/ck_spinlock/validate/Makefile
new file mode 100644
index 0000000..b1d7cba
--- /dev/null
+++ b/regressions/ck_spinlock/validate/Makefile
@@ -0,0 +1,57 @@
+.PHONY: check clean
+
+all: ck_ticket ck_mcs ck_dec ck_cas ck_fas ck_clh linux_spinlock \
+ ck_ticket_pb ck_anderson ck_spinlock ck_hclh
+
+check: all
+ ./ck_ticket $(CORES) 1
+ ./ck_mcs $(CORES) 1
+ ./ck_dec $(CORES) 1
+ ./ck_cas $(CORES) 1
+ ./ck_fas $(CORES) 1
+ ./ck_clh $(CORES) 1
+ ./ck_hclh $(CORES) 1
+ ./linux_spinlock $(CORES) 1
+ ./ck_ticket_pb $(CORES) 1
+ ./ck_anderson $(CORES) 1
+ ./ck_spinlock $(CORES) 1
+
+linux_spinlock: linux_spinlock.c
+ $(CC) $(CFLAGS) -o linux_spinlock linux_spinlock.c
+
+ck_spinlock: ck_spinlock.c
+ $(CC) $(CFLAGS) -o ck_spinlock ck_spinlock.c
+
+ck_ticket_pb: ck_ticket_pb.c
+ $(CC) $(CFLAGS) -o ck_ticket_pb ck_ticket_pb.c
+
+ck_clh: ck_clh.c
+ $(CC) $(CFLAGS) -o ck_clh ck_clh.c
+
+ck_hclh: ck_hclh.c
+ $(CC) $(CFLAGS) -o ck_hclh ck_hclh.c
+
+ck_anderson: ck_anderson.c
+ $(CC) $(CFLAGS) -o ck_anderson ck_anderson.c
+
+ck_fas: ck_fas.c
+ $(CC) $(CFLAGS) -o ck_fas ck_fas.c
+
+ck_ticket: ck_ticket.c
+ $(CC) $(CFLAGS) -o ck_ticket ck_ticket.c
+
+ck_cas: ck_cas.c
+ $(CC) $(CFLAGS) -o ck_cas ck_cas.c
+
+ck_mcs: ck_mcs.c
+ $(CC) $(CFLAGS) -o ck_mcs ck_mcs.c
+
+ck_dec: ck_dec.c
+ $(CC) $(CFLAGS) -o ck_dec ck_dec.c
+
+clean:
+ rm -rf ck_ticket ck_mcs ck_dec ck_cas ck_fas ck_clh linux_spinlock ck_ticket_pb \
+ ck_anderson ck_spinlock ck_hclh *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE -lm
diff --git a/regressions/ck_spinlock/validate/ck_anderson.c b/regressions/ck_spinlock/validate/ck_anderson.c
new file mode 100644
index 0000000..b10900c
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_anderson.c
@@ -0,0 +1,2 @@
+#include "../ck_anderson.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_cas.c b/regressions/ck_spinlock/validate/ck_cas.c
new file mode 100644
index 0000000..162490a
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_cas.c
@@ -0,0 +1,2 @@
+#include "../ck_cas.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_clh.c b/regressions/ck_spinlock/validate/ck_clh.c
new file mode 100644
index 0000000..19cb512
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_clh.c
@@ -0,0 +1,2 @@
+#include "../ck_clh.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_dec.c b/regressions/ck_spinlock/validate/ck_dec.c
new file mode 100644
index 0000000..fd351de
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_dec.c
@@ -0,0 +1,2 @@
+#include "../ck_dec.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_fas.c b/regressions/ck_spinlock/validate/ck_fas.c
new file mode 100644
index 0000000..5cf4071
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_fas.c
@@ -0,0 +1,2 @@
+#include "../ck_fas.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_hclh.c b/regressions/ck_spinlock/validate/ck_hclh.c
new file mode 100644
index 0000000..001f57b
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_hclh.c
@@ -0,0 +1,2 @@
+#include "../ck_hclh.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_mcs.c b/regressions/ck_spinlock/validate/ck_mcs.c
new file mode 100644
index 0000000..7adad43
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_mcs.c
@@ -0,0 +1,2 @@
+#include "../ck_mcs.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_spinlock.c b/regressions/ck_spinlock/validate/ck_spinlock.c
new file mode 100644
index 0000000..e682905
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_spinlock.c
@@ -0,0 +1,2 @@
+#include "../ck_spinlock.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_ticket.c b/regressions/ck_spinlock/validate/ck_ticket.c
new file mode 100644
index 0000000..be67254
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_ticket.c
@@ -0,0 +1,2 @@
+#include "../ck_ticket.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_ticket_pb.c b/regressions/ck_spinlock/validate/ck_ticket_pb.c
new file mode 100644
index 0000000..e62ee0e
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_ticket_pb.c
@@ -0,0 +1,2 @@
+#include "../ck_ticket_pb.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/linux_spinlock.c b/regressions/ck_spinlock/validate/linux_spinlock.c
new file mode 100644
index 0000000..781e419
--- /dev/null
+++ b/regressions/ck_spinlock/validate/linux_spinlock.c
@@ -0,0 +1,14 @@
+#ifdef __x86_64__
+#include "../linux_spinlock.h"
+#include "validate.h"
+#else
+#include <stdio.h>
+
+int
+main(void)
+{
+
+ fprintf(stderr, "Unsupported.\n");
+ return 0;
+}
+#endif
diff --git a/regressions/ck_spinlock/validate/validate.h b/regressions/ck_spinlock/validate/validate.h
new file mode 100644
index 0000000..df40584
--- /dev/null
+++ b/regressions/ck_spinlock/validate/validate.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <ck_spinlock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+struct block {
+ unsigned int tid;
+};
+
+static struct affinity a;
+static unsigned int locked = 0;
+static uint64_t nthr;
+
+LOCK_DEFINE;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+#ifdef LOCK_STATE
+ LOCK_STATE;
+#endif
+ unsigned int i = ITERATE;
+ unsigned int j;
+ unsigned int core;
+
+ if (aff_iterate_core(&a, &core)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+#ifdef TRYLOCK
+ if (i & 1) {
+ LOCK;
+ } else {
+ while (TRYLOCK == false)
+ ck_pr_stall();
+ }
+#else
+ LOCK;
+#endif
+
+#ifdef LOCKED
+ if (LOCKED == false)
+			ck_error("is_locked operation failed.\n");
+#endif
+
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+
+ j = ck_pr_load_uint(&locked);
+
+ if (j != 10) {
+ ck_error("ERROR (WR): Race condition (%u)\n", j);
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+
+ UNLOCK;
+ LOCK;
+
+ j = ck_pr_load_uint(&locked);
+ if (j != 0) {
+ ck_error("ERROR (RD): Race condition (%u)\n", j);
+ exit(EXIT_FAILURE);
+ }
+
+ UNLOCK;
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ uint64_t i;
+ pthread_t *threads;
+
+ if (argc != 3) {
+ ck_error("Usage: " LOCK_NAME " <number of threads> <affinity delta>\n");
+ exit(EXIT_FAILURE);
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ exit(EXIT_FAILURE);
+ }
+
+#ifdef LOCK_INIT
+ LOCK_INIT;
+#endif
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ exit(EXIT_FAILURE);
+ }
+
+ a.delta = atoi(argv[2]);
+ a.request = 0;
+
+ fprintf(stderr, "Creating threads (mutual exclusion)...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %" PRIu64 "\n", i);
+ exit(EXIT_FAILURE);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return (0);
+}
+
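The mutual-exclusion check above is lock-agnostic in the same way: each thread acquires the lock (alternating with the TRYLOCK path when the header defines one), walks the shared counter up to 10, verifies it, walks it back down, and re-checks under a fresh acquisition; any broken exclusion shows up as a counter other than 10 or 0. A stand-alone sketch of the TRYLOCK idiom, using the ck_spinlock_ticket calls named in ck_ticket.h above (not part of the patch):

/* Sketch: acquire by spinning on trylock, as the TRYLOCK branch in validate.h does. */
#include <ck_pr.h>
#include <ck_spinlock.h>
#include <stdio.h>

static ck_spinlock_ticket_t lock = CK_SPINLOCK_TICKET_INITIALIZER;
static unsigned int counter;

int
main(void)
{

#ifdef CK_F_SPINLOCK_TICKET_TRYLOCK
	/* Spin politely until the ticket is acquired. */
	while (ck_spinlock_ticket_trylock(&lock) == false)
		ck_pr_stall();
#else
	ck_spinlock_ticket_lock(&lock);
#endif
	counter++;
	ck_spinlock_ticket_unlock(&lock);

	printf("counter = %u\n", counter);
	return 0;
}
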
diff --git a/regressions/ck_stack/benchmark/Makefile b/regressions/ck_stack/benchmark/Makefile
new file mode 100644
index 0000000..6e2df2a
--- /dev/null
+++ b/regressions/ck_stack/benchmark/Makefile
@@ -0,0 +1,14 @@
+.PHONY: clean distribution
+
+OBJECTS=latency
+
+all: $(OBJECTS)
+
+latency: latency.c
+ $(CC) $(CFLAGS) -o latency latency.c
+
+clean:
+ rm -rf *~ *.o *.dSYM *.exe $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_stack/benchmark/latency.c b/regressions/ck_stack/benchmark/latency.c
new file mode 100644
index 0000000..867151c
--- /dev/null
+++ b/regressions/ck_stack/benchmark/latency.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_stack.h>
+#include <ck_spinlock.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#ifndef ENTRIES
+#define ENTRIES 4096
+#endif
+
+#ifndef STEPS
+#define STEPS 40000
+#endif
+
+/*
+ * Note the redundant post-increment of r. This is to silence
+ * some irrelevant GCC warnings.
+ */
+
+static ck_stack_t stack CK_CC_CACHELINE;
+
+int
+main(void)
+{
+ ck_stack_entry_t entry[ENTRIES];
+ ck_spinlock_fas_t mutex = CK_SPINLOCK_FAS_INITIALIZER;
+ volatile ck_stack_entry_t * volatile r;
+ uint64_t s, e, a;
+ unsigned int i;
+ unsigned int j;
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++) {
+ ck_spinlock_fas_lock(&mutex);
+ ck_stack_push_spnc(&stack, entry + j);
+ ck_spinlock_fas_unlock(&mutex);
+ }
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf(" spinlock_push: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ for (j = 0; j < ENTRIES; j++)
+ ck_stack_push_spnc(&stack, entry + j);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++) {
+ ck_spinlock_fas_lock(&mutex);
+ r = ck_stack_pop_npsc(&stack);
+ ck_spinlock_fas_unlock(&mutex);
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ printf(" spinlock_pop: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+ r++;
+
+#ifdef CK_F_STACK_PUSH_UPMC
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_stack_push_upmc(&stack, entry + j);
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf("ck_stack_push_upmc: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+#endif /* CK_F_STACK_PUSH_UPMC */
+
+#ifdef CK_F_STACK_PUSH_MPMC
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_stack_push_mpmc(&stack, entry + j);
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf("ck_stack_push_mpmc: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+#endif /* CK_F_STACK_PUSH_MPMC */
+
+#ifdef CK_F_STACK_PUSH_MPNC
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_stack_push_mpnc(&stack, entry + j);
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf("ck_stack_push_mpnc: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+#endif /* CK_F_STACK_PUSH_MPNC */
+
+#if defined(CK_F_STACK_PUSH_UPMC) && defined(CK_F_STACK_POP_UPMC)
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ for (j = 0; j < ENTRIES; j++)
+ ck_stack_push_upmc(&stack, entry + j);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ r = ck_stack_pop_upmc(&stack);
+ e = rdtsc();
+ a += e - s;
+ }
+ printf(" ck_stack_pop_upmc: %16" PRIu64 "\n", a / STEPS / (sizeof(entry) / sizeof(*entry)));
+#endif /* CK_F_STACK_PUSH_UPMC && CK_F_STACK_POP_UPMC */
+
+#if defined(CK_F_STACK_POP_MPMC) && defined(CK_F_STACK_PUSH_MPMC)
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ for (j = 0; j < ENTRIES; j++)
+ ck_stack_push_mpmc(&stack, entry + j);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ r = ck_stack_pop_mpmc(&stack);
+ e = rdtsc();
+ a += e - s;
+ }
+ printf(" ck_stack_pop_mpmc: %16" PRIu64 "\n", a / STEPS / (sizeof(entry) / sizeof(*entry)));
+ r++;
+#endif
+
+ return 0;
+}
diff --git a/regressions/ck_stack/validate/Makefile b/regressions/ck_stack/validate/Makefile
new file mode 100644
index 0000000..519dca1
--- /dev/null
+++ b/regressions/ck_stack/validate/Makefile
@@ -0,0 +1,56 @@
+.PHONY: check clean distribution
+
+OBJECTS=serial mpnc_push mpmc_push upmc_push spinlock_push spinlock_eb_push \
+ mpmc_pop upmc_pop spinlock_pop spinlock_eb_pop \
+ upmc_trypop mpmc_trypop mpmc_trypair \
+ mpmc_pair spinlock_pair spinlock_eb_pair pthreads_pair \
+ mpmc_trypush upmc_trypush
+
+all: $(OBJECTS)
+
+check: all
+ ./serial
+ ./mpmc_pair $(CORES) 1 0
+ ./upmc_trypop $(CORES) 1 0
+ ./mpmc_trypop $(CORES) 1 0
+ ./mpmc_trypair $(CORES) 1 0
+ ./mpmc_pop $(CORES) 1 0
+ ./upmc_pop $(CORES) 1 0
+ ./mpnc_push $(CORES) 1 0
+ ./mpmc_push $(CORES) 1 0
+ ./upmc_push $(CORES) 1 0
+ ./mpmc_trypush $(CORES) 1 0
+ ./upmc_trypush $(CORES) 1 0
+
+serial: serial.c
+ $(CC) $(CFLAGS) -o serial serial.c
+
+mpmc_trypush upmc_trypush mpnc_push mpmc_push upmc_push spinlock_push spinlock_eb_push: push.c
+ $(CC) -DTRYUPMC $(CFLAGS) -o upmc_trypush push.c
+ $(CC) -DTRYMPMC $(CFLAGS) -o mpmc_trypush push.c
+ $(CC) -DMPNC $(CFLAGS) -o mpnc_push push.c
+ $(CC) -DMPMC $(CFLAGS) -o mpmc_push push.c
+ $(CC) -DUPMC $(CFLAGS) -o upmc_push push.c
+ $(CC) -DSPINLOCK $(CFLAGS) -o spinlock_push push.c
+ $(CC) -DSPINLOCK -DEB $(CFLAGS) -o spinlock_eb_push push.c
+
+upmc_trypop mpmc_trypop mpmc_pop tryupmc_pop upmc_pop spinlock_pop spinlock_eb_pop: pop.c
+ $(CC) -DTRYMPMC $(CFLAGS) -o mpmc_trypop pop.c
+ $(CC) -DTRYUPMC $(CFLAGS) -o upmc_trypop pop.c
+ $(CC) -DMPMC $(CFLAGS) -o mpmc_pop pop.c
+ $(CC) -DUPMC $(CFLAGS) -o upmc_pop pop.c
+ $(CC) -DSPINLOCK $(CFLAGS) -o spinlock_pop pop.c
+ $(CC) -DEB -DSPINLOCK $(CFLAGS) -o spinlock_eb_pop pop.c
+
+mpmc_trypair mpmc_pair spinlock_pair spinlock_eb_pair pthreads_pair: pair.c
+ $(CC) -DTRYMPMC $(CFLAGS) -o mpmc_trypair pair.c
+ $(CC) -DMPMC $(CFLAGS) -o mpmc_pair pair.c
+ $(CC) -DSPINLOCK $(CFLAGS) -o spinlock_pair pair.c
+ $(CC) -DEB -DSPINLOCK $(CFLAGS) -o spinlock_eb_pair pair.c
+ $(CC) -DPTHREADS $(CFLAGS) -o pthreads_pair pair.c
+
+clean:
+ rm -rf *~ *.o *.dSYM *.exe $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_stack/validate/pair.c b/regressions/ck_stack/validate/pair.c
new file mode 100644
index 0000000..c0f1bb1
--- /dev/null
+++ b/regressions/ck_stack/validate/pair.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#ifdef SPINLOCK
+#include <ck_spinlock.h>
+#endif
+#include <ck_stack.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <pthread.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#ifndef ITEMS
+#define ITEMS (5765760)
+#endif
+
+#define TVTOD(tv) ((tv).tv_sec+((tv).tv_usec / (double)1000000))
+
+struct entry {
+ int value;
+#if defined(SPINLOCK) || defined(PTHREADS)
+ struct entry *next;
+#else
+ ck_stack_entry_t next;
+#endif
+} CK_CC_CACHELINE;
+
+#ifdef SPINLOCK
+static struct entry *stack CK_CC_CACHELINE;
+ck_spinlock_fas_t stack_spinlock = CK_SPINLOCK_FAS_INITIALIZER;
+#define UNLOCK ck_spinlock_fas_unlock
+#if defined(EB)
+#define LOCK ck_spinlock_fas_lock_eb
+#else
+#define LOCK ck_spinlock_fas_lock
+#endif
+#elif defined(PTHREADS)
+static struct entry *stack CK_CC_CACHELINE;
+pthread_mutex_t stack_spinlock = PTHREAD_MUTEX_INITIALIZER;
+#define LOCK pthread_mutex_lock
+#define UNLOCK pthread_mutex_unlock
+#else
+static ck_stack_t stack CK_CC_CACHELINE;
+CK_STACK_CONTAINER(struct entry, next, getvalue)
+#endif
+
+static struct affinity affinerator;
+static unsigned long long nthr;
+static volatile unsigned int barrier = 0;
+static unsigned int critical;
+
+static void *
+stack_thread(void *buffer)
+{
+#if (defined(MPMC) && defined(CK_F_STACK_POP_MPMC)) || (defined(UPMC) && defined(CK_F_STACK_POP_UPMC)) || (defined(TRYUPMC) && defined(CK_F_STACK_TRYPOP_UPMC)) || (defined(TRYMPMC) && defined(CK_F_STACK_TRYPOP_MPMC))
+ ck_stack_entry_t *ref;
+#endif
+ struct entry *entry = buffer;
+ unsigned long long i, n = ITEMS;
+ unsigned int seed;
+ int j;
+
+ if (aff_iterate(&affinerator)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (barrier == 0);
+
+ for (i = 0; i < n; i++) {
+#if defined(MPMC)
+ ck_stack_push_mpmc(&stack, &entry->next);
+#elif defined(TRYMPMC)
+ while (ck_stack_trypush_mpmc(&stack, &entry->next) == false)
+ ck_pr_stall();
+#elif defined(UPMC)
+ ck_stack_push_upmc(&stack, &entry->next);
+#elif defined(TRYUPMC)
+ while (ck_stack_trypush_upmc(&stack, &entry->next) == false)
+ ck_pr_stall();
+#elif defined(SPINLOCK) || defined(PTHREADS)
+ LOCK(&stack_spinlock);
+ ck_pr_store_ptr(&entry->next, stack);
+ ck_pr_store_ptr(&stack, entry);
+ UNLOCK(&stack_spinlock);
+#else
+# error Undefined operation.
+#endif
+
+ if (critical) {
+ j = common_rand_r(&seed) % critical;
+ while (j--)
+ __asm__ __volatile__("" ::: "memory");
+ }
+
+#if defined(MPMC)
+#ifdef CK_F_STACK_POP_MPMC
+ ref = ck_stack_pop_mpmc(&stack);
+ entry = getvalue(ref);
+#endif
+#elif defined(TRYMPMC)
+#ifdef CK_F_STACK_TRYPOP_MPMC
+ while (ck_stack_trypop_mpmc(&stack, &ref) == false)
+ ck_pr_stall();
+ entry = getvalue(ref);
+#endif /* CK_F_STACK_TRYPOP_MPMC */
+#elif defined(UPMC)
+ ref = ck_stack_pop_upmc(&stack);
+ entry = getvalue(ref);
+#elif defined(SPINLOCK) || defined(PTHREADS)
+ LOCK(&stack_spinlock);
+ entry = stack;
+ stack = stack->next;
+ UNLOCK(&stack_spinlock);
+#else
+# error Undefined operation.
+#endif
+ }
+
+ return (NULL);
+}
+
+static void
+stack_assert(void)
+{
+
+#if defined(SPINLOCK) || defined(PTHREADS)
+ assert(stack == NULL);
+#else
+ assert(CK_STACK_ISEMPTY(&stack));
+#endif
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ struct entry *bucket;
+ unsigned long long i, d;
+ pthread_t *thread;
+ struct timeval stv, etv;
+
+#if (defined(TRYMPMC) || defined(MPMC)) && (!defined(CK_F_STACK_PUSH_MPMC) || !defined(CK_F_STACK_POP_MPMC))
+ fprintf(stderr, "Unsupported.\n");
+ return 0;
+#endif
+
+ if (argc != 4) {
+ ck_error("Usage: stack <threads> <delta> <critical>\n");
+ }
+
+ {
+ char *e;
+
+ nthr = strtol(argv[1], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: too many threads");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+
+ d = strtol(argv[2], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: delta is too large");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+
+ critical = strtoul(argv[3], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: critical section is too large");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+ }
+
+ srand(getpid());
+
+ affinerator.request = 0;
+ affinerator.delta = d;
+
+ bucket = malloc(sizeof(struct entry) * nthr);
+ assert(bucket != NULL);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread != NULL);
+
+ for (i = 0; i < nthr; i++)
+ pthread_create(&thread[i], NULL, stack_thread, bucket + i);
+
+ barrier = 1;
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ barrier = 0;
+
+ for (i = 0; i < nthr; i++)
+ pthread_create(&thread[i], NULL, stack_thread, bucket + i);
+
+ common_gettimeofday(&stv, NULL);
+ barrier = 1;
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+ common_gettimeofday(&etv, NULL);
+
+ stack_assert();
+#ifdef _WIN32
+ printf("%3llu %.6f\n", nthr, TVTOD(etv) - TVTOD(stv));
+#else
+ printf("%3llu %.6lf\n", nthr, TVTOD(etv) - TVTOD(stv));
+#endif
+ return 0;
+}
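In its lock-free builds, pair.c keeps the stack entries intrusive: struct entry embeds a ck_stack_entry_t, and the getvalue() accessor generated by CK_STACK_CONTAINER maps a popped pointer back to its enclosing entry. A single-threaded sketch of that container pattern, using the serial push/pop variants that also appear in the benchmark above (not part of the patch):

/* Sketch: intrusive ck_stack entries recovered through CK_STACK_CONTAINER. */
#include <ck_stack.h>
#include <stdio.h>

struct node {
	int value;
	ck_stack_entry_t next;		/* intrusive link stored inside the node */
};

/* Generates getvalue(): ck_stack_entry_t * -> enclosing struct node *. */
CK_STACK_CONTAINER(struct node, next, getvalue)

static ck_stack_t stack;

int
main(void)
{
	struct node nodes[4];
	ck_stack_entry_t *ref;
	int i;

	ck_stack_init(&stack);
	for (i = 0; i < 4; i++) {
		nodes[i].value = i;
		ck_stack_push_spnc(&stack, &nodes[i].next);	/* serial, no-contention push */
	}

	/* LIFO order: prints 3, 2, 1, 0. */
	for (i = 0; i < 4; i++) {
		ref = ck_stack_pop_npsc(&stack);		/* serial pop */
		printf("%d\n", getvalue(ref)->value);
	}

	return 0;
}
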
diff --git a/regressions/ck_stack/validate/pop.c b/regressions/ck_stack/validate/pop.c
new file mode 100644
index 0000000..0d69d42
--- /dev/null
+++ b/regressions/ck_stack/validate/pop.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#ifdef SPINLOCK
+#include <ck_spinlock.h>
+#endif
+#include <ck_stack.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <pthread.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#ifndef ITEMS
+#define ITEMS (5765760 * 2)
+#endif
+
+#define TVTOD(tv) ((tv).tv_sec+((tv).tv_usec / (double)1000000))
+
+struct entry {
+ int value;
+#ifdef SPINLOCK
+ struct entry *next;
+#else
+ ck_stack_entry_t next;
+#endif
+};
+
+#ifdef SPINLOCK
+static struct entry *stack CK_CC_CACHELINE;
+ck_spinlock_fas_t stack_spinlock = CK_SPINLOCK_FAS_INITIALIZER;
+#define UNLOCK ck_spinlock_fas_unlock
+#if defined(EB)
+#define LOCK ck_spinlock_fas_lock_eb
+#else
+#define LOCK ck_spinlock_fas_lock
+#endif
+#else
+static ck_stack_t stack CK_CC_CACHELINE;
+CK_STACK_CONTAINER(struct entry, next, getvalue)
+#endif
+
+static struct affinity affinerator = AFFINITY_INITIALIZER;
+static unsigned long long nthr;
+static volatile unsigned int barrier = 0;
+static unsigned int critical;
+
+static void *
+stack_thread(void *unused CK_CC_UNUSED)
+{
+#if (defined(MPMC) && defined(CK_F_STACK_POP_MPMC)) || (defined(UPMC) && defined(CK_F_STACK_POP_UPMC)) || (defined(TRYMPMC) && defined(CK_F_STACK_TRYPOP_MPMC)) || (defined(TRYUPMC) && defined(CK_F_STACK_TRYPOP_UPMC))
+ ck_stack_entry_t *ref;
+#endif
+ struct entry *entry = NULL;
+ unsigned long long i, n = ITEMS / nthr;
+ unsigned int seed;
+ int j, previous = INT_MAX;
+
+ if (aff_iterate(&affinerator)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (barrier == 0);
+
+ for (i = 0; i < n; i++) {
+#ifdef MPMC
+#ifdef CK_F_STACK_POP_MPMC
+ ref = ck_stack_pop_mpmc(&stack);
+ assert(ref);
+ entry = getvalue(ref);
+#endif /* CK_F_STACK_POP_MPMC */
+#elif defined(TRYMPMC)
+#ifdef CK_F_STACK_TRYPOP_MPMC
+ while (ck_stack_trypop_mpmc(&stack, &ref) == false)
+ ck_pr_stall();
+ assert(ref);
+ entry = getvalue(ref);
+#endif /* CK_F_STACK_TRYPOP_MPMC */
+#elif defined(UPMC)
+ ref = ck_stack_pop_upmc(&stack);
+ assert(ref);
+ entry = getvalue(ref);
+#elif defined(TRYUPMC)
+ while (ck_stack_trypop_upmc(&stack, &ref) == false)
+ ck_pr_stall();
+ assert(ref);
+ entry = getvalue(ref);
+#elif defined(SPINLOCK)
+ LOCK(&stack_spinlock);
+ entry = stack;
+ stack = stack->next;
+ UNLOCK(&stack_spinlock);
+#else
+# error Undefined operation.
+#endif
+
+ if (critical) {
+ j = common_rand_r(&seed) % critical;
+ while (j--)
+ __asm__ __volatile__("" ::: "memory");
+ }
+
+ assert (previous >= entry->value);
+ previous = entry->value;
+ }
+
+ return (NULL);
+}
+
+static void
+stack_assert(void)
+{
+
+#ifdef SPINLOCK
+ assert(stack == NULL);
+#else
+ assert(CK_STACK_ISEMPTY(&stack));
+#endif
+ return;
+}
+
+static void
+push_stack(struct entry *bucket)
+{
+ unsigned long long i;
+
+#ifdef SPINLOCK
+ stack = NULL;
+#else
+ ck_stack_init(&stack);
+#endif
+
+ for (i = 0; i < ITEMS; i++) {
+ bucket[i].value = i % INT_MAX;
+#ifdef SPINLOCK
+ bucket[i].next = stack;
+ stack = bucket + i;
+#else
+ ck_stack_push_spnc(&stack, &bucket[i].next);
+#endif
+ }
+
+#ifndef SPINLOCK
+ ck_stack_entry_t *entry;
+ i = 0;
+ CK_STACK_FOREACH(&stack, entry) {
+ i++;
+ }
+ assert(i == ITEMS);
+#endif
+
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ struct entry *bucket;
+ unsigned long long i, d;
+ pthread_t *thread;
+ struct timeval stv, etv;
+
+#if (defined(TRYMPMC) || defined(MPMC)) && (!defined(CK_F_STACK_PUSH_MPMC) || !defined(CK_F_STACK_POP_MPMC))
+ fprintf(stderr, "Unsupported.\n");
+ return 0;
+#endif
+
+ if (argc != 4) {
+ ck_error("Usage: stack <threads> <delta> <critical>\n");
+ }
+
+ {
+ char *e;
+
+ nthr = strtol(argv[1], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: too many threads");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+
+ d = strtol(argv[2], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: delta is too large");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+
+ critical = strtoul(argv[3], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: critical section is too large");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+ }
+
+ srand(getpid());
+
+ affinerator.delta = d;
+ bucket = malloc(sizeof(struct entry) * ITEMS);
+ assert(bucket != NULL);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread != NULL);
+
+ push_stack(bucket);
+ for (i = 0; i < nthr; i++)
+ pthread_create(&thread[i], NULL, stack_thread, NULL);
+
+ barrier = 1;
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ barrier = 0;
+
+ push_stack(bucket);
+ for (i = 0; i < nthr; i++)
+ pthread_create(&thread[i], NULL, stack_thread, NULL);
+
+ common_gettimeofday(&stv, NULL);
+ barrier = 1;
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+ common_gettimeofday(&etv, NULL);
+
+ stack_assert();
+#ifdef _WIN32
+ printf("%3llu %.6f\n", nthr, TVTOD(etv) - TVTOD(stv));
+#else
+ printf("%3llu %.6lf\n", nthr, TVTOD(etv) - TVTOD(stv));
+#endif
+ return 0;
+}
diff --git a/regressions/ck_stack/validate/push.c b/regressions/ck_stack/validate/push.c
new file mode 100644
index 0000000..2b3ea33
--- /dev/null
+++ b/regressions/ck_stack/validate/push.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <ck_pr.h>
+#ifdef SPINLOCK
+#include <ck_spinlock.h>
+#endif
+#include <ck_stack.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <pthread.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#ifndef ITEMS
+#define ITEMS (5765760 * 2)
+#endif
+
+#define TVTOD(tv) ((tv).tv_sec+((tv).tv_usec / (double)1000000))
+
+struct entry {
+ int value;
+#ifdef SPINLOCK
+ struct entry *next;
+#else
+ ck_stack_entry_t next;
+#endif
+};
+
+#ifdef SPINLOCK
+static struct entry *stack CK_CC_CACHELINE;
+#else
+static ck_stack_t stack CK_CC_CACHELINE;
+#endif
+
+CK_STACK_CONTAINER(struct entry, next, getvalue)
+
+static struct affinity affinerator = AFFINITY_INITIALIZER;
+static unsigned long long nthr;
+static volatile unsigned int barrier = 0;
+static unsigned int critical;
+
+#if defined(SPINLOCK)
+ck_spinlock_fas_t stack_spinlock = CK_SPINLOCK_FAS_INITIALIZER;
+#define UNLOCK ck_spinlock_fas_unlock
+#if defined(EB)
+#define LOCK ck_spinlock_fas_lock_eb
+#else
+#define LOCK ck_spinlock_fas_lock
+#endif
+#elif defined(PTHREAD) || defined(PTHREADS)
+pthread_mutex_t stack_spinlock = PTHREAD_MUTEX_INITIALIZER;
+#define LOCK pthread_mutex_lock
+#define UNLOCK pthread_mutex_unlock
+#endif
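+/*
+ * When built with SPINLOCK or PTHREAD(S), pushes below go through the
+ * LOCK/UNLOCK macros defined above instead of the lock-free ck_stack entry
+ * points.
+ */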
+
+static void *
+stack_thread(void *buffer)
+{
+ struct entry *bucket = buffer;
+ unsigned long long i, n = ITEMS / nthr;
+ unsigned int seed = 0;
+ int j;
+
+ if (aff_iterate(&affinerator)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (barrier == 0);
+
+ for (i = 0; i < n; i++) {
+ bucket[i].value = (i + 1) * 2;
+
+#if defined(MPNC)
+ ck_stack_push_mpnc(&stack, &bucket[i].next);
+#elif defined(MPMC)
+ ck_stack_push_mpmc(&stack, &bucket[i].next);
+#elif defined(TRYMPMC)
+ while (ck_stack_trypush_mpmc(&stack, &bucket[i].next) == false)
+ ck_pr_stall();
+#elif defined(TRYUPMC)
+ while (ck_stack_trypush_upmc(&stack, &bucket[i].next) == false)
+ ck_pr_stall();
+#elif defined(UPMC)
+ ck_stack_push_upmc(&stack, &bucket[i].next);
+#elif defined(SPINLOCK) || defined(PTHREAD) || defined(PTHREADS)
+ LOCK(&stack_spinlock);
+ bucket[i].next = stack;
+ stack = bucket + i;
+ UNLOCK(&stack_spinlock);
+#else
+# error Undefined operation.
+#endif
+
+ if (critical) {
+ j = common_rand_r(&seed) % critical;
+ while (j--)
+ __asm__ __volatile__("" ::: "memory");
+ }
+ }
+
+ return (NULL);
+}
+
+static void
+stack_assert(void)
+{
+#ifndef SPINLOCK
+ ck_stack_entry_t *n;
+#endif
+ struct entry *p;
+ unsigned long long c = 0;
+
+#ifdef SPINLOCK
+ for (p = stack; p; p = p->next)
+ c++;
+#else
+ CK_STACK_FOREACH(&stack, n) {
+ p = getvalue(n);
+ (void)((volatile struct entry *)p)->value;
+ c++;
+ }
+#endif
+
+ assert(c == ITEMS);
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ struct entry *bucket;
+ unsigned long long i, d, n;
+ pthread_t *thread;
+ struct timeval stv, etv;
+
+ if (argc != 4) {
+ ck_error("Usage: stack <threads> <delta> <critical>\n");
+ }
+
+ {
+ char *e;
+
+ nthr = strtol(argv[1], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: too many threads");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+
+ d = strtol(argv[2], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: delta is too large");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+
+ critical = strtoul(argv[3], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: critical section is too large");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+ }
+
+ srand(getpid());
+
+ affinerator.request = 0;
+ affinerator.delta = d;
+ n = ITEMS / nthr;
+
+#ifndef SPINLOCK
+ ck_stack_init(&stack);
+#else
+ stack = NULL;
+#endif
+
+ bucket = malloc(sizeof(struct entry) * ITEMS);
+ assert(bucket != NULL);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread != NULL);
+
+ for (i = 0; i < nthr; i++)
+ pthread_create(&thread[i], NULL, stack_thread, bucket + i * n);
+
+ barrier = 1;
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ barrier = 0;
+
+#ifndef SPINLOCK
+ ck_stack_init(&stack);
+#else
+ stack = NULL;
+#endif
+
+ for (i = 0; i < nthr; i++)
+ pthread_create(&thread[i], NULL, stack_thread, bucket + i * n);
+
+ common_gettimeofday(&stv, NULL);
+ barrier = 1;
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+ common_gettimeofday(&etv, NULL);
+
+ stack_assert();
+#ifdef _WIN32
+ printf("%3llu %.6f\n", nthr, TVTOD(etv) - TVTOD(stv));
+#else
+ printf("%3llu %.6lf\n", nthr, TVTOD(etv) - TVTOD(stv));
+#endif
+ return 0;
+}
diff --git a/regressions/ck_stack/validate/serial.c b/regressions/ck_stack/validate/serial.c
new file mode 100644
index 0000000..eb667ca
--- /dev/null
+++ b/regressions/ck_stack/validate/serial.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include <ck_stack.h>
+
+#ifndef SIZE
+#define SIZE 1024000
+#endif
+
+struct entry {
+ int value;
+ ck_stack_entry_t next;
+};
+
+CK_STACK_CONTAINER(struct entry, next, get_entry)
+
+#define LOOP(PUSH, POP) \
+ for (i = 0; i < SIZE; i++) { \
+ entries[i].value = i; \
+ PUSH(stack, &entries[i].next); \
+ } \
+ for (i = SIZE - 1; i >= 0; i--) { \
+ entry = POP(stack); \
+ assert(entry); \
+ assert(get_entry(entry)->value == i); \
+ }
+
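+/*
+ * LOOP (above) pushes SIZE entries carrying ascending values with PUSH and
+ * then pops them all with POP, asserting strict LIFO order.  serial()
+ * exercises each push/pop pairing that is valid for a single-threaded
+ * caller.
+ */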
+static void
+serial(ck_stack_t *stack)
+{
+ struct entry *entries;
+ ck_stack_entry_t *entry;
+ int i;
+
+ ck_stack_init(stack);
+
+ entries = malloc(sizeof(struct entry) * SIZE);
+ assert(entries != NULL);
+
+ LOOP(ck_stack_push_upmc, ck_stack_pop_upmc);
+#ifdef CK_F_STACK_POP_MPMC
+ LOOP(ck_stack_push_mpmc, ck_stack_pop_mpmc);
+#endif
+ LOOP(ck_stack_push_mpnc, ck_stack_pop_upmc);
+ LOOP(ck_stack_push_spnc, ck_stack_pop_npsc);
+
+ return;
+}
+
+int
+main(void)
+{
+ ck_stack_t stack CK_CC_CACHELINE;
+
+ serial(&stack);
+ return (0);
+}
diff --git a/regressions/ck_swlock/benchmark/Makefile b/regressions/ck_swlock/benchmark/Makefile
new file mode 100644
index 0000000..4ec728c
--- /dev/null
+++ b/regressions/ck_swlock/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: clean distribution
+
+OBJECTS=latency throughput
+
+all: $(OBJECTS)
+
+latency: latency.c ../../../include/ck_swlock.h
+ $(CC) $(CFLAGS) -o latency latency.c
+
+throughput: throughput.c ../../../include/ck_swlock.h
+ $(CC) $(CFLAGS) -o throughput throughput.c
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_swlock/benchmark/latency.c b/regressions/ck_swlock/benchmark/latency.c
new file mode 100644
index 0000000..73a9482
--- /dev/null
+++ b/regressions/ck_swlock/benchmark/latency.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_swlock.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#define CK_F_PR_RTM
+
+#ifndef STEPS
+#define STEPS 2000000
+#endif
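+/*
+ * Each measurement below does one untimed pass (effectively a warm-up) and
+ * then times a second identical pass with rdtsc(), reporting the average
+ * number of cycles per acquire/release pair.
+ */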
+
+int
+main(void)
+{
+ uint64_t s_b, e_b, i;
+ ck_swlock_t swlock = CK_SWLOCK_INITIALIZER;
+
+ for (i = 0; i < STEPS; i++) {
+ ck_swlock_write_lock(&swlock);
+ ck_swlock_write_unlock(&swlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_swlock_write_lock(&swlock);
+ ck_swlock_write_unlock(&swlock);
+ }
+ e_b = rdtsc();
+ printf(" WRITE: swlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_swlock_read_lock(&swlock);
+ ck_swlock_read_unlock(&swlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_swlock_read_lock(&swlock);
+ ck_swlock_read_unlock(&swlock);
+ }
+ e_b = rdtsc();
+ printf(" READ: swlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_swlock_write_latch(&swlock);
+ ck_swlock_write_unlatch(&swlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_swlock_write_latch(&swlock);
+ ck_swlock_write_unlatch(&swlock);
+ }
+ e_b = rdtsc();
+ printf(" LATCH: swlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ return 0;
+}
+
diff --git a/regressions/ck_swlock/benchmark/throughput.c b/regressions/ck_swlock/benchmark/throughput.c
new file mode 100644
index 0000000..5b05365
--- /dev/null
+++ b/regressions/ck_swlock/benchmark/throughput.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_swlock.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+static int barrier;
+static int threads;
+static unsigned int flag CK_CC_CACHELINE;
+static struct {
+ ck_swlock_t lock;
+} rw CK_CC_CACHELINE = {
+ .lock = CK_SWLOCK_INITIALIZER
+};
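+/*
+ * CK_CC_CACHELINE pads the lock out to its own cache line so that the
+ * figures below reflect lock traffic rather than false sharing with the
+ * flag and barrier counters declared nearby.
+ */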
+
+static struct affinity affinity;
+
+static void *
+thread_lock(void *pun)
+{
+ uint64_t s_b, e_b, a, i;
+ uint64_t *value = pun;
+
+ if (aff_iterate(&affinity) != 0) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads)
+ ck_pr_stall();
+
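+ /*
+ * The body below is 16 unrolled read lock/unlock pairs; the ">> 4" after
+ * the rdtsc() bracket converts the elapsed cycles into cycles per pair.
+ */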
+ for (i = 1, a = 0;; i++) {
+ s_b = rdtsc();
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ e_b = rdtsc();
+
+ a += (e_b - s_b) >> 4;
+
+ if (ck_pr_load_uint(&flag) == 1)
+ break;
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads * 2)
+ ck_pr_stall();
+
+ *value = (a / i);
+ return NULL;
+}
+
+static void
+swlock_test(pthread_t *p, int d, uint64_t *latency, void *(*f)(void *), const char *label)
+{
+ int t;
+
+ ck_pr_store_int(&barrier, 0);
+ ck_pr_store_uint(&flag, 0);
+
+ affinity.delta = d;
+ affinity.request = 0;
+
+ fprintf(stderr, "Creating threads (%s)...", label);
+ for (t = 0; t < threads; t++) {
+ if (pthread_create(&p[t], NULL, f, latency + t) != 0) {
+ ck_error("ERROR: Could not create thread %d\n", t);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ common_sleep(10);
+ ck_pr_store_uint(&flag, 1);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (t = 0; t < threads; t++)
+ pthread_join(p[t], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (t = 1; t <= threads; t++)
+ printf("%10u %20" PRIu64 "\n", t, latency[t - 1]);
+
+ fprintf(stderr, "\n");
+ return;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+ int d;
+ pthread_t *p;
+ uint64_t *latency;
+
+ if (argc != 3) {
+ ck_error("Usage: throughput <delta> <threads>\n");
+ }
+
+ threads = atoi(argv[2]);
+ if (threads <= 0) {
+ ck_error("ERROR: Threads must be a value > 0.\n");
+ }
+
+ p = malloc(sizeof(pthread_t) * threads);
+ if (p == NULL) {
+ ck_error("ERROR: Failed to initialize thread.\n");
+ }
+
+ latency = malloc(sizeof(uint64_t) * threads);
+ if (latency == NULL) {
+ ck_error("ERROR: Failed to create latency buffer.\n");
+ }
+
+ d = atoi(argv[1]);
+ swlock_test(p, d, latency, thread_lock, "swlock");
+
+ return 0;
+}
+
diff --git a/regressions/ck_swlock/validate/Makefile b/regressions/ck_swlock/validate/Makefile
new file mode 100644
index 0000000..54d62f2
--- /dev/null
+++ b/regressions/ck_swlock/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=validate
+
+all: $(OBJECTS)
+
+validate: validate.c ../../../include/ck_swlock.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate $(CORES) 1
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_swlock/validate/validate.c b/regressions/ck_swlock/validate/validate.c
new file mode 100644
index 0000000..11366ce
--- /dev/null
+++ b/regressions/ck_swlock/validate/validate.c
@@ -0,0 +1,455 @@
+/*
+ * Copyright 2014 Jaidev Sridhar.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_swlock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+static struct affinity a;
+static unsigned int locked;
+static int nthr;
+static ck_swlock_t lock = CK_SWLOCK_INITIALIZER;
+static ck_swlock_t copy;
+#ifdef CK_F_PR_RTM
+static void *
+thread_rtm_adaptive(void *arg)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+ int tid = ck_pr_load_int(arg);
+
+ struct ck_elide_config config = CK_ELIDE_CONFIG_DEFAULT_INITIALIZER;
+ struct ck_elide_stat st = CK_ELIDE_STAT_INITIALIZER;
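+ /*
+ * CK_ELIDE_LOCK_ADAPTIVE attempts each critical section as a hardware
+ * (RTM) transaction and falls back to the real ck_swlock write lock when
+ * transactions abort too often, as governed by the elide config/stat pair
+ * declared above.
+ */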
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ if (tid == 0) {
+ CK_ELIDE_LOCK_ADAPTIVE(ck_swlock_write, &st, &config, &lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+ ck_error("ERROR [WR:%d]: %u != 2\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_ELIDE_UNLOCK_ADAPTIVE(ck_swlock_write, &st, &lock);
+ }
+
+ CK_ELIDE_LOCK(ck_swlock_read, &lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_ELIDE_UNLOCK(ck_swlock_read, &lock);
+ }
+
+ return NULL;
+}
+
+static void *
+thread_rtm_mix(void *arg)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+ int tid = ck_pr_load_int(arg);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ if (tid == 0) {
+ if (i & 1) {
+ CK_ELIDE_LOCK(ck_swlock_write, &lock);
+ } else {
+ ck_swlock_write_lock(&lock);
+ }
+
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+ ck_error("ERROR [WR:%d]: %u != 2\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+
+ if (i & 1) {
+ CK_ELIDE_UNLOCK(ck_swlock_write, &lock);
+ } else {
+ ck_swlock_write_unlock(&lock);
+ }
+ }
+ if (i & 1) {
+ CK_ELIDE_LOCK(ck_swlock_read, &lock);
+ } else {
+ ck_swlock_read_lock(&lock);
+ }
+
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+
+ if (i & 1) {
+ CK_ELIDE_UNLOCK(ck_swlock_read, &lock);
+ } else {
+ ck_swlock_read_unlock(&lock);
+ }
+ }
+
+ return (NULL);
+}
+
+static void *
+thread_rtm(void *arg)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+ int tid = ck_pr_load_int(arg);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ if (tid == 0) {
+ CK_ELIDE_LOCK(ck_swlock_write, &lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+ ck_error("ERROR [WR:%d]: %u != 2\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_ELIDE_UNLOCK(ck_swlock_write, &lock);
+ }
+
+ CK_ELIDE_LOCK(ck_swlock_read, &lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_ELIDE_UNLOCK(ck_swlock_read, &lock);
+ }
+
+ return (NULL);
+}
+#endif /* CK_F_PR_RTM */
+
+static void *
+thread_latch(void *arg)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+ int tid = ck_pr_load_int(arg);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ if (tid == 0) {
+ /* Writer */
+ ck_swlock_write_latch(&lock);
+ {
+ memcpy(&copy, &lock, sizeof(ck_swlock_t));
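+ /*
+ * While the lock is latched readers are held off entirely, so the lock
+ * word stays stable; that is presumably why the test can snapshot it here
+ * and restore it just before unlatching.
+ */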
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+ ck_error("ERROR [WR:%d]: %u != 2\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ memcpy(&lock, &copy, sizeof(ck_swlock_t));
+ }
+ ck_swlock_write_unlatch(&lock);
+ }
+
+ ck_swlock_read_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_swlock_read_unlock(&lock);
+ }
+
+ return (NULL);
+}
+
+static void *
+thread(void *arg)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+ int tid = ck_pr_load_int(arg);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ if (tid == 0) {
+ /* Writer */
+ ck_swlock_write_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+ ck_error("ERROR [WR:%d]: %u != 2\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_swlock_write_unlock(&lock);
+ }
+
+ ck_swlock_read_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_swlock_read_unlock(&lock);
+ }
+
+ return (NULL);
+}
+
+static void
+swlock_test(pthread_t *threads, void *(*f)(void *), const char *test)
+{
+ int i, tid[nthr];
+
+ fprintf(stderr, "Creating threads (%s)...", test);
+ for (i = 0; i < nthr; i++) {
+ ck_pr_store_int(&tid[i], i);
+ if (pthread_create(&threads[i], NULL, f, &tid[i])) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, ".");
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+
+ if (argc != 3) {
+ ck_error("Usage: validate <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ swlock_test(threads, thread, "regular");
+ swlock_test(threads, thread_latch, "latch");
+#ifdef CK_F_PR_RTM
+ swlock_test(threads, thread_rtm, "rtm");
+ swlock_test(threads, thread_rtm_mix, "rtm-mix");
+ swlock_test(threads, thread_rtm_adaptive, "rtm-adaptive");
+#endif
+ return 0;
+}
+
diff --git a/regressions/ck_tflock/benchmark/Makefile b/regressions/ck_tflock/benchmark/Makefile
new file mode 100644
index 0000000..ed63504
--- /dev/null
+++ b/regressions/ck_tflock/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: clean distribution
+
+OBJECTS=latency throughput
+
+all: $(OBJECTS)
+
+latency: latency.c ../../../include/ck_tflock.h
+ $(CC) $(CFLAGS) -o latency latency.c
+
+throughput: throughput.c ../../../include/ck_tflock.h
+ $(CC) $(CFLAGS) -o throughput throughput.c
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_tflock/benchmark/latency.c b/regressions/ck_tflock/benchmark/latency.c
new file mode 100644
index 0000000..fd77d44
--- /dev/null
+++ b/regressions/ck_tflock/benchmark/latency.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_tflock.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#define CK_F_PR_RTM
+
+#ifndef STEPS
+#define STEPS 2000000
+#endif
+
+int
+main(void)
+{
+ uint64_t s_b, e_b, i;
+ ck_tflock_ticket_t tflock = CK_TFLOCK_TICKET_INITIALIZER;
+
+ for (i = 0; i < STEPS; i++) {
+ ck_tflock_ticket_write_lock(&tflock);
+ ck_tflock_ticket_write_unlock(&tflock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_tflock_ticket_write_lock(&tflock);
+ ck_tflock_ticket_write_unlock(&tflock);
+ }
+ e_b = rdtsc();
+ printf(" WRITE: tflock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_tflock_ticket_read_lock(&tflock);
+ ck_tflock_ticket_read_unlock(&tflock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_tflock_ticket_read_lock(&tflock);
+ ck_tflock_ticket_read_unlock(&tflock);
+ }
+ e_b = rdtsc();
+ printf(" READ: tflock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ return 0;
+}
+
diff --git a/regressions/ck_tflock/benchmark/throughput.c b/regressions/ck_tflock/benchmark/throughput.c
new file mode 100644
index 0000000..41d22bd
--- /dev/null
+++ b/regressions/ck_tflock/benchmark/throughput.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_tflock.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+static int barrier;
+static int threads;
+static unsigned int flag CK_CC_CACHELINE;
+static struct {
+ ck_tflock_ticket_t lock;
+} rw CK_CC_CACHELINE = {
+ .lock = CK_TFLOCK_TICKET_INITIALIZER
+};
+
+static struct affinity affinity;
+
+static void *
+thread_lock(void *pun)
+{
+ uint64_t s_b, e_b, a, i;
+ uint64_t *value = pun;
+
+ if (aff_iterate(&affinity) != 0) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads)
+ ck_pr_stall();
+
+ for (i = 1, a = 0;; i++) {
+ s_b = rdtsc();
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ e_b = rdtsc();
+
+ a += (e_b - s_b) >> 4;
+
+ if (ck_pr_load_uint(&flag) == 1)
+ break;
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads * 2)
+ ck_pr_stall();
+
+ *value = (a / i);
+ return NULL;
+}
+
+static void
+tflock_test(pthread_t *p, int d, uint64_t *latency, void *(*f)(void *), const char *label)
+{
+ int t;
+
+ ck_pr_store_int(&barrier, 0);
+ ck_pr_store_uint(&flag, 0);
+
+ affinity.delta = d;
+ affinity.request = 0;
+
+ fprintf(stderr, "Creating threads (%s)...", label);
+ for (t = 0; t < threads; t++) {
+ if (pthread_create(&p[t], NULL, f, latency + t) != 0) {
+ ck_error("ERROR: Could not create thread %d\n", t);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ common_sleep(10);
+ ck_pr_store_uint(&flag, 1);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (t = 0; t < threads; t++)
+ pthread_join(p[t], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (t = 1; t <= threads; t++)
+ printf("%10u %20" PRIu64 "\n", t, latency[t - 1]);
+
+ fprintf(stderr, "\n");
+ return;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+ int d;
+ pthread_t *p;
+ uint64_t *latency;
+
+ if (argc != 3) {
+ ck_error("Usage: throughput <delta> <threads>\n");
+ }
+
+ threads = atoi(argv[2]);
+ if (threads <= 0) {
+ ck_error("ERROR: Threads must be a value > 0.\n");
+ }
+
+ p = malloc(sizeof(pthread_t) * threads);
+ if (p == NULL) {
+ ck_error("ERROR: Failed to initialize thread.\n");
+ }
+
+ latency = malloc(sizeof(uint64_t) * threads);
+ if (latency == NULL) {
+ ck_error("ERROR: Failed to create latency buffer.\n");
+ }
+
+ d = atoi(argv[1]);
+ tflock_test(p, d, latency, thread_lock, "tflock");
+ return 0;
+}
+
diff --git a/regressions/ck_tflock/validate/Makefile b/regressions/ck_tflock/validate/Makefile
new file mode 100644
index 0000000..6ae7c73
--- /dev/null
+++ b/regressions/ck_tflock/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=validate
+
+all: $(OBJECTS)
+
+validate: validate.c ../../../include/ck_tflock.h ../../../include/ck_elide.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate $(CORES) 1
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_tflock/validate/validate.c b/regressions/ck_tflock/validate/validate.c
new file mode 100644
index 0000000..22e9e65
--- /dev/null
+++ b/regressions/ck_tflock/validate/validate.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_tflock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+static struct affinity a;
+static unsigned int locked;
+static int nthr;
+static ck_tflock_ticket_t lock = CK_TFLOCK_TICKET_INITIALIZER;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ ck_tflock_ticket_write_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+ ck_error("ERROR [WR:%d]: %u != 2\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_tflock_ticket_write_unlock(&lock);
+
+ ck_tflock_ticket_read_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_tflock_ticket_read_unlock(&lock);
+ }
+
+ return (NULL);
+}
+
+static void
+tflock_ticket_test(pthread_t *threads, void *(*f)(void *), const char *test)
+{
+ int i;
+
+ fprintf(stderr, "Creating threads (%s)...", test);
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, f, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, ".");
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+
+ if (argc != 3) {
+ ck_error("Usage: validate <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ tflock_ticket_test(threads, thread, "regular");
+ ck_tflock_ticket_init(&lock);
+ return 0;
+}
+
diff --git a/regressions/common.h b/regressions/common.h
new file mode 100644
index 0000000..f67c2af
--- /dev/null
+++ b/regressions/common.h
@@ -0,0 +1,471 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_COMMON_H
+#define CK_COMMON_H
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/time.h>
+
+#ifdef __linux__
+#include <sched.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+#elif defined(__MACH__)
+#include <mach/mach.h>
+#include <mach/thread_policy.h>
+#elif defined(__FreeBSD__)
+#include <sys/param.h>
+#include <sys/cpuset.h>
+#endif
+
+#if defined(_WIN32)
+#include <assert.h>
+#define NOMINMAX
+#include <windows.h>
+#define DELTA_EPOCH 11644473600000000ULL
+#else
+#include <signal.h>
+#include <unistd.h>
+#endif
+
+#ifndef CORES
+#define CORES 8
+#endif
+
+CK_CC_INLINE static void
+common_srand(unsigned int i)
+{
+#ifdef _WIN32
+ srand(i);
+#else
+ srandom(i);
+#endif
+}
+
+CK_CC_INLINE static int
+common_rand(void)
+{
+#ifdef _WIN32
+ return rand();
+#else
+ return random();
+#endif
+}
+
+CK_CC_INLINE static int
+common_rand_r(unsigned int *i)
+{
+#ifdef _WIN32
+ (void)i;
+
+ /*
+ * When linked with -mthreads, rand() is thread-safe.
+ * rand_s is also an option.
+ */
+ return rand();
+#else
+ return rand_r(i);
+#endif
+}
+
+CK_CC_INLINE static void
+common_srand48(long int i)
+{
+#ifdef _WIN32
+ srand(i);
+#else
+ srand48(i);
+#endif
+}
+
+CK_CC_INLINE static long int
+common_lrand48(void)
+{
+#ifdef _WIN32
+ return rand();
+#else
+ return lrand48();
+#endif
+}
+
+CK_CC_INLINE static double
+common_drand48(void)
+{
+#ifdef _WIN32
+ return (double)rand()/RAND_MAX;
+#else
+ return drand48();
+#endif
+}
+
+CK_CC_INLINE static void
+common_sleep(unsigned int n)
+{
+#ifdef _WIN32
+ Sleep(n * 1000);
+#else
+ sleep(n);
+#endif
+}
+
+CK_CC_INLINE static int
+common_gettimeofday(struct timeval *tv, void *tz)
+{
+#ifdef _WIN32
+ FILETIME ft;
+ uint64_t tmp_time = 0;
+ static bool tzflag = false;
+ struct timezone *tzp = tz;
+
+ if (tv != NULL) {
+ GetSystemTimeAsFileTime(&ft);
+ tmp_time |= ft.dwHighDateTime;
+ tmp_time <<= 32;
+ tmp_time |= ft.dwLowDateTime;
+
+ /* GetSystemTimeAsFileTime returns 100 nanosecond intervals. */
+ tmp_time /= 10;
+
+ /* The Windows epoch starts on 01/01/1601, while the Unix epoch starts on 01/01/1970. */
+ tmp_time -= DELTA_EPOCH;
+
+ tv->tv_sec = (long)(tmp_time / 1000000UL);
+ tv->tv_usec = (long)(tmp_time % 1000000UL);
+ }
+
+
+ if (tz != NULL) {
+ if (tzflag == false) {
+ _tzset();
+ tzflag = true;
+ }
+
+ tzp->tz_minuteswest = _timezone / 60;
+ tzp->tz_dsttime = _daylight;
+ }
+
+ return 0;
+#else
+ return gettimeofday(tv, tz);
+#endif
+}
+
+CK_CC_UNUSED static unsigned int
+common_alarm(void (*sig_handler)(int), void *alarm_event, unsigned int duration)
+{
+#ifdef _WIN32
+ (void)sig_handler;
+ (void)duration;
+ bool success;
+ HANDLE *alarm_handle = alarm_event;
+ success = SetEvent(*alarm_handle);
+ assert(success != false);
+ return 0;
+#else
+ (void)alarm_event;
+ signal(SIGALRM, sig_handler);
+ return alarm(duration);
+#endif
+}
+
+#ifdef _WIN32
+#ifndef SECOND_TIMER
+#define SECOND_TIMER 10000000
+#endif
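+/*
+ * Windows has no alarm(2), so these macros emulate it: common_alarm()
+ * signals alarm_event_name, a dedicated thread created by COMMON_ALARM_INIT
+ * arms a waitable timer for the duration fixed at init time, and the timer
+ * callback sets the caller's flag.  On POSIX systems the macros expand to
+ * (almost) nothing and common_alarm() simply installs a SIGALRM handler and
+ * calls alarm().
+ */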
+#define COMMON_ALARM_DECLARE_GLOBAL(prefix, alarm_event_name, flag_name) \
+static HANDLE prefix##_common_win_alarm_timer; \
+static HANDLE alarm_event_name; \
+static LARGE_INTEGER prefix##_common_alarm_timer_length; \
+ \
+static void CALLBACK \
+prefix##_common_win_alarm_handler(LPVOID arg, DWORD timer_low_value, DWORD timer_high_value) \
+{ \
+ (void)arg; \
+ (void)timer_low_value; \
+ (void)timer_high_value; \
+ flag_name = true; \
+ return; \
+} \
+ \
+static void * \
+prefix##_common_win_alarm(void *unused) \
+{ \
+ (void)unused; \
+ bool timer_success = false; \
+ for (;;) { \
+ WaitForSingleObjectEx(alarm_event_name, INFINITE, true); \
+ timer_success = SetWaitableTimer(prefix##_common_win_alarm_timer, \
+ &prefix##_common_alarm_timer_length, \
+ 0, \
+ prefix##_common_win_alarm_handler, NULL, false); \
+ assert(timer_success != false); \
+ WaitForSingleObjectEx(prefix##_common_win_alarm_timer, INFINITE, true); \
+ } \
+ \
+ return NULL; \
+}
+
+#define COMMON_ALARM_DECLARE_LOCAL(prefix, alarm_event_name) \
+ int64_t prefix##_common_alarm_tl; \
+ pthread_t prefix##_common_win_alarm_thread;
+
+#define COMMON_ALARM_INIT(prefix, alarm_event_name, duration) \
+ prefix##_common_alarm_tl = -1 * (duration) * SECOND_TIMER; \
+ prefix##_common_alarm_timer_length.LowPart = \
+ (DWORD) (prefix##_common_alarm_tl & 0xFFFFFFFF); \
+ prefix##_common_alarm_timer_length.HighPart = \
+ (LONG) (prefix##_common_alarm_tl >> 32); \
+ alarm_event_name = CreateEvent(NULL, false, false, NULL); \
+ assert(alarm_event_name != NULL); \
+ prefix##_common_win_alarm_timer = CreateWaitableTimer(NULL, true, NULL); \
+ assert(prefix##_common_win_alarm_timer != NULL); \
+ if (pthread_create(&prefix##_common_win_alarm_thread, \
+ NULL, \
+ prefix##_common_win_alarm, \
+ NULL) != 0) \
+ ck_error("ERROR: Failed to create common_win_alarm thread.\n");
+#else
+#define COMMON_ALARM_DECLARE_GLOBAL(prefix, alarm_event_name, flag_name)
+#define COMMON_ALARM_DECLARE_LOCAL(prefix, alarm_event_name) \
+ int alarm_event_name = 0;
+#define COMMON_ALARM_INIT(prefix, alarm_event_name, duration)
+#endif
+
+struct affinity {
+ unsigned int delta;
+ unsigned int request;
+};
+
+#define AFFINITY_INITIALIZER {0, 0}
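+/*
+ * aff_iterate() pins the calling thread to core (request % CORES) and then
+ * advances the shared request counter by delta, spreading successive
+ * callers across cores with a configurable stride.
+ */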
+
+#ifdef __linux__
+#ifndef gettid
+static pid_t
+gettid(void)
+{
+ return syscall(__NR_gettid);
+}
+#endif /* gettid */
+
+CK_CC_UNUSED static int
+aff_iterate(struct affinity *acb)
+{
+ cpu_set_t s;
+ unsigned int c;
+
+ c = ck_pr_faa_uint(&acb->request, acb->delta);
+ CPU_ZERO(&s);
+ CPU_SET(c % CORES, &s);
+
+ return sched_setaffinity(gettid(), sizeof(s), &s);
+}
+
+CK_CC_UNUSED static int
+aff_iterate_core(struct affinity *acb, unsigned int *core)
+{
+ cpu_set_t s;
+
+ *core = ck_pr_faa_uint(&acb->request, acb->delta);
+ CPU_ZERO(&s);
+ CPU_SET((*core) % CORES, &s);
+
+ return sched_setaffinity(gettid(), sizeof(s), &s);
+}
+#elif defined(__MACH__)
+CK_CC_UNUSED static int
+aff_iterate(struct affinity *acb)
+{
+ thread_affinity_policy_data_t policy;
+ unsigned int c;
+
+ c = ck_pr_faa_uint(&acb->request, acb->delta) % CORES;
+ policy.affinity_tag = c;
+ return thread_policy_set(mach_thread_self(),
+ THREAD_AFFINITY_POLICY,
+ (thread_policy_t)&policy,
+ THREAD_AFFINITY_POLICY_COUNT);
+}
+
+CK_CC_UNUSED static int
+aff_iterate_core(struct affinity *acb, unsigned int *core)
+{
+ thread_affinity_policy_data_t policy;
+
+ *core = ck_pr_faa_uint(&acb->request, acb->delta) % CORES;
+ policy.affinity_tag = *core;
+ return thread_policy_set(mach_thread_self(),
+ THREAD_AFFINITY_POLICY,
+ (thread_policy_t)&policy,
+ THREAD_AFFINITY_POLICY_COUNT);
+}
+#elif defined(__FreeBSD__)
+CK_CC_UNUSED static int
+aff_iterate(struct affinity *acb CK_CC_UNUSED)
+{
+ unsigned int c;
+ cpuset_t mask;
+
+ c = ck_pr_faa_uint(&acb->request, acb->delta) % CORES;
+ CPU_ZERO(&mask);
+ CPU_SET(c, &mask);
+ return (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
+ sizeof(mask), &mask));
+}
+
+CK_CC_UNUSED static int
+aff_iterate_core(struct affinity *acb CK_CC_UNUSED, unsigned int *core)
+{
+ cpuset_t mask;
+
+ *core = ck_pr_faa_uint(&acb->request, acb->delta) % CORES;
+ CPU_ZERO(&mask);
+ CPU_SET(*core, &mask);
+ return (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
+ sizeof(mask), &mask));
+}
+#else
+CK_CC_UNUSED static int
+aff_iterate(struct affinity *acb CK_CC_UNUSED)
+{
+
+ return (0);
+}
+
+CK_CC_UNUSED static int
+aff_iterate_core(struct affinity *acb CK_CC_UNUSED, unsigned int *core)
+{
+ *core = 0;
+ return (0);
+}
+#endif
+
+CK_CC_INLINE static uint64_t
+rdtsc(void)
+{
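+ /*
+ * Read a per-architecture cycle or time-base counter.  On x86 the
+ * non-RDTSCP path brackets RDTSC with CPUID so the read is serialized
+ * against surrounding instructions; other architectures read their native
+ * counters, and unsupported targets simply return 0.
+ */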
+#if defined(__x86_64__)
+ uint32_t eax = 0, edx;
+#if defined(CK_MD_RDTSCP)
+ __asm__ __volatile__("rdtscp"
+ : "+a" (eax), "=d" (edx)
+ :
+ : "%ecx", "memory");
+
+ return (((uint64_t)edx << 32) | eax);
+#else
+ __asm__ __volatile__("cpuid;"
+ "rdtsc;"
+ : "+a" (eax), "=d" (edx)
+ :
+ : "%ebx", "%ecx", "memory");
+
+ __asm__ __volatile__("xorl %%eax, %%eax;"
+ "cpuid;"
+ :
+ :
+ : "%eax", "%ebx", "%ecx", "%edx", "memory");
+
+ return (((uint64_t)edx << 32) | eax);
+#endif /* !CK_MD_RDTSCP */
+#elif defined(__x86__)
+ uint32_t eax = 0, edx;
+#if defined(CK_MD_RDTSCP)
+ __asm__ __volatile__("rdtscp"
+ : "+a" (eax), "=d" (edx)
+ :
+ : "%ecx", "memory");
+
+ return (((uint64_t)edx << 32) | eax);
+#else
+ __asm__ __volatile__("pushl %%ebx;"
+ "cpuid;"
+ "rdtsc;"
+ : "+a" (eax), "=d" (edx)
+ :
+ : "%ecx", "memory");
+
+ __asm__ __volatile__("xorl %%eax, %%eax;"
+ "cpuid;"
+ "popl %%ebx;"
+ :
+ :
+ : "%eax", "%ecx", "%edx", "memory");
+
+ return (((uint64_t)edx << 32) | eax);
+#endif /* !CK_MD_RDTSCP */
+#elif defined(__sparcv9__)
+ uint64_t r;
+
+ __asm__ __volatile__("rd %%tick, %0"
+ : "=r" (r)
+ :
+ : "memory");
+ return r;
+#elif defined(__ppc64__)
+ uint32_t high, low, snapshot;
+
+ do {
+ __asm__ __volatile__("isync;"
+ "mftbu %0;"
+ "mftb %1;"
+ "mftbu %2;"
+ : "=r" (high), "=r" (low), "=r" (snapshot)
+ :
+ : "memory");
+ } while (snapshot != high);
+
+ return (((uint64_t)high << 32) | low);
+#elif defined(__aarch64__)
+ uint64_t r;
+
+ __asm __volatile__ ("mrs %0, cntvct_el0" : "=r" (r) : : "memory");
+ return r;
+#else
+ return 0;
+#endif
+}
+
+CK_CC_USED static void
+ck_error(const char *message, ...)
+{
+ va_list ap;
+
+ va_start(ap, message);
+ vfprintf(stderr, message, ap);
+ va_end(ap);
+ exit(EXIT_FAILURE);
+}
+
+#define ck_test(A, B, ...) do { \
+ if (A) \
+ ck_error(B, ##__VA_ARGS__); \
+} while (0)
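+/*
+ * Example (hypothetical call site):
+ *
+ *     ck_test(buffer == NULL, "ERROR: allocation failed\n");
+ *
+ * It prints the message through ck_error() and terminates the test when
+ * the condition holds.
+ */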
+
+#endif /* CK_COMMON_H */
diff --git a/src/Makefile.in b/src/Makefile.in
new file mode 100644
index 0000000..0d84e76
--- /dev/null
+++ b/src/Makefile.in
@@ -0,0 +1,64 @@
+.PHONY: clean distribution
+
+include @BUILD_DIR@/build/ck.build
+
+TARGET_DIR=$(BUILD_DIR)/src
+SDIR=$(SRC_DIR)/src
+INCLUDE_DIR=$(SRC_DIR)/include
+
+OBJECTS=ck_barrier_centralized.o \
+ ck_barrier_combining.o \
+ ck_barrier_dissemination.o \
+ ck_barrier_tournament.o \
+ ck_barrier_mcs.o \
+ ck_epoch.o \
+ ck_ht.o \
+ ck_hp.o \
+ ck_hs.o \
+ ck_rhs.o \
+ ck_array.o
+
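+# The objects above are built from the sources in $(SDIR) into $(TARGET_DIR)
+# by the per-object rules below, then linked into libck.so and/or archived
+# into libck.a according to $(ALL_LIBS).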
+all: $(ALL_LIBS)
+
+libck.so: $(OBJECTS)
+ $(LD) $(LDFLAGS) -o $(TARGET_DIR)/libck.so $(OBJECTS)
+
+libck.a: $(OBJECTS)
+ ar rcs $(TARGET_DIR)/libck.a $(OBJECTS)
+
+ck_array.o: $(INCLUDE_DIR)/ck_array.h $(SDIR)/ck_array.c
+ $(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_array.o $(SDIR)/ck_array.c
+
+ck_epoch.o: $(INCLUDE_DIR)/ck_epoch.h $(SDIR)/ck_epoch.c $(INCLUDE_DIR)/ck_stack.h
+ $(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_epoch.o $(SDIR)/ck_epoch.c
+
+ck_hs.o: $(INCLUDE_DIR)/ck_hs.h $(SDIR)/ck_hs.c
+ $(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_hs.o $(SDIR)/ck_hs.c
+
+ck_rhs.o: $(INCLUDE_DIR)/ck_rhs.h $(SDIR)/ck_rhs.c
+ $(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_rhs.o $(SDIR)/ck_rhs.c
+
+ck_ht.o: $(INCLUDE_DIR)/ck_ht.h $(SDIR)/ck_ht.c
+ $(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_ht.o $(SDIR)/ck_ht.c
+
+ck_hp.o: $(SDIR)/ck_hp.c $(INCLUDE_DIR)/ck_hp.h $(INCLUDE_DIR)/ck_stack.h
+ $(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_hp.o $(SDIR)/ck_hp.c
+
+ck_barrier_centralized.o: $(SDIR)/ck_barrier_centralized.c
+ $(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_barrier_centralized.o $(SDIR)/ck_barrier_centralized.c
+
+ck_barrier_combining.o: $(SDIR)/ck_barrier_combining.c
+ $(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_barrier_combining.o $(SDIR)/ck_barrier_combining.c
+
+ck_barrier_dissemination.o: $(SDIR)/ck_barrier_dissemination.c
+ $(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_barrier_dissemination.o $(SDIR)/ck_barrier_dissemination.c
+
+ck_barrier_tournament.o: $(SDIR)/ck_barrier_tournament.c
+ $(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_barrier_tournament.o $(SDIR)/ck_barrier_tournament.c
+
+ck_barrier_mcs.o: $(SDIR)/ck_barrier_mcs.c
+ $(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_barrier_mcs.o $(SDIR)/ck_barrier_mcs.c
+
+clean:
+ rm -rf $(TARGET_DIR)/*.dSYM $(TARGET_DIR)/*~ $(TARGET_DIR)/*.o \
+ $(OBJECTS) $(TARGET_DIR)/libck.a $(TARGET_DIR)/libck.so
diff --git a/src/ck_array.c b/src/ck_array.c
new file mode 100644
index 0000000..35b2502
--- /dev/null
+++ b/src/ck_array.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra
+ * Copyright 2013-2014 AppNexus, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_array.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+#include <ck_string.h>
+
+static struct _ck_array *
+ck_array_create(struct ck_malloc *allocator, unsigned int length)
+{
+ struct _ck_array *active;
+
+ active = allocator->malloc(sizeof(struct _ck_array) + sizeof(void *) * length);
+ if (active == NULL)
+ return NULL;
+
+ active->n_committed = 0;
+ active->length = length;
+
+ return active;
+}
+
+bool
+ck_array_init(struct ck_array *array, unsigned int mode, struct ck_malloc *allocator, unsigned int length)
+{
+ struct _ck_array *active;
+
+ (void)mode;
+
+ if (allocator->realloc == NULL ||
+ allocator->malloc == NULL ||
+ allocator->free == NULL ||
+ length == 0)
+ return false;
+
+ active = ck_array_create(allocator, length);
+ if (active == NULL)
+ return false;
+
+ array->n_entries = 0;
+ array->allocator = allocator;
+ array->active = active;
+ array->transaction = NULL;
+ return true;
+}
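+
+/*
+ * Illustrative usage sketch (allocator, value and cursor are placeholder
+ * names): writers are serialized externally, while readers may iterate
+ * concurrently over the committed view.
+ *
+ *   ck_array_init(&array, CK_ARRAY_MODE_SPMC, &allocator, 8);
+ *   ck_array_put(&array, value);
+ *   ck_array_commit(&array);	(additions become visible to readers)
+ *
+ *   CK_ARRAY_FOREACH(&array, &cursor, &value)
+ *   	consume(value);
+ */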
+
+bool
+ck_array_put(struct ck_array *array, void *value)
+{
+ struct _ck_array *target;
+ unsigned int size;
+
+ /*
+ * If no transaction copy has been necessary, attempt to do in-place
+ * modification of the array.
+ */
+ if (array->transaction == NULL) {
+ target = array->active;
+
+ if (array->n_entries == target->length) {
+ size = target->length << 1;
+
+ target = array->allocator->realloc(target,
+ sizeof(struct _ck_array) + sizeof(void *) * array->n_entries,
+ sizeof(struct _ck_array) + sizeof(void *) * size,
+ true);
+
+ if (target == NULL)
+ return false;
+
+ ck_pr_store_uint(&target->length, size);
+
+ /* Serialize with respect to contents. */
+ ck_pr_fence_store();
+ ck_pr_store_ptr(&array->active, target);
+ }
+
+ target->values[array->n_entries++] = value;
+ return true;
+ }
+
+ target = array->transaction;
+ if (array->n_entries == target->length) {
+ size = target->length << 1;
+
+ target = array->allocator->realloc(target,
+ sizeof(struct _ck_array) + sizeof(void *) * array->n_entries,
+ sizeof(struct _ck_array) + sizeof(void *) * size,
+ true);
+
+ if (target == NULL)
+ return false;
+
+ target->length = size;
+ array->transaction = target;
+ }
+
+ target->values[array->n_entries++] = value;
+ return true;
+}
+
+int
+ck_array_put_unique(struct ck_array *array, void *value)
+{
+ unsigned int i, limit;
+ void **v;
+
+ limit = array->n_entries;
+ if (array->transaction != NULL) {
+ v = array->transaction->values;
+ } else {
+ v = array->active->values;
+ }
+
+ for (i = 0; i < limit; i++) {
+ if (v[i] == value)
+ return 1;
+ }
+
+ return -!ck_array_put(array, value);
+}
+
+bool
+ck_array_remove(struct ck_array *array, void *value)
+{
+ struct _ck_array *target;
+ unsigned int i;
+
+ if (array->transaction != NULL) {
+ target = array->transaction;
+
+ for (i = 0; i < array->n_entries; i++) {
+ if (target->values[i] == value) {
+ target->values[i] = target->values[--array->n_entries];
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ target = array->active;
+
+ for (i = 0; i < array->n_entries; i++) {
+ if (target->values[i] == value)
+ break;
+ }
+
+ if (i == array->n_entries)
+ return false;
+
+ /* If there are pending additions, the removal can be carried out in place immediately. */
+ if (target->n_committed != array->n_entries) {
+ ck_pr_store_ptr(&target->values[i], target->values[--array->n_entries]);
+ return true;
+ }
+
+ /*
+ * The assumption is that these allocations are small to begin with.
+ * If there is no opportunity for immediate in-place removal, allocate a
+ * transactional array which will be applied at commit time.
+ */
+ target = ck_array_create(array->allocator, array->n_entries);
+ if (target == NULL)
+ return false;
+
+ memcpy(target->values, array->active->values, sizeof(void *) * array->n_entries);
+ target->length = array->n_entries;
+ target->n_committed = array->n_entries;
+ target->values[i] = target->values[--array->n_entries];
+
+ array->transaction = target;
+ return true;
+}
+
+bool
+ck_array_commit(ck_array_t *array)
+{
+ struct _ck_array *m = array->transaction;
+
+ if (m != NULL) {
+ struct _ck_array *p;
+
+ m->n_committed = array->n_entries;
+ ck_pr_fence_store();
+ p = array->active;
+ ck_pr_store_ptr(&array->active, m);
+ array->allocator->free(p, sizeof(struct _ck_array) +
+ p->length * sizeof(void *), true);
+ array->transaction = NULL;
+
+ return true;
+ }
+
+ ck_pr_fence_store();
+ ck_pr_store_uint(&array->active->n_committed, array->n_entries);
+ return true;
+}
+
+void
+ck_array_deinit(struct ck_array *array, bool defer)
+{
+
+ array->allocator->free(array->active,
+ sizeof(struct _ck_array) + sizeof(void *) * array->active->length, defer);
+
+ if (array->transaction != NULL) {
+ array->allocator->free(array->transaction,
+ sizeof(struct _ck_array) + sizeof(void *) * array->transaction->length, defer);
+ }
+
+ array->transaction = array->active = NULL;
+ return;
+}
diff --git a/src/ck_barrier_centralized.c b/src/ck_barrier_centralized.c
new file mode 100644
index 0000000..ca8cc18
--- /dev/null
+++ b/src/ck_barrier_centralized.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_barrier.h>
+#include <ck_pr.h>
+
+void
+ck_barrier_centralized(struct ck_barrier_centralized *barrier,
+ struct ck_barrier_centralized_state *state,
+ unsigned int n_threads)
+{
+ unsigned int sense, value;
+
+ /*
+ * Every execution context has a sense associated with it.
+ * This sense is reversed when the barrier is entered. Every
+ * thread will spin on the global sense until the last thread
+ * reverses it.
+ */
+ sense = state->sense = ~state->sense;
+ value = ck_pr_faa_uint(&barrier->value, 1);
+ if (value == n_threads - 1) {
+ ck_pr_store_uint(&barrier->value, 0);
+ ck_pr_fence_memory();
+ ck_pr_store_uint(&barrier->sense, sense);
+ return;
+ }
+
+ ck_pr_fence_atomic_load();
+ while (sense != ck_pr_load_uint(&barrier->sense))
+ ck_pr_stall();
+
+ ck_pr_fence_acquire();
+ return;
+}
diff --git a/src/ck_barrier_combining.c b/src/ck_barrier_combining.c
new file mode 100644
index 0000000..3ee72fd
--- /dev/null
+++ b/src/ck_barrier_combining.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_barrier.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <ck_spinlock.h>
+
+struct ck_barrier_combining_queue {
+ struct ck_barrier_combining_group *head;
+ struct ck_barrier_combining_group *tail;
+};
+
+CK_CC_INLINE static struct ck_barrier_combining_group *
+ck_barrier_combining_queue_dequeue(struct ck_barrier_combining_queue *queue)
+{
+ struct ck_barrier_combining_group *front = NULL;
+
+ if (queue->head != NULL) {
+ front = queue->head;
+ queue->head = queue->head->next;
+ }
+
+ return front;
+}
+
+CK_CC_INLINE static void
+ck_barrier_combining_insert(struct ck_barrier_combining_group *parent,
+ struct ck_barrier_combining_group *tnode,
+ struct ck_barrier_combining_group **child)
+{
+
+ *child = tnode;
+ tnode->parent = parent;
+
+ /*
+ * After inserting, we must increment the parent group's count for
+ * number of threads expected to reach it; otherwise, the
+ * barrier may end prematurely.
+ */
+ parent->k++;
+ return;
+}
+
+/*
+ * This implementation of software combining tree barriers
+ * uses level order traversal to insert new thread groups
+ * into the barrier's tree. We use a queue to implement this
+ * traversal.
+ */
+CK_CC_INLINE static void
+ck_barrier_combining_queue_enqueue(struct ck_barrier_combining_queue *queue,
+ struct ck_barrier_combining_group *node_value)
+{
+
+ node_value->next = NULL;
+ if (queue->head == NULL) {
+ queue->head = queue->tail = node_value;
+ return;
+ }
+
+ queue->tail->next = node_value;
+ queue->tail = node_value;
+
+ return;
+}
+
+
+void
+ck_barrier_combining_group_init(struct ck_barrier_combining *root,
+ struct ck_barrier_combining_group *tnode,
+ unsigned int nthr)
+{
+ struct ck_barrier_combining_group *node;
+ struct ck_barrier_combining_queue queue;
+
+ queue.head = queue.tail = NULL;
+
+ tnode->k = nthr;
+ tnode->count = 0;
+ tnode->sense = 0;
+ tnode->left = tnode->right = NULL;
+
+ /*
+ * Finds the first available node for linkage into the combining
+ * tree. The use of a spinlock is excusable as this is a one-time
+ * initialization cost.
+ */
+ ck_spinlock_fas_lock(&root->mutex);
+ ck_barrier_combining_queue_enqueue(&queue, root->root);
+ while (queue.head != NULL) {
+ node = ck_barrier_combining_queue_dequeue(&queue);
+
+ /* If the left child is free, link the group there. */
+ if (node->left == NULL) {
+ ck_barrier_combining_insert(node, tnode, &node->left);
+ goto leave;
+ }
+
+ /* If the right child is free, link the group there. */
+ if (node->right == NULL) {
+ ck_barrier_combining_insert(node, tnode, &node->right);
+ goto leave;
+ }
+
+ /*
+ * If unsuccessful, try inserting as a child of the children of the
+ * current node.
+ */
+ ck_barrier_combining_queue_enqueue(&queue, node->left);
+ ck_barrier_combining_queue_enqueue(&queue, node->right);
+ }
+
+leave:
+ ck_spinlock_fas_unlock(&root->mutex);
+ return;
+}
+
+void
+ck_barrier_combining_init(struct ck_barrier_combining *root,
+ struct ck_barrier_combining_group *init_root)
+{
+
+ init_root->k = 0;
+ init_root->count = 0;
+ init_root->sense = 0;
+ init_root->parent = init_root->left = init_root->right = NULL;
+ ck_spinlock_fas_init(&root->mutex);
+ root->root = init_root;
+ return;
+}
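+
+/*
+ * Illustrative setup sketch (root_group, group and state are placeholder
+ * names): the root group is installed once, every group of cooperating
+ * threads links its own node into the tree, and each thread then enters
+ * the barrier with its group node and per-thread state.
+ *
+ *   ck_barrier_combining_init(&barrier, &root_group);
+ *   ck_barrier_combining_group_init(&barrier, &group, nthr_in_group);
+ *
+ *   In each thread of the group:
+ *   ck_barrier_combining(&barrier, &group, &state);
+ */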
+
+static void
+ck_barrier_combining_aux(struct ck_barrier_combining *barrier,
+ struct ck_barrier_combining_group *tnode,
+ unsigned int sense)
+{
+
+ /*
+ * If this is the last thread in the group, it moves on to the parent group.
+ * Otherwise, it spins on this group's sense.
+ */
+ if (ck_pr_faa_uint(&tnode->count, 1) == tnode->k - 1) {
+ /*
+ * If we are and will be the last thread entering the barrier for the
+ * current group then signal the parent group if one exists.
+ */
+ if (tnode->parent != NULL)
+ ck_barrier_combining_aux(barrier, tnode->parent, sense);
+
+ /*
+ * Once the thread returns from its parent(s), it reinitializes the group's
+ * arrival count and signals other threads to continue by flipping the group
+ * sense. Order of these operations is not important since we assume a static
+ * number of threads are members of a barrier for the lifetime of the barrier.
+ * Since count is explicitly reinitialized, it is guaranteed that at any point
+ * tnode->count is equivalent to tnode->k if and only if that many threads
+ * are at the barrier.
+ */
+ ck_pr_store_uint(&tnode->count, 0);
+ ck_pr_fence_store();
+ ck_pr_store_uint(&tnode->sense, ~tnode->sense);
+ } else {
+ ck_pr_fence_memory();
+ while (sense != ck_pr_load_uint(&tnode->sense))
+ ck_pr_stall();
+ }
+
+ return;
+}
+
+void
+ck_barrier_combining(struct ck_barrier_combining *barrier,
+ struct ck_barrier_combining_group *tnode,
+ struct ck_barrier_combining_state *state)
+{
+
+ ck_barrier_combining_aux(barrier, tnode, state->sense);
+
+ /* Reverse the execution context's sense for the next barrier. */
+ state->sense = ~state->sense;
+ return;
+}
diff --git a/src/ck_barrier_dissemination.c b/src/ck_barrier_dissemination.c
new file mode 100644
index 0000000..df151d8
--- /dev/null
+++ b/src/ck_barrier_dissemination.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_barrier.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <ck_spinlock.h>
+
+#include "ck_internal.h"
+
+void
+ck_barrier_dissemination_init(struct ck_barrier_dissemination *barrier,
+ struct ck_barrier_dissemination_flag **barrier_internal,
+ unsigned int nthr)
+{
+ unsigned int i, j, k, size, offset;
+ bool p = nthr & (nthr - 1);
+
+ barrier->nthr = nthr;
+ barrier->size = size = ck_internal_log(ck_internal_power_2(nthr));
+ ck_pr_store_uint(&barrier->tid, 0);
+
+ for (i = 0; i < nthr; ++i) {
+ barrier[i].flags[0] = barrier_internal[i];
+ barrier[i].flags[1] = barrier_internal[i] + size;
+ }
+
+ for (i = 0; i < nthr; ++i) {
+ for (k = 0, offset = 1; k < size; ++k, offset <<= 1) {
+ /*
+ * Determine the thread's partner, j, for the current round, k.
+ * Partners are chosen such that by the completion of the barrier,
+ * every thread has been directly (having one of its flags set) or
+ * indirectly (having one of its partners' flags set) signaled
+ * by every other thread in the barrier.
+ */
+ if (p == false)
+ j = (i + offset) & (nthr - 1);
+ else
+ j = (i + offset) % nthr;
+
+ /* Set the thread's partner for round k. */
+ barrier[i].flags[0][k].pflag = &barrier[j].flags[0][k].tflag;
+ barrier[i].flags[1][k].pflag = &barrier[j].flags[1][k].tflag;
+
+ /* Set the thread's flags to false. */
+ barrier[i].flags[0][k].tflag = barrier[i].flags[1][k].tflag = 0;
+ }
+ }
+
+ return;
+}
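+
+/*
+ * Illustrative setup sketch (flags and state are placeholder names, and
+ * barrier points to an array of nthr ck_barrier_dissemination structures):
+ * each per-thread slot needs ck_barrier_dissemination_size(nthr) flag
+ * entries, covering both parity sets.
+ *
+ *   size = ck_barrier_dissemination_size(nthr);
+ *   for (i = 0; i < nthr; i++)
+ *   	flags[i] = malloc(sizeof(struct ck_barrier_dissemination_flag) * size);
+ *   ck_barrier_dissemination_init(barrier, flags, nthr);
+ *
+ *   In each thread:
+ *   ck_barrier_dissemination_subscribe(barrier, &state);
+ *   ck_barrier_dissemination(barrier, &state);
+ */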
+
+void
+ck_barrier_dissemination_subscribe(struct ck_barrier_dissemination *barrier,
+ struct ck_barrier_dissemination_state *state)
+{
+
+ state->parity = 0;
+ state->sense = ~0;
+ state->tid = ck_pr_faa_uint(&barrier->tid, 1);
+ return;
+}
+
+unsigned int
+ck_barrier_dissemination_size(unsigned int nthr)
+{
+
+ return (ck_internal_log(ck_internal_power_2(nthr)) << 1);
+}
+
+void
+ck_barrier_dissemination(struct ck_barrier_dissemination *barrier,
+ struct ck_barrier_dissemination_state *state)
+{
+ unsigned int i;
+ unsigned int size = barrier->size;
+
+ for (i = 0; i < size; ++i) {
+ unsigned int *pflag, *tflag;
+
+ pflag = barrier[state->tid].flags[state->parity][i].pflag;
+ tflag = &barrier[state->tid].flags[state->parity][i].tflag;
+
+ /* Unblock current partner. */
+ ck_pr_store_uint(pflag, state->sense);
+
+ /* Wait until some other thread unblocks this one. */
+ while (ck_pr_load_uint(tflag) != state->sense)
+ ck_pr_stall();
+ }
+
+ /*
+ * Dissemination barriers use two sets of flags to prevent race conditions
+ * between successive calls to the barrier. Parity indicates which set will
+ * be used for the next barrier. They also use a sense reversal technique
+ * to avoid re-initialization of the flags for every two calls to the barrier.
+ */
+ if (state->parity == 1)
+ state->sense = ~state->sense;
+
+ state->parity = 1 - state->parity;
+
+ ck_pr_fence_acquire();
+ return;
+}
diff --git a/src/ck_barrier_mcs.c b/src/ck_barrier_mcs.c
new file mode 100644
index 0000000..cf06017
--- /dev/null
+++ b/src/ck_barrier_mcs.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_barrier.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <ck_stdbool.h>
+
+void
+ck_barrier_mcs_init(struct ck_barrier_mcs *barrier, unsigned int nthr)
+{
+ unsigned int i, j;
+
+ ck_pr_store_uint(&barrier->tid, 0);
+
+ for (i = 0; i < nthr; ++i) {
+ for (j = 0; j < 4; ++j) {
+ /*
+ * If there are still threads that do not have parents,
+ * add them as children of this node.
+ */
+ barrier[i].havechild[j] = ((i << 2) + j < nthr - 1) ? ~0 : 0;
+
+ /*
+ * childnotready is initialized to havechild to ensure
+ * a thread does not wait for a child that does not exist.
+ */
+ barrier[i].childnotready[j] = barrier[i].havechild[j];
+ }
+
+ /* The root thread does not have a parent. */
+ barrier[i].parent = (i == 0) ?
+ &barrier[i].dummy :
+ &barrier[(i - 1) >> 2].childnotready[(i - 1) & 3];
+
+ /* Leaf threads do not have any children. */
+ barrier[i].children[0] = ((i << 1) + 1 >= nthr) ?
+ &barrier[i].dummy :
+ &barrier[(i << 1) + 1].parentsense;
+
+ barrier[i].children[1] = ((i << 1) + 2 >= nthr) ?
+ &barrier[i].dummy :
+ &barrier[(i << 1) + 2].parentsense;
+
+ barrier[i].parentsense = 0;
+ }
+
+ return;
+}
+
+void
+ck_barrier_mcs_subscribe(struct ck_barrier_mcs *barrier, struct ck_barrier_mcs_state *state)
+{
+
+ state->sense = ~0;
+ state->vpid = ck_pr_faa_uint(&barrier->tid, 1);
+ return;
+}
+
+CK_CC_INLINE static bool
+ck_barrier_mcs_check_children(unsigned int *childnotready)
+{
+
+ if (ck_pr_load_uint(&childnotready[0]) != 0)
+ return false;
+ if (ck_pr_load_uint(&childnotready[1]) != 0)
+ return false;
+ if (ck_pr_load_uint(&childnotready[2]) != 0)
+ return false;
+ if (ck_pr_load_uint(&childnotready[3]) != 0)
+ return false;
+
+ return true;
+}
+
+CK_CC_INLINE static void
+ck_barrier_mcs_reinitialize_children(struct ck_barrier_mcs *node)
+{
+
+ ck_pr_store_uint(&node->childnotready[0], node->havechild[0]);
+ ck_pr_store_uint(&node->childnotready[1], node->havechild[1]);
+ ck_pr_store_uint(&node->childnotready[2], node->havechild[2]);
+ ck_pr_store_uint(&node->childnotready[3], node->havechild[3]);
+ return;
+}
+
+void
+ck_barrier_mcs(struct ck_barrier_mcs *barrier,
+ struct ck_barrier_mcs_state *state)
+{
+
+ /*
+ * Wait until all children have reached the barrier and are done waiting
+ * for their children.
+ */
+ while (ck_barrier_mcs_check_children(barrier[state->vpid].childnotready) == false)
+ ck_pr_stall();
+
+ /* Reinitialize for next barrier. */
+ ck_barrier_mcs_reinitialize_children(&barrier[state->vpid]);
+
+ /* Inform the parent that this thread and its children have arrived at the barrier. */
+ ck_pr_store_uint(barrier[state->vpid].parent, 0);
+
+ /* Wait until parent indicates all threads have arrived at the barrier. */
+ if (state->vpid != 0) {
+ while (ck_pr_load_uint(&barrier[state->vpid].parentsense) != state->sense)
+ ck_pr_stall();
+ }
+
+ /* Inform children of successful barrier. */
+ ck_pr_store_uint(barrier[state->vpid].children[0], state->sense);
+ ck_pr_store_uint(barrier[state->vpid].children[1], state->sense);
+ state->sense = ~state->sense;
+ ck_pr_fence_memory();
+ return;
+}
diff --git a/src/ck_barrier_tournament.c b/src/ck_barrier_tournament.c
new file mode 100644
index 0000000..e232dbc
--- /dev/null
+++ b/src/ck_barrier_tournament.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_barrier.h>
+#include <ck_pr.h>
+
+#include "ck_internal.h"
+
+/*
+ * This is a tournament barrier implementation. Threads are statically
+ * assigned roles to perform for each round of the barrier. Winners
+ * move on to the next round, while losers spin in their current rounds
+ * on their own flags. During the last round, the champion of the tournament
+ * sets the last flag that begins the wakeup process.
+ */
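+
+/*
+ * Illustrative usage sketch (rounds and state are placeholder names): each
+ * thread owns one row of rounds sized by ck_barrier_tournament_size().
+ *
+ *   size = ck_barrier_tournament_size(nthr);
+ *   for (i = 0; i < nthr; i++)
+ *   	rounds[i] = malloc(sizeof(struct ck_barrier_tournament_round) * size);
+ *   ck_barrier_tournament_init(&barrier, rounds, nthr);
+ *
+ *   In each participating thread:
+ *   ck_barrier_tournament_subscribe(&barrier, &state);
+ *   ck_barrier_tournament(&barrier, &state);
+ */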
+
+enum {
+ CK_BARRIER_TOURNAMENT_BYE,
+ CK_BARRIER_TOURNAMENT_CHAMPION,
+ CK_BARRIER_TOURNAMENT_DROPOUT,
+ CK_BARRIER_TOURNAMENT_LOSER,
+ CK_BARRIER_TOURNAMENT_WINNER
+};
+
+void
+ck_barrier_tournament_subscribe(struct ck_barrier_tournament *barrier,
+ struct ck_barrier_tournament_state *state)
+{
+
+ state->sense = ~0;
+ state->vpid = ck_pr_faa_uint(&barrier->tid, 1);
+ return;
+}
+
+void
+ck_barrier_tournament_init(struct ck_barrier_tournament *barrier,
+ struct ck_barrier_tournament_round **rounds,
+ unsigned int nthr)
+{
+ unsigned int i, k, size, twok, twokm1, imod2k;
+
+ ck_pr_store_uint(&barrier->tid, 0);
+ barrier->size = size = ck_barrier_tournament_size(nthr);
+
+ for (i = 0; i < nthr; ++i) {
+ /* The first role is always CK_BARRIER_TOURNAMENT_DROPOUT. */
+ rounds[i][0].flag = 0;
+ rounds[i][0].role = CK_BARRIER_TOURNAMENT_DROPOUT;
+ for (k = 1, twok = 2, twokm1 = 1; k < size; ++k, twokm1 = twok, twok <<= 1) {
+ rounds[i][k].flag = 0;
+
+ imod2k = i & (twok - 1);
+ if (imod2k == 0) {
+ if ((i + twokm1 < nthr) && (twok < nthr))
+ rounds[i][k].role = CK_BARRIER_TOURNAMENT_WINNER;
+ else if (i + twokm1 >= nthr)
+ rounds[i][k].role = CK_BARRIER_TOURNAMENT_BYE;
+ }
+
+ if (imod2k == twokm1)
+ rounds[i][k].role = CK_BARRIER_TOURNAMENT_LOSER;
+ else if ((i == 0) && (twok >= nthr))
+ rounds[i][k].role = CK_BARRIER_TOURNAMENT_CHAMPION;
+
+ if (rounds[i][k].role == CK_BARRIER_TOURNAMENT_LOSER)
+ rounds[i][k].opponent = &rounds[i - twokm1][k].flag;
+ else if (rounds[i][k].role == CK_BARRIER_TOURNAMENT_WINNER ||
+ rounds[i][k].role == CK_BARRIER_TOURNAMENT_CHAMPION)
+ rounds[i][k].opponent = &rounds[i + twokm1][k].flag;
+ }
+ }
+
+ ck_pr_store_ptr(&barrier->rounds, rounds);
+ return;
+}
+
+unsigned int
+ck_barrier_tournament_size(unsigned int nthr)
+{
+
+ return (ck_internal_log(ck_internal_power_2(nthr)) + 1);
+}
+
+void
+ck_barrier_tournament(struct ck_barrier_tournament *barrier,
+ struct ck_barrier_tournament_state *state)
+{
+ struct ck_barrier_tournament_round **rounds = ck_pr_load_ptr(&barrier->rounds);
+ int round = 1;
+
+ if (barrier->size == 1)
+ return;
+
+ for (;; ++round) {
+ switch (rounds[state->vpid][round].role) {
+ case CK_BARRIER_TOURNAMENT_BYE:
+ break;
+ case CK_BARRIER_TOURNAMENT_CHAMPION:
+ /*
+ * The CK_BARRIER_TOURNAMENT_CHAMPION waits until it wins the tournament; it then
+ * sets the final flag before the wakeup phase of the barrier.
+ */
+ while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
+ ck_pr_stall();
+
+ ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
+ goto wakeup;
+ case CK_BARRIER_TOURNAMENT_DROPOUT:
+ /* NOTREACHED */
+ break;
+ case CK_BARRIER_TOURNAMENT_LOSER:
+ /*
+ * CK_BARRIER_TOURNAMENT_LOSERs set the flags of their opponents and wait until
+ * their opponents release them after the tournament is over.
+ */
+ ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
+ while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
+ ck_pr_stall();
+
+ goto wakeup;
+ case CK_BARRIER_TOURNAMENT_WINNER:
+ /*
+ * CK_BARRIER_TOURNAMENT_WINNERs wait until their current opponent sets their flag; they then
+ * continue to the next round of the tournament.
+ */
+ while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
+ ck_pr_stall();
+ break;
+ }
+ }
+
+wakeup:
+ for (round -= 1 ;; --round) {
+ switch (rounds[state->vpid][round].role) {
+ case CK_BARRIER_TOURNAMENT_BYE:
+ break;
+ case CK_BARRIER_TOURNAMENT_CHAMPION:
+ /* NOTREACHED */
+ break;
+ case CK_BARRIER_TOURNAMENT_DROPOUT:
+ goto leave;
+ break;
+ case CK_BARRIER_TOURNAMENT_LOSER:
+ /* NOTREACHED */
+ break;
+ case CK_BARRIER_TOURNAMENT_WINNER:
+ /*
+ * Winners inform their old opponents the tournament is over
+ * by setting their flags.
+ */
+ ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
+ break;
+ }
+ }
+
+leave:
+ ck_pr_fence_memory();
+ state->sense = ~state->sense;
+ return;
+}
diff --git a/src/ck_epoch.c b/src/ck_epoch.c
new file mode 100644
index 0000000..a0e9180
--- /dev/null
+++ b/src/ck_epoch.c
@@ -0,0 +1,545 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * The implementation here is inspired from the work described in:
+ * Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
+ * of Cambridge Computing Laboratory.
+ */
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_epoch.h>
+#include <ck_pr.h>
+#include <ck_stack.h>
+#include <ck_stdbool.h>
+#include <ck_string.h>
+
+/*
+ * Only three distinct values are used for reclamation, but reclamation occurs
+ * at e+2 rather than e+1. Any thread in a "critical section" would have
+ * acquired some snapshot (e) of the global epoch value (e_g) and set an active
+ * flag. Any hazardous references will only occur after a full memory barrier.
+ * For example, assume an initial e_g value of 1, e value of 0 and active value
+ * of 0.
+ *
+ * ck_epoch_begin(...)
+ * e = e_g
+ * active = 1
+ * memory_barrier();
+ *
+ * Any serialized reads may observe e = 0 or e = 1 with active = 0, or e = 0 or
+ * e = 1 with active = 1. The e_g value can only go from 1 to 2 if every thread
+ * has already observed the value of "1" (or the value we are incrementing
+ * from). This guarantees us that for any given value e_g, any threads within
+ * critical sections (referred to as "active" threads from here on) would have
+ * an e value of e_g-1 or e_g. This also means that hazardous references may be
+ * shared in both e_g-1 and e_g even if they are logically deleted in e_g.
+ *
+ * For example, assume all threads have an e value of e_g. Another thread may
+ * increment e_g to e_g+1. Older threads may have a reference to an object
+ * which is only deleted in e_g+1. It could be that reader threads are
+ * executing some hash table look-ups, while some other writer thread (which
+ * causes epoch counter tick) actually deletes the same items that reader
+ * threads are looking up (this writer thread having an e value of e_g+1).
+ * This is possible if the writer thread re-observes the epoch after the
+ * counter tick.
+ *
+ * Pseudo-code for writer:
+ * ck_epoch_begin()
+ * ht_delete(x)
+ * ck_epoch_end()
+ * ck_epoch_begin()
+ * ht_delete(x)
+ * ck_epoch_end()
+ *
+ * Pseudo-code for reader:
+ * for (;;) {
+ * x = ht_lookup(x)
+ * ck_pr_inc(&x->value);
+ * }
+ *
+ * Of course, it is also possible for references logically deleted at e_g-1 to
+ * still be accessed at e_g as threads are "active" at the same time
+ * (real-world time) mutating shared objects.
+ *
+ * Now, if the epoch counter is ticked to e_g+1, then no new hazardous
+ * references could exist to objects logically deleted at e_g-1. The reason for
+ * this is that at e_g+1, all epoch read-side critical sections started at
+ * e_g-1 must have been completed. If any epoch read-side critical sections at
+ * e_g-1 were still active, then we would never increment to e_g+1 (active != 0
+ * ^ e != e_g). Additionally, e_g may still have hazardous references to
+ * objects logically deleted at e_g-1 which means objects logically deleted at
+ * e_g-1 cannot be deleted at e_g+1 unless all threads have observed e_g+1
+ * (since it is valid for active threads to be at e_g and threads at e_g still
+ * require safe memory accesses).
+ *
+ * However, at e_g+2, all active threads must be either at e_g+1 or e_g+2.
+ * Though e_g+2 may share hazardous references with e_g+1, and e_g+1 shares
+ * hazardous references to e_g, no active threads are at e_g or e_g-1. This
+ * means no hazardous references could exist to objects deleted at e_g-1 (at
+ * e_g+2).
+ *
+ * To summarize these important points,
+ * 1) Active threads will always have a value of e_g or e_g-1.
+ * 2) Items that are logically deleted e_g or e_g-1 cannot be physically
+ * deleted.
+ * 3) Objects logically deleted at e_g-1 can be physically destroyed at e_g+2
+ * or at e_g+1 if no threads are at e_g.
+ *
+ * Last but not least, if we are at e_g+2, then no active thread is at e_g
+ * which means it is safe to apply modulo-3 arithmetic to the e_g value in order
+ * to re-use e_g to represent the e_g+3 state. This means it is sufficient to
+ * represent e_g using only the values 0, 1 or 2. Every time a thread re-visits
+ * an e_g (which can be determined with a non-empty deferral list) it can assume
+ * objects in the e_g deferral list involved at least three e_g transitions and
+ * are thus safe for physical deletion.
+ *
+ * Blocking semantics for epoch reclamation have additional restrictions.
+ * Though we only require three deferral lists, reasonable blocking semantics
+ * must be able to more gracefully handle bursty write work-loads which could
+ * easily cause e_g wrap-around if modulo-3 arithmetic is used. This allows for
+ * easy-to-trigger live-lock situations. The work-around to this is to not
+ * apply modulo arithmetic to e_g but only to deferral list indexing.
+ */
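+
+/*
+ * Illustrative sketch of the protocol described above (record is assumed to
+ * be registered against the global epoch object, object is a placeholder
+ * type embedding a ck_epoch_entry_t):
+ *
+ *   Reader:
+ *     ck_epoch_begin(record, NULL);
+ *     object = lookup(key);		(protected access)
+ *     ck_epoch_end(record, NULL);
+ *
+ *   Writer:
+ *     logically_delete(object);
+ *     ck_epoch_call(record, &object->epoch_entry, destructor);
+ *     ck_epoch_poll(record);		(or ck_epoch_barrier(record) to block)
+ */
+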
+#define CK_EPOCH_GRACE 3U
+
+enum {
+ CK_EPOCH_STATE_USED = 0,
+ CK_EPOCH_STATE_FREE = 1
+};
+
+CK_STACK_CONTAINER(struct ck_epoch_record, record_next,
+ ck_epoch_record_container)
+CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
+ ck_epoch_entry_container)
+
+#define CK_EPOCH_SENSE_MASK (CK_EPOCH_SENSE - 1)
+
+void
+_ck_epoch_delref(struct ck_epoch_record *record,
+ struct ck_epoch_section *section)
+{
+ struct ck_epoch_ref *current, *other;
+ unsigned int i = section->bucket;
+
+ current = &record->local.bucket[i];
+ current->count--;
+
+ if (current->count > 0)
+ return;
+
+ /*
+ * If the current bucket no longer has any references, then
+ * determine whether we have already transitioned into a newer
+ * epoch. If so, then make sure to update our shared snapshot
+ * to allow for forward progress.
+ *
+ * If no other active bucket exists, then the record will go
+ * inactive in order to allow for forward progress.
+ */
+ other = &record->local.bucket[(i + 1) &
+ CK_EPOCH_SENSE_MASK];
+ if (other->count > 0 &&
+ ((int)(current->epoch - other->epoch) < 0)) {
+ /*
+ * The other epoch value is actually the newest,
+ * transition to it.
+ */
+ ck_pr_store_uint(&record->epoch, other->epoch);
+ }
+
+ return;
+}
+
+void
+_ck_epoch_addref(struct ck_epoch_record *record,
+ struct ck_epoch_section *section)
+{
+ struct ck_epoch *global = record->global;
+ struct ck_epoch_ref *ref;
+ unsigned int epoch, i;
+
+ epoch = ck_pr_load_uint(&global->epoch);
+ i = epoch & CK_EPOCH_SENSE_MASK;
+ ref = &record->local.bucket[i];
+
+ if (ref->count++ == 0) {
+#ifndef CK_MD_TSO
+ struct ck_epoch_ref *previous;
+
+ /*
+ * The system has already ticked. If another non-zero bucket
+ * exists, make sure to order our observations with respect
+ * to it. Otherwise, it is possible to acquire a reference
+ * from the previous epoch generation.
+ *
+ * On TSO architectures, the monotonicity of the global counter
+ * and load-{store, load} ordering are sufficient to guarantee
+ * this ordering.
+ */
+ previous = &record->local.bucket[(i + 1) &
+ CK_EPOCH_SENSE_MASK];
+ if (previous->count > 0)
+ ck_pr_fence_acqrel();
+#endif /* !CK_MD_TSO */
+
+ /*
+ * If this is a new reference into the current
+ * bucket then cache the associated epoch value.
+ */
+ ref->epoch = epoch;
+ }
+
+ section->bucket = i;
+ return;
+}
+
+void
+ck_epoch_init(struct ck_epoch *global)
+{
+
+ ck_stack_init(&global->records);
+ global->epoch = 1;
+ global->n_free = 0;
+ ck_pr_fence_store();
+ return;
+}
+
+struct ck_epoch_record *
+ck_epoch_recycle(struct ck_epoch *global)
+{
+ struct ck_epoch_record *record;
+ ck_stack_entry_t *cursor;
+ unsigned int state;
+
+ if (ck_pr_load_uint(&global->n_free) == 0)
+ return NULL;
+
+ CK_STACK_FOREACH(&global->records, cursor) {
+ record = ck_epoch_record_container(cursor);
+
+ if (ck_pr_load_uint(&record->state) == CK_EPOCH_STATE_FREE) {
+ /* Serialize with respect to deferral list clean-up. */
+ ck_pr_fence_load();
+ state = ck_pr_fas_uint(&record->state,
+ CK_EPOCH_STATE_USED);
+ if (state == CK_EPOCH_STATE_FREE) {
+ ck_pr_dec_uint(&global->n_free);
+ return record;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+void
+ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record)
+{
+ size_t i;
+
+ record->global = global;
+ record->state = CK_EPOCH_STATE_USED;
+ record->active = 0;
+ record->epoch = 0;
+ record->n_dispatch = 0;
+ record->n_peak = 0;
+ record->n_pending = 0;
+ memset(&record->local, 0, sizeof record->local);
+
+ for (i = 0; i < CK_EPOCH_LENGTH; i++)
+ ck_stack_init(&record->pending[i]);
+
+ ck_pr_fence_store();
+ ck_stack_push_upmc(&global->records, &record->record_next);
+ return;
+}
+
+void
+ck_epoch_unregister(struct ck_epoch_record *record)
+{
+ struct ck_epoch *global = record->global;
+ size_t i;
+
+ record->active = 0;
+ record->epoch = 0;
+ record->n_dispatch = 0;
+ record->n_peak = 0;
+ record->n_pending = 0;
+ memset(&record->local, 0, sizeof record->local);
+
+ for (i = 0; i < CK_EPOCH_LENGTH; i++)
+ ck_stack_init(&record->pending[i]);
+
+ ck_pr_fence_store();
+ ck_pr_store_uint(&record->state, CK_EPOCH_STATE_FREE);
+ ck_pr_inc_uint(&global->n_free);
+ return;
+}
+
+static struct ck_epoch_record *
+ck_epoch_scan(struct ck_epoch *global,
+ struct ck_epoch_record *cr,
+ unsigned int epoch,
+ bool *af)
+{
+ ck_stack_entry_t *cursor;
+
+ if (cr == NULL) {
+ cursor = CK_STACK_FIRST(&global->records);
+ *af = false;
+ } else {
+ cursor = &cr->record_next;
+ *af = true;
+ }
+
+ while (cursor != NULL) {
+ unsigned int state, active;
+
+ cr = ck_epoch_record_container(cursor);
+
+ state = ck_pr_load_uint(&cr->state);
+ if (state & CK_EPOCH_STATE_FREE) {
+ cursor = CK_STACK_NEXT(cursor);
+ continue;
+ }
+
+ active = ck_pr_load_uint(&cr->active);
+ *af |= active;
+
+ if (active != 0 && ck_pr_load_uint(&cr->epoch) != epoch)
+ return cr;
+
+ cursor = CK_STACK_NEXT(cursor);
+ }
+
+ return NULL;
+}
+
+static void
+ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e)
+{
+ unsigned int epoch = e & (CK_EPOCH_LENGTH - 1);
+ ck_stack_entry_t *head, *next, *cursor;
+ unsigned int i = 0;
+
+ head = CK_STACK_FIRST(&record->pending[epoch]);
+ ck_stack_init(&record->pending[epoch]);
+
+ for (cursor = head; cursor != NULL; cursor = next) {
+ struct ck_epoch_entry *entry =
+ ck_epoch_entry_container(cursor);
+
+ next = CK_STACK_NEXT(cursor);
+ entry->function(entry);
+ i++;
+ }
+
+ if (record->n_pending > record->n_peak)
+ record->n_peak = record->n_pending;
+
+ record->n_dispatch += i;
+ record->n_pending -= i;
+ return;
+}
+
+/*
+ * Reclaim all objects associated with a record.
+ */
+void
+ck_epoch_reclaim(struct ck_epoch_record *record)
+{
+ unsigned int epoch;
+
+ for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
+ ck_epoch_dispatch(record, epoch);
+
+ return;
+}
+
+/*
+ * This function must not be called within a read section.
+ */
+void
+ck_epoch_synchronize(struct ck_epoch_record *record)
+{
+ struct ck_epoch *global = record->global;
+ struct ck_epoch_record *cr;
+ unsigned int delta, epoch, goal, i;
+ bool active;
+
+ ck_pr_fence_memory();
+
+ /*
+ * The observation of the global epoch must be ordered with respect to
+ * all prior operations. The re-ordering of loads is permitted given
+ * monotonicity of the global epoch counter.
+ *
+ * If UINT_MAX concurrent mutations were to occur then it is possible
+ * to encounter an ABA-issue. If this is a concern, consider tuning
+ * write-side concurrency.
+ */
+ delta = epoch = ck_pr_load_uint(&global->epoch);
+ goal = epoch + CK_EPOCH_GRACE;
+
+ for (i = 0, cr = NULL; i < CK_EPOCH_GRACE - 1; cr = NULL, i++) {
+ bool r;
+
+ /*
+ * Determine whether all threads have observed the current
+ * epoch with respect to the updates on invocation.
+ */
+ while (cr = ck_epoch_scan(global, cr, delta, &active),
+ cr != NULL) {
+ unsigned int e_d;
+
+ ck_pr_stall();
+
+ /*
+ * Another writer may have already observed a grace
+ * period.
+ */
+ e_d = ck_pr_load_uint(&global->epoch);
+ if (e_d != delta) {
+ delta = e_d;
+ goto reload;
+ }
+ }
+
+ /*
+ * If we have observed all threads as inactive, then we assume
+ * we are at a grace period.
+ */
+ if (active == false)
+ break;
+
+ /*
+ * Increment current epoch. CAS semantics are used to eliminate
+ * increment operations for synchronization that occurs for the
+ * same global epoch value snapshot.
+ *
+ * If we can guarantee there will only be one active barrier or
+ * epoch tick at a given time, then it is sufficient to use an
+ * increment operation. In a multi-barrier workload, however,
+ * it is possible to overflow the epoch value if we apply
+ * modulo-3 arithmetic.
+ */
+ r = ck_pr_cas_uint_value(&global->epoch, delta, delta + 1,
+ &delta);
+
+ /* Order subsequent thread active checks. */
+ ck_pr_fence_atomic_load();
+
+ /*
+ * If CAS has succeeded, then set delta to latest snapshot.
+ * Otherwise, we have just acquired latest snapshot.
+ */
+ delta = delta + r;
+ continue;
+
+reload:
+ if ((goal > epoch) & (delta >= goal)) {
+ /*
+ * Right now, epoch overflow is handled as an edge
+ * case. If we have already observed an epoch
+ * generation, then we can be sure no hazardous
+ * references exist to objects from this generation. We
+ * can actually avoid an additional scan step at this
+ * point.
+ */
+ break;
+ }
+ }
+
+ /*
+ * A majority of use-cases will not require full barrier semantics.
+ * However, if non-temporal instructions are used, full barrier
+ * semantics are necessary.
+ */
+ ck_pr_fence_memory();
+ record->epoch = delta;
+ return;
+}
+
+void
+ck_epoch_barrier(struct ck_epoch_record *record)
+{
+
+ ck_epoch_synchronize(record);
+ ck_epoch_reclaim(record);
+ return;
+}
+
+/*
+ * It may be worth it to actually apply these deferral semantics to an epoch
+ * that was observed at ck_epoch_call time. The problem is that the latter
+ * would require a full fence.
+ *
+ * ck_epoch_call will dispatch to the latest epoch snapshot that was observed.
+ * There are cases where it will fail to reclaim as early as it could. If this
+ * becomes a problem, we could actually use a heap for epoch buckets but that
+ * is far from ideal too.
+ */
+bool
+ck_epoch_poll(struct ck_epoch_record *record)
+{
+ bool active;
+ unsigned int epoch;
+ unsigned int snapshot;
+ struct ck_epoch_record *cr = NULL;
+ struct ck_epoch *global = record->global;
+
+ epoch = ck_pr_load_uint(&global->epoch);
+
+ /* Serialize epoch snapshots with respect to global epoch. */
+ ck_pr_fence_memory();
+ cr = ck_epoch_scan(global, cr, epoch, &active);
+ if (cr != NULL) {
+ record->epoch = epoch;
+ return false;
+ }
+
+ /* We are at a grace period if all threads are inactive. */
+ if (active == false) {
+ record->epoch = epoch;
+ for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
+ ck_epoch_dispatch(record, epoch);
+
+ return true;
+ }
+
+ /* If an active thread exists, rely on epoch observation. */
+ if (ck_pr_cas_uint_value(&global->epoch, epoch, epoch + 1,
+ &snapshot) == false) {
+ record->epoch = snapshot;
+ } else {
+ record->epoch = epoch + 1;
+ }
+
+ ck_epoch_dispatch(record, epoch + 1);
+ return true;
+}
diff --git a/src/ck_hp.c b/src/ck_hp.c
new file mode 100644
index 0000000..32df92e
--- /dev/null
+++ b/src/ck_hp.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * (c) Copyright 2008, IBM Corporation.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This is an implementation of hazard pointers as detailed in:
+ * http://www.research.ibm.com/people/m/michael/ieeetpds-2004.pdf
+ *
+ * This API provides a publishing mechanism that defers destruction of
+ * objects protected by hazard pointers until it is safe to do so.
+ * Preventing arbitrary re-use protects against the ABA problem and
+ * provides safe memory reclamation.
+ * The implementation was derived from the Hazard Pointers implementation
+ * from the Amino CBBS project. It has been heavily modified for Concurrency
+ * Kit.
+ */
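+
+/*
+ * Illustrative lifecycle sketch (pointers, record and node are placeholder
+ * names; pointers must provide one slot per hazard pointer, i.e. the degree
+ * passed to ck_hp_init):
+ *
+ *   ck_hp_init(&hp, degree, threshold, destructor);
+ *   ck_hp_register(&hp, &record, pointers);
+ *
+ *   Once a node has been unlinked from the data structure:
+ *   ck_hp_free(&record, &node->hazard, node, node);
+ *
+ *   ck_hp_free() reclaims automatically once the pending count reaches the
+ *   threshold; ck_hp_purge() drains the pending list, e.g. before thread exit.
+ */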
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_hp.h>
+#include <ck_pr.h>
+#include <ck_stack.h>
+#include <ck_stdbool.h>
+#include <ck_stddef.h>
+#include <ck_stdlib.h>
+#include <ck_string.h>
+
+CK_STACK_CONTAINER(struct ck_hp_record, global_entry, ck_hp_record_container)
+CK_STACK_CONTAINER(struct ck_hp_hazard, pending_entry, ck_hp_hazard_container)
+
+void
+ck_hp_init(struct ck_hp *state,
+ unsigned int degree,
+ unsigned int threshold,
+ ck_hp_destructor_t destroy)
+{
+
+ state->threshold = threshold;
+ state->degree = degree;
+ state->destroy = destroy;
+ state->n_subscribers = 0;
+ state->n_free = 0;
+ ck_stack_init(&state->subscribers);
+ ck_pr_fence_store();
+
+ return;
+}
+
+void
+ck_hp_set_threshold(struct ck_hp *state, unsigned int threshold)
+{
+
+ ck_pr_store_uint(&state->threshold, threshold);
+ return;
+}
+
+struct ck_hp_record *
+ck_hp_recycle(struct ck_hp *global)
+{
+ struct ck_hp_record *record;
+ ck_stack_entry_t *entry;
+ int state;
+
+ if (ck_pr_load_uint(&global->n_free) == 0)
+ return NULL;
+
+ CK_STACK_FOREACH(&global->subscribers, entry) {
+ record = ck_hp_record_container(entry);
+
+ if (ck_pr_load_int(&record->state) == CK_HP_FREE) {
+ ck_pr_fence_load();
+ state = ck_pr_fas_int(&record->state, CK_HP_USED);
+ if (state == CK_HP_FREE) {
+ ck_pr_dec_uint(&global->n_free);
+ return record;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+void
+ck_hp_unregister(struct ck_hp_record *entry)
+{
+
+ entry->n_pending = 0;
+ entry->n_peak = 0;
+ entry->n_reclamations = 0;
+ ck_stack_init(&entry->pending);
+ ck_pr_fence_store();
+ ck_pr_store_int(&entry->state, CK_HP_FREE);
+ ck_pr_inc_uint(&entry->global->n_free);
+ return;
+}
+
+void
+ck_hp_register(struct ck_hp *state,
+ struct ck_hp_record *entry,
+ void **pointers)
+{
+
+ entry->state = CK_HP_USED;
+ entry->global = state;
+ entry->pointers = pointers;
+ entry->n_pending = 0;
+ entry->n_peak = 0;
+ entry->n_reclamations = 0;
+ memset(pointers, 0, state->degree * sizeof(void *));
+ ck_stack_init(&entry->pending);
+ ck_pr_fence_store();
+ ck_stack_push_upmc(&state->subscribers, &entry->global_entry);
+ ck_pr_inc_uint(&state->n_subscribers);
+ return;
+}
+
+static int
+hazard_compare(const void *a, const void *b)
+{
+ void * const *x;
+ void * const *y;
+
+ x = a;
+ y = b;
+ return ((*x > *y) - (*x < *y));
+}
+
+CK_CC_INLINE static bool
+ck_hp_member_scan(ck_stack_entry_t *entry, unsigned int degree, void *pointer)
+{
+ struct ck_hp_record *record;
+ unsigned int i;
+ void *hazard;
+
+ do {
+ record = ck_hp_record_container(entry);
+ if (ck_pr_load_int(&record->state) == CK_HP_FREE)
+ continue;
+
+ if (ck_pr_load_ptr(&record->pointers) == NULL)
+ continue;
+
+ for (i = 0; i < degree; i++) {
+ hazard = ck_pr_load_ptr(&record->pointers[i]);
+ if (hazard == pointer)
+ return (true);
+ }
+ } while ((entry = CK_STACK_NEXT(entry)) != NULL);
+
+ return (false);
+}
+
+CK_CC_INLINE static void *
+ck_hp_member_cache(struct ck_hp *global, void **cache, unsigned int *n_hazards)
+{
+ struct ck_hp_record *record;
+ ck_stack_entry_t *entry;
+ unsigned int hazards = 0;
+ unsigned int i;
+ void *pointer;
+
+ CK_STACK_FOREACH(&global->subscribers, entry) {
+ record = ck_hp_record_container(entry);
+ if (ck_pr_load_int(&record->state) == CK_HP_FREE)
+ continue;
+
+ if (ck_pr_load_ptr(&record->pointers) == NULL)
+ continue;
+
+ for (i = 0; i < global->degree; i++) {
+ if (hazards > CK_HP_CACHE)
+ break;
+
+ pointer = ck_pr_load_ptr(&record->pointers[i]);
+ if (pointer != NULL)
+ cache[hazards++] = pointer;
+ }
+ }
+
+ *n_hazards = hazards;
+ return (entry);
+}
+
+void
+ck_hp_reclaim(struct ck_hp_record *thread)
+{
+ struct ck_hp_hazard *hazard;
+ struct ck_hp *global = thread->global;
+ unsigned int n_hazards;
+ void **cache, *marker, *match;
+ ck_stack_entry_t *previous, *entry, *next;
+
+ /* Store as many entries as possible in local array. */
+ cache = thread->cache;
+ marker = ck_hp_member_cache(global, cache, &n_hazards);
+
+ /*
+ * In theory, there is an n such that (n * (log n) ** 2) < np: sorting
+ * the n cached hazards once is cheaper than linearly scanning them for
+ * each of the p pending entries, so the binary search below pays off.
+ */
+ qsort(cache, n_hazards, sizeof(void *), hazard_compare);
+
+ previous = NULL;
+ CK_STACK_FOREACH_SAFE(&thread->pending, entry, next) {
+ hazard = ck_hp_hazard_container(entry);
+ match = bsearch(&hazard->pointer, cache, n_hazards,
+ sizeof(void *), hazard_compare);
+ if (match != NULL) {
+ previous = entry;
+ continue;
+ }
+
+ if (marker != NULL &&
+ ck_hp_member_scan(marker, global->degree, hazard->pointer)) {
+ previous = entry;
+ continue;
+ }
+
+ thread->n_pending -= 1;
+
+ /* Remove from the pending stack. */
+ if (previous)
+ CK_STACK_NEXT(previous) = CK_STACK_NEXT(entry);
+ else
+ CK_STACK_FIRST(&thread->pending) = CK_STACK_NEXT(entry);
+
+ /* The entry is now safe to destroy. */
+ global->destroy(hazard->data);
+ thread->n_reclamations++;
+ }
+
+ return;
+}
+
+void
+ck_hp_retire(struct ck_hp_record *thread,
+ struct ck_hp_hazard *hazard,
+ void *data,
+ void *pointer)
+{
+
+ ck_pr_store_ptr(&hazard->pointer, pointer);
+ ck_pr_store_ptr(&hazard->data, data);
+ ck_stack_push_spnc(&thread->pending, &hazard->pending_entry);
+
+ thread->n_pending += 1;
+ if (thread->n_pending > thread->n_peak)
+ thread->n_peak = thread->n_pending;
+
+ return;
+}
+
+void
+ck_hp_free(struct ck_hp_record *thread,
+ struct ck_hp_hazard *hazard,
+ void *data,
+ void *pointer)
+{
+ struct ck_hp *global;
+
+ global = ck_pr_load_ptr(&thread->global);
+ ck_pr_store_ptr(&hazard->data, data);
+ ck_pr_store_ptr(&hazard->pointer, pointer);
+ ck_stack_push_spnc(&thread->pending, &hazard->pending_entry);
+
+ thread->n_pending += 1;
+ if (thread->n_pending > thread->n_peak)
+ thread->n_peak = thread->n_pending;
+
+ if (thread->n_pending >= global->threshold)
+ ck_hp_reclaim(thread);
+
+ return;
+}
+
+void
+ck_hp_purge(struct ck_hp_record *thread)
+{
+ ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
+
+ while (thread->n_pending > 0) {
+ ck_hp_reclaim(thread);
+ if (thread->n_pending > 0)
+ ck_backoff_eb(&backoff);
+ }
+
+ return;
+}
diff --git a/src/ck_hs.c b/src/ck_hs.c
new file mode 100644
index 0000000..31510ec
--- /dev/null
+++ b/src/ck_hs.c
@@ -0,0 +1,941 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_cc.h>
+#include <ck_hs.h>
+#include <ck_limits.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_stdint.h>
+#include <ck_stdbool.h>
+#include <ck_string.h>
+
+#include "ck_internal.h"
+
+#ifndef CK_HS_PROBE_L1_SHIFT
+#define CK_HS_PROBE_L1_SHIFT 3ULL
+#endif /* CK_HS_PROBE_L1_SHIFT */
+
+#define CK_HS_PROBE_L1 (1 << CK_HS_PROBE_L1_SHIFT)
+#define CK_HS_PROBE_L1_MASK (CK_HS_PROBE_L1 - 1)
+
+#ifndef CK_HS_PROBE_L1_DEFAULT
+#define CK_HS_PROBE_L1_DEFAULT CK_MD_CACHELINE
+#endif
+
+#define CK_HS_VMA_MASK ((uintptr_t)((1ULL << CK_MD_VMA_BITS) - 1))
+#define CK_HS_VMA(x) \
+ ((void *)((uintptr_t)(x) & CK_HS_VMA_MASK))
+
+#define CK_HS_EMPTY NULL
+#define CK_HS_TOMBSTONE ((void *)~(uintptr_t)0)
+#define CK_HS_G (2)
+#define CK_HS_G_MASK (CK_HS_G - 1)
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_STORE_8)
+#define CK_HS_WORD uint8_t
+#define CK_HS_WORD_MAX UINT8_MAX
+#define CK_HS_STORE(x, y) ck_pr_store_8(x, y)
+#define CK_HS_LOAD(x) ck_pr_load_8(x)
+#elif defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_STORE_16)
+#define CK_HS_WORD uint16_t
+#define CK_HS_WORD_MAX UINT16_MAX
+#define CK_HS_STORE(x, y) ck_pr_store_16(x, y)
+#define CK_HS_LOAD(x) ck_pr_load_16(x)
+#elif defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_STORE_32)
+#define CK_HS_WORD uint32_t
+#define CK_HS_WORD_MAX UINT32_MAX
+#define CK_HS_STORE(x, y) ck_pr_store_32(x, y)
+#define CK_HS_LOAD(x) ck_pr_load_32(x)
+#else
+#error "ck_hs is not supported on your platform."
+#endif
+
+enum ck_hs_probe_behavior {
+ CK_HS_PROBE = 0, /* Default behavior. */
+ CK_HS_PROBE_TOMBSTONE, /* Short-circuit on tombstone. */
+ CK_HS_PROBE_INSERT /* Short-circuit on probe bound if tombstone found. */
+};
+
+struct ck_hs_map {
+ unsigned int generation[CK_HS_G];
+ unsigned int probe_maximum;
+ unsigned long mask;
+ unsigned long step;
+ unsigned int probe_limit;
+ unsigned int tombstones;
+ unsigned long n_entries;
+ unsigned long capacity;
+ unsigned long size;
+ CK_HS_WORD *probe_bound;
+ const void **entries;
+};
+
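+/*
+ * Increment the generation counter associated with hash h.  Readers in
+ * ck_hs_get() sample this counter before and after probing; a change
+ * indicates that an entry may have been moved out from under their probe
+ * sequence, so the lookup is retried.
+ */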
+static inline void
+ck_hs_map_signal(struct ck_hs_map *map, unsigned long h)
+{
+
+ h &= CK_HS_G_MASK;
+ ck_pr_store_uint(&map->generation[h],
+ map->generation[h] + 1);
+ ck_pr_fence_store();
+ return;
+}
+
+void
+ck_hs_iterator_init(struct ck_hs_iterator *iterator)
+{
+
+ iterator->cursor = NULL;
+ iterator->offset = 0;
+ return;
+}
+
+bool
+ck_hs_next(struct ck_hs *hs, struct ck_hs_iterator *i, void **key)
+{
+ struct ck_hs_map *map = hs->map;
+ void *value;
+
+ if (i->offset >= map->capacity)
+ return false;
+
+ do {
+ value = CK_CC_DECONST_PTR(map->entries[i->offset]);
+ if (value != CK_HS_EMPTY && value != CK_HS_TOMBSTONE) {
+#ifdef CK_HS_PP
+ if (hs->mode & CK_HS_MODE_OBJECT)
+ value = CK_HS_VMA(value);
+#endif
+ i->offset++;
+ *key = value;
+ return true;
+ }
+ } while (++i->offset < map->capacity);
+
+ return false;
+}
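+
+/*
+ * Illustrative sketch (added for exposition; the helper name is hypothetical
+ * and not part of the upstream API): counting the keys visible through the
+ * iterator interface above.  The iterator is not intended to be used
+ * concurrently with operations that may resize or garbage-collect the map.
+ */
+CK_CC_UNUSED static unsigned long
+ck_hs_example_count(struct ck_hs *hs)
+{
+	struct ck_hs_iterator it;
+	void *key;
+	unsigned long n = 0;
+
+	ck_hs_iterator_init(&it);
+	while (ck_hs_next(hs, &it, &key) == true) {
+		if (key != NULL)
+			n++;
+	}
+
+	return n;
+}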
+
+void
+ck_hs_stat(struct ck_hs *hs, struct ck_hs_stat *st)
+{
+ struct ck_hs_map *map = hs->map;
+
+ st->n_entries = map->n_entries;
+ st->tombstones = map->tombstones;
+ st->probe_maximum = map->probe_maximum;
+ return;
+}
+
+unsigned long
+ck_hs_count(struct ck_hs *hs)
+{
+
+ return hs->map->n_entries;
+}
+
+static void
+ck_hs_map_destroy(struct ck_malloc *m, struct ck_hs_map *map, bool defer)
+{
+
+ m->free(map, map->size, defer);
+ return;
+}
+
+void
+ck_hs_destroy(struct ck_hs *hs)
+{
+
+ ck_hs_map_destroy(hs->m, hs->map, false);
+ return;
+}
+
+static struct ck_hs_map *
+ck_hs_map_create(struct ck_hs *hs, unsigned long entries)
+{
+ struct ck_hs_map *map;
+ unsigned long size, n_entries, prefix, limit;
+
+ n_entries = ck_internal_power_2(entries);
+ if (n_entries < CK_HS_PROBE_L1)
+ n_entries = CK_HS_PROBE_L1;
+
+ size = sizeof(struct ck_hs_map) + (sizeof(void *) * n_entries + CK_MD_CACHELINE - 1);
+
+ if (hs->mode & CK_HS_MODE_DELETE) {
+ prefix = sizeof(CK_HS_WORD) * n_entries;
+ size += prefix;
+ } else {
+ prefix = 0;
+ }
+
+ map = hs->m->malloc(size);
+ if (map == NULL)
+ return NULL;
+
+ map->size = size;
+
+ /* We should probably use a more intelligent heuristic for default probe length. */
+ limit = ck_internal_max(n_entries >> (CK_HS_PROBE_L1_SHIFT + 2), CK_HS_PROBE_L1_DEFAULT);
+ if (limit > UINT_MAX)
+ limit = UINT_MAX;
+
+ map->probe_limit = (unsigned int)limit;
+ map->probe_maximum = 0;
+ map->capacity = n_entries;
+ map->step = ck_internal_bsf(n_entries);
+ map->mask = n_entries - 1;
+ map->n_entries = 0;
+
+	/* Align the entries array to a cache line boundary. */
+ map->entries = (void *)(((uintptr_t)&map[1] + prefix +
+ CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));
+
+ memset(map->entries, 0, sizeof(void *) * n_entries);
+ memset(map->generation, 0, sizeof map->generation);
+
+ if (hs->mode & CK_HS_MODE_DELETE) {
+ map->probe_bound = (CK_HS_WORD *)&map[1];
+ memset(map->probe_bound, 0, prefix);
+ } else {
+ map->probe_bound = NULL;
+ }
+
+	/* Ensure the zeroed entries are committed before the map is published. */
+ ck_pr_fence_store();
+ return map;
+}
+
+bool
+ck_hs_reset_size(struct ck_hs *hs, unsigned long capacity)
+{
+ struct ck_hs_map *map, *previous;
+
+ previous = hs->map;
+ map = ck_hs_map_create(hs, capacity);
+ if (map == NULL)
+ return false;
+
+ ck_pr_store_ptr(&hs->map, map);
+ ck_hs_map_destroy(hs->m, previous, true);
+ return true;
+}
+
+bool
+ck_hs_reset(struct ck_hs *hs)
+{
+ struct ck_hs_map *previous;
+
+ previous = hs->map;
+ return ck_hs_reset_size(hs, previous->capacity);
+}
+
+static inline unsigned long
+ck_hs_map_probe_next(struct ck_hs_map *map,
+ unsigned long offset,
+ unsigned long h,
+ unsigned long level,
+ unsigned long probes)
+{
+ unsigned long r, stride;
+
+ r = (h >> map->step) >> level;
+ stride = (r & ~CK_HS_PROBE_L1_MASK) << 1 | (r & CK_HS_PROBE_L1_MASK);
+
+ return (offset + (probes >> CK_HS_PROBE_L1_SHIFT) +
+ (stride | CK_HS_PROBE_L1)) & map->mask;
+}
+
+static inline void
+ck_hs_map_bound_set(struct ck_hs_map *m,
+ unsigned long h,
+ unsigned long n_probes)
+{
+ unsigned long offset = h & m->mask;
+
+ if (n_probes > m->probe_maximum)
+ ck_pr_store_uint(&m->probe_maximum, n_probes);
+
+ if (m->probe_bound != NULL && m->probe_bound[offset] < n_probes) {
+ if (n_probes > CK_HS_WORD_MAX)
+ n_probes = CK_HS_WORD_MAX;
+
+ CK_HS_STORE(&m->probe_bound[offset], n_probes);
+ ck_pr_fence_store();
+ }
+
+ return;
+}
+
+static inline unsigned int
+ck_hs_map_bound_get(struct ck_hs_map *m, unsigned long h)
+{
+ unsigned long offset = h & m->mask;
+ unsigned int r = CK_HS_WORD_MAX;
+
+ if (m->probe_bound != NULL) {
+ r = CK_HS_LOAD(&m->probe_bound[offset]);
+ if (r == CK_HS_WORD_MAX)
+ r = ck_pr_load_uint(&m->probe_maximum);
+ } else {
+ r = ck_pr_load_uint(&m->probe_maximum);
+ }
+
+ return r;
+}
+
+bool
+ck_hs_grow(struct ck_hs *hs,
+ unsigned long capacity)
+{
+ struct ck_hs_map *map, *update;
+ unsigned long k, i, j, offset, probes;
+ const void *previous, **bucket;
+
+restart:
+ map = hs->map;
+ if (map->capacity > capacity)
+ return false;
+
+ update = ck_hs_map_create(hs, capacity);
+ if (update == NULL)
+ return false;
+
+ for (k = 0; k < map->capacity; k++) {
+ unsigned long h;
+
+ previous = map->entries[k];
+ if (previous == CK_HS_EMPTY || previous == CK_HS_TOMBSTONE)
+ continue;
+
+#ifdef CK_HS_PP
+ if (hs->mode & CK_HS_MODE_OBJECT)
+ previous = CK_HS_VMA(previous);
+#endif
+
+ h = hs->hf(previous, hs->seed);
+ offset = h & update->mask;
+ i = probes = 0;
+
+ for (;;) {
+ bucket = (const void **)((uintptr_t)&update->entries[offset] & ~(CK_MD_CACHELINE - 1));
+
+ for (j = 0; j < CK_HS_PROBE_L1; j++) {
+ const void **cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1));
+
+ if (probes++ == update->probe_limit)
+ break;
+
+ if (CK_CC_LIKELY(*cursor == CK_HS_EMPTY)) {
+ *cursor = map->entries[k];
+ update->n_entries++;
+
+ ck_hs_map_bound_set(update, h, probes);
+ break;
+ }
+ }
+
+ if (j < CK_HS_PROBE_L1)
+ break;
+
+ offset = ck_hs_map_probe_next(update, offset, h, i++, probes);
+ }
+
+ if (probes > update->probe_limit) {
+ /*
+			 * We have hit the probe limit; the map needs to be even larger.
+ */
+ ck_hs_map_destroy(hs->m, update, false);
+ capacity <<= 1;
+ goto restart;
+ }
+ }
+
+ ck_pr_fence_store();
+ ck_pr_store_ptr(&hs->map, update);
+ ck_hs_map_destroy(hs->m, map, true);
+ return true;
+}
+
+static void
+ck_hs_map_postinsert(struct ck_hs *hs, struct ck_hs_map *map)
+{
+
+ map->n_entries++;
+ if ((map->n_entries << 1) > map->capacity)
+ ck_hs_grow(hs, map->capacity << 1);
+
+ return;
+}
+
+bool
+ck_hs_rebuild(struct ck_hs *hs)
+{
+
+ return ck_hs_grow(hs, hs->map->capacity);
+}
+
+static const void **
+ck_hs_map_probe(struct ck_hs *hs,
+ struct ck_hs_map *map,
+ unsigned long *n_probes,
+ const void ***priority,
+ unsigned long h,
+ const void *key,
+ const void **object,
+ unsigned long probe_limit,
+ enum ck_hs_probe_behavior behavior)
+{
+ const void **bucket, **cursor, *k, *compare;
+ const void **pr = NULL;
+ unsigned long offset, j, i, probes, opl;
+
+#ifdef CK_HS_PP
+ /* If we are storing object pointers, then we may leverage pointer packing. */
+ unsigned long hv = 0;
+
+ if (hs->mode & CK_HS_MODE_OBJECT) {
+ hv = (h >> 25) & CK_HS_KEY_MASK;
+ compare = CK_HS_VMA(key);
+ } else {
+ compare = key;
+ }
+#else
+ compare = key;
+#endif
+
+ offset = h & map->mask;
+ *object = NULL;
+ i = probes = 0;
+
+ opl = probe_limit;
+ if (behavior == CK_HS_PROBE_INSERT)
+ probe_limit = ck_hs_map_bound_get(map, h);
+
+ for (;;) {
+ bucket = (const void **)((uintptr_t)&map->entries[offset] & ~(CK_MD_CACHELINE - 1));
+
+ for (j = 0; j < CK_HS_PROBE_L1; j++) {
+ cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1));
+
+ if (probes++ == probe_limit) {
+ if (probe_limit == opl || pr != NULL) {
+ k = CK_HS_EMPTY;
+ goto leave;
+ }
+
+ /*
+ * If no eligible slot has been found yet, continue probe
+ * sequence with original probe limit.
+ */
+ probe_limit = opl;
+ }
+
+ k = ck_pr_load_ptr(cursor);
+ if (k == CK_HS_EMPTY)
+ goto leave;
+
+ if (k == CK_HS_TOMBSTONE) {
+ if (pr == NULL) {
+ pr = cursor;
+ *n_probes = probes;
+
+ if (behavior == CK_HS_PROBE_TOMBSTONE) {
+ k = CK_HS_EMPTY;
+ goto leave;
+ }
+ }
+
+ continue;
+ }
+
+#ifdef CK_HS_PP
+ if (hs->mode & CK_HS_MODE_OBJECT) {
+ if (((uintptr_t)k >> CK_MD_VMA_BITS) != hv)
+ continue;
+
+ k = CK_HS_VMA(k);
+ }
+#endif
+
+ if (k == compare)
+ goto leave;
+
+ if (hs->compare == NULL)
+ continue;
+
+ if (hs->compare(k, key) == true)
+ goto leave;
+ }
+
+ offset = ck_hs_map_probe_next(map, offset, h, i++, probes);
+ }
+
+leave:
+ if (probes > probe_limit) {
+ cursor = NULL;
+ } else {
+ *object = k;
+ }
+
+ if (pr == NULL)
+ *n_probes = probes;
+
+ *priority = pr;
+ return cursor;
+}
+
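+/*
+ * With CK_HS_PP (pointer packing), a stored word keeps the object pointer in
+ * its low CK_MD_VMA_BITS and memoizes high-order hash bits above them, so
+ * the probe loop can reject most mismatches without dereferencing the
+ * object.  ck_hs_marshal() below builds that packed representation.
+ */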
+static inline const void *
+ck_hs_marshal(unsigned int mode, const void *key, unsigned long h)
+{
+#ifdef CK_HS_PP
+ const void *insert;
+
+ if (mode & CK_HS_MODE_OBJECT) {
+ insert = (void *)((uintptr_t)CK_HS_VMA(key) |
+ ((h >> 25) << CK_MD_VMA_BITS));
+ } else {
+ insert = key;
+ }
+
+ return insert;
+#else
+ (void)mode;
+ (void)h;
+
+ return key;
+#endif
+}
+
+bool
+ck_hs_gc(struct ck_hs *hs, unsigned long cycles, unsigned long seed)
+{
+ unsigned long size = 0;
+ unsigned long i;
+ struct ck_hs_map *map = hs->map;
+ unsigned int maximum;
+ CK_HS_WORD *bounds = NULL;
+
+ if (map->n_entries == 0) {
+ ck_pr_store_uint(&map->probe_maximum, 0);
+ if (map->probe_bound != NULL)
+ memset(map->probe_bound, 0, sizeof(CK_HS_WORD) * map->capacity);
+
+ return true;
+ }
+
+ if (cycles == 0) {
+ maximum = 0;
+
+ if (map->probe_bound != NULL) {
+ size = sizeof(CK_HS_WORD) * map->capacity;
+ bounds = hs->m->malloc(size);
+ if (bounds == NULL)
+ return false;
+
+ memset(bounds, 0, size);
+ }
+ } else {
+ maximum = map->probe_maximum;
+ }
+
+ for (i = 0; i < map->capacity; i++) {
+ const void **first, *object, **slot, *entry;
+ unsigned long n_probes, offset, h;
+
+ entry = map->entries[(i + seed) & map->mask];
+ if (entry == CK_HS_EMPTY || entry == CK_HS_TOMBSTONE)
+ continue;
+
+#ifdef CK_HS_PP
+ if (hs->mode & CK_HS_MODE_OBJECT)
+ entry = CK_HS_VMA(entry);
+#endif
+
+ h = hs->hf(entry, hs->seed);
+ offset = h & map->mask;
+
+ slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, entry, &object,
+ ck_hs_map_bound_get(map, h), CK_HS_PROBE);
+
+ if (first != NULL) {
+ const void *insert = ck_hs_marshal(hs->mode, entry, h);
+
+ ck_pr_store_ptr(first, insert);
+ ck_hs_map_signal(map, h);
+ ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
+ }
+
+ if (cycles == 0) {
+ if (n_probes > maximum)
+ maximum = n_probes;
+
+ if (n_probes > CK_HS_WORD_MAX)
+ n_probes = CK_HS_WORD_MAX;
+
+ if (bounds != NULL && n_probes > bounds[offset])
+ bounds[offset] = n_probes;
+ } else if (--cycles == 0)
+ break;
+ }
+
+ /*
+	 * The following only applies to garbage collection involving
+ * a full scan of all entries.
+ */
+ if (maximum != map->probe_maximum)
+ ck_pr_store_uint(&map->probe_maximum, maximum);
+
+ if (bounds != NULL) {
+ for (i = 0; i < map->capacity; i++)
+ CK_HS_STORE(&map->probe_bound[i], bounds[i]);
+
+ hs->m->free(bounds, size, false);
+ }
+
+ return true;
+}
+
+bool
+ck_hs_fas(struct ck_hs *hs,
+ unsigned long h,
+ const void *key,
+ void **previous)
+{
+ const void **slot, **first, *object, *insert;
+ struct ck_hs_map *map = hs->map;
+ unsigned long n_probes;
+
+ *previous = NULL;
+ slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
+ ck_hs_map_bound_get(map, h), CK_HS_PROBE);
+
+ /* Replacement semantics presume existence. */
+ if (object == NULL)
+ return false;
+
+ insert = ck_hs_marshal(hs->mode, key, h);
+
+ if (first != NULL) {
+ ck_pr_store_ptr(first, insert);
+ ck_hs_map_signal(map, h);
+ ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
+ } else {
+ ck_pr_store_ptr(slot, insert);
+ }
+
+ *previous = CK_CC_DECONST_PTR(object);
+ return true;
+}
+
+/*
+ * An apply function takes two arguments. The first argument is a pointer to a
+ * pre-existing object. The second argument is a pointer to the fifth argument
+ * passed to ck_hs_apply. If a non-NULL pointer is passed to the first argument
+ * and the return value of the apply function is NULL, then the pre-existing
+ * value is deleted. If the return pointer is the same as the one passed to the
+ * apply function, then no changes are made to the hash set. If the first
+ * argument is non-NULL and the return pointer is different than that passed to
+ * the apply function, then the pre-existing value is replaced. For
+ * replacement, the returned value must be equivalent to the previous value
+ * with respect to the hash and comparison functions; that is, it must
+ * represent the same key.
+ */
+bool
+ck_hs_apply(struct ck_hs *hs,
+ unsigned long h,
+ const void *key,
+ ck_hs_apply_fn_t *fn,
+ void *cl)
+{
+ const void **slot, **first, *object, *delta, *insert;
+ unsigned long n_probes;
+ struct ck_hs_map *map;
+
+restart:
+ map = hs->map;
+
+ slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_HS_PROBE_INSERT);
+ if (slot == NULL && first == NULL) {
+ if (ck_hs_grow(hs, map->capacity << 1) == false)
+ return false;
+
+ goto restart;
+ }
+
+ delta = fn(CK_CC_DECONST_PTR(object), cl);
+ if (delta == NULL) {
+ /*
+ * The apply function has requested deletion. If the object doesn't exist,
+ * then exit early.
+ */
+ if (CK_CC_UNLIKELY(object == NULL))
+ return true;
+
+ /* Otherwise, mark slot as deleted. */
+ ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
+ map->n_entries--;
+ map->tombstones++;
+ return true;
+ }
+
+	/* The apply function has not requested hash set modification, so exit early. */
+ if (delta == object)
+ return true;
+
+ /* A modification or insertion has been requested. */
+ ck_hs_map_bound_set(map, h, n_probes);
+
+ insert = ck_hs_marshal(hs->mode, delta, h);
+ if (first != NULL) {
+ /*
+		 * This follows the same semantics as ck_hs_set; please refer to that
+ * function for documentation.
+ */
+ ck_pr_store_ptr(first, insert);
+
+ if (object != NULL) {
+ ck_hs_map_signal(map, h);
+ ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
+ }
+ } else {
+ /*
+		 * If we are storing into the same slot, then an atomic store is sufficient
+ * for replacement.
+ */
+ ck_pr_store_ptr(slot, insert);
+ }
+
+ if (object == NULL)
+ ck_hs_map_postinsert(hs, map);
+
+ return true;
+}
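+
+/*
+ * Illustrative sketch (added for exposition; upsert_example is a
+ * hypothetical callback, not upstream code): an apply function following
+ * the protocol documented above.  It would be invoked as
+ * ck_hs_apply(hs, h, object, upsert_example, object), inserting the object
+ * when no matching entry exists and leaving the set untouched otherwise.
+ */
+CK_CC_UNUSED static void *
+upsert_example(void *existing, void *closure)
+{
+
+	/* No matching entry: request insertion of the closure object. */
+	if (existing == NULL)
+		return closure;
+
+	/* Returning the existing pointer requests no modification. */
+	return existing;
+}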
+
+bool
+ck_hs_set(struct ck_hs *hs,
+ unsigned long h,
+ const void *key,
+ void **previous)
+{
+ const void **slot, **first, *object, *insert;
+ unsigned long n_probes;
+ struct ck_hs_map *map;
+
+ *previous = NULL;
+
+restart:
+ map = hs->map;
+
+ slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_HS_PROBE_INSERT);
+ if (slot == NULL && first == NULL) {
+ if (ck_hs_grow(hs, map->capacity << 1) == false)
+ return false;
+
+ goto restart;
+ }
+
+ ck_hs_map_bound_set(map, h, n_probes);
+ insert = ck_hs_marshal(hs->mode, key, h);
+
+ if (first != NULL) {
+ /* If an earlier bucket was found, then store entry there. */
+ ck_pr_store_ptr(first, insert);
+
+ /*
+ * If a duplicate key was found, then delete it after
+ * signaling concurrent probes to restart. Optionally,
+		 * it is possible to install a tombstone after a grace
+		 * period if we can guarantee an earlier position for the
+		 * duplicate key.
+ */
+ if (object != NULL) {
+ ck_hs_map_signal(map, h);
+ ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
+ }
+ } else {
+ /*
+		 * If we are storing into the same slot, then an atomic store is sufficient
+ * for replacement.
+ */
+ ck_pr_store_ptr(slot, insert);
+ }
+
+ if (object == NULL)
+ ck_hs_map_postinsert(hs, map);
+
+ *previous = CK_CC_DECONST_PTR(object);
+ return true;
+}
+
+CK_CC_INLINE static bool
+ck_hs_put_internal(struct ck_hs *hs,
+ unsigned long h,
+ const void *key,
+ enum ck_hs_probe_behavior behavior)
+{
+ const void **slot, **first, *object, *insert;
+ unsigned long n_probes;
+ struct ck_hs_map *map;
+
+restart:
+ map = hs->map;
+
+ slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
+ map->probe_limit, behavior);
+
+ if (slot == NULL && first == NULL) {
+ if (ck_hs_grow(hs, map->capacity << 1) == false)
+ return false;
+
+ goto restart;
+ }
+
+ /* Fail operation if a match was found. */
+ if (object != NULL)
+ return false;
+
+ ck_hs_map_bound_set(map, h, n_probes);
+ insert = ck_hs_marshal(hs->mode, key, h);
+
+ if (first != NULL) {
+ /* Insert key into first bucket in probe sequence. */
+ ck_pr_store_ptr(first, insert);
+ } else {
+ /* An empty slot was found. */
+ ck_pr_store_ptr(slot, insert);
+ }
+
+ ck_hs_map_postinsert(hs, map);
+ return true;
+}
+
+bool
+ck_hs_put(struct ck_hs *hs,
+ unsigned long h,
+ const void *key)
+{
+
+ return ck_hs_put_internal(hs, h, key, CK_HS_PROBE_INSERT);
+}
+
+bool
+ck_hs_put_unique(struct ck_hs *hs,
+ unsigned long h,
+ const void *key)
+{
+
+ return ck_hs_put_internal(hs, h, key, CK_HS_PROBE_TOMBSTONE);
+}
+
+void *
+ck_hs_get(struct ck_hs *hs,
+ unsigned long h,
+ const void *key)
+{
+ const void **first, *object;
+ struct ck_hs_map *map;
+ unsigned long n_probes;
+ unsigned int g, g_p, probe;
+ unsigned int *generation;
+
+ do {
+ map = ck_pr_load_ptr(&hs->map);
+ generation = &map->generation[h & CK_HS_G_MASK];
+ g = ck_pr_load_uint(generation);
+ probe = ck_hs_map_bound_get(map, h);
+ ck_pr_fence_load();
+
+ ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, probe, CK_HS_PROBE);
+
+ ck_pr_fence_load();
+ g_p = ck_pr_load_uint(generation);
+ } while (g != g_p);
+
+ return CK_CC_DECONST_PTR(object);
+}
+
+void *
+ck_hs_remove(struct ck_hs *hs,
+ unsigned long h,
+ const void *key)
+{
+ const void **slot, **first, *object;
+ struct ck_hs_map *map = hs->map;
+ unsigned long n_probes;
+
+ slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
+ ck_hs_map_bound_get(map, h), CK_HS_PROBE);
+ if (object == NULL)
+ return NULL;
+
+ ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
+ map->n_entries--;
+ map->tombstones++;
+ return CK_CC_DECONST_PTR(object);
+}
+
+bool
+ck_hs_move(struct ck_hs *hs,
+ struct ck_hs *source,
+ ck_hs_hash_cb_t *hf,
+ ck_hs_compare_cb_t *compare,
+ struct ck_malloc *m)
+{
+
+ if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
+ return false;
+
+ hs->mode = source->mode;
+ hs->seed = source->seed;
+ hs->map = source->map;
+ hs->m = m;
+ hs->hf = hf;
+ hs->compare = compare;
+ return true;
+}
+
+bool
+ck_hs_init(struct ck_hs *hs,
+ unsigned int mode,
+ ck_hs_hash_cb_t *hf,
+ ck_hs_compare_cb_t *compare,
+ struct ck_malloc *m,
+ unsigned long n_entries,
+ unsigned long seed)
+{
+
+ if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
+ return false;
+
+ hs->m = m;
+ hs->mode = mode;
+ hs->seed = seed;
+ hs->hf = hf;
+ hs->compare = compare;
+
+ hs->map = ck_hs_map_create(hs, n_entries);
+ return hs->map != NULL;
+}
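+
+/*
+ * Illustrative usage sketch (added for exposition; none of the identifiers
+ * below are part of the upstream source).  A set keyed by NUL-terminated
+ * strings: the hash and comparison callbacks and the ck_malloc wrappers are
+ * minimal stand-ins, and the caller must hash with the same seed that was
+ * handed to ck_hs_init().
+ */
+#include <stdlib.h>
+#include <string.h>
+
+static void *
+hs_example_malloc(size_t r)
+{
+
+	return malloc(r);
+}
+
+static void
+hs_example_free(void *p, size_t b, bool r)
+{
+
+	(void)b;
+	(void)r;
+	free(p);
+	return;
+}
+
+static struct ck_malloc hs_example_allocator = {
+	.malloc = hs_example_malloc,
+	.free = hs_example_free
+};
+
+static unsigned long
+hs_example_hash(const void *object, unsigned long seed)
+{
+	const unsigned char *c = object;
+	unsigned long h = seed;
+
+	/* A simple string hash; any ck_hs_hash_cb_t works here. */
+	while (*c != '\0')
+		h = h * 131 + *c++;
+
+	return h;
+}
+
+static bool
+hs_example_compare(const void *a, const void *b)
+{
+
+	return strcmp(a, b) == 0;
+}
+
+CK_CC_UNUSED static bool
+hs_example(void)
+{
+	const unsigned long seed = 6602834;
+	const char *key = "example";
+	struct ck_hs hs;
+	unsigned long h;
+	bool r;
+
+	if (ck_hs_init(&hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC,
+	    hs_example_hash, hs_example_compare,
+	    &hs_example_allocator, 128, seed) == false)
+		return false;
+
+	h = hs_example_hash(key, seed);
+	r = ck_hs_put(&hs, h, key);
+
+	/* ck_hs_get returns the stored pointer, or NULL if the key is absent. */
+	if (r == true && ck_hs_get(&hs, h, key) == NULL)
+		r = false;
+
+	ck_hs_destroy(&hs);
+	return r;
+}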
diff --git a/src/ck_ht.c b/src/ck_ht.c
new file mode 100644
index 0000000..2c864c5
--- /dev/null
+++ b/src/ck_ht.c
@@ -0,0 +1,1036 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define CK_HT_IM
+#include <ck_ht.h>
+
+/*
+ * This implementation borrows several techniques from Josh Dybnis's
+ * nbds library, which can be found at http://code.google.com/p/nbds
+ *
+ * This release currently only includes support for 64-bit platforms.
+ * We can address 32-bit platforms in a future release.
+ */
+#include <ck_cc.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_stdint.h>
+#include <ck_stdbool.h>
+#include <ck_string.h>
+
+#include "ck_ht_hash.h"
+#include "ck_internal.h"
+
+#ifndef CK_HT_BUCKET_LENGTH
+
+#ifdef CK_HT_PP
+#define CK_HT_BUCKET_SHIFT 2ULL
+#else
+#define CK_HT_BUCKET_SHIFT 1ULL
+#endif
+
+#define CK_HT_BUCKET_LENGTH (1U << CK_HT_BUCKET_SHIFT)
+#define CK_HT_BUCKET_MASK (CK_HT_BUCKET_LENGTH - 1)
+#endif
+
+#ifndef CK_HT_PROBE_DEFAULT
+#define CK_HT_PROBE_DEFAULT 64ULL
+#endif
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_STORE_8)
+#define CK_HT_WORD uint8_t
+#define CK_HT_WORD_MAX UINT8_MAX
+#define CK_HT_STORE(x, y) ck_pr_store_8(x, y)
+#define CK_HT_LOAD(x) ck_pr_load_8(x)
+#elif defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_STORE_16)
+#define CK_HT_WORD uint16_t
+#define CK_HT_WORD_MAX UINT16_MAX
+#define CK_HT_STORE(x, y) ck_pr_store_16(x, y)
+#define CK_HT_LOAD(x) ck_pr_load_16(x)
+#elif defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_STORE_32)
+#define CK_HT_WORD uint32_t
+#define CK_HT_WORD_MAX UINT32_MAX
+#define CK_HT_STORE(x, y) ck_pr_store_32(x, y)
+#define CK_HT_LOAD(x) ck_pr_load_32(x)
+#else
+#error "ck_ht is not supported on your platform."
+#endif
+
+struct ck_ht_map {
+ unsigned int mode;
+ CK_HT_TYPE deletions;
+ CK_HT_TYPE probe_maximum;
+ CK_HT_TYPE probe_length;
+ CK_HT_TYPE probe_limit;
+ CK_HT_TYPE size;
+ CK_HT_TYPE n_entries;
+ CK_HT_TYPE mask;
+ CK_HT_TYPE capacity;
+ CK_HT_TYPE step;
+ CK_HT_WORD *probe_bound;
+ struct ck_ht_entry *entries;
+};
+
+void
+ck_ht_stat(struct ck_ht *table,
+ struct ck_ht_stat *st)
+{
+ struct ck_ht_map *map = table->map;
+
+ st->n_entries = map->n_entries;
+ st->probe_maximum = map->probe_maximum;
+ return;
+}
+
+void
+ck_ht_hash(struct ck_ht_hash *h,
+ struct ck_ht *table,
+ const void *key,
+ uint16_t key_length)
+{
+
+ table->h(h, key, key_length, table->seed);
+ return;
+}
+
+void
+ck_ht_hash_direct(struct ck_ht_hash *h,
+ struct ck_ht *table,
+ uintptr_t key)
+{
+
+ ck_ht_hash(h, table, &key, sizeof(key));
+ return;
+}
+
+static void
+ck_ht_hash_wrapper(struct ck_ht_hash *h,
+ const void *key,
+ size_t length,
+ uint64_t seed)
+{
+
+ h->value = (unsigned long)MurmurHash64A(key, length, seed);
+ return;
+}
+
+static struct ck_ht_map *
+ck_ht_map_create(struct ck_ht *table, CK_HT_TYPE entries)
+{
+ struct ck_ht_map *map;
+ CK_HT_TYPE size;
+ uintptr_t prefix;
+ uint32_t n_entries;
+
+ n_entries = ck_internal_power_2(entries);
+ if (n_entries < CK_HT_BUCKET_LENGTH)
+ n_entries = CK_HT_BUCKET_LENGTH;
+
+ size = sizeof(struct ck_ht_map) +
+ (sizeof(struct ck_ht_entry) * n_entries + CK_MD_CACHELINE - 1);
+
+ if (table->mode & CK_HT_WORKLOAD_DELETE) {
+ prefix = sizeof(CK_HT_WORD) * n_entries;
+ size += prefix;
+ } else {
+ prefix = 0;
+ }
+
+ map = table->m->malloc(size);
+ if (map == NULL)
+ return NULL;
+
+ map->mode = table->mode;
+ map->size = size;
+ map->probe_limit = ck_internal_max_64(n_entries >>
+ (CK_HT_BUCKET_SHIFT + 2), CK_HT_PROBE_DEFAULT);
+
+ map->deletions = 0;
+ map->probe_maximum = 0;
+ map->capacity = n_entries;
+ map->step = ck_internal_bsf_64(map->capacity);
+ map->mask = map->capacity - 1;
+ map->n_entries = 0;
+ map->entries = (struct ck_ht_entry *)(((uintptr_t)&map[1] + prefix +
+ CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));
+
+ if (table->mode & CK_HT_WORKLOAD_DELETE) {
+ map->probe_bound = (CK_HT_WORD *)&map[1];
+ memset(map->probe_bound, 0, prefix);
+ } else {
+ map->probe_bound = NULL;
+ }
+
+ memset(map->entries, 0, sizeof(struct ck_ht_entry) * n_entries);
+ ck_pr_fence_store();
+ return map;
+}
+
+static inline void
+ck_ht_map_bound_set(struct ck_ht_map *m,
+ struct ck_ht_hash h,
+ CK_HT_TYPE n_probes)
+{
+ CK_HT_TYPE offset = h.value & m->mask;
+
+ if (n_probes > m->probe_maximum)
+ CK_HT_TYPE_STORE(&m->probe_maximum, n_probes);
+
+ if (m->probe_bound != NULL && m->probe_bound[offset] < n_probes) {
+ if (n_probes >= CK_HT_WORD_MAX)
+ n_probes = CK_HT_WORD_MAX;
+
+ CK_HT_STORE(&m->probe_bound[offset], n_probes);
+ ck_pr_fence_store();
+ }
+
+ return;
+}
+
+static inline CK_HT_TYPE
+ck_ht_map_bound_get(struct ck_ht_map *m, struct ck_ht_hash h)
+{
+ CK_HT_TYPE offset = h.value & m->mask;
+ CK_HT_TYPE r = CK_HT_WORD_MAX;
+
+ if (m->probe_bound != NULL) {
+ r = CK_HT_LOAD(&m->probe_bound[offset]);
+ if (r == CK_HT_WORD_MAX)
+ r = CK_HT_TYPE_LOAD(&m->probe_maximum);
+ } else {
+ r = CK_HT_TYPE_LOAD(&m->probe_maximum);
+ }
+
+ return r;
+}
+
+static void
+ck_ht_map_destroy(struct ck_malloc *m, struct ck_ht_map *map, bool defer)
+{
+
+ m->free(map, map->size, defer);
+ return;
+}
+
+static inline size_t
+ck_ht_map_probe_next(struct ck_ht_map *map, size_t offset, ck_ht_hash_t h, size_t probes)
+{
+ ck_ht_hash_t r;
+ size_t stride;
+ unsigned long level = (unsigned long)probes >> CK_HT_BUCKET_SHIFT;
+
+ r.value = (h.value >> map->step) >> level;
+ stride = (r.value & ~CK_HT_BUCKET_MASK) << 1
+ | (r.value & CK_HT_BUCKET_MASK);
+
+ return (offset + level +
+ (stride | CK_HT_BUCKET_LENGTH)) & map->mask;
+}
+
+bool
+ck_ht_init(struct ck_ht *table,
+ unsigned int mode,
+ ck_ht_hash_cb_t *h,
+ struct ck_malloc *m,
+ CK_HT_TYPE entries,
+ uint64_t seed)
+{
+
+ if (m == NULL || m->malloc == NULL || m->free == NULL)
+ return false;
+
+ table->m = m;
+ table->mode = mode;
+ table->seed = seed;
+
+ if (h == NULL) {
+ table->h = ck_ht_hash_wrapper;
+ } else {
+ table->h = h;
+ }
+
+ table->map = ck_ht_map_create(table, entries);
+ return table->map != NULL;
+}
+
+static struct ck_ht_entry *
+ck_ht_map_probe_wr(struct ck_ht_map *map,
+ ck_ht_hash_t h,
+ ck_ht_entry_t *snapshot,
+ ck_ht_entry_t **available,
+ const void *key,
+ uint16_t key_length,
+ CK_HT_TYPE *probe_limit,
+ CK_HT_TYPE *probe_wr)
+{
+ struct ck_ht_entry *bucket, *cursor;
+ struct ck_ht_entry *first = NULL;
+ size_t offset, i, j;
+ CK_HT_TYPE probes = 0;
+ CK_HT_TYPE limit;
+
+ if (probe_limit == NULL) {
+ limit = ck_ht_map_bound_get(map, h);
+ } else {
+ limit = CK_HT_TYPE_MAX;
+ }
+
+ offset = h.value & map->mask;
+ for (i = 0; i < map->probe_limit; i++) {
+ /*
+ * Probe on a complete cache line first. Scan forward and wrap around to
+ * the beginning of the cache line. Only when the complete cache line has
+ * been scanned do we move on to the next row.
+ */
+ bucket = (void *)((uintptr_t)(map->entries + offset) &
+ ~(CK_MD_CACHELINE - 1));
+
+ for (j = 0; j < CK_HT_BUCKET_LENGTH; j++) {
+ uint16_t k;
+
+ if (probes++ > limit)
+ break;
+
+ cursor = bucket + ((j + offset) & (CK_HT_BUCKET_LENGTH - 1));
+
+ /*
+ * It is probably worth it to encapsulate probe state
+ * in order to prevent a complete reprobe sequence in
+ * the case of intermittent writers.
+ */
+ if (cursor->key == CK_HT_KEY_TOMBSTONE) {
+ if (first == NULL) {
+ first = cursor;
+ *probe_wr = probes;
+ }
+
+ continue;
+ }
+
+ if (cursor->key == CK_HT_KEY_EMPTY)
+ goto leave;
+
+ if (cursor->key == (uintptr_t)key)
+ goto leave;
+
+ if (map->mode & CK_HT_MODE_BYTESTRING) {
+ void *pointer;
+
+ /*
+ * Check memoized portion of hash value before
+ * expensive full-length comparison.
+ */
+ k = ck_ht_entry_key_length(cursor);
+ if (k != key_length)
+ continue;
+
+#ifdef CK_HT_PP
+ if ((cursor->value >> CK_MD_VMA_BITS) != ((h.value >> 32) & CK_HT_KEY_MASK))
+ continue;
+#else
+ if (cursor->hash != h.value)
+ continue;
+#endif
+
+ pointer = ck_ht_entry_key(cursor);
+ if (memcmp(pointer, key, key_length) == 0)
+ goto leave;
+ }
+ }
+
+ offset = ck_ht_map_probe_next(map, offset, h, probes);
+ }
+
+ cursor = NULL;
+
+leave:
+ if (probe_limit != NULL) {
+ *probe_limit = probes;
+ } else if (first == NULL) {
+ *probe_wr = probes;
+ }
+
+ *available = first;
+
+ if (cursor != NULL) {
+ *snapshot = *cursor;
+ }
+
+ return cursor;
+}
+
+bool
+ck_ht_gc(struct ck_ht *ht, unsigned long cycles, unsigned long seed)
+{
+ CK_HT_WORD *bounds = NULL;
+ struct ck_ht_map *map = ht->map;
+ CK_HT_TYPE maximum, i;
+ CK_HT_TYPE size = 0;
+
+ if (map->n_entries == 0) {
+ CK_HT_TYPE_STORE(&map->probe_maximum, 0);
+ if (map->probe_bound != NULL)
+ memset(map->probe_bound, 0, sizeof(CK_HT_WORD) * map->capacity);
+
+ return true;
+ }
+
+ if (cycles == 0) {
+ maximum = 0;
+
+ if (map->probe_bound != NULL) {
+ size = sizeof(CK_HT_WORD) * map->capacity;
+ bounds = ht->m->malloc(size);
+ if (bounds == NULL)
+ return false;
+
+ memset(bounds, 0, size);
+ }
+ } else {
+ maximum = map->probe_maximum;
+ }
+
+ for (i = 0; i < map->capacity; i++) {
+ struct ck_ht_entry *entry, *priority, snapshot;
+ struct ck_ht_hash h;
+ CK_HT_TYPE probes_wr;
+ CK_HT_TYPE offset;
+
+ entry = &map->entries[(i + seed) & map->mask];
+ if (entry->key == CK_HT_KEY_EMPTY ||
+ entry->key == CK_HT_KEY_TOMBSTONE) {
+ continue;
+ }
+
+ if (ht->mode & CK_HT_MODE_BYTESTRING) {
+#ifndef CK_HT_PP
+ h.value = entry->hash;
+#else
+ ht->h(&h, ck_ht_entry_key(entry), ck_ht_entry_key_length(entry),
+ ht->seed);
+#endif
+ entry = ck_ht_map_probe_wr(map, h, &snapshot, &priority,
+ ck_ht_entry_key(entry),
+ ck_ht_entry_key_length(entry),
+ NULL, &probes_wr);
+ } else {
+#ifndef CK_HT_PP
+ h.value = entry->hash;
+#else
+ ht->h(&h, &entry->key, sizeof(entry->key), ht->seed);
+#endif
+ entry = ck_ht_map_probe_wr(map, h, &snapshot, &priority,
+ (void *)entry->key,
+ sizeof(entry->key),
+ NULL, &probes_wr);
+ }
+
+ offset = h.value & map->mask;
+
+ if (priority != NULL) {
+ CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1);
+ ck_pr_fence_store();
+#ifndef CK_HT_PP
+ CK_HT_TYPE_STORE(&priority->key_length, entry->key_length);
+ CK_HT_TYPE_STORE(&priority->hash, entry->hash);
+#endif
+ ck_pr_store_ptr_unsafe(&priority->value, (void *)entry->value);
+ ck_pr_fence_store();
+ ck_pr_store_ptr_unsafe(&priority->key, (void *)entry->key);
+ ck_pr_fence_store();
+ CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1);
+ ck_pr_fence_store();
+ ck_pr_store_ptr_unsafe(&entry->key, (void *)CK_HT_KEY_TOMBSTONE);
+ ck_pr_fence_store();
+ }
+
+ if (cycles == 0) {
+ if (probes_wr > maximum)
+ maximum = probes_wr;
+
+ if (probes_wr >= CK_HT_WORD_MAX)
+ probes_wr = CK_HT_WORD_MAX;
+
+ if (bounds != NULL && probes_wr > bounds[offset])
+ bounds[offset] = probes_wr;
+ } else if (--cycles == 0)
+ break;
+ }
+
+ if (maximum != map->probe_maximum)
+ CK_HT_TYPE_STORE(&map->probe_maximum, maximum);
+
+ if (bounds != NULL) {
+ for (i = 0; i < map->capacity; i++)
+ CK_HT_STORE(&map->probe_bound[i], bounds[i]);
+
+ ht->m->free(bounds, size, false);
+ }
+
+ return true;
+}
+
+static struct ck_ht_entry *
+ck_ht_map_probe_rd(struct ck_ht_map *map,
+ ck_ht_hash_t h,
+ ck_ht_entry_t *snapshot,
+ const void *key,
+ uint16_t key_length)
+{
+ struct ck_ht_entry *bucket, *cursor;
+ size_t offset, i, j;
+ CK_HT_TYPE probes = 0;
+ CK_HT_TYPE probe_maximum;
+
+#ifndef CK_HT_PP
+ CK_HT_TYPE d = 0;
+ CK_HT_TYPE d_prime = 0;
+retry:
+#endif
+
+ probe_maximum = ck_ht_map_bound_get(map, h);
+ offset = h.value & map->mask;
+
+ for (i = 0; i < map->probe_limit; i++) {
+ /*
+ * Probe on a complete cache line first. Scan forward and wrap around to
+ * the beginning of the cache line. Only when the complete cache line has
+ * been scanned do we move on to the next row.
+ */
+ bucket = (void *)((uintptr_t)(map->entries + offset) &
+ ~(CK_MD_CACHELINE - 1));
+
+ for (j = 0; j < CK_HT_BUCKET_LENGTH; j++) {
+ uint16_t k;
+
+ if (probes++ > probe_maximum)
+ return NULL;
+
+ cursor = bucket + ((j + offset) & (CK_HT_BUCKET_LENGTH - 1));
+
+#ifdef CK_HT_PP
+ snapshot->key = (uintptr_t)ck_pr_load_ptr(&cursor->key);
+ ck_pr_fence_load();
+ snapshot->value = (uintptr_t)ck_pr_load_ptr(&cursor->value);
+#else
+ d = CK_HT_TYPE_LOAD(&map->deletions);
+ snapshot->key = (uintptr_t)ck_pr_load_ptr(&cursor->key);
+ ck_pr_fence_load();
+ snapshot->key_length = CK_HT_TYPE_LOAD(&cursor->key_length);
+ snapshot->hash = CK_HT_TYPE_LOAD(&cursor->hash);
+ snapshot->value = (uintptr_t)ck_pr_load_ptr(&cursor->value);
+#endif
+
+ /*
+ * It is probably worth it to encapsulate probe state
+ * in order to prevent a complete reprobe sequence in
+ * the case of intermittent writers.
+ */
+ if (snapshot->key == CK_HT_KEY_TOMBSTONE)
+ continue;
+
+ if (snapshot->key == CK_HT_KEY_EMPTY)
+ goto leave;
+
+ if (snapshot->key == (uintptr_t)key)
+ goto leave;
+
+ if (map->mode & CK_HT_MODE_BYTESTRING) {
+ void *pointer;
+
+ /*
+ * Check memoized portion of hash value before
+ * expensive full-length comparison.
+ */
+ k = ck_ht_entry_key_length(snapshot);
+ if (k != key_length)
+ continue;
+#ifdef CK_HT_PP
+ if ((snapshot->value >> CK_MD_VMA_BITS) != ((h.value >> 32) & CK_HT_KEY_MASK))
+ continue;
+#else
+ if (snapshot->hash != h.value)
+ continue;
+
+ d_prime = CK_HT_TYPE_LOAD(&map->deletions);
+
+ /*
+ * It is possible that the slot was
+				 * replaced; initiate a re-probe.
+ */
+ if (d != d_prime)
+ goto retry;
+#endif
+
+ pointer = ck_ht_entry_key(snapshot);
+ if (memcmp(pointer, key, key_length) == 0)
+ goto leave;
+ }
+ }
+
+ offset = ck_ht_map_probe_next(map, offset, h, probes);
+ }
+
+ return NULL;
+
+leave:
+ return cursor;
+}
+
+CK_HT_TYPE
+ck_ht_count(struct ck_ht *table)
+{
+ struct ck_ht_map *map = ck_pr_load_ptr(&table->map);
+
+ return CK_HT_TYPE_LOAD(&map->n_entries);
+}
+
+bool
+ck_ht_next(struct ck_ht *table,
+ struct ck_ht_iterator *i,
+ struct ck_ht_entry **entry)
+{
+ struct ck_ht_map *map = table->map;
+ uintptr_t key;
+
+ if (i->offset >= map->capacity)
+ return false;
+
+ do {
+ key = map->entries[i->offset].key;
+ if (key != CK_HT_KEY_EMPTY && key != CK_HT_KEY_TOMBSTONE)
+ break;
+ } while (++i->offset < map->capacity);
+
+ if (i->offset >= map->capacity)
+ return false;
+
+ *entry = map->entries + i->offset++;
+ return true;
+}
+
+bool
+ck_ht_reset_size_spmc(struct ck_ht *table, CK_HT_TYPE size)
+{
+ struct ck_ht_map *map, *update;
+
+ map = table->map;
+ update = ck_ht_map_create(table, size);
+ if (update == NULL)
+ return false;
+
+ ck_pr_store_ptr_unsafe(&table->map, update);
+ ck_ht_map_destroy(table->m, map, true);
+ return true;
+}
+
+bool
+ck_ht_reset_spmc(struct ck_ht *table)
+{
+ struct ck_ht_map *map = table->map;
+
+ return ck_ht_reset_size_spmc(table, map->capacity);
+}
+
+bool
+ck_ht_grow_spmc(struct ck_ht *table, CK_HT_TYPE capacity)
+{
+ struct ck_ht_map *map, *update;
+ struct ck_ht_entry *bucket, *previous;
+ struct ck_ht_hash h;
+ size_t k, i, j, offset;
+ CK_HT_TYPE probes;
+
+restart:
+ map = table->map;
+
+ if (map->capacity >= capacity)
+ return false;
+
+ update = ck_ht_map_create(table, capacity);
+ if (update == NULL)
+ return false;
+
+ for (k = 0; k < map->capacity; k++) {
+ previous = &map->entries[k];
+
+ if (previous->key == CK_HT_KEY_EMPTY || previous->key == CK_HT_KEY_TOMBSTONE)
+ continue;
+
+ if (table->mode & CK_HT_MODE_BYTESTRING) {
+#ifdef CK_HT_PP
+ void *key;
+ uint16_t key_length;
+
+ key = ck_ht_entry_key(previous);
+ key_length = ck_ht_entry_key_length(previous);
+#endif
+
+#ifndef CK_HT_PP
+ h.value = previous->hash;
+#else
+ table->h(&h, key, key_length, table->seed);
+#endif
+ } else {
+#ifndef CK_HT_PP
+ h.value = previous->hash;
+#else
+ table->h(&h, &previous->key, sizeof(previous->key), table->seed);
+#endif
+ }
+
+ offset = h.value & update->mask;
+ probes = 0;
+
+ for (i = 0; i < update->probe_limit; i++) {
+ bucket = (void *)((uintptr_t)(update->entries + offset) & ~(CK_MD_CACHELINE - 1));
+
+ for (j = 0; j < CK_HT_BUCKET_LENGTH; j++) {
+ struct ck_ht_entry *cursor = bucket + ((j + offset) & (CK_HT_BUCKET_LENGTH - 1));
+
+ probes++;
+ if (CK_CC_LIKELY(cursor->key == CK_HT_KEY_EMPTY)) {
+ *cursor = *previous;
+ update->n_entries++;
+ ck_ht_map_bound_set(update, h, probes);
+ break;
+ }
+ }
+
+ if (j < CK_HT_BUCKET_LENGTH)
+ break;
+
+ offset = ck_ht_map_probe_next(update, offset, h, probes);
+ }
+
+ if (i == update->probe_limit) {
+ /*
+			 * We have hit the probe limit; the map needs to be even
+			 * larger.
+ */
+ ck_ht_map_destroy(table->m, update, false);
+ capacity <<= 1;
+ goto restart;
+ }
+ }
+
+ ck_pr_fence_store();
+ ck_pr_store_ptr_unsafe(&table->map, update);
+ ck_ht_map_destroy(table->m, map, true);
+ return true;
+}
+
+bool
+ck_ht_remove_spmc(struct ck_ht *table,
+ ck_ht_hash_t h,
+ ck_ht_entry_t *entry)
+{
+ struct ck_ht_map *map;
+ struct ck_ht_entry *candidate, snapshot;
+
+ map = table->map;
+
+ if (table->mode & CK_HT_MODE_BYTESTRING) {
+ candidate = ck_ht_map_probe_rd(map, h, &snapshot,
+ ck_ht_entry_key(entry),
+ ck_ht_entry_key_length(entry));
+ } else {
+ candidate = ck_ht_map_probe_rd(map, h, &snapshot,
+ (void *)entry->key,
+ sizeof(entry->key));
+ }
+
+ /* No matching entry was found. */
+ if (candidate == NULL || snapshot.key == CK_HT_KEY_EMPTY)
+ return false;
+
+ *entry = snapshot;
+
+ ck_pr_store_ptr_unsafe(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE);
+ ck_pr_fence_store();
+ CK_HT_TYPE_STORE(&map->n_entries, map->n_entries - 1);
+ return true;
+}
+
+bool
+ck_ht_get_spmc(struct ck_ht *table,
+ ck_ht_hash_t h,
+ ck_ht_entry_t *entry)
+{
+ struct ck_ht_entry *candidate, snapshot;
+ struct ck_ht_map *map;
+ CK_HT_TYPE d, d_prime;
+
+restart:
+ map = ck_pr_load_ptr(&table->map);
+
+ /*
+ * Platforms that cannot read key and key_length atomically must reprobe
+ * on the scan of any single entry.
+ */
+ d = CK_HT_TYPE_LOAD(&map->deletions);
+
+ if (table->mode & CK_HT_MODE_BYTESTRING) {
+ candidate = ck_ht_map_probe_rd(map, h, &snapshot,
+ ck_ht_entry_key(entry), ck_ht_entry_key_length(entry));
+ } else {
+ candidate = ck_ht_map_probe_rd(map, h, &snapshot,
+ (void *)entry->key, sizeof(entry->key));
+ }
+
+ d_prime = CK_HT_TYPE_LOAD(&map->deletions);
+ if (d != d_prime) {
+ /*
+		 * It is possible we have read (K, V'). The only valid states are
+		 * (K, V), (K', V') and (T, V). Restart the load operation in the
+		 * face of concurrent deletions or replacements.
+ */
+ goto restart;
+ }
+
+ if (candidate == NULL || snapshot.key == CK_HT_KEY_EMPTY)
+ return false;
+
+ *entry = snapshot;
+ return true;
+}
+
+bool
+ck_ht_set_spmc(struct ck_ht *table,
+ ck_ht_hash_t h,
+ ck_ht_entry_t *entry)
+{
+ struct ck_ht_entry snapshot, *candidate, *priority;
+ struct ck_ht_map *map;
+ CK_HT_TYPE probes, probes_wr;
+ bool empty = false;
+
+ for (;;) {
+ map = table->map;
+
+ if (table->mode & CK_HT_MODE_BYTESTRING) {
+ candidate = ck_ht_map_probe_wr(map, h, &snapshot, &priority,
+ ck_ht_entry_key(entry),
+ ck_ht_entry_key_length(entry),
+ &probes, &probes_wr);
+ } else {
+ candidate = ck_ht_map_probe_wr(map, h, &snapshot, &priority,
+ (void *)entry->key,
+ sizeof(entry->key),
+ &probes, &probes_wr);
+ }
+
+ if (priority != NULL) {
+ probes = probes_wr;
+ break;
+ }
+
+ if (candidate != NULL)
+ break;
+
+ if (ck_ht_grow_spmc(table, map->capacity << 1) == false)
+ return false;
+ }
+
+ if (candidate == NULL) {
+ candidate = priority;
+ empty = true;
+ }
+
+ if (candidate->key != CK_HT_KEY_EMPTY &&
+ priority != NULL && candidate != priority) {
+ /*
+		 * The entry is moved to another position in the probe sequence.
+ * We avoid a state of (K, B) (where [K, B] -> [K', B]) by
+ * guaranteeing a forced reprobe before transitioning from K to
+ * T. (K, B) implies (K, B, D') so we will reprobe successfully
+ * from this transient state.
+ */
+ probes = probes_wr;
+
+#ifndef CK_HT_PP
+ CK_HT_TYPE_STORE(&priority->key_length, entry->key_length);
+ CK_HT_TYPE_STORE(&priority->hash, entry->hash);
+#endif
+
+ /*
+		 * Readers must observe the version counter change before they
+		 * observe re-use. If they observe re-use, it is at most
+		 * a tombstone.
+ */
+ if (priority->value == CK_HT_KEY_TOMBSTONE) {
+ CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1);
+ ck_pr_fence_store();
+ }
+
+ ck_pr_store_ptr_unsafe(&priority->value, (void *)entry->value);
+ ck_pr_fence_store();
+ ck_pr_store_ptr_unsafe(&priority->key, (void *)entry->key);
+ ck_pr_fence_store();
+
+ /*
+		 * Make sure that readers who observe the tombstone also
+		 * observe the counter change.
+ */
+ CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1);
+ ck_pr_fence_store();
+
+ ck_pr_store_ptr_unsafe(&candidate->key, (void *)CK_HT_KEY_TOMBSTONE);
+ ck_pr_fence_store();
+ } else {
+ /*
+ * In this case we are inserting a new entry or replacing
+		 * an existing entry. Yes, this could be combined with the branch
+		 * above, but it is not because you are looking at dying code
+ * (ck_ht is effectively deprecated and is being replaced soon).
+ */
+ bool replace = candidate->key != CK_HT_KEY_EMPTY &&
+ candidate->key != CK_HT_KEY_TOMBSTONE;
+
+ if (priority != NULL) {
+ if (priority->key == CK_HT_KEY_TOMBSTONE) {
+ CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1);
+ ck_pr_fence_store();
+ }
+
+ candidate = priority;
+ probes = probes_wr;
+ }
+
+#ifdef CK_HT_PP
+ ck_pr_store_ptr_unsafe(&candidate->value, (void *)entry->value);
+ ck_pr_fence_store();
+ ck_pr_store_ptr_unsafe(&candidate->key, (void *)entry->key);
+#else
+ CK_HT_TYPE_STORE(&candidate->key_length, entry->key_length);
+ CK_HT_TYPE_STORE(&candidate->hash, entry->hash);
+ ck_pr_store_ptr_unsafe(&candidate->value, (void *)entry->value);
+ ck_pr_fence_store();
+ ck_pr_store_ptr_unsafe(&candidate->key, (void *)entry->key);
+#endif
+
+ /*
+		 * If we are inserting a new entry, then increment the number
+		 * of entries associated with the map.
+ */
+ if (replace == false)
+ CK_HT_TYPE_STORE(&map->n_entries, map->n_entries + 1);
+ }
+
+ ck_ht_map_bound_set(map, h, probes);
+
+ /* Enforce a load factor of 0.5. */
+ if (map->n_entries * 2 > map->capacity)
+ ck_ht_grow_spmc(table, map->capacity << 1);
+
+ if (empty == true) {
+ entry->key = CK_HT_KEY_EMPTY;
+ } else {
+ *entry = snapshot;
+ }
+
+ return true;
+}
+
+bool
+ck_ht_put_spmc(struct ck_ht *table,
+ ck_ht_hash_t h,
+ ck_ht_entry_t *entry)
+{
+ struct ck_ht_entry snapshot, *candidate, *priority;
+ struct ck_ht_map *map;
+ CK_HT_TYPE probes, probes_wr;
+
+ for (;;) {
+ map = table->map;
+
+ if (table->mode & CK_HT_MODE_BYTESTRING) {
+ candidate = ck_ht_map_probe_wr(map, h, &snapshot, &priority,
+ ck_ht_entry_key(entry),
+ ck_ht_entry_key_length(entry),
+ &probes, &probes_wr);
+ } else {
+ candidate = ck_ht_map_probe_wr(map, h, &snapshot, &priority,
+ (void *)entry->key,
+ sizeof(entry->key),
+ &probes, &probes_wr);
+ }
+
+ if (candidate != NULL || priority != NULL)
+ break;
+
+ if (ck_ht_grow_spmc(table, map->capacity << 1) == false)
+ return false;
+ }
+
+ if (priority != NULL) {
+ /* Version counter is updated before re-use. */
+ CK_HT_TYPE_STORE(&map->deletions, map->deletions + 1);
+ ck_pr_fence_store();
+
+ /* Re-use tombstone if one was found. */
+ candidate = priority;
+ probes = probes_wr;
+ } else if (candidate->key != CK_HT_KEY_EMPTY &&
+ candidate->key != CK_HT_KEY_TOMBSTONE) {
+ /*
+		 * If the candidate key is non-empty and is not a tombstone,
+		 * then an identical key was found. As put does not implement
+		 * replacement, we fail.
+ */
+ return false;
+ }
+
+ ck_ht_map_bound_set(map, h, probes);
+
+#ifdef CK_HT_PP
+ ck_pr_store_ptr_unsafe(&candidate->value, (void *)entry->value);
+ ck_pr_fence_store();
+ ck_pr_store_ptr_unsafe(&candidate->key, (void *)entry->key);
+#else
+ CK_HT_TYPE_STORE(&candidate->key_length, entry->key_length);
+ CK_HT_TYPE_STORE(&candidate->hash, entry->hash);
+ ck_pr_store_ptr_unsafe(&candidate->value, (void *)entry->value);
+ ck_pr_fence_store();
+ ck_pr_store_ptr_unsafe(&candidate->key, (void *)entry->key);
+#endif
+
+ CK_HT_TYPE_STORE(&map->n_entries, map->n_entries + 1);
+
+ /* Enforce a load factor of 0.5. */
+ if (map->n_entries * 2 > map->capacity)
+ ck_ht_grow_spmc(table, map->capacity << 1);
+
+ return true;
+}
+
+void
+ck_ht_destroy(struct ck_ht *table)
+{
+
+ ck_ht_map_destroy(table->m, table->map, false);
+ return;
+}
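+
+/*
+ * Illustrative sketch (added for exposition, not upstream ck_ht.c code): a
+ * single-writer insert and lookup using direct (integer) keys.  The
+ * CK_HT_MODE_DIRECT flag and the ck_ht_entry_set_direct,
+ * ck_ht_entry_key_set_direct and ck_ht_entry_value_direct helpers are
+ * assumed from ck_ht.h; the wrapper names below are hypothetical.
+ */
+#include <stdlib.h>
+
+static void *
+ht_example_malloc(size_t r)
+{
+
+	return malloc(r);
+}
+
+static void
+ht_example_free(void *p, size_t b, bool r)
+{
+
+	(void)b;
+	(void)r;
+	free(p);
+	return;
+}
+
+static struct ck_malloc ht_example_allocator = {
+	.malloc = ht_example_malloc,
+	.free = ht_example_free
+};
+
+CK_CC_UNUSED static bool
+ht_example(void)
+{
+	struct ck_ht ht;
+	ck_ht_entry_t entry;
+	ck_ht_hash_t h;
+	uintptr_t key = 42, value = 0xcafe;
+	bool r;
+
+	/* A NULL hash callback selects the built-in MurmurHash64A wrapper. */
+	if (ck_ht_init(&ht, CK_HT_MODE_DIRECT, NULL, &ht_example_allocator,
+	    64, 6602834) == false)
+		return false;
+
+	ck_ht_hash_direct(&h, &ht, key);
+	ck_ht_entry_set_direct(&entry, h, key, value);
+	if (ck_ht_put_spmc(&ht, h, &entry) == false) {
+		ck_ht_destroy(&ht);
+		return false;
+	}
+
+	/* Lookups re-hash the key and fill the entry on success. */
+	ck_ht_hash_direct(&h, &ht, key);
+	ck_ht_entry_key_set_direct(&entry, key);
+	if (ck_ht_get_spmc(&ht, h, &entry) == false) {
+		ck_ht_destroy(&ht);
+		return false;
+	}
+
+	r = (ck_ht_entry_value_direct(&entry) == value);
+	ck_ht_destroy(&ht);
+	return r;
+}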
diff --git a/src/ck_ht_hash.h b/src/ck_ht_hash.h
new file mode 100644
index 0000000..cd3d7a5
--- /dev/null
+++ b/src/ck_ht_hash.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra
+ * Copyright 2011-2014 AppNexus, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_HT_HASH_H
+#define CK_HT_HASH_H
+
+/*
+ * This is the Murmur hash written by Austin Appleby.
+ */
+
+#include <ck_stdint.h>
+#include <ck_string.h>
+
+//-----------------------------------------------------------------------------
+// MurmurHash3 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+// Note - The x86 and x64 versions do _not_ produce the same results, as the
+// algorithms are optimized for their respective platforms. You can still
+// compile and run any of them on any platform, but your performance with the
+// non-native version will be less than optimal.
+
+//-----------------------------------------------------------------------------
+// Platform-specific functions and macros
+
+// Microsoft Visual Studio
+
+#if defined(_MSC_VER)
+
+#define FORCE_INLINE __forceinline
+
+#include <stdlib.h>
+
+#define ROTL32(x,y) _rotl(x,y)
+#define ROTL64(x,y) _rotl64(x,y)
+
+#define BIG_CONSTANT(x) (x)
+
+// Other compilers
+
+#else // defined(_MSC_VER)
+
+#define FORCE_INLINE inline __attribute__((always_inline))
+
+static inline uint32_t rotl32 ( uint32_t x, int8_t r )
+{
+ return (x << r) | (x >> (32 - r));
+}
+
+static inline uint64_t rotl64 ( uint64_t x, int8_t r )
+{
+ return (x << r) | (x >> (64 - r));
+}
+
+#define ROTL32(x,y) rotl32(x,y)
+#define ROTL64(x,y) rotl64(x,y)
+
+#define BIG_CONSTANT(x) (x##LLU)
+
+#endif // !defined(_MSC_VER)
+
+//-----------------------------------------------------------------------------
+// Block read - if your platform needs to do endian-swapping or can only
+// handle aligned reads, do the conversion here
+
+FORCE_INLINE static uint32_t getblock ( const uint32_t * p, int i )
+{
+ return p[i];
+}
+
+//-----------------------------------------------------------------------------
+// Finalization mix - force all bits of a hash block to avalanche
+
+FORCE_INLINE static uint32_t fmix ( uint32_t h )
+{
+ h ^= h >> 16;
+ h *= 0x85ebca6b;
+ h ^= h >> 13;
+ h *= 0xc2b2ae35;
+ h ^= h >> 16;
+
+ return h;
+}
+
+//-----------------------------------------------------------------------------
+
+static inline void MurmurHash3_x86_32 ( const void * key, int len,
+ uint32_t seed, uint32_t * out )
+{
+ const uint8_t * data = (const uint8_t*)key;
+ const int nblocks = len / 4;
+ int i;
+
+ uint32_t h1 = seed;
+
+ uint32_t c1 = 0xcc9e2d51;
+ uint32_t c2 = 0x1b873593;
+
+ //----------
+ // body
+
+ const uint32_t * blocks = (const uint32_t *)(const void *)(data + nblocks*4);
+
+ for(i = -nblocks; i; i++)
+ {
+ uint32_t k1 = getblock(blocks,i);
+
+ k1 *= c1;
+ k1 = ROTL32(k1,15);
+ k1 *= c2;
+
+ h1 ^= k1;
+ h1 = ROTL32(h1,13);
+ h1 = h1*5+0xe6546b64;
+ }
+
+ //----------
+ // tail
+
+ const uint8_t * tail = (const uint8_t*)(data + nblocks*4);
+
+ uint32_t k1 = 0;
+
+ switch(len & 3)
+ {
+ case 3: k1 ^= tail[2] << 16;
+ case 2: k1 ^= tail[1] << 8;
+ case 1: k1 ^= tail[0];
+ k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
+ };
+
+ //----------
+ // finalization
+
+ h1 ^= len;
+
+ h1 = fmix(h1);
+
+ *(uint32_t *)out = h1;
+}
+
+static inline uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed )
+{
+ const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
+ const int r = 47;
+
+ uint64_t h = seed ^ (len * m);
+
+ const uint64_t * data = (const uint64_t *)key;
+ const uint64_t * end = data + (len/8);
+
+ while(data != end)
+ {
+ uint64_t k;
+
+ if (!((uintptr_t)data & 0x7))
+ k = *data++;
+ else {
+ memcpy(&k, data, sizeof(k));
+ data++;
+ }
+
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+
+ h ^= k;
+ h *= m;
+ }
+
+ const unsigned char * data2 = (const unsigned char*)data;
+
+ switch(len & 7)
+ {
+ case 7: h ^= (uint64_t)(data2[6]) << 48;
+ case 6: h ^= (uint64_t)(data2[5]) << 40;
+ case 5: h ^= (uint64_t)(data2[4]) << 32;
+ case 4: h ^= (uint64_t)(data2[3]) << 24;
+ case 3: h ^= (uint64_t)(data2[2]) << 16;
+ case 2: h ^= (uint64_t)(data2[1]) << 8;
+ case 1: h ^= (uint64_t)(data2[0]);
+ h *= m;
+ };
+
+ h ^= h >> r;
+ h *= m;
+ h ^= h >> r;
+
+ return h;
+}
+
+
+// 64-bit hash for 32-bit platforms
+
+static inline uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed )
+{
+ const uint32_t m = 0x5bd1e995;
+ const int r = 24;
+
+ uint32_t h1 = (uint32_t)(seed) ^ len;
+ uint32_t h2 = (uint32_t)(seed >> 32);
+
+ const uint32_t * data = (const uint32_t *)key;
+
+ while(len >= 8)
+ {
+ uint32_t k1 = *data++;
+ k1 *= m; k1 ^= k1 >> r; k1 *= m;
+ h1 *= m; h1 ^= k1;
+ len -= 4;
+
+ uint32_t k2 = *data++;
+ k2 *= m; k2 ^= k2 >> r; k2 *= m;
+ h2 *= m; h2 ^= k2;
+ len -= 4;
+ }
+
+ if(len >= 4)
+ {
+ uint32_t k1 = *data++;
+ k1 *= m; k1 ^= k1 >> r; k1 *= m;
+ h1 *= m; h1 ^= k1;
+ len -= 4;
+ }
+
+ switch(len)
+ {
+ case 3: h2 ^= ((const unsigned char*)data)[2] << 16;
+ case 2: h2 ^= ((const unsigned char*)data)[1] << 8;
+ case 1: h2 ^= ((const unsigned char*)data)[0];
+ h2 *= m;
+ };
+
+ h1 ^= h2 >> 18; h1 *= m;
+ h2 ^= h1 >> 22; h2 *= m;
+ h1 ^= h2 >> 17; h1 *= m;
+ h2 ^= h1 >> 19; h2 *= m;
+
+ uint64_t h = h1;
+
+ h = (h << 32) | h2;
+
+ return h;
+}
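+
+/*
+ * Illustrative sketch (added for exposition; the helper name is
+ * hypothetical): hashing a NUL-terminated string with the 64-bit variant
+ * above, as ck_ht does through its default hash wrapper.
+ */
+static inline uint64_t
+hash_string_example(const char *s, uint64_t seed)
+{
+
+	return MurmurHash64A(s, (int)strlen(s), seed);
+}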
+
+#endif /* CK_HT_HASH_H */
diff --git a/src/ck_internal.h b/src/ck_internal.h
new file mode 100644
index 0000000..7aad3d7
--- /dev/null
+++ b/src/ck_internal.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Several of these are from: http://graphics.stanford.edu/~seander/bithacks.html
+ */
+
+#define CK_INTERNAL_LOG_0 (0xAAAAAAAA)
+#define CK_INTERNAL_LOG_1 (0xCCCCCCCC)
+#define CK_INTERNAL_LOG_2 (0xF0F0F0F0)
+#define CK_INTERNAL_LOG_3 (0xFF00FF00)
+#define CK_INTERNAL_LOG_4 (0xFFFF0000)
+
+CK_CC_INLINE static uint32_t
+ck_internal_log(uint32_t v)
+{
+ uint32_t r = (v & CK_INTERNAL_LOG_0) != 0;
+
+ r |= ((v & CK_INTERNAL_LOG_4) != 0) << 4;
+ r |= ((v & CK_INTERNAL_LOG_3) != 0) << 3;
+ r |= ((v & CK_INTERNAL_LOG_2) != 0) << 2;
+ r |= ((v & CK_INTERNAL_LOG_1) != 0) << 1;
+ return (r);
+}
+
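+/*
+ * Round up to the next power of two by smearing the highest set bit of
+ * (v - 1) into every lower bit position; for example, 37 maps to 64 and 64
+ * maps to 64.
+ */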
+CK_CC_INLINE static uint32_t
+ck_internal_power_2(uint32_t v)
+{
+
+ --v;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return (++v);
+}
+
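+/*
+ * Branchless maximum: (x < y) evaluates to 0 or 1, so -(x < y) is either all
+ * zero bits or all one bits, selecting either x or x ^ (x ^ y) == y.
+ */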
+CK_CC_INLINE static unsigned long
+ck_internal_max(unsigned long x, unsigned long y)
+{
+
+ return x ^ ((x ^ y) & -(x < y));
+}
+
+CK_CC_INLINE static uint64_t
+ck_internal_max_64(uint64_t x, uint64_t y)
+{
+
+ return x ^ ((x ^ y) & -(x < y));
+}
+
+CK_CC_INLINE static uint32_t
+ck_internal_max_32(uint32_t x, uint32_t y)
+{
+
+ return x ^ ((x ^ y) & -(x < y));
+}
+
+CK_CC_INLINE static unsigned long
+ck_internal_bsf(unsigned long v)
+{
+#if defined(__GNUC__)
+ return __builtin_ffs(v);
+#else
+ unsigned int i;
+ const unsigned int s = sizeof(unsigned long) * 8 - 1;
+
+ for (i = 0; i < s; i++) {
+ if (v & (1UL << (s - i)))
+ return sizeof(unsigned long) * 8 - i;
+ }
+
+ return 1;
+#endif /* !__GNUC__ */
+}
+
+CK_CC_INLINE static uint64_t
+ck_internal_bsf_64(uint64_t v)
+{
+#if defined(__GNUC__)
+	return __builtin_ffsll(v);
+#else
+	unsigned int i;
+
+	/*
+	 * Match the ffs-style numbering of the built-in path: return the
+	 * 1-based index of the least significant set bit, or 0 if v is 0.
+	 */
+	for (i = 0; i < 64; i++) {
+		if (v & (1ULL << i))
+			return (i + 1);
+	}
+#endif /* !__GNUC__ */
+
+	return 0;
+}
+
diff --git a/src/ck_rhs.c b/src/ck_rhs.c
new file mode 100644
index 0000000..f6dd2ee
--- /dev/null
+++ b/src/ck_rhs.c
@@ -0,0 +1,1480 @@
+/*
+ * Copyright 2014-2015 Olivier Houchard.
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_cc.h>
+#include <ck_rhs.h>
+#include <ck_limits.h>
+#include <ck_md.h>
+#include <ck_pr.h>
+#include <ck_stdint.h>
+#include <ck_stdbool.h>
+#include <ck_string.h>
+
+#include "ck_internal.h"
+
+#ifndef CK_RHS_PROBE_L1_SHIFT
+#define CK_RHS_PROBE_L1_SHIFT 3ULL
+#endif /* CK_RHS_PROBE_L1_SHIFT */
+
+#define CK_RHS_PROBE_L1 (1 << CK_RHS_PROBE_L1_SHIFT)
+#define CK_RHS_PROBE_L1_MASK (CK_RHS_PROBE_L1 - 1)
+
+#ifndef CK_RHS_PROBE_L1_DEFAULT
+#define CK_RHS_PROBE_L1_DEFAULT CK_MD_CACHELINE
+#endif
+
+#define CK_RHS_VMA_MASK ((uintptr_t)((1ULL << CK_MD_VMA_BITS) - 1))
+#define CK_RHS_VMA(x) \
+ ((void *)((uintptr_t)(x) & CK_RHS_VMA_MASK))
+
+#define CK_RHS_EMPTY NULL
+#define CK_RHS_G (1024)
+#define CK_RHS_G_MASK (CK_RHS_G - 1)
+
+#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_STORE_8)
+#define CK_RHS_WORD uint8_t
+#define CK_RHS_WORD_MAX UINT8_MAX
+#define CK_RHS_STORE(x, y) ck_pr_store_8(x, y)
+#define CK_RHS_LOAD(x) ck_pr_load_8(x)
+#elif defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_STORE_16)
+#define CK_RHS_WORD uint16_t
+#define CK_RHS_WORD_MAX UINT16_MAX
+#define CK_RHS_STORE(x, y) ck_pr_store_16(x, y)
+#define CK_RHS_LOAD(x) ck_pr_load_16(x)
+#elif defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_STORE_32)
+#define CK_RHS_WORD uint32_t
+#define CK_RHS_WORD_MAX UINT32_MAX
+#define CK_RHS_STORE(x, y) ck_pr_store_32(x, y)
+#define CK_RHS_LOAD(x) ck_pr_load_32(x)
+#else
+#error "ck_rhs is not supported on your platform."
+#endif
+
+#define CK_RHS_MAX_WANTED 0xffff
+
+enum ck_rhs_probe_behavior {
+ CK_RHS_PROBE = 0, /* Default behavior. */
+ CK_RHS_PROBE_RH, /* Short-circuit if RH slot found. */
+	CK_RHS_PROBE_INSERT,	/* Probe within the current probe bound first when inserting. */
+
+	CK_RHS_PROBE_ROBIN_HOOD,/* Look for the first slot available for the
+				 * entry being displaced; only used internally
+				 * to implement Robin Hood insertion. */
+	CK_RHS_PROBE_NO_RH,	/* Don't do the Robin Hood dance. */
+};
+
+struct ck_rhs_entry_desc {
+ unsigned int probes;
+ unsigned short wanted;
+ CK_RHS_WORD probe_bound;
+ bool in_rh;
+ const void *entry;
+} CK_CC_ALIGN(16);
+
+struct ck_rhs_no_entry_desc {
+ unsigned int probes;
+ unsigned short wanted;
+ CK_RHS_WORD probe_bound;
+ bool in_rh;
+} CK_CC_ALIGN(8);
+
+typedef long ck_rhs_probe_cb_t(struct ck_rhs *hs,
+ struct ck_rhs_map *map,
+ unsigned long *n_probes,
+ long *priority,
+ unsigned long h,
+ const void *key,
+ const void **object,
+ unsigned long probe_limit,
+ enum ck_rhs_probe_behavior behavior);
+
+struct ck_rhs_map {
+ unsigned int generation[CK_RHS_G];
+ unsigned int probe_maximum;
+ unsigned long mask;
+ unsigned long step;
+ unsigned int probe_limit;
+ unsigned long n_entries;
+ unsigned long capacity;
+ unsigned long size;
+ unsigned long max_entries;
+ char offset_mask;
+ union {
+ struct ck_rhs_entry_desc *descs;
+ struct ck_rhs_no_entry {
+ const void **entries;
+ struct ck_rhs_no_entry_desc *descs;
+ } no_entries;
+ } entries;
+ bool read_mostly;
+ ck_rhs_probe_cb_t *probe_func;
+};
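+
+/*
+ * Layout note (illustrative, derived from the accessors below): in
+ * CK_RHS_MODE_READ_MOSTLY maps the entry pointers live in a dense array that
+ * can be probed without touching per-slot metadata, which is kept in a
+ * separate array of ck_rhs_no_entry_desc. In the default mode, each slot is
+ * a ck_rhs_entry_desc that interleaves the entry pointer with its metadata.
+ */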
+
+static CK_CC_INLINE const void *
+ck_rhs_entry(struct ck_rhs_map *map, long offset)
+{
+
+ if (map->read_mostly)
+ return (map->entries.no_entries.entries[offset]);
+ else
+ return (map->entries.descs[offset].entry);
+}
+
+static CK_CC_INLINE const void **
+ck_rhs_entry_addr(struct ck_rhs_map *map, long offset)
+{
+
+ if (map->read_mostly)
+ return (&map->entries.no_entries.entries[offset]);
+ else
+ return (&map->entries.descs[offset].entry);
+}
+
+static CK_CC_INLINE struct ck_rhs_entry_desc *
+ck_rhs_desc(struct ck_rhs_map *map, long offset)
+{
+
+ if (CK_CC_UNLIKELY(map->read_mostly))
+ return ((struct ck_rhs_entry_desc *)(void *)&map->entries.no_entries.descs[offset]);
+ else
+ return (&map->entries.descs[offset]);
+}
+
+static CK_CC_INLINE void
+ck_rhs_wanted_inc(struct ck_rhs_map *map, long offset)
+{
+
+ if (CK_CC_UNLIKELY(map->read_mostly))
+ map->entries.no_entries.descs[offset].wanted++;
+ else
+ map->entries.descs[offset].wanted++;
+}
+
+static CK_CC_INLINE unsigned int
+ck_rhs_probes(struct ck_rhs_map *map, long offset)
+{
+
+ if (CK_CC_UNLIKELY(map->read_mostly))
+ return (map->entries.no_entries.descs[offset].probes);
+ else
+ return (map->entries.descs[offset].probes);
+}
+
+static CK_CC_INLINE void
+ck_rhs_set_probes(struct ck_rhs_map *map, long offset, unsigned int value)
+{
+
+ if (CK_CC_UNLIKELY(map->read_mostly))
+ map->entries.no_entries.descs[offset].probes = value;
+ else
+ map->entries.descs[offset].probes = value;
+}
+
+static CK_CC_INLINE CK_RHS_WORD
+ck_rhs_probe_bound(struct ck_rhs_map *map, long offset)
+{
+
+ if (CK_CC_UNLIKELY(map->read_mostly))
+ return (map->entries.no_entries.descs[offset].probe_bound);
+ else
+ return (map->entries.descs[offset].probe_bound);
+}
+
+static CK_CC_INLINE CK_RHS_WORD *
+ck_rhs_probe_bound_addr(struct ck_rhs_map *map, long offset)
+{
+
+ if (CK_CC_UNLIKELY(map->read_mostly))
+ return (&map->entries.no_entries.descs[offset].probe_bound);
+ else
+ return (&map->entries.descs[offset].probe_bound);
+}
+
+static CK_CC_INLINE bool
+ck_rhs_in_rh(struct ck_rhs_map *map, long offset)
+{
+
+ if (CK_CC_UNLIKELY(map->read_mostly))
+ return (map->entries.no_entries.descs[offset].in_rh);
+ else
+ return (map->entries.descs[offset].in_rh);
+}
+
+static CK_CC_INLINE void
+ck_rhs_set_rh(struct ck_rhs_map *map, long offset)
+{
+
+ if (CK_CC_UNLIKELY(map->read_mostly))
+ map->entries.no_entries.descs[offset].in_rh = true;
+ else
+ map->entries.descs[offset].in_rh = true;
+}
+
+static CK_CC_INLINE void
+ck_rhs_unset_rh(struct ck_rhs_map *map, long offset)
+{
+
+ if (CK_CC_UNLIKELY(map->read_mostly))
+ map->entries.no_entries.descs[offset].in_rh = false;
+ else
+ map->entries.descs[offset].in_rh = false;
+}
+
+#define CK_RHS_DEFAULT_LOAD_FACTOR 50
+
+static ck_rhs_probe_cb_t ck_rhs_map_probe;
+static ck_rhs_probe_cb_t ck_rhs_map_probe_rm;
+
+bool
+ck_rhs_set_load_factor(struct ck_rhs *hs, unsigned int load_factor)
+{
+ struct ck_rhs_map *map = hs->map;
+
+ if (load_factor == 0 || load_factor > 100)
+ return false;
+
+ hs->load_factor = load_factor;
+ map->max_entries = (map->capacity * (unsigned long)hs->load_factor) / 100;
+ while (map->n_entries > map->max_entries) {
+ if (ck_rhs_grow(hs, map->capacity << 1) == false)
+ return false;
+ map = hs->map;
+ }
+ return true;
+}
+
+void
+ck_rhs_iterator_init(struct ck_rhs_iterator *iterator)
+{
+
+ iterator->cursor = NULL;
+ iterator->offset = 0;
+ return;
+}
+
+bool
+ck_rhs_next(struct ck_rhs *hs, struct ck_rhs_iterator *i, void **key)
+{
+ struct ck_rhs_map *map = hs->map;
+ void *value;
+
+ if (i->offset >= map->capacity)
+ return false;
+
+ do {
+ value = CK_CC_DECONST_PTR(ck_rhs_entry(map, i->offset));
+ if (value != CK_RHS_EMPTY) {
+#ifdef CK_RHS_PP
+ if (hs->mode & CK_RHS_MODE_OBJECT)
+ value = CK_RHS_VMA(value);
+#endif
+ i->offset++;
+ *key = value;
+ return true;
+ }
+ } while (++i->offset < map->capacity);
+
+ return false;
+}
+
+void
+ck_rhs_stat(struct ck_rhs *hs, struct ck_rhs_stat *st)
+{
+ struct ck_rhs_map *map = hs->map;
+
+ st->n_entries = map->n_entries;
+ st->probe_maximum = map->probe_maximum;
+ return;
+}
+
+unsigned long
+ck_rhs_count(struct ck_rhs *hs)
+{
+
+ return hs->map->n_entries;
+}
+
+static void
+ck_rhs_map_destroy(struct ck_malloc *m, struct ck_rhs_map *map, bool defer)
+{
+
+ m->free(map, map->size, defer);
+ return;
+}
+
+void
+ck_rhs_destroy(struct ck_rhs *hs)
+{
+
+ ck_rhs_map_destroy(hs->m, hs->map, false);
+ return;
+}
+
+static struct ck_rhs_map *
+ck_rhs_map_create(struct ck_rhs *hs, unsigned long entries)
+{
+ struct ck_rhs_map *map;
+ unsigned long size, n_entries, limit;
+
+ n_entries = ck_internal_power_2(entries);
+ if (n_entries < CK_RHS_PROBE_L1)
+ n_entries = CK_RHS_PROBE_L1;
+
+ if (hs->mode & CK_RHS_MODE_READ_MOSTLY)
+ size = sizeof(struct ck_rhs_map) +
+ (sizeof(void *) * n_entries +
+ sizeof(struct ck_rhs_no_entry_desc) * n_entries +
+ 2 * CK_MD_CACHELINE - 1);
+ else
+ size = sizeof(struct ck_rhs_map) +
+ (sizeof(struct ck_rhs_entry_desc) * n_entries +
+ CK_MD_CACHELINE - 1);
+ map = hs->m->malloc(size);
+ if (map == NULL)
+ return NULL;
+ map->read_mostly = !!(hs->mode & CK_RHS_MODE_READ_MOSTLY);
+
+ map->size = size;
+ /* We should probably use a more intelligent heuristic for default probe length. */
+ limit = ck_internal_max(n_entries >> (CK_RHS_PROBE_L1_SHIFT + 2), CK_RHS_PROBE_L1_DEFAULT);
+ if (limit > UINT_MAX)
+ limit = UINT_MAX;
+
+ map->probe_limit = (unsigned int)limit;
+ map->probe_maximum = 0;
+ map->capacity = n_entries;
+ map->step = ck_internal_bsf(n_entries);
+ map->mask = n_entries - 1;
+ map->n_entries = 0;
+
+ map->max_entries = (map->capacity * (unsigned long)hs->load_factor) / 100;
+ /* Align map allocation to cache line. */
+ if (map->read_mostly) {
+ map->entries.no_entries.entries = (void *)(((uintptr_t)&map[1] +
+ CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));
+ map->entries.no_entries.descs = (void *)(((uintptr_t)map->entries.no_entries.entries + (sizeof(void *) * n_entries) + CK_MD_CACHELINE - 1) &~ (CK_MD_CACHELINE - 1));
+ memset(map->entries.no_entries.entries, 0,
+ sizeof(void *) * n_entries);
+ memset(map->entries.no_entries.descs, 0,
+ sizeof(struct ck_rhs_no_entry_desc) * n_entries);
+ map->offset_mask = (CK_MD_CACHELINE / sizeof(void *)) - 1;
+ map->probe_func = ck_rhs_map_probe_rm;
+
+ } else {
+ map->entries.descs = (void *)(((uintptr_t)&map[1] +
+ CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));
+ memset(map->entries.descs, 0, sizeof(struct ck_rhs_entry_desc) * n_entries);
+ map->offset_mask = (CK_MD_CACHELINE / sizeof(struct ck_rhs_entry_desc)) - 1;
+ map->probe_func = ck_rhs_map_probe;
+ }
+ memset(map->generation, 0, sizeof map->generation);
+
+ /* Commit entries purge with respect to map publication. */
+ ck_pr_fence_store();
+ return map;
+}
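+
+/*
+ * Note on the alignment arithmetic above: (p + CK_MD_CACHELINE - 1) &
+ * ~(CK_MD_CACHELINE - 1) rounds p up to the next multiple of the
+ * power-of-two cache line size. For example, with 64-byte lines, 0x1008
+ * rounds up to 0x1040 while 0x1040 is left unchanged.
+ */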
+
+bool
+ck_rhs_reset_size(struct ck_rhs *hs, unsigned long capacity)
+{
+ struct ck_rhs_map *map, *previous;
+
+ previous = hs->map;
+ map = ck_rhs_map_create(hs, capacity);
+ if (map == NULL)
+ return false;
+
+ ck_pr_store_ptr(&hs->map, map);
+ ck_rhs_map_destroy(hs->m, previous, true);
+ return true;
+}
+
+bool
+ck_rhs_reset(struct ck_rhs *hs)
+{
+ struct ck_rhs_map *previous;
+
+ previous = hs->map;
+ return ck_rhs_reset_size(hs, previous->capacity);
+}
+
+static inline unsigned long
+ck_rhs_map_probe_next(struct ck_rhs_map *map,
+ unsigned long offset,
+ unsigned long probes)
+{
+
+ if (probes & map->offset_mask) {
+ offset = (offset &~ map->offset_mask) +
+ ((offset + 1) & map->offset_mask);
+ return offset;
+ } else
+ return (offset + probes) & map->mask;
+}
+
+static inline unsigned long
+ck_rhs_map_probe_prev(struct ck_rhs_map *map, unsigned long offset,
+ unsigned long probes)
+{
+
+ if (probes & map->offset_mask) {
+ offset = (offset &~ map->offset_mask) + ((offset - 1) &
+ map->offset_mask);
+ return offset;
+ } else
+ return ((offset - probes) & map->mask);
+}
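+
+/*
+ * Probe sequence note (illustrative): offset_mask is the number of slots per
+ * cache line minus one, so successive probes first visit the remaining slots
+ * of the current cache line (wrapping within it) and only then jump to
+ * another line. ck_rhs_map_probe_prev() retraces the same sequence
+ * backwards, which the deletion path relies on.
+ */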
+
+static inline void
+ck_rhs_map_bound_set(struct ck_rhs_map *m,
+ unsigned long h,
+ unsigned long n_probes)
+{
+ unsigned long offset = h & m->mask;
+ struct ck_rhs_entry_desc *desc;
+
+ if (n_probes > m->probe_maximum)
+ ck_pr_store_uint(&m->probe_maximum, n_probes);
+ if (!(m->read_mostly)) {
+ desc = &m->entries.descs[offset];
+
+ if (desc->probe_bound < n_probes) {
+ if (n_probes > CK_RHS_WORD_MAX)
+ n_probes = CK_RHS_WORD_MAX;
+
+ CK_RHS_STORE(&desc->probe_bound, n_probes);
+ ck_pr_fence_store();
+ }
+ }
+
+ return;
+}
+
+static inline unsigned int
+ck_rhs_map_bound_get(struct ck_rhs_map *m, unsigned long h)
+{
+ unsigned long offset = h & m->mask;
+ unsigned int r = CK_RHS_WORD_MAX;
+
+ if (m->read_mostly)
+ r = ck_pr_load_uint(&m->probe_maximum);
+ else {
+ r = CK_RHS_LOAD(&m->entries.descs[offset].probe_bound);
+ if (r == CK_RHS_WORD_MAX)
+ r = ck_pr_load_uint(&m->probe_maximum);
+ }
+ return r;
+}
+
+bool
+ck_rhs_grow(struct ck_rhs *hs,
+ unsigned long capacity)
+{
+ struct ck_rhs_map *map, *update;
+ const void *previous, *prev_saved;
+ unsigned long k, offset, probes;
+
+restart:
+ map = hs->map;
+ if (map->capacity > capacity)
+ return false;
+
+ update = ck_rhs_map_create(hs, capacity);
+ if (update == NULL)
+ return false;
+
+ for (k = 0; k < map->capacity; k++) {
+ unsigned long h;
+
+ prev_saved = previous = ck_rhs_entry(map, k);
+ if (previous == CK_RHS_EMPTY)
+ continue;
+
+#ifdef CK_RHS_PP
+ if (hs->mode & CK_RHS_MODE_OBJECT)
+ previous = CK_RHS_VMA(previous);
+#endif
+
+ h = hs->hf(previous, hs->seed);
+ offset = h & update->mask;
+ probes = 0;
+
+ for (;;) {
+ const void **cursor = ck_rhs_entry_addr(update, offset);
+
+ if (probes++ == update->probe_limit) {
+ /*
+ * We have hit the probe limit, map needs to be even larger.
+ */
+ ck_rhs_map_destroy(hs->m, update, false);
+ capacity <<= 1;
+ goto restart;
+ }
+
+ if (CK_CC_LIKELY(*cursor == CK_RHS_EMPTY)) {
+ *cursor = prev_saved;
+ update->n_entries++;
+ ck_rhs_set_probes(update, offset, probes);
+ ck_rhs_map_bound_set(update, h, probes);
+ break;
+ } else if (ck_rhs_probes(update, offset) < probes) {
+ const void *tmp = prev_saved;
+ unsigned int old_probes;
+ prev_saved = previous = *cursor;
+#ifdef CK_RHS_PP
+ if (hs->mode & CK_RHS_MODE_OBJECT)
+ previous = CK_RHS_VMA(previous);
+#endif
+ *cursor = tmp;
+ ck_rhs_map_bound_set(update, h, probes);
+ h = hs->hf(previous, hs->seed);
+ old_probes = ck_rhs_probes(update, offset);
+ ck_rhs_set_probes(update, offset, probes);
+ probes = old_probes - 1;
+ continue;
+ }
+ ck_rhs_wanted_inc(update, offset);
+ offset = ck_rhs_map_probe_next(update, offset, probes);
+ }
+
+ }
+
+ ck_pr_fence_store();
+ ck_pr_store_ptr(&hs->map, update);
+ ck_rhs_map_destroy(hs->m, map, true);
+ return true;
+}
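+
+/*
+ * Migration note (illustrative): the loop above re-inserts every entry into
+ * the larger map using the same Robin Hood rule as insertion. Whenever the
+ * slot's current resident has probed fewer times than the incoming entry,
+ * the two are swapped and the displaced entry resumes probing with its own
+ * count. If any entry exceeds the probe limit, the new map is discarded and
+ * rebuilt at twice the capacity.
+ */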
+
+bool
+ck_rhs_rebuild(struct ck_rhs *hs)
+{
+
+ return ck_rhs_grow(hs, hs->map->capacity);
+}
+
+static long
+ck_rhs_map_probe_rm(struct ck_rhs *hs,
+ struct ck_rhs_map *map,
+ unsigned long *n_probes,
+ long *priority,
+ unsigned long h,
+ const void *key,
+ const void **object,
+ unsigned long probe_limit,
+ enum ck_rhs_probe_behavior behavior)
+{
+ const void *k;
+ const void *compare;
+ long pr = -1;
+ unsigned long offset, probes, opl;
+
+#ifdef CK_RHS_PP
+ /* If we are storing object pointers, then we may leverage pointer packing. */
+ unsigned long hv = 0;
+
+ if (hs->mode & CK_RHS_MODE_OBJECT) {
+ hv = (h >> 25) & CK_RHS_KEY_MASK;
+ compare = CK_RHS_VMA(key);
+ } else {
+ compare = key;
+ }
+#else
+ compare = key;
+#endif
+ *object = NULL;
+ if (behavior != CK_RHS_PROBE_ROBIN_HOOD) {
+ probes = 0;
+ offset = h & map->mask;
+ } else {
+		/* Restart from the bucket we were previously in. */
+ probes = *n_probes;
+ offset = ck_rhs_map_probe_next(map, *priority,
+ probes);
+ }
+ opl = probe_limit;
+
+ for (;;) {
+ if (probes++ == probe_limit) {
+ if (probe_limit == opl || pr != -1) {
+ k = CK_RHS_EMPTY;
+ goto leave;
+ }
+ /*
+ * If no eligible slot has been found yet, continue probe
+ * sequence with original probe limit.
+ */
+ probe_limit = opl;
+ }
+ k = ck_pr_load_ptr(&map->entries.no_entries.entries[offset]);
+ if (k == CK_RHS_EMPTY)
+ goto leave;
+
+ if (behavior != CK_RHS_PROBE_NO_RH) {
+ struct ck_rhs_entry_desc *desc = (void *)&map->entries.no_entries.descs[offset];
+
+ if (pr == -1 &&
+ desc->in_rh == false && desc->probes < probes) {
+ pr = offset;
+ *n_probes = probes;
+
+ if (behavior == CK_RHS_PROBE_RH ||
+ behavior == CK_RHS_PROBE_ROBIN_HOOD) {
+ k = CK_RHS_EMPTY;
+ goto leave;
+ }
+ }
+ }
+
+ if (behavior != CK_RHS_PROBE_ROBIN_HOOD) {
+#ifdef CK_RHS_PP
+ if (hs->mode & CK_RHS_MODE_OBJECT) {
+ if (((uintptr_t)k >> CK_MD_VMA_BITS) != hv) {
+ offset = ck_rhs_map_probe_next(map, offset, probes);
+ continue;
+ }
+
+ k = CK_RHS_VMA(k);
+ }
+#endif
+
+ if (k == compare)
+ goto leave;
+
+ if (hs->compare == NULL) {
+ offset = ck_rhs_map_probe_next(map, offset, probes);
+ continue;
+ }
+
+ if (hs->compare(k, key) == true)
+ goto leave;
+ }
+ offset = ck_rhs_map_probe_next(map, offset, probes);
+ }
+leave:
+ if (probes > probe_limit) {
+ offset = -1;
+ } else {
+ *object = k;
+ }
+
+ if (pr == -1)
+ *n_probes = probes;
+
+ *priority = pr;
+ return offset;
+}
+
+static long
+ck_rhs_map_probe(struct ck_rhs *hs,
+ struct ck_rhs_map *map,
+ unsigned long *n_probes,
+ long *priority,
+ unsigned long h,
+ const void *key,
+ const void **object,
+ unsigned long probe_limit,
+ enum ck_rhs_probe_behavior behavior)
+{
+ const void *k;
+ const void *compare;
+ long pr = -1;
+ unsigned long offset, probes, opl;
+
+#ifdef CK_RHS_PP
+ /* If we are storing object pointers, then we may leverage pointer packing. */
+ unsigned long hv = 0;
+
+ if (hs->mode & CK_RHS_MODE_OBJECT) {
+ hv = (h >> 25) & CK_RHS_KEY_MASK;
+ compare = CK_RHS_VMA(key);
+ } else {
+ compare = key;
+ }
+#else
+ compare = key;
+#endif
+
+ *object = NULL;
+ if (behavior != CK_RHS_PROBE_ROBIN_HOOD) {
+ probes = 0;
+ offset = h & map->mask;
+ } else {
+		/* Restart from the bucket we were previously in. */
+ probes = *n_probes;
+ offset = ck_rhs_map_probe_next(map, *priority,
+ probes);
+ }
+ opl = probe_limit;
+ if (behavior == CK_RHS_PROBE_INSERT)
+ probe_limit = ck_rhs_map_bound_get(map, h);
+
+ for (;;) {
+ if (probes++ == probe_limit) {
+ if (probe_limit == opl || pr != -1) {
+ k = CK_RHS_EMPTY;
+ goto leave;
+ }
+ /*
+ * If no eligible slot has been found yet, continue probe
+ * sequence with original probe limit.
+ */
+ probe_limit = opl;
+ }
+ k = ck_pr_load_ptr(&map->entries.descs[offset].entry);
+ if (k == CK_RHS_EMPTY)
+ goto leave;
+ if ((behavior != CK_RHS_PROBE_NO_RH)) {
+ struct ck_rhs_entry_desc *desc = &map->entries.descs[offset];
+
+ if (pr == -1 &&
+ desc->in_rh == false && desc->probes < probes) {
+ pr = offset;
+ *n_probes = probes;
+
+ if (behavior == CK_RHS_PROBE_RH ||
+ behavior == CK_RHS_PROBE_ROBIN_HOOD) {
+ k = CK_RHS_EMPTY;
+ goto leave;
+ }
+ }
+ }
+
+ if (behavior != CK_RHS_PROBE_ROBIN_HOOD) {
+#ifdef CK_RHS_PP
+ if (hs->mode & CK_RHS_MODE_OBJECT) {
+ if (((uintptr_t)k >> CK_MD_VMA_BITS) != hv) {
+ offset = ck_rhs_map_probe_next(map, offset, probes);
+ continue;
+ }
+
+ k = CK_RHS_VMA(k);
+ }
+#endif
+
+ if (k == compare)
+ goto leave;
+
+ if (hs->compare == NULL) {
+ offset = ck_rhs_map_probe_next(map, offset, probes);
+ continue;
+ }
+
+ if (hs->compare(k, key) == true)
+ goto leave;
+ }
+ offset = ck_rhs_map_probe_next(map, offset, probes);
+ }
+leave:
+ if (probes > probe_limit) {
+ offset = -1;
+ } else {
+ *object = k;
+ }
+
+ if (pr == -1)
+ *n_probes = probes;
+
+ *priority = pr;
+ return offset;
+}
+
+static inline const void *
+ck_rhs_marshal(unsigned int mode, const void *key, unsigned long h)
+{
+#ifdef CK_RHS_PP
+ const void *insert;
+
+ if (mode & CK_RHS_MODE_OBJECT) {
+ insert = (void *)((uintptr_t)CK_RHS_VMA(key) | ((h >> 25) << CK_MD_VMA_BITS));
+ } else {
+ insert = key;
+ }
+
+ return insert;
+#else
+ (void)mode;
+ (void)h;
+
+ return key;
+#endif
+}
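+
+/*
+ * Pointer packing note (illustrative, assuming CK_RHS_PP and 48-bit virtual
+ * addresses): ck_rhs_marshal() stores high bits of the hash in the unused
+ * upper bits of the object pointer and CK_RHS_VMA() masks them off again, so
+ * a probe can reject most non-matching slots by comparing the packed hash
+ * bits before dereferencing the object or calling the comparator.
+ */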
+
+bool
+ck_rhs_gc(struct ck_rhs *hs)
+{
+ unsigned long i;
+ struct ck_rhs_map *map = hs->map;
+
+ unsigned int max_probes = 0;
+ for (i = 0; i < map->capacity; i++) {
+ if (ck_rhs_probes(map, i) > max_probes)
+ max_probes = ck_rhs_probes(map, i);
+ }
+ map->probe_maximum = max_probes;
+ return true;
+}
+
+static void
+ck_rhs_add_wanted(struct ck_rhs *hs, long end_offset, long old_slot,
+ unsigned long h)
+{
+ struct ck_rhs_map *map = hs->map;
+ long offset;
+ unsigned int probes = 1;
+ bool found_slot = false;
+ struct ck_rhs_entry_desc *desc;
+
+ offset = h & map->mask;
+
+ if (old_slot == -1)
+ found_slot = true;
+ while (offset != end_offset) {
+ if (offset == old_slot)
+ found_slot = true;
+ if (found_slot) {
+ desc = ck_rhs_desc(map, offset);
+ if (desc->wanted < CK_RHS_MAX_WANTED)
+ desc->wanted++;
+ }
+ offset = ck_rhs_map_probe_next(map, offset, probes);
+ probes++;
+ }
+}
+
+static unsigned long
+ck_rhs_remove_wanted(struct ck_rhs *hs, long offset, long limit)
+{
+ struct ck_rhs_map *map = hs->map;
+ int probes = ck_rhs_probes(map, offset);
+ bool do_remove = true;
+ struct ck_rhs_entry_desc *desc;
+
+ while (probes > 1) {
+ probes--;
+ offset = ck_rhs_map_probe_prev(map, offset, probes);
+ if (offset == limit)
+ do_remove = false;
+ if (do_remove) {
+ desc = ck_rhs_desc(map, offset);
+ if (desc->wanted != CK_RHS_MAX_WANTED)
+ desc->wanted--;
+ }
+ }
+ return offset;
+}
+
+static long
+ck_rhs_get_first_offset(struct ck_rhs_map *map, unsigned long offset, unsigned int probes)
+{
+ while (probes > (unsigned long)map->offset_mask + 1) {
+ offset -= ((probes - 1) &~ map->offset_mask);
+ offset &= map->mask;
+ offset = (offset &~ map->offset_mask) +
+ ((offset - map->offset_mask) & map->offset_mask);
+ probes -= map->offset_mask + 1;
+ }
+ return ((offset &~ map->offset_mask) + ((offset - (probes - 1)) & map->offset_mask));
+}
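+
+/*
+ * ck_rhs_get_first_offset() inverts the probe sequence: given the slot an
+ * entry occupies and the number of probes it took to get there, it recovers
+ * the entry's home bucket (hash & mask). This lets probe bounds and "wanted"
+ * counters be updated without re-hashing the stored key.
+ */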
+
+#define CK_RHS_MAX_RH 512
+
+static int
+ck_rhs_put_robin_hood(struct ck_rhs *hs,
+ long orig_slot, struct ck_rhs_entry_desc *desc)
+{
+ long slot, first;
+ const void *object, *insert;
+ unsigned long n_probes;
+ struct ck_rhs_map *map;
+ unsigned long h = 0;
+ long prev;
+ void *key;
+ long prevs[CK_RHS_MAX_RH];
+ unsigned int prevs_nb = 0;
+ unsigned int i;
+
+ map = hs->map;
+ first = orig_slot;
+ n_probes = desc->probes;
+restart:
+ key = CK_CC_DECONST_PTR(ck_rhs_entry(map, first));
+ insert = key;
+#ifdef CK_RHS_PP
+ if (hs->mode & CK_RHS_MODE_OBJECT)
+ key = CK_RHS_VMA(key);
+#endif
+ orig_slot = first;
+ ck_rhs_set_rh(map, orig_slot);
+
+ slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object,
+ map->probe_limit, prevs_nb == CK_RHS_MAX_RH ?
+ CK_RHS_PROBE_NO_RH : CK_RHS_PROBE_ROBIN_HOOD);
+
+ if (slot == -1 && first == -1) {
+ if (ck_rhs_grow(hs, map->capacity << 1) == false) {
+ desc->in_rh = false;
+
+ for (i = 0; i < prevs_nb; i++)
+ ck_rhs_unset_rh(map, prevs[i]);
+
+ return -1;
+ }
+
+ return 1;
+ }
+
+ if (first != -1) {
+ desc = ck_rhs_desc(map, first);
+ int old_probes = desc->probes;
+
+ desc->probes = n_probes;
+ h = ck_rhs_get_first_offset(map, first, n_probes);
+ ck_rhs_map_bound_set(map, h, n_probes);
+ prev = orig_slot;
+ prevs[prevs_nb++] = prev;
+ n_probes = old_probes;
+ goto restart;
+ } else {
+ /* An empty slot was found. */
+ h = ck_rhs_get_first_offset(map, slot, n_probes);
+ ck_rhs_map_bound_set(map, h, n_probes);
+ ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
+ ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
+ ck_pr_fence_atomic_store();
+ ck_rhs_set_probes(map, slot, n_probes);
+		desc->in_rh = false;
+ ck_rhs_add_wanted(hs, slot, orig_slot, h);
+ }
+ while (prevs_nb > 0) {
+ prev = prevs[--prevs_nb];
+ ck_pr_store_ptr(ck_rhs_entry_addr(map, orig_slot),
+ ck_rhs_entry(map, prev));
+ h = ck_rhs_get_first_offset(map, orig_slot,
+ desc->probes);
+ ck_rhs_add_wanted(hs, orig_slot, prev, h);
+ ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
+ ck_pr_fence_atomic_store();
+ orig_slot = prev;
+ desc->in_rh = false;
+ desc = ck_rhs_desc(map, orig_slot);
+ }
+ return 0;
+}
+
+static void
+ck_rhs_do_backward_shift_delete(struct ck_rhs *hs, long slot)
+{
+ struct ck_rhs_map *map = hs->map;
+ struct ck_rhs_entry_desc *desc, *new_desc = NULL;
+ unsigned long h;
+
+ desc = ck_rhs_desc(map, slot);
+ h = ck_rhs_remove_wanted(hs, slot, -1);
+ while (desc->wanted > 0) {
+ unsigned long offset = 0, tmp_offset;
+ unsigned long wanted_probes = 1;
+ unsigned int probe = 0;
+ unsigned int max_probes;
+
+ /* Find a successor */
+ while (wanted_probes < map->probe_maximum) {
+ probe = wanted_probes;
+ offset = ck_rhs_map_probe_next(map, slot, probe);
+ while (probe < map->probe_maximum) {
+ new_desc = ck_rhs_desc(map, offset);
+ if (new_desc->probes == probe + 1)
+ break;
+ probe++;
+ offset = ck_rhs_map_probe_next(map, offset,
+ probe);
+ }
+ if (probe < map->probe_maximum)
+ break;
+ wanted_probes++;
+ }
+ if (!(wanted_probes < map->probe_maximum)) {
+ desc->wanted = 0;
+ break;
+ }
+ desc->probes = wanted_probes;
+ h = ck_rhs_remove_wanted(hs, offset, slot);
+ ck_pr_store_ptr(ck_rhs_entry_addr(map, slot),
+ ck_rhs_entry(map, offset));
+ ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
+ ck_pr_fence_atomic_store();
+ if (wanted_probes < CK_RHS_WORD_MAX) {
+ struct ck_rhs_entry_desc *hdesc = ck_rhs_desc(map, h);
+ if (hdesc->wanted == 1)
+ CK_RHS_STORE(&hdesc->probe_bound,
+ wanted_probes);
+ else if (hdesc->probe_bound == CK_RHS_WORD_MAX ||
+ hdesc->probe_bound == new_desc->probes) {
+ probe++;
+ if (hdesc->probe_bound == CK_RHS_WORD_MAX)
+ max_probes = map->probe_maximum;
+ else {
+ max_probes = hdesc->probe_bound;
+ max_probes--;
+ }
+ tmp_offset = ck_rhs_map_probe_next(map, offset,
+ probe);
+ while (probe < max_probes) {
+ if (h == (unsigned long)ck_rhs_get_first_offset(map, tmp_offset, probe))
+ break;
+ probe++;
+ tmp_offset = ck_rhs_map_probe_next(map, tmp_offset, probe);
+ }
+ if (probe == max_probes)
+ CK_RHS_STORE(&hdesc->probe_bound,
+ wanted_probes);
+ }
+ }
+ if (desc->wanted < CK_RHS_MAX_WANTED)
+ desc->wanted--;
+ slot = offset;
+ desc = new_desc;
+ }
+ ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), CK_RHS_EMPTY);
+ if ((desc->probes - 1) < CK_RHS_WORD_MAX)
+ CK_RHS_STORE(ck_rhs_probe_bound_addr(map, h),
+ desc->probes - 1);
+ desc->probes = 0;
+}
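+
+/*
+ * Deletion note (illustrative): ck_rhs uses backward-shift deletion rather
+ * than tombstones. When a slot is freed, the nearest later entry whose probe
+ * sequence passes through it (tracked by the "wanted" counters) is pulled
+ * forward into the hole, and the process repeats for the slot that entry
+ * vacated, keeping probe sequences contiguous and allowing probe bounds to
+ * be tightened along the way.
+ */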
+
+bool
+ck_rhs_fas(struct ck_rhs *hs,
+ unsigned long h,
+ const void *key,
+ void **previous)
+{
+ long slot, first;
+ const void *object;
+ const void *insert;
+ unsigned long n_probes;
+ struct ck_rhs_map *map = hs->map;
+ struct ck_rhs_entry_desc *desc, *desc2;
+
+ *previous = NULL;
+restart:
+ slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object,
+ ck_rhs_map_bound_get(map, h), CK_RHS_PROBE);
+
+ /* Replacement semantics presume existence. */
+ if (object == NULL)
+ return false;
+
+ insert = ck_rhs_marshal(hs->mode, key, h);
+
+ if (first != -1) {
+ int ret;
+
+ desc = ck_rhs_desc(map, slot);
+ desc2 = ck_rhs_desc(map, first);
+ desc->in_rh = true;
+ ret = ck_rhs_put_robin_hood(hs, first, desc2);
+ desc->in_rh = false;
+ if (CK_CC_UNLIKELY(ret == 1))
+ goto restart;
+ else if (CK_CC_UNLIKELY(ret != 0))
+ return false;
+ ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
+ ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
+ ck_pr_fence_atomic_store();
+ desc2->probes = n_probes;
+ ck_rhs_add_wanted(hs, first, -1, h);
+ ck_rhs_do_backward_shift_delete(hs, slot);
+ } else {
+ ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
+ ck_rhs_set_probes(map, slot, n_probes);
+ }
+ *previous = CK_CC_DECONST_PTR(object);
+ return true;
+}
+
+/*
+ * An apply function takes two arguments. The first argument is a pointer to a
+ * pre-existing object. The second argument is a pointer to the fifth argument
+ * passed to ck_rhs_apply. If a non-NULL pointer is passed to the first argument
+ * and the return value of the apply function is NULL, then the pre-existing
+ * value is deleted. If the return pointer is the same as the one passed to the
+ * apply function then no changes are made to the hash set. If the first
+ * argument is non-NULL and the return pointer is different than that passed to
+ * the apply function, then the pre-existing value is replaced. For
+ * replacement, it is required that the new value hashes and compares equal to
+ * the value it replaces.
+ */
+bool
+ck_rhs_apply(struct ck_rhs *hs,
+ unsigned long h,
+ const void *key,
+ ck_rhs_apply_fn_t *fn,
+ void *cl)
+{
+ const void *insert;
+	const void *object, *delta = NULL;
+ unsigned long n_probes;
+ long slot, first;
+ struct ck_rhs_map *map;
+ bool delta_set = false;
+
+restart:
+ map = hs->map;
+
+ slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_RHS_PROBE_INSERT);
+ if (slot == -1 && first == -1) {
+ if (ck_rhs_grow(hs, map->capacity << 1) == false)
+ return false;
+
+ goto restart;
+ }
+ if (!delta_set) {
+ delta = fn(CK_CC_DECONST_PTR(object), cl);
+ delta_set = true;
+ }
+
+ if (delta == NULL) {
+ /*
+ * The apply function has requested deletion. If the object doesn't exist,
+ * then exit early.
+ */
+ if (CK_CC_UNLIKELY(object == NULL))
+ return true;
+
+ /* Otherwise, delete it. */
+ ck_rhs_do_backward_shift_delete(hs, slot);
+ return true;
+ }
+
+ /* The apply function has not requested hash set modification so exit early. */
+ if (delta == object)
+ return true;
+
+ /* A modification or insertion has been requested. */
+ ck_rhs_map_bound_set(map, h, n_probes);
+
+ insert = ck_rhs_marshal(hs->mode, delta, h);
+ if (first != -1) {
+ /*
+ * This follows the same semantics as ck_hs_set, please refer to that
+ * function for documentation.
+ */
+ struct ck_rhs_entry_desc *desc = NULL, *desc2;
+ if (slot != -1) {
+ desc = ck_rhs_desc(map, slot);
+ desc->in_rh = true;
+ }
+ desc2 = ck_rhs_desc(map, first);
+ int ret = ck_rhs_put_robin_hood(hs, first, desc2);
+ if (slot != -1)
+ desc->in_rh = false;
+
+ if (CK_CC_UNLIKELY(ret == 1))
+ goto restart;
+ if (CK_CC_UNLIKELY(ret == -1))
+ return false;
+ /* If an earlier bucket was found, then store entry there. */
+ ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
+ desc2->probes = n_probes;
+ /*
+ * If a duplicate key was found, then delete it after
+ * signaling concurrent probes to restart. Optionally,
+ * it is possible to install tombstone after grace
+ * period if we can guarantee earlier position of
+ * duplicate key.
+ */
+ ck_rhs_add_wanted(hs, first, -1, h);
+ if (object != NULL) {
+ ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
+ ck_pr_fence_atomic_store();
+ ck_rhs_do_backward_shift_delete(hs, slot);
+ }
+ } else {
+ /*
+ * If we are storing into same slot, then atomic store is sufficient
+ * for replacement.
+ */
+ ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
+ ck_rhs_set_probes(map, slot, n_probes);
+ if (object == NULL)
+ ck_rhs_add_wanted(hs, slot, -1, h);
+ }
+
+ if (object == NULL) {
+ map->n_entries++;
+		if (map->n_entries > map->max_entries)
+ ck_rhs_grow(hs, map->capacity << 1);
+ }
+ return true;
+}
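+
+/*
+ * Illustrative usage sketch (not part of the upstream sources): an apply
+ * function implementing "put if absent" semantics. The names below are
+ * hypothetical; h is assumed to be the caller-computed hash of object.
+ */
+struct ck_rhs_example_upsert {
+	void *object;		/* Object to insert when the key is absent. */
+};
+
+static void *
+ck_rhs_example_upsert_fn(void *existing, void *closure)
+{
+	struct ck_rhs_example_upsert *upsert = closure;
+
+	/* Returning the existing pointer leaves the hash set unchanged. */
+	if (existing != NULL)
+		return existing;
+
+	/* Returning a different pointer inserts it for the probed key. */
+	return upsert->object;
+}
+
+static CK_CC_INLINE bool
+ck_rhs_example_put_if_absent(struct ck_rhs *hs, unsigned long h, void *object)
+{
+	struct ck_rhs_example_upsert upsert = { object };
+
+	return ck_rhs_apply(hs, h, object, ck_rhs_example_upsert_fn, &upsert);
+}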
+
+bool
+ck_rhs_set(struct ck_rhs *hs,
+ unsigned long h,
+ const void *key,
+ void **previous)
+{
+ long slot, first;
+ const void *object;
+ const void *insert;
+ unsigned long n_probes;
+ struct ck_rhs_map *map;
+
+ *previous = NULL;
+
+restart:
+ map = hs->map;
+
+ slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_RHS_PROBE_INSERT);
+ if (slot == -1 && first == -1) {
+ if (ck_rhs_grow(hs, map->capacity << 1) == false)
+ return false;
+
+ goto restart;
+ }
+ ck_rhs_map_bound_set(map, h, n_probes);
+ insert = ck_rhs_marshal(hs->mode, key, h);
+
+ if (first != -1) {
+ struct ck_rhs_entry_desc *desc = NULL, *desc2;
+ if (slot != -1) {
+ desc = ck_rhs_desc(map, slot);
+ desc->in_rh = true;
+ }
+ desc2 = ck_rhs_desc(map, first);
+ int ret = ck_rhs_put_robin_hood(hs, first, desc2);
+ if (slot != -1)
+ desc->in_rh = false;
+
+ if (CK_CC_UNLIKELY(ret == 1))
+ goto restart;
+ if (CK_CC_UNLIKELY(ret == -1))
+ return false;
+ /* If an earlier bucket was found, then store entry there. */
+ ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
+ desc2->probes = n_probes;
+ /*
+ * If a duplicate key was found, then delete it after
+ * signaling concurrent probes to restart. Optionally,
+ * it is possible to install tombstone after grace
+ * period if we can guarantee earlier position of
+ * duplicate key.
+ */
+ ck_rhs_add_wanted(hs, first, -1, h);
+ if (object != NULL) {
+ ck_pr_inc_uint(&map->generation[h & CK_RHS_G_MASK]);
+ ck_pr_fence_atomic_store();
+ ck_rhs_do_backward_shift_delete(hs, slot);
+ }
+
+ } else {
+ /*
+ * If we are storing into same slot, then atomic store is sufficient
+ * for replacement.
+ */
+ ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
+ ck_rhs_set_probes(map, slot, n_probes);
+ if (object == NULL)
+ ck_rhs_add_wanted(hs, slot, -1, h);
+ }
+
+ if (object == NULL) {
+ map->n_entries++;
+		if (map->n_entries > map->max_entries)
+ ck_rhs_grow(hs, map->capacity << 1);
+ }
+
+ *previous = CK_CC_DECONST_PTR(object);
+ return true;
+}
+
+static bool
+ck_rhs_put_internal(struct ck_rhs *hs,
+ unsigned long h,
+ const void *key,
+ enum ck_rhs_probe_behavior behavior)
+{
+ long slot, first;
+ const void *object;
+ const void *insert;
+ unsigned long n_probes;
+ struct ck_rhs_map *map;
+
+restart:
+ map = hs->map;
+
+ slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object,
+ map->probe_limit, behavior);
+
+ if (slot == -1 && first == -1) {
+ if (ck_rhs_grow(hs, map->capacity << 1) == false)
+ return false;
+
+ goto restart;
+ }
+
+ /* Fail operation if a match was found. */
+ if (object != NULL)
+ return false;
+
+ ck_rhs_map_bound_set(map, h, n_probes);
+ insert = ck_rhs_marshal(hs->mode, key, h);
+
+ if (first != -1) {
+ struct ck_rhs_entry_desc *desc = ck_rhs_desc(map, first);
+ int ret = ck_rhs_put_robin_hood(hs, first, desc);
+ if (CK_CC_UNLIKELY(ret == 1))
+ return ck_rhs_put_internal(hs, h, key, behavior);
+ else if (CK_CC_UNLIKELY(ret == -1))
+ return false;
+ /* Insert key into first bucket in probe sequence. */
+ ck_pr_store_ptr(ck_rhs_entry_addr(map, first), insert);
+ desc->probes = n_probes;
+ ck_rhs_add_wanted(hs, first, -1, h);
+ } else {
+ /* An empty slot was found. */
+ ck_pr_store_ptr(ck_rhs_entry_addr(map, slot), insert);
+ ck_rhs_set_probes(map, slot, n_probes);
+ ck_rhs_add_wanted(hs, slot, -1, h);
+ }
+
+ map->n_entries++;
+	if (map->n_entries > map->max_entries)
+ ck_rhs_grow(hs, map->capacity << 1);
+ return true;
+}
+
+bool
+ck_rhs_put(struct ck_rhs *hs,
+ unsigned long h,
+ const void *key)
+{
+
+ return ck_rhs_put_internal(hs, h, key, CK_RHS_PROBE_INSERT);
+}
+
+bool
+ck_rhs_put_unique(struct ck_rhs *hs,
+ unsigned long h,
+ const void *key)
+{
+
+ return ck_rhs_put_internal(hs, h, key, CK_RHS_PROBE_RH);
+}
+
+void *
+ck_rhs_get(struct ck_rhs *hs,
+ unsigned long h,
+ const void *key)
+{
+ long first;
+ const void *object;
+ struct ck_rhs_map *map;
+ unsigned long n_probes;
+ unsigned int g, g_p, probe;
+ unsigned int *generation;
+
+ do {
+ map = ck_pr_load_ptr(&hs->map);
+ generation = &map->generation[h & CK_RHS_G_MASK];
+ g = ck_pr_load_uint(generation);
+ probe = ck_rhs_map_bound_get(map, h);
+ ck_pr_fence_load();
+
+ first = -1;
+ map->probe_func(hs, map, &n_probes, &first, h, key, &object, probe, CK_RHS_PROBE_NO_RH);
+
+ ck_pr_fence_load();
+ g_p = ck_pr_load_uint(generation);
+ } while (g != g_p);
+
+ return CK_CC_DECONST_PTR(object);
+}
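+
+/*
+ * Read-side note (illustrative): lookups are lock-free with respect to a
+ * single writer. Writers bump the generation counter associated with a key's
+ * hash whenever they move or delete an entry that a concurrent probe could
+ * otherwise miss, so a reader that observes a changed generation simply
+ * retries its probe against the (possibly replaced) map.
+ */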
+
+void *
+ck_rhs_remove(struct ck_rhs *hs,
+ unsigned long h,
+ const void *key)
+{
+ long slot, first;
+ const void *object;
+ struct ck_rhs_map *map = hs->map;
+ unsigned long n_probes;
+
+ slot = map->probe_func(hs, map, &n_probes, &first, h, key, &object,
+ ck_rhs_map_bound_get(map, h), CK_RHS_PROBE_NO_RH);
+ if (object == NULL)
+ return NULL;
+
+ map->n_entries--;
+ ck_rhs_do_backward_shift_delete(hs, slot);
+ return CK_CC_DECONST_PTR(object);
+}
+
+bool
+ck_rhs_move(struct ck_rhs *hs,
+ struct ck_rhs *source,
+ ck_rhs_hash_cb_t *hf,
+ ck_rhs_compare_cb_t *compare,
+ struct ck_malloc *m)
+{
+
+ if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
+ return false;
+
+ hs->mode = source->mode;
+ hs->seed = source->seed;
+ hs->map = source->map;
+ hs->load_factor = source->load_factor;
+ hs->m = m;
+ hs->hf = hf;
+ hs->compare = compare;
+ return true;
+}
+
+bool
+ck_rhs_init(struct ck_rhs *hs,
+ unsigned int mode,
+ ck_rhs_hash_cb_t *hf,
+ ck_rhs_compare_cb_t *compare,
+ struct ck_malloc *m,
+ unsigned long n_entries,
+ unsigned long seed)
+{
+
+ if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
+ return false;
+
+ hs->m = m;
+ hs->mode = mode;
+ hs->seed = seed;
+ hs->hf = hf;
+ hs->compare = compare;
+ hs->load_factor = CK_RHS_DEFAULT_LOAD_FACTOR;
+
+ hs->map = ck_rhs_map_create(hs, n_entries);
+ return hs->map != NULL;
+}
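+
+/*
+ * Illustrative initialization sketch (not part of the upstream sources): the
+ * allocator, hash and comparator callbacks are assumed to be supplied by the
+ * caller, and the initial capacity of 64 is an arbitrary example value.
+ */
+static CK_CC_INLINE bool
+ck_rhs_example_init(struct ck_rhs *hs, struct ck_malloc *allocator,
+    ck_rhs_hash_cb_t *hash, ck_rhs_compare_cb_t *compare, unsigned long seed)
+{
+
+	return ck_rhs_init(hs, CK_RHS_MODE_OBJECT, hash, compare,
+	    allocator, 64, seed);
+}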
diff --git a/tools/feature.sh b/tools/feature.sh
new file mode 100755
index 0000000..f6c8934
--- /dev/null
+++ b/tools/feature.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+# This will generate the list of feature flags for implemented symbols.
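+# For example, a defined symbol such as ck_pr_cas_64 in the regression binary
+# is emitted as "#define CK_F_PR_CAS_64".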
+
+echo '/* DO NOT EDIT. This is auto-generated from feature.sh */'
+nm ../regressions/ck_pr/validate/ck_pr_cas|cut -d ' ' -f 3|sed s/ck_pr/ck_f_pr/|awk '/^ck_f_pr/ {print "#define " toupper($1);}'|sort