author    Daniel Baumann <daniel.baumann@progress-linux.org>  2021-07-23 11:24:09 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2021-07-23 11:24:09 +0000
commit    e36b37583bebd229102f46c4ed7d2f6fad8697d4 (patch)
tree      73937b6f051fcaaa1ccbdfbaa9f3a1f36bbedb9e /regressions
parent    Initial commit. (diff)
Adding upstream version 0.6.0. (upstream/0.6.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'regressions')
-rw-r--r--  regressions/Makefile | 128
-rw-r--r--  regressions/Makefile.unsupported | 9
-rw-r--r--  regressions/ck_array/validate/Makefile | 17
-rw-r--r--  regressions/ck_array/validate/serial.c | 178
-rw-r--r--  regressions/ck_backoff/validate/Makefile | 15
-rw-r--r--  regressions/ck_backoff/validate/validate.c | 60
-rw-r--r--  regressions/ck_barrier/benchmark/Makefile | 14
-rw-r--r--  regressions/ck_barrier/benchmark/throughput.c | 136
-rw-r--r--  regressions/ck_barrier/validate/Makefile | 34
-rw-r--r--  regressions/ck_barrier/validate/barrier_centralized.c | 121
-rw-r--r--  regressions/ck_barrier/validate/barrier_combining.c | 143
-rw-r--r--  regressions/ck_barrier/validate/barrier_dissemination.c | 144
-rw-r--r--  regressions/ck_barrier/validate/barrier_mcs.c | 131
-rw-r--r--  regressions/ck_barrier/validate/barrier_tournament.c | 142
-rw-r--r--  regressions/ck_bitmap/validate/Makefile | 17
-rw-r--r--  regressions/ck_bitmap/validate/serial.c | 372
-rw-r--r--  regressions/ck_brlock/benchmark/Makefile | 17
-rw-r--r--  regressions/ck_brlock/benchmark/latency.c | 103
-rw-r--r--  regressions/ck_brlock/benchmark/throughput.c | 164
-rw-r--r--  regressions/ck_brlock/validate/Makefile | 17
-rw-r--r--  regressions/ck_brlock/validate/validate.c | 155
-rw-r--r--  regressions/ck_bytelock/benchmark/Makefile | 14
-rw-r--r--  regressions/ck_bytelock/benchmark/latency.c | 99
-rw-r--r--  regressions/ck_bytelock/validate/Makefile | 17
-rw-r--r--  regressions/ck_bytelock/validate/validate.c | 166
-rw-r--r--  regressions/ck_cohort/benchmark/Makefile | 17
-rw-r--r--  regressions/ck_cohort/benchmark/ck_cohort.c | 8
-rw-r--r--  regressions/ck_cohort/benchmark/throughput.c | 239
-rw-r--r--  regressions/ck_cohort/ck_cohort.h | 35
-rw-r--r--  regressions/ck_cohort/validate/Makefile | 17
-rw-r--r--  regressions/ck_cohort/validate/validate.c | 205
-rw-r--r--  regressions/ck_epoch/validate/Makefile | 42
-rw-r--r--  regressions/ck_epoch/validate/ck_epoch_call.c | 64
-rw-r--r--  regressions/ck_epoch/validate/ck_epoch_poll.c | 236
-rw-r--r--  regressions/ck_epoch/validate/ck_epoch_section.c | 311
-rw-r--r--  regressions/ck_epoch/validate/ck_epoch_section_2.c | 195
-rw-r--r--  regressions/ck_epoch/validate/ck_epoch_synchronize.c | 249
-rw-r--r--  regressions/ck_epoch/validate/ck_stack.c | 164
-rw-r--r--  regressions/ck_epoch/validate/torture.c | 234
-rw-r--r--  regressions/ck_fifo/benchmark/Makefile | 14
-rw-r--r--  regressions/ck_fifo/benchmark/latency.c | 157
-rw-r--r--  regressions/ck_fifo/validate/Makefile | 29
-rw-r--r--  regressions/ck_fifo/validate/ck_fifo_mpmc.c | 168
-rw-r--r--  regressions/ck_fifo/validate/ck_fifo_mpmc_iterator.c | 90
-rw-r--r--  regressions/ck_fifo/validate/ck_fifo_spsc.c | 177
-rw-r--r--  regressions/ck_fifo/validate/ck_fifo_spsc_iterator.c | 83
-rw-r--r--  regressions/ck_hp/benchmark/Makefile | 17
-rw-r--r--  regressions/ck_hp/benchmark/fifo_latency.c | 94
-rw-r--r--  regressions/ck_hp/benchmark/stack_latency.c | 95
-rw-r--r--  regressions/ck_hp/validate/Makefile | 33
-rw-r--r--  regressions/ck_hp/validate/ck_hp_fifo.c | 187
-rw-r--r--  regressions/ck_hp/validate/ck_hp_fifo_donner.c | 213
-rw-r--r--  regressions/ck_hp/validate/ck_hp_stack.c | 165
-rw-r--r--  regressions/ck_hp/validate/nbds_haz_test.c | 226
-rw-r--r--  regressions/ck_hp/validate/serial.c | 127
-rw-r--r--  regressions/ck_hs/benchmark/Makefile | 23
-rw-r--r--  regressions/ck_hs/benchmark/apply.c | 260
-rw-r--r--  regressions/ck_hs/benchmark/parallel_bytestring.c | 602
-rw-r--r--  regressions/ck_hs/benchmark/serial.c | 517
-rw-r--r--  regressions/ck_hs/validate/Makefile | 17
-rw-r--r--  regressions/ck_hs/validate/serial.c | 315
-rw-r--r--  regressions/ck_ht/benchmark/Makefile | 27
-rw-r--r--  regressions/ck_ht/benchmark/parallel_bytestring.c | 559
-rw-r--r--  regressions/ck_ht/benchmark/parallel_direct.c | 545
-rw-r--r--  regressions/ck_ht/benchmark/serial.c | 387
-rw-r--r--  regressions/ck_ht/validate/Makefile | 21
-rw-r--r--  regressions/ck_ht/validate/serial.c | 309
-rw-r--r--  regressions/ck_pflock/benchmark/Makefile | 17
-rw-r--r--  regressions/ck_pflock/benchmark/latency.c | 72
-rw-r--r--  regressions/ck_pflock/benchmark/throughput.c | 163
-rw-r--r--  regressions/ck_pflock/validate/Makefile | 17
-rw-r--r--  regressions/ck_pflock/validate/validate.c | 151
-rw-r--r--  regressions/ck_pr/benchmark/Makefile | 31
-rw-r--r--  regressions/ck_pr/benchmark/benchmark.h | 130
-rw-r--r--  regressions/ck_pr/benchmark/ck_pr_add_64.c | 16
-rw-r--r--  regressions/ck_pr/benchmark/ck_pr_cas_64.c | 16
-rw-r--r--  regressions/ck_pr/benchmark/ck_pr_cas_64_2.c | 17
-rw-r--r--  regressions/ck_pr/benchmark/ck_pr_faa_64.c | 16
-rw-r--r--  regressions/ck_pr/benchmark/ck_pr_fas_64.c | 17
-rw-r--r--  regressions/ck_pr/benchmark/ck_pr_neg_64.c | 16
-rw-r--r--  regressions/ck_pr/benchmark/fp.c | 66
-rw-r--r--  regressions/ck_pr/validate/Makefile | 84
-rw-r--r--  regressions/ck_pr/validate/ck_pr_add.c | 151
-rw-r--r--  regressions/ck_pr/validate/ck_pr_and.c | 147
-rw-r--r--  regressions/ck_pr/validate/ck_pr_bin.c | 94
-rw-r--r--  regressions/ck_pr/validate/ck_pr_btc.c | 96
-rw-r--r--  regressions/ck_pr/validate/ck_pr_btr.c | 97
-rw-r--r--  regressions/ck_pr/validate/ck_pr_bts.c | 97
-rw-r--r--  regressions/ck_pr/validate/ck_pr_btx.c | 112
-rw-r--r--  regressions/ck_pr/validate/ck_pr_cas.c | 158
-rw-r--r--  regressions/ck_pr/validate/ck_pr_dec.c | 143
-rw-r--r--  regressions/ck_pr/validate/ck_pr_faa.c | 152
-rw-r--r--  regressions/ck_pr/validate/ck_pr_fas.c | 148
-rw-r--r--  regressions/ck_pr/validate/ck_pr_fax.c | 121
-rw-r--r--  regressions/ck_pr/validate/ck_pr_inc.c | 143
-rw-r--r--  regressions/ck_pr/validate/ck_pr_load.c | 149
-rw-r--r--  regressions/ck_pr/validate/ck_pr_n.c | 90
-rw-r--r--  regressions/ck_pr/validate/ck_pr_or.c | 149
-rw-r--r--  regressions/ck_pr/validate/ck_pr_store.c | 150
-rw-r--r--  regressions/ck_pr/validate/ck_pr_sub.c | 151
-rw-r--r--  regressions/ck_pr/validate/ck_pr_unary.c | 117
-rw-r--r--  regressions/ck_pr/validate/ck_pr_xor.c | 147
-rw-r--r--  regressions/ck_queue/validate/Makefile | 26
-rw-r--r--  regressions/ck_queue/validate/ck_list.c | 236
-rw-r--r--  regressions/ck_queue/validate/ck_slist.c | 217
-rw-r--r--  regressions/ck_queue/validate/ck_stailq.c | 256
-rw-r--r--  regressions/ck_rhs/benchmark/Makefile | 17
-rw-r--r--  regressions/ck_rhs/benchmark/parallel_bytestring.c | 599
-rw-r--r--  regressions/ck_rhs/benchmark/serial.c | 517
-rw-r--r--  regressions/ck_rhs/validate/Makefile | 17
-rw-r--r--  regressions/ck_rhs/validate/serial.c | 310
-rw-r--r--  regressions/ck_ring/benchmark/Makefile | 14
-rw-r--r--  regressions/ck_ring/benchmark/latency.c | 142
-rw-r--r--  regressions/ck_ring/validate/Makefile | 40
-rw-r--r--  regressions/ck_ring/validate/ck_ring_mpmc.c | 448
-rw-r--r--  regressions/ck_ring/validate/ck_ring_mpmc_template.c | 349
-rw-r--r--  regressions/ck_ring/validate/ck_ring_spmc.c | 340
-rw-r--r--  regressions/ck_ring/validate/ck_ring_spmc_template.c | 350
-rw-r--r--  regressions/ck_ring/validate/ck_ring_spsc.c | 213
-rw-r--r--  regressions/ck_rwcohort/benchmark/Makefile | 32
-rw-r--r--  regressions/ck_rwcohort/benchmark/ck_neutral.c | 7
-rw-r--r--  regressions/ck_rwcohort/benchmark/ck_rp.c | 7
-rw-r--r--  regressions/ck_rwcohort/benchmark/ck_wp.c | 7
-rw-r--r--  regressions/ck_rwcohort/benchmark/latency.h | 106
-rw-r--r--  regressions/ck_rwcohort/benchmark/throughput.h | 245
-rw-r--r--  regressions/ck_rwcohort/ck_neutral.h | 8
-rw-r--r--  regressions/ck_rwcohort/ck_rp.h | 8
-rw-r--r--  regressions/ck_rwcohort/ck_wp.h | 8
-rw-r--r--  regressions/ck_rwcohort/validate/Makefile | 25
-rw-r--r--  regressions/ck_rwcohort/validate/ck_neutral.c | 2
-rw-r--r--  regressions/ck_rwcohort/validate/ck_rp.c | 2
-rw-r--r--  regressions/ck_rwcohort/validate/ck_wp.c | 2
-rw-r--r--  regressions/ck_rwcohort/validate/validate.h | 209
-rw-r--r--  regressions/ck_rwlock/benchmark/Makefile | 17
-rw-r--r--  regressions/ck_rwlock/benchmark/latency.c | 134
-rw-r--r--  regressions/ck_rwlock/benchmark/throughput.c | 254
-rw-r--r--  regressions/ck_rwlock/validate/Makefile | 17
-rw-r--r--  regressions/ck_rwlock/validate/validate.c | 447
-rw-r--r--  regressions/ck_sequence/benchmark/Makefile | 18
-rw-r--r--  regressions/ck_sequence/benchmark/ck_sequence.c | 91
-rw-r--r--  regressions/ck_sequence/validate/Makefile | 17
-rw-r--r--  regressions/ck_sequence/validate/ck_sequence.c | 171
-rw-r--r--  regressions/ck_spinlock/benchmark/Makefile | 87
-rw-r--r--  regressions/ck_spinlock/benchmark/ck_anderson.c | 8
-rw-r--r--  regressions/ck_spinlock/benchmark/ck_cas.c | 8
-rw-r--r--  regressions/ck_spinlock/benchmark/ck_clh.c | 7
-rw-r--r--  regressions/ck_spinlock/benchmark/ck_dec.c | 7
-rw-r--r--  regressions/ck_spinlock/benchmark/ck_fas.c | 7
-rw-r--r--  regressions/ck_spinlock/benchmark/ck_hclh.c | 7
-rw-r--r--  regressions/ck_spinlock/benchmark/ck_mcs.c | 7
-rw-r--r--  regressions/ck_spinlock/benchmark/ck_spinlock.c | 7
-rw-r--r--  regressions/ck_spinlock/benchmark/ck_ticket.c | 8
-rw-r--r--  regressions/ck_spinlock/benchmark/ck_ticket_pb.c | 7
-rw-r--r--  regressions/ck_spinlock/benchmark/latency.h | 76
-rw-r--r--  regressions/ck_spinlock/benchmark/linux_spinlock.c | 7
-rw-r--r--  regressions/ck_spinlock/benchmark/throughput.h | 218
-rw-r--r--  regressions/ck_spinlock/ck_anderson.h | 11
-rw-r--r--  regressions/ck_spinlock/ck_cas.h | 6
-rw-r--r--  regressions/ck_spinlock/ck_clh.h | 9
-rw-r--r--  regressions/ck_spinlock/ck_dec.h | 6
-rw-r--r--  regressions/ck_spinlock/ck_fas.h | 6
-rw-r--r--  regressions/ck_spinlock/ck_hclh.h | 16
-rw-r--r--  regressions/ck_spinlock/ck_mcs.h | 7
-rw-r--r--  regressions/ck_spinlock/ck_spinlock.h | 6
-rw-r--r--  regressions/ck_spinlock/ck_ticket.h | 11
-rw-r--r--  regressions/ck_spinlock/ck_ticket_pb.h | 6
-rw-r--r--  regressions/ck_spinlock/linux_spinlock.h | 39
-rw-r--r--  regressions/ck_spinlock/validate/Makefile | 57
-rw-r--r--  regressions/ck_spinlock/validate/ck_anderson.c | 2
-rw-r--r--  regressions/ck_spinlock/validate/ck_cas.c | 2
-rw-r--r--  regressions/ck_spinlock/validate/ck_clh.c | 2
-rw-r--r--  regressions/ck_spinlock/validate/ck_dec.c | 2
-rw-r--r--  regressions/ck_spinlock/validate/ck_fas.c | 2
-rw-r--r--  regressions/ck_spinlock/validate/ck_hclh.c | 2
-rw-r--r--  regressions/ck_spinlock/validate/ck_mcs.c | 2
-rw-r--r--  regressions/ck_spinlock/validate/ck_spinlock.c | 2
-rw-r--r--  regressions/ck_spinlock/validate/ck_ticket.c | 2
-rw-r--r--  regressions/ck_spinlock/validate/ck_ticket_pb.c | 2
-rw-r--r--  regressions/ck_spinlock/validate/linux_spinlock.c | 14
-rw-r--r--  regressions/ck_spinlock/validate/validate.h | 180
-rw-r--r--  regressions/ck_stack/benchmark/Makefile | 14
-rw-r--r--  regressions/ck_stack/benchmark/latency.c | 176
-rw-r--r--  regressions/ck_stack/validate/Makefile | 56
-rw-r--r--  regressions/ck_stack/validate/pair.c | 249
-rw-r--r--  regressions/ck_stack/validate/pop.c | 269
-rw-r--r--  regressions/ck_stack/validate/push.c | 248
-rw-r--r--  regressions/ck_stack/validate/serial.c | 84
-rw-r--r--  regressions/ck_swlock/benchmark/Makefile | 17
-rw-r--r--  regressions/ck_swlock/benchmark/latency.c | 86
-rw-r--r--  regressions/ck_swlock/benchmark/throughput.c | 183
-rw-r--r--  regressions/ck_swlock/validate/Makefile | 17
-rw-r--r--  regressions/ck_swlock/validate/validate.c | 455
-rw-r--r--  regressions/ck_tflock/benchmark/Makefile | 17
-rw-r--r--  regressions/ck_tflock/benchmark/latency.c | 73
-rw-r--r--  regressions/ck_tflock/benchmark/throughput.c | 182
-rw-r--r--  regressions/ck_tflock/validate/Makefile | 17
-rw-r--r--  regressions/ck_tflock/validate/validate.c | 158
-rw-r--r--  regressions/common.h | 471
198 files changed, 22879 insertions, 0 deletions
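
Usage sketch (not part of the upstream patch): the top-level regressions/Makefile added below simply recurses into the per-structure validate/ and benchmark/ directories, and its check target iterates over $(DIR) running each ck_*/validate suite. Assuming an in-tree build where build/regressions.build has already been generated (the patch's Makefile.unsupported notes that out-of-source builds are unsupported), the suite would typically be driven like this:

    cd regressions
    make all      # build every validate and benchmark program
    make check    # run each ck_*/validate suite; exits non-zero if any fails
    make clean    # remove built objects and binaries
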
diff --git a/regressions/Makefile b/regressions/Makefile
new file mode 100644
index 0000000..3195e52
--- /dev/null
+++ b/regressions/Makefile
@@ -0,0 +1,128 @@
+DIR=array \
+ backoff \
+ barrier \
+ bitmap \
+ brlock \
+ bytelock \
+ cohort \
+ epoch \
+ fifo \
+ hp \
+ hs \
+ rhs \
+ ht \
+ pflock \
+ pr \
+ queue \
+ ring \
+ rwlock \
+ swlock \
+ sequence \
+ spinlock \
+ stack \
+ swlock \
+ tflock
+
+.PHONY: all clean check
+
+all:
+ $(MAKE) -C ./ck_array/validate all
+ $(MAKE) -C ./ck_cohort/validate all
+ $(MAKE) -C ./ck_cohort/benchmark all
+ $(MAKE) -C ./ck_bitmap/validate all
+ $(MAKE) -C ./ck_backoff/validate all
+ $(MAKE) -C ./ck_queue/validate all
+ $(MAKE) -C ./ck_brlock/validate all
+ $(MAKE) -C ./ck_ht/validate all
+ $(MAKE) -C ./ck_ht/benchmark all
+ $(MAKE) -C ./ck_brlock/benchmark all
+ $(MAKE) -C ./ck_spinlock/validate all
+ $(MAKE) -C ./ck_spinlock/benchmark all
+ $(MAKE) -C ./ck_fifo/validate all
+ $(MAKE) -C ./ck_fifo/benchmark all
+ $(MAKE) -C ./ck_pr/validate all
+ $(MAKE) -C ./ck_pr/benchmark all
+ $(MAKE) -C ./ck_hs/benchmark all
+ $(MAKE) -C ./ck_hs/validate all
+ $(MAKE) -C ./ck_rhs/benchmark all
+ $(MAKE) -C ./ck_rhs/validate all
+ $(MAKE) -C ./ck_barrier/validate all
+ $(MAKE) -C ./ck_barrier/benchmark all
+ $(MAKE) -C ./ck_bytelock/validate all
+ $(MAKE) -C ./ck_bytelock/benchmark all
+ $(MAKE) -C ./ck_epoch/validate all
+ $(MAKE) -C ./ck_rwcohort/validate all
+ $(MAKE) -C ./ck_rwcohort/benchmark all
+ $(MAKE) -C ./ck_sequence/validate all
+ $(MAKE) -C ./ck_sequence/benchmark all
+ $(MAKE) -C ./ck_stack/validate all
+ $(MAKE) -C ./ck_stack/benchmark all
+ $(MAKE) -C ./ck_ring/validate all
+ $(MAKE) -C ./ck_ring/benchmark all
+ $(MAKE) -C ./ck_rwlock/validate all
+ $(MAKE) -C ./ck_rwlock/benchmark all
+ $(MAKE) -C ./ck_tflock/validate all
+ $(MAKE) -C ./ck_tflock/benchmark all
+ $(MAKE) -C ./ck_swlock/validate all
+ $(MAKE) -C ./ck_swlock/benchmark all
+ $(MAKE) -C ./ck_pflock/validate all
+ $(MAKE) -C ./ck_pflock/benchmark all
+ $(MAKE) -C ./ck_hp/validate all
+ $(MAKE) -C ./ck_hp/benchmark all
+
+clean:
+ $(MAKE) -C ./ck_array/validate clean
+ $(MAKE) -C ./ck_pflock/validate clean
+ $(MAKE) -C ./ck_pflock/benchmark clean
+ $(MAKE) -C ./ck_tflock/validate clean
+ $(MAKE) -C ./ck_tflock/benchmark clean
+ $(MAKE) -C ./ck_rwcohort/validate clean
+ $(MAKE) -C ./ck_rwcohort/benchmark clean
+ $(MAKE) -C ./ck_backoff/validate clean
+ $(MAKE) -C ./ck_bitmap/validate clean
+ $(MAKE) -C ./ck_queue/validate clean
+ $(MAKE) -C ./ck_cohort/validate clean
+ $(MAKE) -C ./ck_cohort/benchmark clean
+ $(MAKE) -C ./ck_brlock/validate clean
+ $(MAKE) -C ./ck_ht/validate clean
+ $(MAKE) -C ./ck_ht/benchmark clean
+ $(MAKE) -C ./ck_hs/validate clean
+ $(MAKE) -C ./ck_hs/benchmark clean
+ $(MAKE) -C ./ck_rhs/validate clean
+ $(MAKE) -C ./ck_rhs/benchmark clean
+ $(MAKE) -C ./ck_brlock/benchmark clean
+ $(MAKE) -C ./ck_spinlock/validate clean
+ $(MAKE) -C ./ck_spinlock/benchmark clean
+ $(MAKE) -C ./ck_fifo/validate clean
+ $(MAKE) -C ./ck_fifo/benchmark clean
+ $(MAKE) -C ./ck_pr/validate clean
+ $(MAKE) -C ./ck_pr/benchmark clean
+ $(MAKE) -C ./ck_barrier/validate clean
+ $(MAKE) -C ./ck_barrier/benchmark clean
+ $(MAKE) -C ./ck_bytelock/validate clean
+ $(MAKE) -C ./ck_bytelock/benchmark clean
+ $(MAKE) -C ./ck_epoch/validate clean
+ $(MAKE) -C ./ck_sequence/validate clean
+ $(MAKE) -C ./ck_sequence/benchmark clean
+ $(MAKE) -C ./ck_stack/validate clean
+ $(MAKE) -C ./ck_stack/benchmark clean
+ $(MAKE) -C ./ck_ring/validate clean
+ $(MAKE) -C ./ck_ring/benchmark clean
+ $(MAKE) -C ./ck_rwlock/validate clean
+ $(MAKE) -C ./ck_rwlock/benchmark clean
+ $(MAKE) -C ./ck_swlock/validate clean
+ $(MAKE) -C ./ck_swlock/benchmark clean
+ $(MAKE) -C ./ck_pflock/validate clean
+ $(MAKE) -C ./ck_pflock/benchmark clean
+ $(MAKE) -C ./ck_hp/validate clean
+ $(MAKE) -C ./ck_hp/benchmark clean
+
+check: all
+ rc=0; \
+ for d in $(DIR) ; do \
+ echo "----[ Testing $$d...."; \
+ $(MAKE) -C ./ck_$$d/validate check || rc=1; \
+ echo; \
+ done; \
+ exit $$rc
+
diff --git a/regressions/Makefile.unsupported b/regressions/Makefile.unsupported
new file mode 100644
index 0000000..90aa877
--- /dev/null
+++ b/regressions/Makefile.unsupported
@@ -0,0 +1,9 @@
+.PHONY: all clean check
+
+all:
+ @echo Regressions are currently unsupported for out-of-source builds
+
+clean: all
+
+check: all
+
diff --git a/regressions/ck_array/validate/Makefile b/regressions/ck_array/validate/Makefile
new file mode 100644
index 0000000..3c48167
--- /dev/null
+++ b/regressions/ck_array/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=serial
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_array.h ../../../src/ck_array.c
+ $(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_array.c
+
+check: all
+ ./serial
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE -ggdb
diff --git a/regressions/ck_array/validate/serial.c b/regressions/ck_array/validate/serial.c
new file mode 100644
index 0000000..b6d7b56
--- /dev/null
+++ b/regressions/ck_array/validate/serial.c
@@ -0,0 +1,178 @@
+#include <ck_array.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../../common.h"
+
+#ifndef ITERATION
+#define ITERATION 128
+#endif
+
+static void
+my_free(void *p, size_t m, bool d)
+{
+
+ (void)m;
+ (void)d;
+
+ free(p);
+ return;
+}
+
+static void *
+my_malloc(size_t b)
+{
+
+ return malloc(b);
+}
+
+static void *
+my_realloc(void *r, size_t a, size_t b, bool d)
+{
+
+ (void)a;
+ (void)d;
+
+ return realloc(r, b);
+}
+
+int
+main(void)
+{
+ void *r;
+ uintptr_t i;
+ ck_array_t array;
+ ck_array_iterator_t iterator;
+ struct ck_malloc m = {
+ .malloc = my_malloc,
+ .free = NULL,
+ .realloc = my_realloc
+ };
+
+ if (ck_array_init(&array, CK_ARRAY_MODE_SPMC, &m, 4) == true)
+ ck_error("ck_array_init with NULL free succeeded\n");
+
+ m.free = my_free;
+ if (ck_array_init(&array, CK_ARRAY_MODE_SPMC, &m, 4) == false)
+ ck_error("ck_array_init\n");
+
+ for (i = 0; i < ITERATION; i++) {
+ if (ck_array_put(&array, (void *)i) == false)
+ ck_error("ck_error_put\n");
+
+ if (ck_array_remove(&array, (void *)i) == false)
+ ck_error("ck_error_remove after put\n");
+ }
+
+ i = 0; CK_ARRAY_FOREACH(&array, &iterator, &r) i++;
+ if (i != 0)
+ ck_error("Non-empty array after put -> remove workload.\n");
+
+ ck_array_commit(&array);
+
+ i = 0; CK_ARRAY_FOREACH(&array, &iterator, &r) i++;
+ if (i != 0)
+ ck_error("Non-empty array after put -> remove -> commit workload.\n");
+
+ for (i = 0; i < ITERATION; i++) {
+ if (ck_array_put(&array, (void *)i) == false)
+ ck_error("ck_error_put\n");
+ }
+
+ i = 0; CK_ARRAY_FOREACH(&array, &iterator, &r) i++;
+ if (i != 0)
+ ck_error("Non-empty array after put workload.\n");
+
+ for (i = 0; i < ITERATION; i++) {
+ if (ck_array_remove(&array, (void *)i) == false)
+ ck_error("ck_error_remove after put\n");
+ }
+
+ i = 0; CK_ARRAY_FOREACH(&array, &iterator, &r) i++;
+ if (i != 0)
+ ck_error("Non-empty array after put -> remove workload.\n");
+
+ ck_array_commit(&array);
+
+ i = 0; CK_ARRAY_FOREACH(&array, &iterator, &r) i++;
+ if (i != 0)
+ ck_error("Non-empty array after put -> remove -> commit workload.\n");
+
+ for (i = 0; i < ITERATION; i++) {
+ if (ck_array_put(&array, (void *)i) == false)
+ ck_error("ck_error_put\n");
+ }
+
+ ck_array_commit(&array);
+
+ i = 0;
+ CK_ARRAY_FOREACH(&array, &iterator, &r) {
+ i++;
+ }
+
+ if (i != ITERATION)
+ ck_error("Incorrect item count in iteration\n");
+
+ ck_array_remove(&array, (void *)(uintptr_t)0);
+ ck_array_remove(&array, (void *)(uintptr_t)1);
+ ck_array_commit(&array);
+ i = 0; CK_ARRAY_FOREACH(&array, &iterator, &r) i++;
+ if (i != ITERATION - 2 || ck_array_length(&array) != ITERATION - 2)
+ ck_error("Incorrect item count in iteration after remove\n");
+
+ if (ck_array_put_unique(&array, (void *)UINTPTR_MAX) != 0)
+ ck_error("Unique value put failed.\n");
+
+ if (ck_array_put_unique(&array, (void *)(uintptr_t)4) != 1)
+ ck_error("put of 4 not detected as non-unique.\n");
+
+ if (ck_array_put_unique(&array, (void *)UINTPTR_MAX) != 1)
+ ck_error("put of UINTPTR_MAX not detected as non-unique.\n");
+
+ ck_array_commit(&array);
+ i = 0;
+ CK_ARRAY_FOREACH(&array, &iterator, &r) {
+ i++;
+ }
+ if (i != ITERATION - 1 || ck_array_length(&array) != ITERATION - 1)
+ ck_error("Incorrect item count in iteration after unique put\n");
+
+ if (ck_array_initialized(&array) == false)
+ ck_error("Error, expected array to be initialized.\n");
+
+ for (i = 0; i < ITERATION * 4; i++) {
+ ck_array_remove(&array, (void *)i);
+ }
+
+ for (i = 0; i < ITERATION * 16; i++) {
+ ck_array_put(&array, (void *)i);
+ }
+
+ ck_array_commit(&array);
+
+ for (i = 0; i < ITERATION * 128; i++) {
+ ck_array_put(&array, (void *)i);
+ if (ck_array_put_unique(&array, (void *)i) != 1)
+ ck_error("put_unique for non-unique value should fail.\n");
+ }
+
+ for (i = 0; i < ITERATION * 64; i++) {
+ bool f = ck_array_remove(&array, (void *)i);
+
+ if (f == false && i < ITERATION * 144)
+ ck_error("Remove failed for existing entry.\n");
+
+ if (f == true && i > ITERATION * 144)
+ ck_error("Remove succeeded for non-existing entry.\n");
+ }
+
+ ck_array_commit(&array);
+ ck_array_deinit(&array, false);
+
+ if (ck_array_initialized(&array) == true)
+ ck_error("Error, expected array to be uninitialized.\n");
+
+ return 0;
+}
+
diff --git a/regressions/ck_backoff/validate/Makefile b/regressions/ck_backoff/validate/Makefile
new file mode 100644
index 0000000..39e6d4f
--- /dev/null
+++ b/regressions/ck_backoff/validate/Makefile
@@ -0,0 +1,15 @@
+.PHONY: check clean
+
+all: validate
+
+validate: validate.c ../../../include/ck_backoff.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate
+
+clean:
+ rm -rf validate *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_backoff/validate/validate.c b/regressions/ck_backoff/validate/validate.c
new file mode 100644
index 0000000..137d48e
--- /dev/null
+++ b/regressions/ck_backoff/validate/validate.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <ck_backoff.h>
+#include "../../common.h"
+
+int
+main(void)
+{
+ ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
+ const ck_backoff_t ceiling = CK_BACKOFF_CEILING + 1;
+ unsigned int i = 0;
+
+ fprintf(stderr, "Ceiling is: %u (%#x)\n", CK_BACKOFF_CEILING, CK_BACKOFF_CEILING);
+
+ for (;;) {
+ ck_backoff_t previous = backoff;
+ ck_backoff_eb(&backoff);
+
+ printf("EB %u\n", backoff);
+ if (previous == ceiling) {
+ if (backoff != ceiling)
+ ck_error("[C] GB: expected %u, got %u\n", ceiling, backoff);
+
+ if (i++ >= 1)
+ break;
+ } else if (previous != backoff >> 1) {
+ ck_error("[N] GB: expected %u (%u), got %u\n", previous << 1, previous, backoff);
+ }
+ }
+
+ return 0;
+}
+
diff --git a/regressions/ck_barrier/benchmark/Makefile b/regressions/ck_barrier/benchmark/Makefile
new file mode 100644
index 0000000..ea973d2
--- /dev/null
+++ b/regressions/ck_barrier/benchmark/Makefile
@@ -0,0 +1,14 @@
+.PHONY: clean distribution
+
+OBJECTS=throughput
+
+all: $(OBJECTS)
+
+throughput: throughput.c ../../../include/ck_barrier.h ../../../src/ck_barrier_centralized.c
+ $(CC) $(CFLAGS) -o throughput throughput.c ../../../src/ck_barrier_centralized.c
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_barrier/benchmark/throughput.c b/regressions/ck_barrier/benchmark/throughput.c
new file mode 100644
index 0000000..1a1c013
--- /dev/null
+++ b/regressions/ck_barrier/benchmark/throughput.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <pthread.h>
+#include <unistd.h>
+#include <ck_stdint.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <ck_pr.h>
+#include <ck_barrier.h>
+
+#include "../../common.h"
+
+#if defined(CK_F_PR_INC_64) && defined(CK_F_PR_LOAD_64)
+static int done = 0;
+static struct affinity a;
+static int nthr;
+static int tid;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+struct counter {
+ uint64_t value;
+} CK_CC_CACHELINE;
+struct counter *counters;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ ck_barrier_centralized_state_t state = CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+ int id;
+
+ id = ck_pr_faa_int(&tid, 1);
+ aff_iterate(&a);
+
+ while (ck_pr_load_int(&done) == 0) {
+ ck_barrier_centralized(&barrier, &state, nthr);
+ ck_pr_inc_64(&counters[id].value);
+ ck_barrier_centralized(&barrier, &state, nthr);
+ ck_pr_inc_64(&counters[id].value);
+ ck_barrier_centralized(&barrier, &state, nthr);
+ ck_pr_inc_64(&counters[id].value);
+ ck_barrier_centralized(&barrier, &state, nthr);
+ ck_pr_inc_64(&counters[id].value);
+ ck_barrier_centralized(&barrier, &state, nthr);
+ ck_pr_inc_64(&counters[id].value);
+ ck_barrier_centralized(&barrier, &state, nthr);
+ ck_pr_inc_64(&counters[id].value);
+ ck_barrier_centralized(&barrier, &state, nthr);
+ ck_pr_inc_64(&counters[id].value);
+ ck_barrier_centralized(&barrier, &state, nthr);
+ ck_pr_inc_64(&counters[id].value);
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ uint64_t count;
+ int i;
+
+ if (argc != 3) {
+ ck_error("Correct usage: <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ counters = calloc(sizeof(struct counter), nthr);
+ if (counters == NULL) {
+ ck_error("ERROR: Could not allocate counters\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ fprintf(stderr, "Creating threads (barrier)...");
+ for (i = 0; i < nthr; ++i) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ common_sleep(10);
+
+ count = 0;
+ ck_pr_store_int(&done, 1);
+ for (i = 0; i < nthr; ++i)
+ count += ck_pr_load_64(&counters[i].value);
+ printf("%d %16" PRIu64 "\n", nthr, count);
+
+ return (0);
+}
+#else
+int
+main(void)
+{
+
+ fputs("Unsupported.", stderr);
+ return 0;
+}
+#endif
+
diff --git a/regressions/ck_barrier/validate/Makefile b/regressions/ck_barrier/validate/Makefile
new file mode 100644
index 0000000..f31a1a6
--- /dev/null
+++ b/regressions/ck_barrier/validate/Makefile
@@ -0,0 +1,34 @@
+.PHONY: check clean distribution
+
+OBJECTS=barrier_centralized barrier_combining barrier_dissemination barrier_tournament barrier_mcs
+
+all: $(OBJECTS)
+
+barrier_centralized: barrier_centralized.c ../../../include/ck_barrier.h ../../../src/ck_barrier_centralized.c
+ $(CC) $(CFLAGS) -o barrier_centralized barrier_centralized.c ../../../src/ck_barrier_centralized.c
+
+barrier_combining: barrier_combining.c ../../../include/ck_barrier.h ../../../src/ck_barrier_combining.c
+ $(CC) $(CFLAGS) -o barrier_combining barrier_combining.c ../../../src/ck_barrier_combining.c
+
+barrier_dissemination: barrier_dissemination.c ../../../include/ck_barrier.h ../../../src/ck_barrier_dissemination.c
+ $(CC) $(CFLAGS) -o barrier_dissemination barrier_dissemination.c ../../../src/ck_barrier_dissemination.c
+
+barrier_tournament: barrier_tournament.c ../../../include/ck_barrier.h ../../../src/ck_barrier_tournament.c
+ $(CC) $(CFLAGS) -o barrier_tournament barrier_tournament.c ../../../src/ck_barrier_tournament.c
+
+barrier_mcs: barrier_mcs.c ../../../include/ck_barrier.h ../../../src/ck_barrier_mcs.c
+ $(CC) $(CFLAGS) -o barrier_mcs barrier_mcs.c ../../../src/ck_barrier_mcs.c
+
+check: all
+ rc=0; \
+ for d in $(OBJECTS) ; do \
+ echo $$d; \
+ ./$$d $(CORES) 1 1 || rc=1; \
+ done; \
+ exit $$rc
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_barrier/validate/barrier_centralized.c b/regressions/ck_barrier/validate/barrier_centralized.c
new file mode 100644
index 0000000..551913a
--- /dev/null
+++ b/regressions/ck_barrier/validate/barrier_centralized.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_barrier.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 5000000
+#endif
+
+#ifndef ENTRIES
+#define ENTRIES 512
+#endif
+
+static struct affinity a;
+static int nthr;
+static int counters[ENTRIES];
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static int barrier_wait;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ ck_barrier_centralized_state_t state = CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+ int j, counter;
+ int i = 0;
+
+ aff_iterate(&a);
+
+ ck_pr_inc_int(&barrier_wait);
+ while (ck_pr_load_int(&barrier_wait) != nthr)
+ ck_pr_stall();
+
+ for (j = 0; j < ITERATE; j++) {
+ i = j++ & (ENTRIES - 1);
+ ck_pr_inc_int(&counters[i]);
+ ck_barrier_centralized(&barrier, &state, nthr);
+ counter = ck_pr_load_int(&counters[i]);
+ if (counter != nthr * (j / ENTRIES + 1)) {
+ ck_error("FAILED [%d:%d]: %d != %d\n", i, j - 1, counter, nthr);
+ }
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ int i;
+
+ if (argc < 3) {
+ ck_error("Usage: correct <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ fprintf(stderr, "Creating threads (barrier)...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+
+ return (0);
+}
+
diff --git a/regressions/ck_barrier/validate/barrier_combining.c b/regressions/ck_barrier/validate/barrier_combining.c
new file mode 100644
index 0000000..98fa0cf
--- /dev/null
+++ b/regressions/ck_barrier/validate/barrier_combining.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_barrier.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 5000000
+#endif
+
+#ifndef ENTRIES
+#define ENTRIES 512
+#endif
+
+static struct affinity a;
+static int nthr;
+static int ngroups;
+static int counters[ENTRIES];
+static ck_barrier_combining_t barrier;
+static int barrier_wait;
+
+static void *
+thread(void *group)
+{
+ ck_barrier_combining_state_t state = CK_BARRIER_COMBINING_STATE_INITIALIZER;
+ int j, counter;
+ int i = 0;
+
+ aff_iterate(&a);
+
+ ck_pr_inc_int(&barrier_wait);
+ while (ck_pr_load_int(&barrier_wait) != (nthr * ngroups))
+ ck_pr_stall();
+
+ for (j = 0; j < ITERATE; j++) {
+ i = j++ & (ENTRIES - 1);
+ ck_pr_inc_int(&counters[i]);
+ ck_barrier_combining(&barrier, group, &state);
+ counter = ck_pr_load_int(&counters[i]);
+ if (counter != nthr * ngroups * (j / ENTRIES + 1)) {
+ ck_error("FAILED [%d:%d]: %d != %d\n", i, j - 1, counter, nthr * ngroups);
+ }
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ ck_barrier_combining_group_t *groupings;
+ ck_barrier_combining_group_t *init_root;
+ int i;
+
+ init_root = malloc(sizeof(ck_barrier_combining_group_t));
+ if (init_root == NULL) {
+ ck_error("ERROR: Could not allocate initial barrier structure\n");
+ }
+ ck_barrier_combining_init(&barrier, init_root);
+
+ if (argc < 4) {
+ ck_error("Usage: correct <total groups> <threads per group> <affinity delta>\n");
+ }
+
+ ngroups = atoi(argv[1]);
+ if (ngroups <= 0) {
+ ck_error("ERROR: Number of groups must be greater than 0\n");
+ }
+
+ nthr = atoi(argv[2]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ groupings = malloc(sizeof(ck_barrier_combining_group_t) * ngroups);
+ if (groupings == NULL) {
+ ck_error("Could not allocate thread barrier grouping structures\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr * ngroups);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[3]);
+
+ for (i = 0; i < ngroups; i++)
+ ck_barrier_combining_group_init(&barrier, groupings + i, nthr);
+
+ fprintf(stderr, "Creating threads (barrier)...");
+ for (i = 0; i < (nthr * ngroups); i++) {
+ if (pthread_create(&threads[i], NULL, thread, groupings + (i % ngroups))) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < (nthr * ngroups); i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return (0);
+}
+
diff --git a/regressions/ck_barrier/validate/barrier_dissemination.c b/regressions/ck_barrier/validate/barrier_dissemination.c
new file mode 100644
index 0000000..e8acc10
--- /dev/null
+++ b/regressions/ck_barrier/validate/barrier_dissemination.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_barrier.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 5000000
+#endif
+
+#ifndef ENTRIES
+#define ENTRIES 512
+#endif
+
+static struct affinity a;
+static int nthr;
+static int counters[ENTRIES];
+static int barrier_wait;
+
+static void *
+thread(void *b)
+{
+ ck_barrier_dissemination_t *barrier = b;
+ ck_barrier_dissemination_state_t state;
+ int j, k, counter;
+ int i = 0;
+
+ aff_iterate(&a);
+ ck_barrier_dissemination_subscribe(barrier, &state);
+
+ ck_pr_inc_int(&barrier_wait);
+ while (ck_pr_load_int(&barrier_wait) != nthr)
+ ck_pr_stall();
+
+ for (j = 0, k = 0; j < ITERATE; j++, k++) {
+ i = j++ & (ENTRIES - 1);
+ ck_pr_inc_int(&counters[i]);
+ ck_barrier_dissemination(barrier, &state);
+ counter = ck_pr_load_int(&counters[i]);
+ if (counter != nthr * (j / ENTRIES + 1)) {
+ ck_error("FAILED [%d:%d]: %d != %d\n", i, j - 1, counter, nthr);
+ }
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ ck_barrier_dissemination_t *barrier;
+ ck_barrier_dissemination_flag_t **barrier_internal;
+ pthread_t *threads;
+ int i, size;
+
+ if (argc < 3) {
+ ck_error("Usage: correct <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ barrier = malloc(sizeof(ck_barrier_dissemination_t) * nthr);
+ if (barrier == NULL) {
+ ck_error("ERROR: Could not allocate barrier structures\n");
+ }
+
+ barrier_internal = malloc(sizeof(ck_barrier_dissemination_flag_t *) * nthr);
+ if (barrier_internal == NULL) {
+ ck_error("ERROR: Could not allocate barrier structures\n");
+ }
+
+ size = ck_barrier_dissemination_size(nthr);
+ for (i = 0; i < nthr; ++i) {
+ barrier_internal[i] = malloc(sizeof(ck_barrier_dissemination_flag_t) * size);
+ if (barrier_internal[i] == NULL) {
+ ck_error("ERROR: Could not allocate barrier structures\n");
+ }
+ }
+ ck_barrier_dissemination_init(barrier, barrier_internal, nthr);
+
+ fprintf(stderr, "Creating threads (barrier)...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, barrier)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+
+ return (0);
+}
+
diff --git a/regressions/ck_barrier/validate/barrier_mcs.c b/regressions/ck_barrier/validate/barrier_mcs.c
new file mode 100644
index 0000000..c2e3f2b
--- /dev/null
+++ b/regressions/ck_barrier/validate/barrier_mcs.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_barrier.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 5000000
+#endif
+
+#ifndef ENTRIES
+#define ENTRIES 512
+#endif
+
+static struct affinity a;
+static int nthr;
+static int counters[ENTRIES];
+static int barrier_wait;
+
+static void *
+thread(void *b)
+{
+ ck_barrier_mcs_t *barrier = b;
+ ck_barrier_mcs_state_t state;
+ int j, counter;
+ int i = 0;
+
+ aff_iterate(&a);
+
+ ck_barrier_mcs_subscribe(barrier, &state);
+
+ ck_pr_inc_int(&barrier_wait);
+ while (ck_pr_load_int(&barrier_wait) != nthr)
+ ck_pr_stall();
+
+ for (j = 0; j < ITERATE; j++) {
+ i = j++ & (ENTRIES - 1);
+ ck_pr_inc_int(&counters[i]);
+ ck_barrier_mcs(barrier, &state);
+ counter = ck_pr_load_int(&counters[i]);
+ if (counter != nthr * (j / ENTRIES + 1)) {
+ ck_error("FAILED [%d:%d]: %d != %d\n", i, j - 1, counter, nthr);
+ }
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ ck_barrier_mcs_t *barrier;
+ int i;
+
+ if (argc < 3) {
+ ck_error("Usage: correct <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ barrier = malloc(sizeof(ck_barrier_mcs_t) * nthr);
+ if (barrier == NULL) {
+ ck_error("ERROR: Could not allocate barrier structures\n");
+ }
+ ck_barrier_mcs_init(barrier, nthr);
+
+ a.delta = atoi(argv[2]);
+
+ fprintf(stderr, "Creating threads (barrier)...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, barrier)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+
+ return (0);
+}
+
diff --git a/regressions/ck_barrier/validate/barrier_tournament.c b/regressions/ck_barrier/validate/barrier_tournament.c
new file mode 100644
index 0000000..f51dab8
--- /dev/null
+++ b/regressions/ck_barrier/validate/barrier_tournament.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_barrier.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 5000000
+#endif
+
+#ifndef ENTRIES
+#define ENTRIES 512
+#endif
+
+static struct affinity a;
+static int nthr;
+static int counters[ENTRIES];
+static int barrier_wait;
+static ck_barrier_tournament_t barrier;
+
+static void *
+thread(CK_CC_UNUSED void *unused)
+{
+ ck_barrier_tournament_state_t state;
+ int j, counter;
+ int i = 0;
+
+ aff_iterate(&a);
+ ck_barrier_tournament_subscribe(&barrier, &state);
+
+ ck_pr_inc_int(&barrier_wait);
+ while (ck_pr_load_int(&barrier_wait) != nthr)
+ ck_pr_stall();
+
+ for (j = 0; j < ITERATE; j++) {
+ i = j++ & (ENTRIES - 1);
+ ck_pr_inc_int(&counters[i]);
+ ck_barrier_tournament(&barrier, &state);
+ counter = ck_pr_load_int(&counters[i]);
+ if (counter != nthr * (j / ENTRIES + 1)) {
+ ck_error("FAILED [%d:%d]: %d != %d\n", i, j - 1, counter, nthr);
+ }
+ }
+
+ ck_pr_inc_int(&barrier_wait);
+ while (ck_pr_load_int(&barrier_wait) != nthr * 2)
+ ck_pr_stall();
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ ck_barrier_tournament_round_t **rounds;
+ int i;
+ unsigned int size;
+
+ if (argc < 3) {
+ ck_error("Usage: correct <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+ a.delta = atoi(argv[2]);
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ rounds = malloc(sizeof(ck_barrier_tournament_round_t *) * nthr);
+ if (rounds == NULL) {
+ ck_error("ERROR: Could not allocate barrier structures\n");
+ }
+
+ size = ck_barrier_tournament_size(nthr);
+ for (i = 0; i < nthr; ++i) {
+ rounds[i] = malloc(sizeof(ck_barrier_tournament_round_t) * size);
+ if (rounds[i] == NULL) {
+ ck_error("ERROR: Could not allocate barrier structures\n");
+ }
+ }
+
+ ck_barrier_tournament_init(&barrier, rounds, nthr);
+
+ fprintf(stderr, "Creating threads (barrier)...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return (0);
+}
+
diff --git a/regressions/ck_bitmap/validate/Makefile b/regressions/ck_bitmap/validate/Makefile
new file mode 100644
index 0000000..85e13c8
--- /dev/null
+++ b/regressions/ck_bitmap/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean
+
+OBJECTS=serial
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_bitmap.h
+ $(CC) $(CFLAGS) -o serial serial.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+check: all
+ ./serial
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_bitmap/validate/serial.c b/regressions/ck_bitmap/validate/serial.c
new file mode 100644
index 0000000..ba52588
--- /dev/null
+++ b/regressions/ck_bitmap/validate/serial.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * Copyright 2012-2014 AppNexus, Inc.
+ * Copyright 2012 Shreyas Prasad.
+ * Copyright 2014 Paul Khuong.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_bitmap.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "../../common.h"
+
+#ifndef STATIC_LENGTH
+#define STATIC_LENGTH 256
+#endif
+
+static unsigned int length = 256;
+static ck_bitmap_t *g_bits;
+
+static void
+check_iteration(ck_bitmap_t *bits, unsigned int len, bool initial)
+{
+ ck_bitmap_iterator_t iter;
+ unsigned int i = 0, j;
+
+ len += 1;
+ if (initial == true) {
+ if (bits == g_bits)
+ len = length;
+ else
+ len = STATIC_LENGTH;
+ }
+
+ ck_bitmap_iterator_init(&iter, bits);
+ for (j = 0; ck_bitmap_next(bits, &iter, &i) == true; j++) {
+ if (i == j)
+ continue;
+
+ ck_error("[4] ERROR: Expected bit %u, got bit %u\n", j, i);
+ }
+
+ if (j != len) {
+ ck_error("[5] ERROR: Expected length %u, got length %u\n", len, j);
+ }
+
+ return;
+}
+
+static void
+test(ck_bitmap_t *bits, unsigned int n_length, bool initial)
+{
+ bool r;
+ unsigned int i;
+ CK_BITMAP_INSTANCE(8) u;
+
+ CK_BITMAP_INIT(&u, 8, false);
+ CK_BITMAP_SET(&u, 1);
+ CK_BITMAP_SET(&u, 4);
+
+ for (i = 0; i < n_length; i++) {
+ if (ck_bitmap_test(bits, i) == !initial) {
+ ck_error("[0] ERROR [%u]: Expected %u got %u\n", i,
+ initial, !initial);
+ }
+ }
+
+ for (i = 0; i < n_length; i++) {
+ ck_bitmap_set(bits, i);
+ if (ck_bitmap_test(bits, i) == false) {
+ ck_error("[1] ERROR: Expected bit to be set: %u\n", i);
+ }
+
+ ck_bitmap_reset(bits, i);
+ if (ck_bitmap_test(bits, i) == true) {
+ ck_error("[2] ERROR: Expected bit to be cleared: %u\n", i);
+ }
+
+ r = ck_bitmap_bts(bits, i);
+ if (r == true) {
+ ck_error("[3] ERROR: Expected bit to be cleared before 1st bts: %u\n", i);
+ }
+ if (ck_bitmap_test(bits, i) == false) {
+ ck_error("[4] ERROR: Expected bit to be set: %u\n", i);
+ }
+ r = ck_bitmap_bts(bits, i);
+ if (r == false) {
+ ck_error("[5] ERROR: Expected bit to be set before 2nd bts: %u\n", i);
+ }
+ if (ck_bitmap_test(bits, i) == false) {
+ ck_error("[6] ERROR: Expected bit to be set: %u\n", i);
+ }
+
+ ck_bitmap_reset(bits, i);
+ if (ck_bitmap_test(bits, i) == true) {
+ ck_error("[7] ERROR: Expected bit to be cleared: %u\n", i);
+ }
+
+ ck_bitmap_set(bits, i);
+ if (ck_bitmap_test(bits, i) == false) {
+ ck_error("[8] ERROR: Expected bit to be set: %u\n", i);
+ }
+
+ check_iteration(bits, i, initial);
+ }
+
+ for (i = 0; i < n_length; i++) {
+ if (ck_bitmap_test(bits, i) == false) {
+ ck_error("[9] ERROR: Expected bit to be set: %u\n", i);
+ }
+ }
+
+ ck_bitmap_clear(bits);
+
+ for (i = 0; i < n_length; i++) {
+ if (ck_bitmap_test(bits, i) == true) {
+ ck_error("[10] ERROR: Expected bit to be reset: %u\n", i);
+ }
+ }
+
+ ck_bitmap_union(bits, CK_BITMAP(&u));
+ if (ck_bitmap_test(bits, 1) == false ||
+ ck_bitmap_test(bits, 4) == false) {
+ ck_error("ERROR: Expected union semantics.\n");
+ }
+
+ return;
+}
+
+static void
+test_init(bool init)
+{
+ ck_bitmap_t *bitmap;
+ size_t bytes;
+ unsigned int i;
+
+ bytes = ck_bitmap_size(length);
+ bitmap = malloc(bytes);
+ memset(bitmap, random(), bytes);
+
+ ck_bitmap_init(bitmap, length, init);
+
+ if (ck_bitmap_bits(bitmap) != length) {
+ ck_error("ERROR: Expected length %u got %u\n",
+ length, ck_bitmap_bits(bitmap));
+ }
+
+ for (i = 0; i < length; i++) {
+ if (ck_bitmap_test(bitmap, i) != init) {
+ ck_error("ERROR: Expected bit %i at index %u, got %i\n",
+ (int)init, i, (int)(!init));
+ }
+ }
+
+ free(bitmap);
+}
+
+static ck_bitmap_t *
+random_init(void)
+{
+ ck_bitmap_t *bitmap;
+ unsigned int i;
+
+ bitmap = malloc(ck_bitmap_size(length));
+ ck_bitmap_init(bitmap, length, false);
+
+ for (i = 0; i < length; i++) {
+ if (random() & 1) {
+ ck_bitmap_set(bitmap, i);
+ }
+ }
+
+ return bitmap;
+}
+
+static ck_bitmap_t *
+copy(const ck_bitmap_t *src)
+{
+ ck_bitmap_t *bitmap;
+ size_t bytes = ck_bitmap_size(ck_bitmap_bits(src));
+
+ bitmap = malloc(bytes);
+ memcpy(bitmap, src, bytes);
+ return bitmap;
+}
+
+static void
+test_counts(const ck_bitmap_t *x, const ck_bitmap_t *y)
+{
+ unsigned int count = 0;
+ unsigned int count_intersect = 0;
+ unsigned int i;
+
+ for (i = 0; i <= length * 2; i++) {
+ unsigned actual_limit = i;
+ unsigned int r;
+ bool check;
+
+ if (actual_limit > ck_bitmap_bits(x))
+ actual_limit = ck_bitmap_bits(x);
+
+ check = ck_bitmap_empty(x, i);
+ if (check != (count == 0)) {
+ ck_error("ck_bitmap_empty(%u): got %i expected %i\n",
+ i, (int)check, (int)(count == 0));
+ }
+
+ check = ck_bitmap_full(x, i);
+ if (check != (count == actual_limit)) {
+ ck_error("ck_bitmap_full(%u): got %i expected %i\n",
+			    i, (int)check, (int)(count == actual_limit));
+ }
+
+ r = ck_bitmap_count(x, i);
+ if (r != count) {
+ ck_error("ck_bitmap_count(%u): got %u expected %u\n",
+ i, r, count);
+ }
+
+ r = ck_bitmap_count_intersect(x, y, i);
+ if (r != count_intersect) {
+ ck_error("ck_bitmap_count_intersect(%u): got %u expected %u\n",
+ i, r, count_intersect);
+ }
+
+ if (i < length) {
+ count += ck_bitmap_test(x, i);
+ count_intersect += ck_bitmap_test(x, i) & ck_bitmap_test(y, i);
+ }
+ }
+}
+
+static void
+random_test(unsigned int seed)
+{
+ ck_bitmap_t *x, *x_copy, *y;
+ unsigned int i;
+
+ srandom(seed);
+
+ test_init(false);
+ test_init(true);
+
+ x = random_init();
+ y = random_init();
+
+#define TEST(routine, expected) do { \
+ x_copy = copy(x); \
+ routine(x_copy, y); \
+ for (i = 0; i < length; i++) { \
+ bool xi = ck_bitmap_test(x, i); \
+ bool yi = ck_bitmap_test(y, i); \
+ bool ri = ck_bitmap_test(x_copy, i); \
+ bool wanted = expected(xi, yi); \
+ \
+ if (ri != wanted) { \
+ ck_error("In " #routine " at %u: " \
+ "got %i expected %i\n", \
+ i, ri, wanted); \
+ } \
+ } \
+ free(x_copy); \
+ } while (0)
+
+#define OR(x, y) (x | y)
+#define AND(x, y) (x & y)
+#define ANDC2(x, y) (x & (~y))
+
+ TEST(ck_bitmap_union, OR);
+ TEST(ck_bitmap_intersection, AND);
+ TEST(ck_bitmap_intersection_negate, ANDC2);
+
+#undef ANDC2
+#undef AND
+#undef OR
+#undef TEST
+
+ test_counts(x, y);
+
+ for (i = 0; i < 4; i++) {
+ ck_bitmap_init(x, length, i & 1);
+ ck_bitmap_init(y, length, i >> 1);
+ test_counts(x, y);
+ }
+
+ free(y);
+ free(x);
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int bytes, base;
+ size_t i, j;
+
+ if (argc >= 2) {
+ length = atoi(argv[1]);
+ }
+
+ base = ck_bitmap_base(length);
+ bytes = ck_bitmap_size(length);
+ fprintf(stderr, "Configuration: %u bytes\n",
+ bytes);
+
+ g_bits = malloc(bytes);
+ memset(g_bits->map, 0xFF, base);
+ ck_bitmap_init(g_bits, length, false);
+ test(g_bits, length, false);
+
+ memset(g_bits->map, 0x00, base);
+ ck_bitmap_init(g_bits, length, true);
+ test(g_bits, length, true);
+
+ ck_bitmap_test(g_bits, length - 1);
+
+ CK_BITMAP_INSTANCE(STATIC_LENGTH) sb;
+ fprintf(stderr, "Static configuration: %zu bytes\n",
+ sizeof(sb));
+ memset(CK_BITMAP_BUFFER(&sb), 0xFF, ck_bitmap_base(STATIC_LENGTH));
+ CK_BITMAP_INIT(&sb, STATIC_LENGTH, false);
+ test(CK_BITMAP(&sb), STATIC_LENGTH, false);
+ memset(CK_BITMAP_BUFFER(&sb), 0x00, ck_bitmap_base(STATIC_LENGTH));
+ CK_BITMAP_INIT(&sb, STATIC_LENGTH, true);
+ test(CK_BITMAP(&sb), STATIC_LENGTH, true);
+
+ CK_BITMAP_CLEAR(&sb);
+ if (CK_BITMAP_TEST(&sb, 1) == true) {
+ ck_error("ERROR: Expected bit to be reset.\n");
+ }
+
+ CK_BITMAP_SET(&sb, 1);
+ if (CK_BITMAP_TEST(&sb, 1) == false) {
+ ck_error("ERROR: Expected bit to be set.\n");
+ }
+
+ CK_BITMAP_RESET(&sb, 1);
+ if (CK_BITMAP_TEST(&sb, 1) == true) {
+ ck_error("ERROR: Expected bit to be reset.\n");
+ }
+
+ for (i = 0; i < 4 * sizeof(unsigned int) * CHAR_BIT; i++) {
+ length = i;
+ for (j = 0; j < 10; j++) {
+ random_test(i * 10 + j);
+ }
+ }
+
+ return 0;
+}
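
A minimal sketch of the allocation and iteration pattern that check_iteration() verifies above; it is illustrative only (not part of the upstream sources), the width of 64 bits is an arbitrary choice, and the calls mirror those used in serial.c.

#include <ck_bitmap.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	ck_bitmap_t *bits;
	ck_bitmap_iterator_t iter;
	unsigned int i;

	/* ck_bitmap_size() reports the bytes required for a 64-bit map. */
	bits = malloc(ck_bitmap_size(64));
	ck_bitmap_init(bits, 64, false);	/* every bit starts clear */

	ck_bitmap_set(bits, 3);
	ck_bitmap_set(bits, 17);

	/* The iterator visits set bits in ascending order: 3, then 17. */
	ck_bitmap_iterator_init(&iter, bits);
	while (ck_bitmap_next(bits, &iter, &i) == true)
		printf("bit %u is set\n", i);

	free(bits);
	return 0;
}
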
diff --git a/regressions/ck_brlock/benchmark/Makefile b/regressions/ck_brlock/benchmark/Makefile
new file mode 100644
index 0000000..cd12e7c
--- /dev/null
+++ b/regressions/ck_brlock/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: clean distribution
+
+OBJECTS=latency throughput
+
+all: $(OBJECTS)
+
+latency: latency.c ../../../include/ck_brlock.h
+ $(CC) $(CFLAGS) -o latency latency.c
+
+throughput: throughput.c ../../../include/ck_brlock.h
+ $(CC) $(CFLAGS) -o throughput throughput.c
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_brlock/benchmark/latency.c b/regressions/ck_brlock/benchmark/latency.c
new file mode 100644
index 0000000..4db8e26
--- /dev/null
+++ b/regressions/ck_brlock/benchmark/latency.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_brlock.h>
+#include <ck_rwlock.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+int
+main(void)
+{
+ uint64_t s_b, e_b, i;
+ ck_brlock_t brlock = CK_BRLOCK_INITIALIZER;
+ ck_brlock_reader_t r[8];
+ ck_rwlock_t naive;
+
+ for (i = 0; i < sizeof(r) / sizeof(*r); i++)
+ ck_brlock_read_register(&brlock, &r[i]);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_brlock_write_lock(&brlock);
+ ck_brlock_write_unlock(&brlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_brlock_write_lock(&brlock);
+ ck_brlock_write_unlock(&brlock);
+ }
+ e_b = rdtsc();
+ printf("WRITE: brlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ ck_rwlock_init(&naive);
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_write_lock(&naive);
+ ck_rwlock_write_unlock(&naive);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_write_lock(&naive);
+ ck_rwlock_write_unlock(&naive);
+ }
+ e_b = rdtsc();
+ printf("WRITE: naive %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_brlock_read_lock(&brlock, &r[0]);
+ ck_brlock_read_unlock(&r[0]);
+ }
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_brlock_read_lock(&brlock, &r[0]);
+ ck_brlock_read_unlock(&r[0]);
+ }
+ e_b = rdtsc();
+ printf("READ: brlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_read_lock(&naive);
+ ck_rwlock_read_unlock(&naive);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_read_lock(&naive);
+ ck_rwlock_read_unlock(&naive);
+ }
+ e_b = rdtsc();
+ printf("READ: naive %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ return (0);
+}
+
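
The benchmark above measures the full read and write paths; the sketch below condenses the usage pattern it assumes (illustrative only, not part of the upstream sources; reader() and writer() are invented names). Each reader registers a ck_brlock_reader_t once and then brackets its read-side sections with it, while writers need no per-thread state.

#include <ck_brlock.h>

static ck_brlock_t lock = CK_BRLOCK_INITIALIZER;
static int shared;

static void
reader(void)
{
	ck_brlock_reader_t r;

	ck_brlock_read_register(&lock, &r);	/* once per reader thread */

	ck_brlock_read_lock(&lock, &r);
	(void)shared;				/* read-side critical section */
	ck_brlock_read_unlock(&r);

	ck_brlock_read_unregister(&lock, &r);
	return;
}

static void
writer(void)
{

	ck_brlock_write_lock(&lock);
	shared++;				/* exclusive critical section */
	ck_brlock_write_unlock(&lock);
	return;
}
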
diff --git a/regressions/ck_brlock/benchmark/throughput.c b/regressions/ck_brlock/benchmark/throughput.c
new file mode 100644
index 0000000..27ed803
--- /dev/null
+++ b/regressions/ck_brlock/benchmark/throughput.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_brlock.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+static int barrier;
+static int threads;
+static unsigned int flag CK_CC_CACHELINE;
+static ck_brlock_t brlock = CK_BRLOCK_INITIALIZER;
+static struct affinity affinity;
+
+static void *
+thread_brlock(void *pun)
+{
+ uint64_t s_b, e_b, a, i;
+ ck_brlock_reader_t r;
+ uint64_t *value = pun;
+
+ if (aff_iterate(&affinity) != 0) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_brlock_read_register(&brlock, &r);
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads)
+ ck_pr_stall();
+
+ for (i = 1, a = 0;; i++) {
+ s_b = rdtsc();
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ ck_brlock_read_lock(&brlock, &r);
+ ck_brlock_read_unlock(&r);
+ e_b = rdtsc();
+
+ a += (e_b - s_b) >> 4;
+
+ if (ck_pr_load_uint(&flag) == 1)
+ break;
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads * 2)
+ ck_pr_stall();
+
+ *value = (a / i);
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int t;
+ pthread_t *p;
+ uint64_t *latency;
+
+ if (argc != 3) {
+ ck_error("Usage: throughput <delta> <threads>\n");
+ }
+
+ threads = atoi(argv[2]);
+ if (threads <= 0) {
+ ck_error("ERROR: Threads must be a value > 0.\n");
+ }
+
+ p = malloc(sizeof(pthread_t) * threads);
+ if (p == NULL) {
+		ck_error("ERROR: Failed to allocate thread structures.\n");
+ }
+
+ latency = malloc(sizeof(uint64_t) * threads);
+ if (latency == NULL) {
+ ck_error("ERROR: Failed to create latency buffer.\n");
+ }
+
+ affinity.delta = atoi(argv[1]);
+ affinity.request = 0;
+
+ fprintf(stderr, "Creating threads (brlock)...");
+ for (t = 0; t < threads; t++) {
+ if (pthread_create(&p[t], NULL, thread_brlock, latency + t) != 0) {
+ ck_error("ERROR: Could not create thread %d\n", t);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ common_sleep(10);
+ ck_pr_store_uint(&flag, 1);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (t = 0; t < threads; t++)
+ pthread_join(p[t], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (t = 1; t <= threads; t++)
+ printf("%10u %20" PRIu64 "\n", t, latency[t - 1]);
+
+ return (0);
+}
+
diff --git a/regressions/ck_brlock/validate/Makefile b/regressions/ck_brlock/validate/Makefile
new file mode 100644
index 0000000..3a49c43
--- /dev/null
+++ b/regressions/ck_brlock/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=validate
+
+all: $(OBJECTS)
+
+validate: validate.c ../../../include/ck_brlock.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate $(CORES) 1
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_brlock/validate/validate.c b/regressions/ck_brlock/validate/validate.c
new file mode 100644
index 0000000..20f285a
--- /dev/null
+++ b/regressions/ck_brlock/validate/validate.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_brlock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+static struct affinity a;
+static unsigned int locked = 0;
+static int nthr;
+static ck_brlock_t lock = CK_BRLOCK_INITIALIZER;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ ck_brlock_reader_t r;
+ int i = ITERATE;
+ unsigned int l;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_brlock_read_register(&lock, &r);
+
+ while (i--) {
+ ck_brlock_write_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_brlock_write_unlock(&lock);
+
+ ck_brlock_read_lock(&lock, &r);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_brlock_read_unlock(&r);
+ }
+
+ ck_brlock_read_unregister(&lock, &r);
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ int i;
+
+ if (argc != 3) {
+ ck_error("Usage: validate <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ fprintf(stderr, "Creating threads (mutual exclusion)...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return (0);
+}
+
diff --git a/regressions/ck_bytelock/benchmark/Makefile b/regressions/ck_bytelock/benchmark/Makefile
new file mode 100644
index 0000000..c819099
--- /dev/null
+++ b/regressions/ck_bytelock/benchmark/Makefile
@@ -0,0 +1,14 @@
+.PHONY: clean distribution
+
+OBJECTS=latency
+
+all: $(OBJECTS)
+
+latency: latency.c
+ $(CC) $(CFLAGS) -o latency latency.c
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_bytelock/benchmark/latency.c b/regressions/ck_bytelock/benchmark/latency.c
new file mode 100644
index 0000000..be30165
--- /dev/null
+++ b/regressions/ck_bytelock/benchmark/latency.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_bytelock.h>
+#include <ck_rwlock.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+int
+main(void)
+{
+ uint64_t s_b, e_b, i;
+ ck_bytelock_t bytelock = CK_BYTELOCK_INITIALIZER;
+ ck_rwlock_t naive;
+
+ for (i = 0; i < STEPS; i++) {
+ ck_bytelock_write_lock(&bytelock, 1);
+ ck_bytelock_write_unlock(&bytelock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_bytelock_write_lock(&bytelock, 1);
+ ck_bytelock_write_unlock(&bytelock);
+ }
+ e_b = rdtsc();
+ printf("WRITE: bytelock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ ck_rwlock_init(&naive);
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_write_lock(&naive);
+ ck_rwlock_write_unlock(&naive);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_write_lock(&naive);
+ ck_rwlock_write_unlock(&naive);
+ }
+ e_b = rdtsc();
+ printf("WRITE: naive %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_bytelock_read_lock(&bytelock, 1);
+ ck_bytelock_read_unlock(&bytelock, 1);
+ }
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_bytelock_read_lock(&bytelock, 1);
+ ck_bytelock_read_unlock(&bytelock, 1);
+ }
+ e_b = rdtsc();
+ printf("READ: bytelock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_read_lock(&naive);
+ ck_rwlock_read_unlock(&naive);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_read_lock(&naive);
+ ck_rwlock_read_unlock(&naive);
+ }
+ e_b = rdtsc();
+ printf("READ: naive %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ return (0);
+}
+
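
Unlike ck_brlock, ck_bytelock identifies readers by a caller-supplied slot rather than a registered structure. The sketch below condenses the calling convention used in this benchmark (illustrative only, not part of the upstream sources; example() is an invented name and slot 1 is an arbitrary choice). The validation test that follows suggests slot values past the lock's inline reader array are still accepted, presumably via a slower shared path.

#include <ck_bytelock.h>

static ck_bytelock_t lock = CK_BYTELOCK_INITIALIZER;
static int shared;

/* slot is a per-thread identifier; the benchmark above always passes 1. */
static void
example(unsigned int slot)
{

	ck_bytelock_write_lock(&lock, slot);
	shared++;				/* exclusive section */
	ck_bytelock_write_unlock(&lock);

	ck_bytelock_read_lock(&lock, slot);
	(void)shared;				/* shared, read-only section */
	ck_bytelock_read_unlock(&lock, slot);
	return;
}
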
diff --git a/regressions/ck_bytelock/validate/Makefile b/regressions/ck_bytelock/validate/Makefile
new file mode 100644
index 0000000..2a890e0
--- /dev/null
+++ b/regressions/ck_bytelock/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=validate
+
+all: $(OBJECTS)
+
+validate: validate.c ../../../include/ck_bytelock.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate $(CORES) 1
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_bytelock/validate/validate.c b/regressions/ck_bytelock/validate/validate.c
new file mode 100644
index 0000000..c164ce4
--- /dev/null
+++ b/regressions/ck_bytelock/validate/validate.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_bytelock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 5000000
+#endif
+
+struct block {
+ unsigned int tid;
+};
+
+static struct affinity a;
+static unsigned int locked = 0;
+static int nthr;
+static ck_bytelock_t lock CK_CC_CACHELINE = CK_BYTELOCK_INITIALIZER;
+
+static void *
+thread(void *null)
+{
+ struct block *context = null;
+ int i = ITERATE;
+ unsigned int l;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ if (context->tid == (unsigned int)nthr - 1)
+ context->tid = sizeof(lock.readers) + 1;
+
+ while (i--) {
+ ck_bytelock_write_lock(&lock, context->tid);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_bytelock_write_unlock(&lock);
+
+ ck_bytelock_read_lock(&lock, context->tid);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_bytelock_read_unlock(&lock, context->tid);
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ struct block *context;
+ int i;
+
+ if (argc != 3) {
+		ck_error("Usage: validate <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ context = malloc(sizeof(struct block) * nthr);
+ if (context == NULL) {
+ ck_error("ERROR: Could not allocate thread contexts\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ fprintf(stderr, "Creating threads (mutual exclusion)...");
+ for (i = 0; i < nthr; i++) {
+ context[i].tid = i + 1;
+ if (pthread_create(&threads[i], NULL, thread, context + i)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return (0);
+}
+
diff --git a/regressions/ck_cohort/benchmark/Makefile b/regressions/ck_cohort/benchmark/Makefile
new file mode 100644
index 0000000..6af18b9
--- /dev/null
+++ b/regressions/ck_cohort/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: all clean
+
+OBJECTS=ck_cohort.THROUGHPUT ck_cohort.LATENCY
+
+all: $(OBJECTS)
+
+ck_cohort.THROUGHPUT: throughput.c
+ $(CC) $(CFLAGS) -o ck_cohort.THROUGHPUT throughput.c -lm
+
+ck_cohort.LATENCY: ck_cohort.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_cohort.LATENCY ck_cohort.c
+
+clean:
+ rm -rf *.dSYM *.exe $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE -lm
diff --git a/regressions/ck_cohort/benchmark/ck_cohort.c b/regressions/ck_cohort/benchmark/ck_cohort.c
new file mode 100644
index 0000000..954b616
--- /dev/null
+++ b/regressions/ck_cohort/benchmark/ck_cohort.c
@@ -0,0 +1,8 @@
+#include "../ck_cohort.h"
+
+#include <ck_cohort.h>
+#ifdef THROUGHPUT
+#include "../../ck_spinlock/benchmark/throughput.h"
+#elif defined(LATENCY)
+#include "../../ck_spinlock/benchmark/latency.h"
+#endif
diff --git a/regressions/ck_cohort/benchmark/throughput.c b/regressions/ck_cohort/benchmark/throughput.c
new file mode 100644
index 0000000..7c4776d
--- /dev/null
+++ b/regressions/ck_cohort/benchmark/throughput.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * Copyright 2013 Brendon Scheinman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_cohort.h>
+#include <ck_md.h>
+#include <ck_spinlock.h>
+
+#include "../../common.h"
+
+#define max(x, y) (((x) > (y)) ? (x) : (y))
+
+static struct affinity a;
+static unsigned int ready;
+
+struct counters {
+ uint64_t value;
+} CK_CC_CACHELINE;
+
+static struct counters *count;
+static uint64_t nthr;
+static unsigned int n_cohorts;
+static unsigned int barrier;
+static int critical CK_CC_CACHELINE;
+
+static void
+ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+
+ (void)context;
+ ck_spinlock_fas_lock(lock);
+ return;
+}
+
+static void
+ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+
+ (void)context;
+ ck_spinlock_fas_unlock(lock);
+ return;
+}
+
+static bool
+ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+
+ (void)context;
+ return ck_spinlock_fas_locked(lock);
+}
+
+CK_COHORT_PROTOTYPE(basic,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context)
+
+struct cohort_record {
+ CK_COHORT_INSTANCE(basic) cohort;
+} CK_CC_CACHELINE;
+static struct cohort_record *cohorts;
+
+static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER;
+
+struct block {
+ unsigned int tid;
+};
+
+static void *
+fairness(void *null)
+{
+ struct block *context = null;
+ unsigned int i = context->tid;
+ volatile int j;
+ long int base;
+ unsigned int core;
+ CK_COHORT_INSTANCE(basic) *cohort;
+
+
+ if (aff_iterate_core(&a, &core)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ cohort = &((cohorts + (core / (int)(a.delta)) % n_cohorts)->cohort);
+
+ while (ck_pr_load_uint(&ready) == 0);
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) != nthr);
+
+ while (ck_pr_load_uint(&ready)) {
+ CK_COHORT_LOCK(basic, cohort, NULL, NULL);
+
+ count[i].value++;
+ if (critical) {
+ base = common_lrand48() % critical;
+ for (j = 0; j < base; j++);
+ }
+
+ CK_COHORT_UNLOCK(basic, cohort, NULL, NULL);
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ uint64_t v, d;
+ unsigned int i;
+ pthread_t *threads;
+ struct block *context;
+ ck_spinlock_t *local_lock;
+
+ if (argc != 5) {
+ ck_error("Usage: ck_cohort <number of cohorts> <threads per cohort> "
+ "<affinity delta> <critical section>\n");
+ }
+
+ n_cohorts = atoi(argv[1]);
+ if (n_cohorts <= 0) {
+ ck_error("ERROR: Number of cohorts must be greater than 0\n");
+ }
+
+ nthr = n_cohorts * atoi(argv[2]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ critical = atoi(argv[4]);
+ if (critical < 0) {
+ ck_error("ERROR: critical section cannot be negative\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ cohorts = malloc(sizeof(struct cohort_record) * n_cohorts);
+ if (cohorts == NULL) {
+ ck_error("ERROR: Could not allocate cohort structures\n");
+ }
+
+ context = malloc(sizeof(struct block) * nthr);
+ if (context == NULL) {
+ ck_error("ERROR: Could not allocate thread contexts\n");
+ }
+
+ a.delta = atoi(argv[2]);
+ a.request = 0;
+
+ count = malloc(sizeof(*count) * nthr);
+ if (count == NULL) {
+ ck_error("ERROR: Could not create acquisition buffer\n");
+ }
+ memset(count, 0, sizeof(*count) * nthr);
+
+ fprintf(stderr, "Creating cohorts...");
+ for (i = 0 ; i < n_cohorts ; i++) {
+ local_lock = malloc(max(CK_MD_CACHELINE, sizeof(ck_spinlock_t)));
+ if (local_lock == NULL) {
+ ck_error("ERROR: Could not allocate local lock\n");
+ }
+ CK_COHORT_INIT(basic, &((cohorts + i)->cohort), &global_lock, local_lock,
+ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
+ local_lock = NULL;
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Creating threads (fairness)...");
+ for (i = 0; i < nthr; i++) {
+ context[i].tid = i;
+ if (pthread_create(&threads[i], NULL, fairness, context + i)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ ck_pr_store_uint(&ready, 1);
+ common_sleep(10);
+ ck_pr_store_uint(&ready, 0);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (i = 0, v = 0; i < nthr; i++) {
+ printf("%d %15" PRIu64 "\n", i, count[i].value);
+ v += count[i].value;
+ }
+
+ printf("\n# total : %15" PRIu64 "\n", v);
+ printf("# throughput : %15" PRIu64 " a/s\n", (v /= nthr) / 10);
+
+ for (i = 0, d = 0; i < nthr; i++)
+ d += (count[i].value - v) * (count[i].value - v);
+
+ printf("# average : %15" PRIu64 "\n", v);
+ printf("# deviation : %.2f (%.2f%%)\n\n", sqrt(d / nthr), (sqrt(d / nthr) / v) * 100.00);
+
+ return 0;
+}
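
A condensed sketch of the cohort construction used above (illustrative only, not part of the upstream sources; the prototype name sketch and the wrapper names are invented). Both the global and the local lock are plain FAS spinlocks wrapped to accept the unused context pointer, mirroring the wrappers defined in throughput.c.

#include <stdbool.h>

#include <ck_cohort.h>
#include <ck_spinlock.h>

static void
lock_with_context(ck_spinlock_fas_t *lock, void *context)
{

	(void)context;
	ck_spinlock_fas_lock(lock);
	return;
}

static void
unlock_with_context(ck_spinlock_fas_t *lock, void *context)
{

	(void)context;
	ck_spinlock_fas_unlock(lock);
	return;
}

static bool
locked_with_context(ck_spinlock_fas_t *lock, void *context)
{

	(void)context;
	return ck_spinlock_fas_locked(lock);
}

CK_COHORT_PROTOTYPE(sketch,
    lock_with_context, unlock_with_context, locked_with_context,
    lock_with_context, unlock_with_context, locked_with_context)

static ck_spinlock_fas_t global_lock = CK_SPINLOCK_FAS_INITIALIZER;
static ck_spinlock_fas_t local_lock = CK_SPINLOCK_FAS_INITIALIZER;
static CK_COHORT_INSTANCE(sketch) cohort = CK_COHORT_INITIALIZER;

static void
example(void)
{

	CK_COHORT_INIT(sketch, &cohort, &global_lock, &local_lock,
	    CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);

	CK_COHORT_LOCK(sketch, &cohort, NULL, NULL);
	/* critical section */
	CK_COHORT_UNLOCK(sketch, &cohort, NULL, NULL);
	return;
}
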
diff --git a/regressions/ck_cohort/ck_cohort.h b/regressions/ck_cohort/ck_cohort.h
new file mode 100644
index 0000000..b0d7f0a
--- /dev/null
+++ b/regressions/ck_cohort/ck_cohort.h
@@ -0,0 +1,35 @@
+#define LOCK_NAME "ck_cohort"
+#define LOCK_DEFINE \
+ static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER; \
+ static ck_spinlock_fas_t local_fas_lock = CK_SPINLOCK_FAS_INITIALIZER; \
+ static void \
+ ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context) \
+ { \
+ (void)context; \
+ ck_spinlock_fas_lock(lock); \
+ } \
+ \
+ static void \
+ ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context) \
+ { \
+ (void)context; \
+ ck_spinlock_fas_unlock(lock); \
+ } \
+ \
+ static bool \
+ ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) \
+ { \
+ (void)context; \
+ return ck_spinlock_fas_locked(lock); \
+ } \
+ CK_COHORT_PROTOTYPE(fas_fas, \
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, \
+ ck_spinlock_fas_locked_with_context, ck_spinlock_fas_lock_with_context, \
+ ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context) \
+ static CK_COHORT_INSTANCE(fas_fas) CK_CC_CACHELINE cohort = CK_COHORT_INITIALIZER
+
+
+#define LOCK_INIT CK_COHORT_INIT(fas_fas, &cohort, &global_fas_lock, &local_fas_lock, \
+ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT)
+#define LOCK CK_COHORT_LOCK(fas_fas, &cohort, NULL, NULL)
+#define UNLOCK CK_COHORT_UNLOCK(fas_fas, &cohort, NULL, NULL)
diff --git a/regressions/ck_cohort/validate/Makefile b/regressions/ck_cohort/validate/Makefile
new file mode 100644
index 0000000..145af3a
--- /dev/null
+++ b/regressions/ck_cohort/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=validate
+
+all: $(OBJECTS)
+
+validate: validate.c ../../../include/ck_cohort.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate `expr $(CORES) / 2` 2 1
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_cohort/validate/validate.c b/regressions/ck_cohort/validate/validate.c
new file mode 100644
index 0000000..cffbf77
--- /dev/null
+++ b/regressions/ck_cohort/validate/validate.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * Copyright 2013 Brendon Scheinman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <ck_pr.h>
+#include <ck_cohort.h>
+#include <ck_spinlock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+static struct affinity a;
+static unsigned int locked;
+static int nthr;
+static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER;
+
+static void
+ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_fas_lock(lock);
+}
+
+static void
+ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_fas_unlock(lock);
+}
+
+static bool
+ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ return ck_spinlock_fas_locked(lock);
+}
+
+static bool
+ck_spinlock_fas_trylock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ return ck_spinlock_fas_trylock(lock);
+}
+
+CK_COHORT_TRYLOCK_PROTOTYPE(fas_fas,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context,
+ ck_spinlock_fas_locked_with_context, ck_spinlock_fas_trylock_with_context,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context,
+ ck_spinlock_fas_locked_with_context, ck_spinlock_fas_trylock_with_context)
+static CK_COHORT_INSTANCE(fas_fas) *cohorts;
+static int n_cohorts;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ int i = ITERATE;
+ unsigned int l;
+ unsigned int core;
+ CK_COHORT_INSTANCE(fas_fas) *cohort;
+
+ if (aff_iterate_core(&a, &core)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ cohort = cohorts + (core / (int)(a.delta)) % n_cohorts;
+
+ while (i--) {
+
+ if (i & 1) {
+ CK_COHORT_LOCK(fas_fas, cohort, NULL, NULL);
+ } else {
+ while (CK_COHORT_TRYLOCK(fas_fas, cohort, NULL, NULL, NULL) == false) {
+ ck_pr_stall();
+ }
+ }
+
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_COHORT_UNLOCK(fas_fas, cohort, NULL, NULL);
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ int threads_per_cohort;
+ ck_spinlock_fas_t *local_lock;
+ int i;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <number of cohorts> <threads per cohort> <affinity delta>\n");
+ }
+
+ n_cohorts = atoi(argv[1]);
+ if (n_cohorts <= 0) {
+		fprintf(stderr, "setting number of cohorts to 1\n");
+ n_cohorts = 1;
+ }
+
+ threads_per_cohort = atoi(argv[2]);
+ if (threads_per_cohort <= 0) {
+ ck_error("ERROR: Threads per cohort must be greater than 0\n");
+ }
+
+ nthr = n_cohorts * threads_per_cohort;
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[3]);
+
+ fprintf(stderr, "Creating cohorts...");
+ cohorts = malloc(sizeof(CK_COHORT_INSTANCE(fas_fas)) * n_cohorts);
+ for (i = 0 ; i < n_cohorts ; i++) {
+ local_lock = malloc(sizeof(ck_spinlock_fas_t));
+ CK_COHORT_INIT(fas_fas, cohorts + i, &global_fas_lock, local_lock,
+ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Creating threads...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return (0);
+}
+
diff --git a/regressions/ck_epoch/validate/Makefile b/regressions/ck_epoch/validate/Makefile
new file mode 100644
index 0000000..446c008
--- /dev/null
+++ b/regressions/ck_epoch/validate/Makefile
@@ -0,0 +1,42 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_stack ck_epoch_synchronize ck_epoch_poll ck_epoch_call \
+ ck_epoch_section ck_epoch_section_2 torture
+HALF=`expr $(CORES) / 2`
+
+all: $(OBJECTS)
+
+check: all
+ ./ck_stack $(CORES) 1
+ ./ck_epoch_synchronize $(HALF) $(HALF) 1
+ ./ck_epoch_poll $(CORES) 1 1
+ ./ck_epoch_section
+ ./ck_epoch_section_2 $(HALF) $(HALF) 1
+ ./torture $(HALF) $(HALF) 1
+
+ck_epoch_synchronize: ck_epoch_synchronize.c ../../../include/ck_stack.h ../../../include/ck_epoch.h ../../../src/ck_epoch.c
+ $(CC) $(CFLAGS) -o ck_epoch_synchronize ck_epoch_synchronize.c ../../../src/ck_epoch.c
+
+ck_epoch_poll: ck_epoch_poll.c ../../../include/ck_stack.h ../../../include/ck_epoch.h ../../../src/ck_epoch.c
+ $(CC) $(CFLAGS) -o ck_epoch_poll ck_epoch_poll.c ../../../src/ck_epoch.c
+
+torture: torture.c ../../../include/ck_epoch.h ../../../src/ck_epoch.c
+ $(CC) $(CFLAGS) -o torture torture.c ../../../src/ck_epoch.c
+
+ck_epoch_section: ck_epoch_section.c ../../../include/ck_epoch.h ../../../src/ck_epoch.c
+ $(CC) $(CFLAGS) -o ck_epoch_section ck_epoch_section.c ../../../src/ck_epoch.c
+
+ck_epoch_section_2: ck_epoch_section_2.c ../../../include/ck_epoch.h ../../../src/ck_epoch.c
+ $(CC) $(CFLAGS) -o ck_epoch_section_2 ck_epoch_section_2.c ../../../src/ck_epoch.c
+
+ck_epoch_call: ck_epoch_call.c ../../../include/ck_stack.h ../../../include/ck_epoch.h ../../../src/ck_epoch.c
+ $(CC) $(CFLAGS) -o ck_epoch_call ck_epoch_call.c ../../../src/ck_epoch.c
+
+ck_stack: ck_stack.c ../../../include/ck_stack.h ../../../include/ck_epoch.h ../../../src/ck_epoch.c
+ $(CC) $(CFLAGS) -o ck_stack ck_stack.c ../../../src/ck_epoch.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_epoch/validate/ck_epoch_call.c b/regressions/ck_epoch/validate/ck_epoch_call.c
new file mode 100644
index 0000000..29e0df8
--- /dev/null
+++ b/regressions/ck_epoch/validate/ck_epoch_call.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2014 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <ck_epoch.h>
+
+#include "../../common.h"
+
+static ck_epoch_t epoch;
+static unsigned int counter;
+static ck_epoch_record_t record[2];
+
+static void
+cb(ck_epoch_entry_t *p)
+{
+
+ if (counter == 0)
+ ck_epoch_call(&record[1], p, cb);
+
+ printf("Counter value: %u -> %u\n",
+ counter, counter + 1);
+ counter++;
+ return;
+}
+
+int
+main(void)
+{
+ ck_epoch_entry_t entry;
+
+ ck_epoch_register(&epoch, &record[0]);
+ ck_epoch_register(&epoch, &record[1]);
+
+ ck_epoch_call(&record[1], &entry, cb);
+ ck_epoch_barrier(&record[1]);
+ ck_epoch_barrier(&record[1]);
+ if (counter != 2)
+ ck_error("Expected counter value 2, read %u.\n", counter);
+
+ return 0;
+}
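
The test above depends on cb() re-arming itself once, which is why two ck_epoch_barrier() calls are needed before the counter reaches 2. For contrast, a single-shot deferral looks like the sketch below (illustrative only, not part of the upstream sources; dtor() is an invented name).

#include <stdio.h>

#include <ck_epoch.h>

static ck_epoch_t e;
static ck_epoch_record_t rec;

static void
dtor(ck_epoch_entry_t *p)
{

	(void)p;
	puts("deferred callback ran");
	return;
}

int
main(void)
{
	static ck_epoch_entry_t entry;

	ck_epoch_init(&e);
	ck_epoch_register(&e, &rec);

	ck_epoch_call(&rec, &entry, dtor);	/* defer the callback */
	ck_epoch_barrier(&rec);			/* wait out a grace period and dispatch */
	return 0;
}
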
diff --git a/regressions/ck_epoch/validate/ck_epoch_poll.c b/regressions/ck_epoch/validate/ck_epoch_poll.c
new file mode 100644
index 0000000..aec6dd0
--- /dev/null
+++ b/regressions/ck_epoch/validate/ck_epoch_poll.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+#include <ck_epoch.h>
+#include <ck_stack.h>
+
+#include "../../common.h"
+
+static unsigned int n_rd;
+static unsigned int n_wr;
+static unsigned int n_threads;
+static unsigned int barrier;
+static unsigned int e_barrier;
+static unsigned int readers;
+static unsigned int writers;
+
+#ifndef PAIRS_S
+#define PAIRS_S 100000
+#endif
+
+#ifndef ITERATE_S
+#define ITERATE_S 20
+#endif
+
+struct node {
+ unsigned int value;
+ ck_stack_entry_t stack_entry;
+ ck_epoch_entry_t epoch_entry;
+};
+static ck_stack_t stack = CK_STACK_INITIALIZER;
+static ck_epoch_t stack_epoch;
+CK_STACK_CONTAINER(struct node, stack_entry, stack_container)
+CK_EPOCH_CONTAINER(struct node, epoch_entry, epoch_container)
+static struct affinity a;
+static const char animate[] = "-/|\\";
+
+static void
+destructor(ck_epoch_entry_t *p)
+{
+ struct node *e = epoch_container(p);
+
+ free(e);
+ return;
+}
+
+static void *
+read_thread(void *unused CK_CC_UNUSED)
+{
+ unsigned int j;
+ ck_epoch_record_t record CK_CC_CACHELINE;
+ ck_stack_entry_t *cursor, *n;
+
+ ck_epoch_register(&stack_epoch, &record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ while (CK_STACK_ISEMPTY(&stack) == true) {
+ if (ck_pr_load_uint(&readers) != 0)
+ break;
+
+ ck_pr_stall();
+ }
+
+ j = 0;
+ for (;;) {
+ ck_epoch_begin(&record, NULL);
+ CK_STACK_FOREACH(&stack, cursor) {
+ if (cursor == NULL)
+ continue;
+
+ n = CK_STACK_NEXT(cursor);
+ j += ck_pr_load_ptr(&n) != NULL;
+ }
+ ck_epoch_end(&record, NULL);
+
+ if (j != 0 && ck_pr_load_uint(&readers) == 0)
+ ck_pr_store_uint(&readers, 1);
+
+ if (CK_STACK_ISEMPTY(&stack) == true &&
+ ck_pr_load_uint(&e_barrier) != 0)
+ break;
+ }
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < n_threads);
+
+ fprintf(stderr, "[R] Observed entries: %u\n", j);
+ return (NULL);
+}
+
+static void *
+write_thread(void *unused CK_CC_UNUSED)
+{
+ struct node **entry, *e;
+ unsigned int i, j, tid;
+ ck_epoch_record_t record;
+ ck_stack_entry_t *s;
+
+ ck_epoch_register(&stack_epoch, &record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_uint(&writers, 1);
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ entry = malloc(sizeof(struct node *) * PAIRS_S);
+ if (entry == NULL) {
+ ck_error("Failed allocation.\n");
+ }
+
+ for (j = 0; j < ITERATE_S; j++) {
+ for (i = 0; i < PAIRS_S; i++) {
+ entry[i] = malloc(sizeof(struct node));
+			if (entry[i] == NULL) {
+ ck_error("Failed individual allocation\n");
+ }
+ }
+
+ for (i = 0; i < PAIRS_S; i++) {
+ ck_stack_push_upmc(&stack, &entry[i]->stack_entry);
+ }
+
+ while (ck_pr_load_uint(&readers) == 0)
+ ck_pr_stall();
+
+ if (tid == 0) {
+ fprintf(stderr, "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b[W] %2.2f: %c",
+ (double)j / ITERATE_S, animate[i % strlen(animate)]);
+ }
+
+ for (i = 0; i < PAIRS_S; i++) {
+ ck_epoch_begin(&record, NULL);
+ s = ck_stack_pop_upmc(&stack);
+ e = stack_container(s);
+ ck_epoch_end(&record, NULL);
+
+ ck_epoch_call(&record, &e->epoch_entry, destructor);
+ ck_epoch_poll(&record);
+ }
+ }
+
+ ck_epoch_barrier(&record);
+
+ if (tid == 0) {
+ fprintf(stderr, "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b[W] Peak: %u (%2.2f%%)\n Reclamations: %lu\n\n",
+ record.n_peak,
+ (double)record.n_peak / ((double)PAIRS_S * ITERATE_S) * 100,
+ record.n_dispatch);
+ }
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < n_threads);
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int i;
+ pthread_t *threads;
+
+ if (argc != 4) {
+		ck_error("Usage: ck_epoch_poll <#readers> <#writers> <affinity delta>\n");
+ }
+
+ n_rd = atoi(argv[1]);
+ n_wr = atoi(argv[2]);
+ n_threads = n_wr + n_rd;
+
+ a.delta = atoi(argv[3]);
+ a.request = 0;
+
+ threads = malloc(sizeof(pthread_t) * n_threads);
+ ck_epoch_init(&stack_epoch);
+
+ for (i = 0; i < n_rd; i++)
+ pthread_create(threads + i, NULL, read_thread, NULL);
+
+ do {
+ pthread_create(threads + i, NULL, write_thread, NULL);
+ } while (++i < n_wr + n_rd);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ return (0);
+}
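
write_thread() above interleaves deferral with non-blocking reclamation. The sketch below isolates that pattern (illustrative only, not part of the upstream sources; setup(), pop_and_reclaim() and node_destroy() are invented names): the pop happens inside an epoch section, the free is deferred with ck_epoch_call(), and ck_epoch_poll() attempts reclamation without blocking on a grace period.

#include <stdlib.h>

#include <ck_epoch.h>
#include <ck_stack.h>

struct node {
	ck_stack_entry_t stack_entry;
	ck_epoch_entry_t epoch_entry;
};
CK_STACK_CONTAINER(struct node, stack_entry, node_stack_container)
CK_EPOCH_CONTAINER(struct node, epoch_entry, node_epoch_container)

static ck_stack_t stack = CK_STACK_INITIALIZER;
static ck_epoch_t epoch;
static ck_epoch_record_t record;

static void
node_destroy(ck_epoch_entry_t *p)
{

	free(node_epoch_container(p));
	return;
}

static void
setup(void)
{

	ck_epoch_init(&epoch);
	ck_epoch_register(&epoch, &record);
	return;
}

static void
pop_and_reclaim(void)
{
	ck_stack_entry_t *s;

	ck_epoch_begin(&record, NULL);
	s = ck_stack_pop_upmc(&stack);
	ck_epoch_end(&record, NULL);

	if (s != NULL) {
		ck_epoch_call(&record,
		    &node_stack_container(s)->epoch_entry, node_destroy);
	}

	ck_epoch_poll(&record);		/* non-blocking reclamation attempt */
	return;
}
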
diff --git a/regressions/ck_epoch/validate/ck_epoch_section.c b/regressions/ck_epoch/validate/ck_epoch_section.c
new file mode 100644
index 0000000..12bcca1
--- /dev/null
+++ b/regressions/ck_epoch/validate/ck_epoch_section.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2015 John Esmet.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <ck_epoch.h>
+
+#include "../../common.h"
+
+static ck_epoch_t epc;
+static ck_epoch_record_t record, record2;
+static unsigned int cleanup_calls;
+
+static void
+setup_test(void)
+{
+
+ ck_epoch_init(&epc);
+ ck_epoch_register(&epc, &record);
+ ck_epoch_register(&epc, &record2);
+ cleanup_calls = 0;
+
+ return;
+}
+
+static void
+teardown_test(void)
+{
+
+ memset(&epc, 0, sizeof(ck_epoch_t));
+ ck_epoch_unregister(&record);
+ memset(&record, 0, sizeof(ck_epoch_record_t));
+ memset(&record2, 0, sizeof(ck_epoch_record_t));
+ cleanup_calls = 0;
+
+ return;
+}
+
+static void
+cleanup(ck_epoch_entry_t *e)
+{
+ (void) e;
+
+ cleanup_calls++;
+
+ return;
+}
+
+static void
+test_simple_read_section(void)
+{
+ ck_epoch_entry_t entry;
+ ck_epoch_section_t section;
+
+ memset(&entry, 0, sizeof(ck_epoch_entry_t));
+ setup_test();
+
+ ck_epoch_begin(&record, &section);
+ ck_epoch_call(&record, &entry, cleanup);
+ assert(cleanup_calls == 0);
+ ck_epoch_end(&record, &section);
+ ck_epoch_barrier(&record);
+ assert(cleanup_calls == 1);
+
+ teardown_test();
+ return;
+}
+
+static void
+test_nested_read_section(void)
+{
+ ck_epoch_entry_t entry1, entry2;
+ ck_epoch_section_t section1, section2;
+
+ memset(&entry1, 0, sizeof(ck_epoch_entry_t));
+ memset(&entry2, 0, sizeof(ck_epoch_entry_t));
+ setup_test();
+
+ ck_epoch_begin(&record, &section1);
+ ck_epoch_call(&record, &entry1, cleanup);
+ assert(cleanup_calls == 0);
+
+ ck_epoch_begin(&record, &section2);
+ ck_epoch_call(&record, &entry2, cleanup);
+ assert(cleanup_calls == 0);
+
+ ck_epoch_end(&record, &section2);
+ assert(cleanup_calls == 0);
+
+ ck_epoch_end(&record, &section1);
+ assert(cleanup_calls == 0);
+
+ ck_epoch_barrier(&record);
+ assert(cleanup_calls == 2);
+
+ teardown_test();
+ return;
+}
+
+struct obj {
+ ck_epoch_entry_t entry;
+ unsigned int destroyed;
+};
+
+static void *
+barrier_work(void *arg)
+{
+ unsigned int *run;
+
+ run = (unsigned int *)arg;
+ while (ck_pr_load_uint(run) != 0) {
+ /*
+ * Need to use record2, as record is local
+ * to the test thread.
+ */
+ ck_epoch_barrier(&record2);
+ usleep(5 * 1000);
+ }
+
+ return NULL;
+}
+
+static void *
+reader_work(void *arg)
+{
+ ck_epoch_record_t local_record;
+ ck_epoch_section_t section;
+ struct obj *o;
+
+ ck_epoch_register(&epc, &local_record);
+
+ o = (struct obj *)arg;
+
+ /*
+ * Begin a read section. The calling thread has an open read section,
+ * so the object should not be destroyed for the lifetime of this
+ * thread.
+ */
+ ck_epoch_begin(&local_record, &section);
+ usleep((common_rand() % 100) * 1000);
+ assert(ck_pr_load_uint(&o->destroyed) == 0);
+ ck_epoch_end(&local_record, &section);
+
+ ck_epoch_unregister(&local_record);
+
+ return NULL;
+}
+
+static void
+obj_destroy(ck_epoch_entry_t *e)
+{
+ struct obj *o;
+
+ o = (struct obj *)e;
+ ck_pr_fas_uint(&o->destroyed, 1);
+
+ return;
+}
+
+static void
+test_single_reader_with_barrier_thread(void)
+{
+ const int num_sections = 10;
+ struct obj o;
+ unsigned int run;
+ pthread_t thread;
+ ck_epoch_section_t sections[num_sections];
+ int shuffled[num_sections];
+
+ run = 1;
+ memset(&o, 0, sizeof(struct obj));
+ common_srand(time(NULL));
+ setup_test();
+
+ if (pthread_create(&thread, NULL, barrier_work, &run) != 0) {
+ abort();
+ }
+
+ /* Start a bunch of sections. */
+ for (int i = 0; i < num_sections; i++) {
+ ck_epoch_begin(&record, &sections[i]);
+ shuffled[i] = i;
+ if (i == num_sections / 2) {
+ usleep(1 * 1000);
+ }
+ }
+
+ /* Generate a shuffle. */
+ for (int i = num_sections - 1; i >= 0; i--) {
+ int k = common_rand() % (i + 1);
+ int tmp = shuffled[k];
+ shuffled[k] = shuffled[i];
+ shuffled[i] = tmp;
+ }
+
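+	/*
+	 * Defer destruction; the callback cannot run until every section
+	 * opened above has been closed.
+	 */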
+ ck_epoch_call(&record, &o.entry, obj_destroy);
+
+ /* Close the sections in shuffle-order. */
+ for (int i = 0; i < num_sections; i++) {
+ ck_epoch_end(&record, &sections[shuffled[i]]);
+ if (i != num_sections - 1) {
+ assert(ck_pr_load_uint(&o.destroyed) == 0);
+ usleep(3 * 1000);
+ }
+ }
+
+ ck_pr_store_uint(&run, 0);
+ if (pthread_join(thread, NULL) != 0) {
+ abort();
+ }
+
+ ck_epoch_barrier(&record);
+ assert(ck_pr_load_uint(&o.destroyed) == 1);
+
+ teardown_test();
+
+ return;
+}
+
+static void
+test_multiple_readers_with_barrier_thread(void)
+{
+ const int num_readers = 10;
+ struct obj o;
+ unsigned int run;
+ ck_epoch_section_t section;
+ pthread_t threads[num_readers + 1];
+
+ run = 1;
+ memset(&o, 0, sizeof(struct obj));
+ memset(&section, 0, sizeof(ck_epoch_section_t));
+ common_srand(time(NULL));
+ setup_test();
+
+	/*
+	 * Create a thread that calls barrier() while we create the reader
+	 * threads. Each barrier attempts to move the global epoch forward,
+	 * which makes the read-section code coverage more interesting.
+	 */
+ if (pthread_create(&threads[num_readers], NULL,
+ barrier_work, &run) != 0) {
+ abort();
+ }
+
+ ck_epoch_begin(&record, &section);
+ ck_epoch_call(&record, &o.entry, obj_destroy);
+
+ for (int i = 0; i < num_readers; i++) {
+ if (pthread_create(&threads[i], NULL, reader_work, &o) != 0) {
+ abort();
+ }
+ }
+
+ ck_epoch_end(&record, &section);
+
+ ck_pr_store_uint(&run, 0);
+ if (pthread_join(threads[num_readers], NULL) != 0) {
+ abort();
+ }
+
+	/*
+	 * After the barrier, the object should be destroyed and readers
+	 * should return.
+	 */
+ for (int i = 0; i < num_readers; i++) {
+ if (pthread_join(threads[i], NULL) != 0) {
+ abort();
+ }
+ }
+
+ teardown_test();
+ return;
+}
+
+int
+main(void)
+{
+
+ test_simple_read_section();
+ test_nested_read_section();
+ test_single_reader_with_barrier_thread();
+ test_multiple_readers_with_barrier_thread();
+
+ return 0;
+}
diff --git a/regressions/ck_epoch/validate/ck_epoch_section_2.c b/regressions/ck_epoch/validate/ck_epoch_section_2.c
new file mode 100644
index 0000000..aed3661
--- /dev/null
+++ b/regressions/ck_epoch/validate/ck_epoch_section_2.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+#include <ck_epoch.h>
+#include <ck_stack.h>
+
+#include "../../common.h"
+
+static unsigned int n_rd;
+static unsigned int n_wr;
+static unsigned int n_threads;
+static unsigned int barrier;
+static unsigned int leave;
+
+#ifndef PAIRS_S
+#define PAIRS_S 10000
+#endif
+
+#ifndef CK_EPOCH_T_DEPTH
+#define CK_EPOCH_T_DEPTH 8
+#endif
+
+static ck_epoch_t epoch;
+static struct affinity a;
+
+static void *
+read_thread(void *unused CK_CC_UNUSED)
+{
+ ck_epoch_record_t *record;
+ unsigned long long i = 0;
+
+ record = malloc(sizeof *record);
+ assert(record != NULL);
+ ck_epoch_register(&epoch, record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ for (;;) {
+ ck_epoch_section_t section[2];
+ ck_epoch_section_t junk[CK_EPOCH_T_DEPTH];
+ unsigned int j;
+
+ ck_epoch_begin(record, &section[0]);
+
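+		/* Exercise nested begin/end pairs up to the tracked section depth. */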
+ for (j = 0; j < CK_EPOCH_T_DEPTH; j++)
+ ck_epoch_begin(record, &junk[j]);
+ for (j = 0; j < CK_EPOCH_T_DEPTH; j++)
+ ck_epoch_end(record, &junk[j]);
+
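+		/* Close the section left open by the previous iteration. */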
+ if (i > 0)
+ ck_epoch_end(record, &section[1]);
+
+ /* Wait for the next synchronize operation. */
+ while ((ck_pr_load_uint(&epoch.epoch) & 1) ==
+ section[0].bucket) {
+ i++;
+
+ if (!(i % 10000000)) {
+ fprintf(stderr, "%u %u %u\n",
+ ck_pr_load_uint(&epoch.epoch),
+ section[0].bucket, record->epoch);
+ }
+
+ while ((ck_pr_load_uint(&epoch.epoch) & 1) ==
+ section[0].bucket) {
+ if (ck_pr_load_uint(&leave) == 1)
+ break;
+
+ ck_pr_stall();
+ }
+ }
+
+ ck_epoch_begin(record, &section[1]);
+
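+		/* The new section must land in the other epoch bucket. */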
+ assert(section[0].bucket != section[1].bucket);
+ ck_epoch_end(record, &section[0]);
+
+ assert(ck_pr_load_uint(&record->active) > 0);
+
+ if (ck_pr_load_uint(&leave) == 1) {
+ ck_epoch_end(record, &section[1]);
+ break;
+ }
+
+ i++;
+ }
+
+ return NULL;
+}
+
+static void *
+write_thread(void *unused CK_CC_UNUSED)
+{
+ ck_epoch_record_t record;
+ unsigned long iterations = 0;
+
+ ck_epoch_register(&epoch, &record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ for (;;) {
+ if (!(iterations % 1048575))
+ fprintf(stderr, ".");
+
+ ck_epoch_synchronize(&record);
+ iterations++;
+
+ if (ck_pr_load_uint(&leave) == 1)
+ break;
+ }
+
+ fprintf(stderr, "%lu iterations\n", iterations);
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int i;
+ pthread_t *threads;
+
+ if (argc != 4) {
+		ck_error("Usage: ck_epoch_section_2 <#readers> <#writers> <affinity delta>\n");
+ }
+
+ n_rd = atoi(argv[1]);
+ n_wr = atoi(argv[2]);
+ n_threads = n_wr + n_rd;
+
+ a.delta = atoi(argv[3]);
+ a.request = 0;
+
+ threads = malloc(sizeof(pthread_t) * n_threads);
+ ck_epoch_init(&epoch);
+
+ for (i = 0; i < n_rd; i++)
+ pthread_create(threads + i, NULL, read_thread, NULL);
+
+ do {
+ pthread_create(threads + i, NULL, write_thread, NULL);
+ } while (++i < n_wr + n_rd);
+
+ common_sleep(10);
+ ck_pr_store_uint(&leave, 1);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ return (0);
+}
diff --git a/regressions/ck_epoch/validate/ck_epoch_synchronize.c b/regressions/ck_epoch/validate/ck_epoch_synchronize.c
new file mode 100644
index 0000000..a03a4f7
--- /dev/null
+++ b/regressions/ck_epoch/validate/ck_epoch_synchronize.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+#include <ck_epoch.h>
+#include <ck_stack.h>
+
+#include "../../common.h"
+
+static unsigned int n_rd;
+static unsigned int n_wr;
+static unsigned int n_threads;
+static unsigned int barrier;
+static unsigned int e_barrier;
+static unsigned int readers;
+static unsigned int writers;
+
+#ifndef PAIRS_S
+#define PAIRS_S 10000
+#endif
+
+#ifndef ITERATE_S
+#define ITERATE_S 20
+#endif
+
+struct node {
+ unsigned int value;
+ ck_stack_entry_t stack_entry;
+ ck_epoch_entry_t epoch_entry;
+};
+static ck_stack_t stack = CK_STACK_INITIALIZER;
+static ck_epoch_t stack_epoch;
+CK_STACK_CONTAINER(struct node, stack_entry, stack_container)
+CK_EPOCH_CONTAINER(struct node, epoch_entry, epoch_container)
+static struct affinity a;
+static const char animate[] = "-/|\\";
+
+static void
+destructor(ck_epoch_entry_t *p)
+{
+ struct node *e = epoch_container(p);
+
+ free(e);
+ return;
+}
+
+static void *
+read_thread(void *unused CK_CC_UNUSED)
+{
+ unsigned int j;
+ ck_epoch_record_t record CK_CC_CACHELINE;
+ ck_stack_entry_t *cursor;
+ ck_stack_entry_t *n;
+ unsigned int i;
+
+ ck_epoch_register(&stack_epoch, &record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ while (CK_STACK_ISEMPTY(&stack) == true) {
+ if (ck_pr_load_uint(&readers) != 0)
+ break;
+
+ ck_pr_stall();
+ }
+
+ j = 0;
+ for (;;) {
+ i = 0;
+
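+		/*
+		 * Traverse a bounded prefix of the stack inside a read-side
+		 * section; nodes observed here must not be reclaimed while the
+		 * section is open.
+		 */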
+ ck_epoch_begin(&record, NULL);
+ CK_STACK_FOREACH(&stack, cursor) {
+ if (cursor == NULL)
+ continue;
+
+ n = CK_STACK_NEXT(cursor);
+ j += ck_pr_load_ptr(&n) != NULL;
+
+ if (i++ > 4098)
+ break;
+ }
+ ck_epoch_end(&record, NULL);
+
+ if (j != 0 && ck_pr_load_uint(&readers) == 0)
+ ck_pr_store_uint(&readers, 1);
+
+ if (CK_STACK_ISEMPTY(&stack) == true &&
+ ck_pr_load_uint(&e_barrier) != 0)
+ break;
+ }
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < n_threads);
+
+ fprintf(stderr, "[R] Observed entries: %u\n", j);
+ return (NULL);
+}
+
+static void *
+write_thread(void *unused CK_CC_UNUSED)
+{
+ struct node **entry, *e;
+ unsigned int i, j, tid;
+ ck_epoch_record_t record;
+ ck_stack_entry_t *s;
+
+ ck_epoch_register(&stack_epoch, &record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_uint(&writers, 1);
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ entry = malloc(sizeof(struct node *) * PAIRS_S);
+ if (entry == NULL) {
+ ck_error("Failed allocation.\n");
+ }
+
+ for (j = 0; j < ITERATE_S; j++) {
+ for (i = 0; i < PAIRS_S; i++) {
+ entry[i] = malloc(sizeof(struct node));
+			if (entry[i] == NULL) {
+ ck_error("Failed individual allocation\n");
+ }
+ }
+
+ for (i = 0; i < PAIRS_S; i++) {
+ ck_stack_push_upmc(&stack, &entry[i]->stack_entry);
+ }
+
+ while (ck_pr_load_uint(&readers) == 0)
+ ck_pr_stall();
+
+ for (i = 0; i < PAIRS_S; i++) {
+ ck_epoch_begin(&record, NULL);
+ s = ck_stack_pop_upmc(&stack);
+ e = stack_container(s);
+ ck_epoch_end(&record, NULL);
+
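+			/*
+			 * Alternate between deferring destruction with
+			 * ck_epoch_call() after a synchronize and destroying
+			 * immediately after a full barrier.
+			 */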
+ if (i & 1) {
+ ck_epoch_synchronize(&record);
+ ck_epoch_reclaim(&record);
+ ck_epoch_call(&record, &e->epoch_entry, destructor);
+ } else {
+ ck_epoch_barrier(&record);
+ destructor(&e->epoch_entry);
+ }
+
+ if (tid == 0 && (i % 16384) == 0) {
+ fprintf(stderr, "[W] %2.2f: %c\n",
+ (double)j / ITERATE_S, animate[i % strlen(animate)]);
+ }
+ }
+ }
+
+ ck_epoch_synchronize(&record);
+
+ if (tid == 0) {
+ fprintf(stderr, "[W] Peak: %u (%2.2f%%)\n Reclamations: %lu\n\n",
+ record.n_peak,
+ (double)record.n_peak / ((double)PAIRS_S * ITERATE_S) * 100,
+ record.n_dispatch);
+ }
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < n_threads);
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int i;
+ pthread_t *threads;
+
+ if (argc != 4) {
+		ck_error("Usage: ck_epoch_synchronize <#readers> <#writers> <affinity delta>\n");
+ }
+
+ n_rd = atoi(argv[1]);
+ n_wr = atoi(argv[2]);
+ n_threads = n_wr + n_rd;
+
+ a.delta = atoi(argv[3]);
+ a.request = 0;
+
+ threads = malloc(sizeof(pthread_t) * n_threads);
+ ck_epoch_init(&stack_epoch);
+
+ for (i = 0; i < n_rd; i++)
+ pthread_create(threads + i, NULL, read_thread, NULL);
+
+ do {
+ pthread_create(threads + i, NULL, write_thread, NULL);
+ } while (++i < n_wr + n_rd);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ return (0);
+}
diff --git a/regressions/ck_epoch/validate/ck_stack.c b/regressions/ck_epoch/validate/ck_stack.c
new file mode 100644
index 0000000..fc50228
--- /dev/null
+++ b/regressions/ck_epoch/validate/ck_stack.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <ck_epoch.h>
+#include <ck_stack.h>
+
+#include "../../common.h"
+
+static unsigned int n_threads;
+static unsigned int barrier;
+static unsigned int e_barrier;
+
+#ifndef PAIRS
+#define PAIRS 5000000
+#endif
+
+struct node {
+ unsigned int value;
+ ck_epoch_entry_t epoch_entry;
+ ck_stack_entry_t stack_entry;
+};
+static ck_stack_t stack = {NULL, NULL};
+static ck_epoch_t stack_epoch;
+CK_STACK_CONTAINER(struct node, stack_entry, stack_container)
+CK_EPOCH_CONTAINER(struct node, epoch_entry, epoch_container)
+static struct affinity a;
+
+static void
+destructor(ck_epoch_entry_t *p)
+{
+ struct node *e = epoch_container(p);
+
+ free(e);
+ return;
+}
+
+static void *
+thread(void *unused CK_CC_UNUSED)
+{
+ struct node **entry, *e;
+ ck_epoch_record_t record;
+ ck_stack_entry_t *s;
+ unsigned long smr = 0;
+ unsigned int i;
+
+ ck_epoch_register(&stack_epoch, &record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ entry = malloc(sizeof(struct node *) * PAIRS);
+ if (entry == NULL) {
+ ck_error("Failed allocation.\n");
+ }
+
+ for (i = 0; i < PAIRS; i++) {
+ entry[i] = malloc(sizeof(struct node));
+		if (entry[i] == NULL) {
+ ck_error("Failed individual allocation\n");
+ }
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ for (i = 0; i < PAIRS; i++) {
+ ck_epoch_begin(&record, NULL);
+ ck_stack_push_upmc(&stack, &entry[i]->stack_entry);
+ s = ck_stack_pop_upmc(&stack);
+ ck_epoch_end(&record, NULL);
+
+ e = stack_container(s);
+ ck_epoch_call(&record, &e->epoch_entry, destructor);
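+		/* Count deferrals: iterations where ck_epoch_poll() could not reclaim. */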
+ smr += ck_epoch_poll(&record) == false;
+ }
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < n_threads);
+
+ fprintf(stderr, "Deferrals: %lu (%2.2f)\n", smr, (double)smr / PAIRS);
+ fprintf(stderr, "Peak: %u (%2.2f%%), %u pending\nReclamations: %lu\n\n",
+ record.n_peak,
+ (double)record.n_peak / PAIRS * 100,
+ record.n_pending,
+ record.n_dispatch);
+
+ ck_epoch_barrier(&record);
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < (n_threads << 1));
+
+ if (record.n_pending != 0) {
+ ck_error("ERROR: %u pending, expecting none.\n",
+ record.n_pending);
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int i;
+ pthread_t *threads;
+
+ if (argc != 3) {
+ ck_error("Usage: stack <threads> <affinity delta>\n");
+ }
+
+ n_threads = atoi(argv[1]);
+ a.delta = atoi(argv[2]);
+ a.request = 0;
+
+ threads = malloc(sizeof(pthread_t) * n_threads);
+
+ ck_epoch_init(&stack_epoch);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_create(threads + i, NULL, thread, NULL);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ return (0);
+}
diff --git a/regressions/ck_epoch/validate/torture.c b/regressions/ck_epoch/validate/torture.c
new file mode 100644
index 0000000..ce3c049
--- /dev/null
+++ b/regressions/ck_epoch/validate/torture.c
@@ -0,0 +1,234 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+#include <ck_epoch.h>
+#include <ck_stack.h>
+
+#include "../../common.h"
+
+static unsigned int n_rd;
+static unsigned int n_wr;
+static unsigned int n_threads;
+static unsigned int barrier;
+static unsigned int leave;
+static unsigned int first;
+
+struct {
+ unsigned int value;
+} valid CK_CC_CACHELINE = { 1 };
+
+struct {
+ unsigned int value;
+} invalid CK_CC_CACHELINE;
+
+#ifndef PAIRS_S
+#define PAIRS_S 10000
+#endif
+
+#ifndef CK_EPOCH_T_DEPTH
+#define CK_EPOCH_T_DEPTH 8
+#endif
+
+static ck_epoch_t epoch;
+static struct affinity a;
+
+static void
+test(struct ck_epoch_record *record)
+{
+ unsigned int j[3];
+ unsigned int b, c;
+ const unsigned int r = 100;
+ size_t i;
+
+ for (i = 0; i < 8; i++) {
+ ck_epoch_begin(record, NULL);
+ c = ck_pr_load_uint(&invalid.value);
+ ck_pr_fence_load();
+ b = ck_pr_load_uint(&valid.value);
+ ck_test(c > b, "Invalid value: %u > %u\n", c, b);
+ ck_epoch_end(record, NULL);
+ }
+
+ ck_epoch_begin(record, NULL);
+
+ /* This implies no early load of epoch occurs. */
+ j[0] = record->epoch;
+
+ /* We should observe up to one epoch migration. */
+ do {
+ ck_pr_fence_load();
+ j[1] = ck_pr_load_uint(&epoch.epoch);
+
+ if (ck_pr_load_uint(&leave) == 1) {
+ ck_epoch_end(record, NULL);
+ return;
+ }
+ } while (j[1] == j[0]);
+
+	/* No more epoch migrations should occur. */
+ for (i = 0; i < r; i++) {
+ ck_pr_fence_strict_load();
+ j[2] = ck_pr_load_uint(&epoch.epoch);
+
+ ck_test(j[2] != j[1], "Inconsistency detected: %u %u %u\n",
+ j[0], j[1], j[2]);
+ }
+
+ ck_epoch_end(record, NULL);
+ return;
+}
+
+static void *
+read_thread(void *unused CK_CC_UNUSED)
+{
+ ck_epoch_record_t *record;
+
+ record = malloc(sizeof *record);
+ assert(record != NULL);
+ ck_epoch_register(&epoch, record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ do {
+ test(record);
+ test(record);
+ test(record);
+ test(record);
+ } while (ck_pr_load_uint(&leave) == 0);
+
+ ck_pr_dec_uint(&n_rd);
+
+ return NULL;
+}
+
+static void *
+write_thread(void *unused CK_CC_UNUSED)
+{
+ ck_epoch_record_t *record;
+ unsigned long iterations = 0;
+ bool c = ck_pr_faa_uint(&first, 1);
+
+ record = malloc(sizeof *record);
+ assert(record != NULL);
+ ck_epoch_register(&epoch, record);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads);
+
+ do {
+ /*
+		 * A thread should never observe invalid.value > valid.value
+		 * inside a protected section; only invalid.value <= valid.value
+		 * is valid.
+ */
+ if (!c) ck_pr_store_uint(&valid.value, 1);
+ ck_epoch_synchronize(record);
+ if (!c) ck_pr_store_uint(&invalid.value, 1);
+
+ ck_pr_fence_store();
+ if (!c) ck_pr_store_uint(&valid.value, 2);
+ ck_epoch_synchronize(record);
+ if (!c) ck_pr_store_uint(&invalid.value, 2);
+
+ ck_pr_fence_store();
+ if (!c) ck_pr_store_uint(&valid.value, 3);
+ ck_epoch_synchronize(record);
+ if (!c) ck_pr_store_uint(&invalid.value, 3);
+
+ ck_pr_fence_store();
+ if (!c) ck_pr_store_uint(&valid.value, 4);
+ ck_epoch_synchronize(record);
+ if (!c) ck_pr_store_uint(&invalid.value, 4);
+
+ ck_epoch_synchronize(record);
+ if (!c) ck_pr_store_uint(&invalid.value, 0);
+ ck_epoch_synchronize(record);
+
+ iterations += 4;
+ } while (ck_pr_load_uint(&leave) == 0 &&
+ ck_pr_load_uint(&n_rd) > 0);
+
+ fprintf(stderr, "%lu iterations\n", iterations);
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int i;
+ pthread_t *threads;
+
+ if (argc != 4) {
+		ck_error("Usage: torture <#readers> <#writers> <affinity delta>\n");
+ }
+
+ n_rd = atoi(argv[1]);
+ n_wr = atoi(argv[2]);
+ n_threads = n_wr + n_rd;
+
+ a.delta = atoi(argv[3]);
+ a.request = 0;
+
+ threads = malloc(sizeof(pthread_t) * n_threads);
+ ck_epoch_init(&epoch);
+
+ for (i = 0; i < n_rd; i++)
+ pthread_create(threads + i, NULL, read_thread, NULL);
+
+ do {
+ pthread_create(threads + i, NULL, write_thread, NULL);
+ } while (++i < n_wr + n_rd);
+
+ common_sleep(30);
+ ck_pr_store_uint(&leave, 1);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ return 0;
+}
diff --git a/regressions/ck_fifo/benchmark/Makefile b/regressions/ck_fifo/benchmark/Makefile
new file mode 100644
index 0000000..6e2df2a
--- /dev/null
+++ b/regressions/ck_fifo/benchmark/Makefile
@@ -0,0 +1,14 @@
+.PHONY: clean distribution
+
+OBJECTS=latency
+
+all: $(OBJECTS)
+
+latency: latency.c
+ $(CC) $(CFLAGS) -o latency latency.c
+
+clean:
+ rm -rf *~ *.o *.dSYM *.exe $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_fifo/benchmark/latency.c b/regressions/ck_fifo/benchmark/latency.c
new file mode 100644
index 0000000..267452f
--- /dev/null
+++ b/regressions/ck_fifo/benchmark/latency.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_fifo.h>
+#include <ck_spinlock.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#ifndef ENTRIES
+#define ENTRIES 4096
+#endif
+
+#ifndef STEPS
+#define STEPS 40000
+#endif
+
+int
+main(void)
+{
+ ck_spinlock_fas_t mutex = CK_SPINLOCK_FAS_INITIALIZER;
+ void *r;
+ uint64_t s, e, a;
+ unsigned int i;
+ unsigned int j;
+
+#if defined(CK_F_FIFO_SPSC)
+ ck_fifo_spsc_t spsc_fifo;
+ ck_fifo_spsc_entry_t spsc_entry[ENTRIES];
+ ck_fifo_spsc_entry_t spsc_stub;
+#endif
+
+#if defined(CK_F_FIFO_MPMC)
+ ck_fifo_mpmc_t mpmc_fifo;
+ ck_fifo_mpmc_entry_t mpmc_entry[ENTRIES];
+ ck_fifo_mpmc_entry_t mpmc_stub;
+ ck_fifo_mpmc_entry_t *garbage;
+#endif
+
+#ifdef CK_F_FIFO_SPSC
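+	/*
+	 * Each figure below is the average rdtsc cycle count per operation
+	 * over STEPS rounds of ENTRIES operations; the spinlock_* variants
+	 * wrap the SPSC operations in a test-and-set spinlock as a baseline.
+	 */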
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_fifo_spsc_init(&spsc_fifo, &spsc_stub);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++) {
+ ck_spinlock_fas_lock(&mutex);
+ ck_fifo_spsc_enqueue(&spsc_fifo, spsc_entry + j, NULL);
+ ck_spinlock_fas_unlock(&mutex);
+ }
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf(" spinlock_enqueue: %16" PRIu64 "\n", a / STEPS / (sizeof(spsc_entry) / sizeof(*spsc_entry)));
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_fifo_spsc_init(&spsc_fifo, &spsc_stub);
+ for (j = 0; j < ENTRIES; j++)
+ ck_fifo_spsc_enqueue(&spsc_fifo, spsc_entry + j, NULL);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++) {
+ ck_spinlock_fas_lock(&mutex);
+ ck_fifo_spsc_dequeue(&spsc_fifo, &r);
+ ck_spinlock_fas_unlock(&mutex);
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ printf(" spinlock_dequeue: %16" PRIu64 "\n", a / STEPS / (sizeof(spsc_entry) / sizeof(*spsc_entry)));
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_fifo_spsc_init(&spsc_fifo, &spsc_stub);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_fifo_spsc_enqueue(&spsc_fifo, spsc_entry + j, NULL);
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf("ck_fifo_spsc_enqueue: %16" PRIu64 "\n", a / STEPS / (sizeof(spsc_entry) / sizeof(*spsc_entry)));
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_fifo_spsc_init(&spsc_fifo, &spsc_stub);
+ for (j = 0; j < ENTRIES; j++)
+ ck_fifo_spsc_enqueue(&spsc_fifo, spsc_entry + j, NULL);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_fifo_spsc_dequeue(&spsc_fifo, &r);
+ e = rdtsc();
+ a += e - s;
+ }
+ printf("ck_fifo_spsc_dequeue: %16" PRIu64 "\n", a / STEPS / (sizeof(spsc_entry) / sizeof(*spsc_entry)));
+#endif
+
+#ifdef CK_F_FIFO_MPMC
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_fifo_mpmc_init(&mpmc_fifo, &mpmc_stub);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_fifo_mpmc_enqueue(&mpmc_fifo, mpmc_entry + j, NULL);
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf("ck_fifo_mpmc_enqueue: %16" PRIu64 "\n", a / STEPS / (sizeof(mpmc_entry) / sizeof(*mpmc_entry)));
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_fifo_mpmc_init(&mpmc_fifo, &mpmc_stub);
+ for (j = 0; j < ENTRIES; j++)
+ ck_fifo_mpmc_enqueue(&mpmc_fifo, mpmc_entry + j, NULL);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_fifo_mpmc_dequeue(&mpmc_fifo, &r, &garbage);
+ e = rdtsc();
+ a += e - s;
+ }
+ printf("ck_fifo_mpmc_dequeue: %16" PRIu64 "\n", a / STEPS / (sizeof(mpmc_entry) / sizeof(*mpmc_entry)));
+#endif
+
+ return 0;
+}
diff --git a/regressions/ck_fifo/validate/Makefile b/regressions/ck_fifo/validate/Makefile
new file mode 100644
index 0000000..6bfc696
--- /dev/null
+++ b/regressions/ck_fifo/validate/Makefile
@@ -0,0 +1,29 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_fifo_spsc ck_fifo_mpmc ck_fifo_spsc_iterator ck_fifo_mpmc_iterator
+
+all: $(OBJECTS)
+
+check: all
+ ./ck_fifo_spsc $(CORES) 1 64000
+ ./ck_fifo_mpmc $(CORES) 1 16000
+ ./ck_fifo_spsc_iterator
+ ./ck_fifo_mpmc_iterator
+
+ck_fifo_spsc: ck_fifo_spsc.c ../../../include/ck_fifo.h
+ $(CC) $(CFLAGS) -o ck_fifo_spsc ck_fifo_spsc.c
+
+ck_fifo_mpmc: ck_fifo_mpmc.c ../../../include/ck_fifo.h
+ $(CC) $(CFLAGS) -o ck_fifo_mpmc ck_fifo_mpmc.c
+
+ck_fifo_spsc_iterator: ck_fifo_spsc_iterator.c ../../../include/ck_fifo.h
+ $(CC) $(CFLAGS) -o ck_fifo_spsc_iterator ck_fifo_spsc_iterator.c
+
+ck_fifo_mpmc_iterator: ck_fifo_mpmc_iterator.c ../../../include/ck_fifo.h
+ $(CC) $(CFLAGS) -o ck_fifo_mpmc_iterator ck_fifo_mpmc_iterator.c
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_fifo/validate/ck_fifo_mpmc.c b/regressions/ck_fifo/validate/ck_fifo_mpmc.c
new file mode 100644
index 0000000..89eb2f4
--- /dev/null
+++ b/regressions/ck_fifo/validate/ck_fifo_mpmc.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <ck_fifo.h>
+
+#include "../../common.h"
+
+#ifdef CK_F_FIFO_MPMC
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+};
+
+struct entry {
+ int tid;
+ int value;
+};
+
+static int nthr;
+
+#ifdef CK_F_FIFO_MPMC
+static ck_fifo_mpmc_t fifo CK_CC_CACHELINE;
+#endif
+
+static struct affinity a;
+static int size;
+static unsigned int barrier;
+
+static void *
+test(void *c)
+{
+#ifdef CK_F_FIFO_MPMC
+ struct context *context = c;
+ struct entry *entry;
+ ck_fifo_mpmc_entry_t *fifo_entry, *garbage;
+ int i, j;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < (unsigned int)nthr);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ fifo_entry = malloc(sizeof(ck_fifo_mpmc_entry_t));
+ entry = malloc(sizeof(struct entry));
+ entry->tid = context->tid;
+ ck_fifo_mpmc_enqueue(&fifo, fifo_entry, entry);
+ if (ck_fifo_mpmc_dequeue(&fifo, &entry, &garbage) == false) {
+ ck_error("ERROR [%u] Queue should never be empty.\n", context->tid);
+ }
+
+ if (entry->tid < 0 || entry->tid >= nthr) {
+ ck_error("ERROR [%u] Incorrect value in entry.\n", entry->tid);
+ }
+ }
+ }
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ fifo_entry = malloc(sizeof(ck_fifo_mpmc_entry_t));
+ entry = malloc(sizeof(struct entry));
+ entry->tid = context->tid;
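+			/* Spin on the non-blocking try interface until it succeeds. */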
+ while (ck_fifo_mpmc_tryenqueue(&fifo, fifo_entry, entry) == false)
+ ck_pr_stall();
+
+ while (ck_fifo_mpmc_trydequeue(&fifo, &entry, &garbage) == false)
+ ck_pr_stall();
+
+ if (entry->tid < 0 || entry->tid >= nthr) {
+ ck_error("ERROR [%u] Incorrect value in entry when using try interface.\n", entry->tid);
+ }
+ }
+ }
+#endif
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ struct context *context;
+ ck_fifo_mpmc_entry_t *garbage;
+ pthread_t *thread;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size > 0);
+
+ context = malloc(sizeof(*context) * nthr);
+ assert(context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
+ ck_fifo_mpmc_init(&fifo, malloc(sizeof(ck_fifo_mpmc_entry_t)));
+ ck_fifo_mpmc_deinit(&fifo, &garbage);
+ if (garbage == NULL)
+ ck_error("ERROR: Expected non-NULL stub node on deinit.\n");
+ free(garbage);
+ ck_fifo_mpmc_init(&fifo, malloc(sizeof(ck_fifo_mpmc_entry_t)));
+
+ for (i = 0; i < nthr; i++) {
+ context[i].tid = i;
+ r = pthread_create(thread + i, NULL, test, context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ return (0);
+}
+#else
+int
+main(void)
+{
+ fprintf(stderr, "Unsupported.\n");
+ return 0;
+}
+#endif
+
diff --git a/regressions/ck_fifo/validate/ck_fifo_mpmc_iterator.c b/regressions/ck_fifo/validate/ck_fifo_mpmc_iterator.c
new file mode 100644
index 0000000..5ac8175
--- /dev/null
+++ b/regressions/ck_fifo/validate/ck_fifo_mpmc_iterator.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_fifo.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#ifdef CK_F_FIFO_MPMC
+struct example {
+ int x;
+};
+
+static ck_fifo_mpmc_t mpmc_fifo;
+
+int
+main(void)
+{
+ int i, length = 3;
+ struct example *examples;
+ ck_fifo_mpmc_entry_t *stub, *entries, *entry, *next;
+
+ stub = malloc(sizeof(ck_fifo_mpmc_entry_t));
+ if (stub == NULL)
+ exit(EXIT_FAILURE);
+
+ ck_fifo_mpmc_init(&mpmc_fifo, stub);
+
+ entries = malloc(sizeof(ck_fifo_mpmc_entry_t) * length);
+ if (entries == NULL)
+ exit(EXIT_FAILURE);
+
+ examples = malloc(sizeof(struct example) * length);
+ /* Need these for this unit test. */
+ if (examples == NULL)
+ exit(EXIT_FAILURE);
+
+ for (i = 0; i < length; ++i) {
+ examples[i].x = i;
+ ck_fifo_mpmc_enqueue(&mpmc_fifo, entries + i, examples + i);
+ }
+
+ puts("Testing CK_FIFO_MPMC_FOREACH.");
+ CK_FIFO_MPMC_FOREACH(&mpmc_fifo, entry) {
+ printf("Next value in fifo: %d\n", ((struct example *)entry->value)->x);
+ }
+
+ puts("Testing CK_FIFO_MPMC_FOREACH_SAFE.");
+ CK_FIFO_MPMC_FOREACH_SAFE(&mpmc_fifo, entry, next) {
+ if (entry->next.pointer != next)
+ exit(EXIT_FAILURE);
+ printf("Next value in fifo: %d\n", ((struct example *)entry->value)->x);
+ }
+
+ free(examples);
+ free(entries);
+ free(stub);
+
+ return (0);
+}
+#else
+int
+main(void)
+{
+ return (0);
+}
+#endif
diff --git a/regressions/ck_fifo/validate/ck_fifo_spsc.c b/regressions/ck_fifo/validate/ck_fifo_spsc.c
new file mode 100644
index 0000000..3d6c38c
--- /dev/null
+++ b/regressions/ck_fifo/validate/ck_fifo_spsc.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+
+#include <ck_fifo.h>
+
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+};
+
+struct entry {
+ int tid;
+ int value;
+};
+
+static int nthr;
+static ck_fifo_spsc_t *fifo;
+static struct affinity a;
+static int size;
+static unsigned int barrier;
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ ck_fifo_spsc_entry_t *fifo_entry;
+ int i, j;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+#ifdef DEBUG
+ fprintf(stderr, "%p %u: %u -> %u\n", fifo+context->tid, context->tid, context->previous, context->tid);
+#endif
+
+ if (context->tid == 0) {
+ struct entry *entries;
+
+ entries = malloc(sizeof(struct entry) * size);
+ assert(entries != NULL);
+
+ for (i = 0; i < size; i++) {
+ entries[i].value = i;
+ entries[i].tid = 0;
+
+ fifo_entry = malloc(sizeof(ck_fifo_spsc_entry_t));
+ ck_fifo_spsc_enqueue(fifo + context->tid, fifo_entry, entries + i);
+ }
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < (unsigned int)nthr);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ while (ck_fifo_spsc_dequeue(fifo + context->previous, &entry) == false);
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("T [%u:%p] %u != %u\n",
+ context->tid, (void *)entry, entry->tid, context->previous);
+ }
+
+ if (entry->value != j) {
+ ck_error("V [%u:%p] %u != %u\n",
+ context->tid, (void *)entry, entry->value, j);
+ }
+
+ entry->tid = context->tid;
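+			/* Reuse a retired entry if one is available; otherwise allocate. */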
+ fifo_entry = ck_fifo_spsc_recycle(fifo + context->tid);
+ if (fifo_entry == NULL)
+ fifo_entry = malloc(sizeof(ck_fifo_spsc_entry_t));
+
+ ck_fifo_spsc_enqueue(fifo + context->tid, fifo_entry, entry);
+ }
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ struct context *context;
+ pthread_t *thread;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size > 0);
+
+ fifo = malloc(sizeof(ck_fifo_spsc_t) * nthr);
+ assert(fifo);
+
+ context = malloc(sizeof(*context) * nthr);
+ assert(context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
+ for (i = 0; i < nthr; i++) {
+ ck_fifo_spsc_entry_t *garbage;
+
+ context[i].tid = i;
+ if (i == 0) {
+ context[i].previous = nthr - 1;
+ context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ context[i].next = 0;
+ context[i].previous = i - 1;
+ } else {
+ context[i].next = i + 1;
+ context[i].previous = i - 1;
+ }
+
+ ck_fifo_spsc_init(fifo + i, malloc(sizeof(ck_fifo_spsc_entry_t)));
+ ck_fifo_spsc_deinit(fifo + i, &garbage);
+ if (garbage == NULL)
+ ck_error("ERROR: Expected non-NULL stub node on deinit.\n");
+
+ free(garbage);
+ ck_fifo_spsc_init(fifo + i, malloc(sizeof(ck_fifo_spsc_entry_t)));
+ r = pthread_create(thread + i, NULL, test, context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ return (0);
+}
+
diff --git a/regressions/ck_fifo/validate/ck_fifo_spsc_iterator.c b/regressions/ck_fifo/validate/ck_fifo_spsc_iterator.c
new file mode 100644
index 0000000..97804de
--- /dev/null
+++ b/regressions/ck_fifo/validate/ck_fifo_spsc_iterator.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_fifo.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+struct example {
+ int x;
+};
+
+static ck_fifo_spsc_t spsc_fifo;
+
+int
+main(void)
+{
+ int i, length = 3;
+ struct example *examples;
+ ck_fifo_spsc_entry_t *stub, *entries, *entry, *next;
+
+ stub = malloc(sizeof(ck_fifo_spsc_entry_t));
+ if (stub == NULL)
+ exit(EXIT_FAILURE);
+
+ ck_fifo_spsc_init(&spsc_fifo, stub);
+
+ entries = malloc(sizeof(ck_fifo_spsc_entry_t) * length);
+ if (entries == NULL)
+ exit(EXIT_FAILURE);
+
+ examples = malloc(sizeof(struct example) * length);
+ /* Need these for this unit test. */
+ if (examples == NULL)
+ exit(EXIT_FAILURE);
+
+ for (i = 0; i < length; ++i) {
+ examples[i].x = i;
+ ck_fifo_spsc_enqueue(&spsc_fifo, entries + i, examples + i);
+ }
+
+ puts("Testing CK_FIFO_SPSC_FOREACH.");
+ CK_FIFO_SPSC_FOREACH(&spsc_fifo, entry) {
+ printf("Next value in fifo: %d\n", ((struct example *)entry->value)->x);
+ }
+
+ puts("Testing CK_FIFO_SPSC_FOREACH_SAFE.");
+ CK_FIFO_SPSC_FOREACH_SAFE(&spsc_fifo, entry, next) {
+ if (entry->next != next)
+ exit(EXIT_FAILURE);
+ printf("Next value in fifo: %d\n", ((struct example *)entry->value)->x);
+ }
+
+ free(examples);
+ free(entries);
+ free(stub);
+
+ return (0);
+}
+
diff --git a/regressions/ck_hp/benchmark/Makefile b/regressions/ck_hp/benchmark/Makefile
new file mode 100644
index 0000000..2025ea9
--- /dev/null
+++ b/regressions/ck_hp/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: clean distribution
+
+OBJECTS=fifo_latency stack_latency
+
+all: $(OBJECTS)
+
+fifo_latency: fifo_latency.c
+ $(CC) $(CFLAGS) -o fifo_latency ../../../src/ck_hp.c fifo_latency.c
+
+stack_latency: stack_latency.c
+ $(CC) $(CFLAGS) -o stack_latency ../../../src/ck_hp.c stack_latency.c
+
+clean:
+ rm -rf *~ *.o *.dSYM *.exe $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_hp/benchmark/fifo_latency.c b/regressions/ck_hp/benchmark/fifo_latency.c
new file mode 100644
index 0000000..77ee2a7
--- /dev/null
+++ b/regressions/ck_hp/benchmark/fifo_latency.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_hp.h>
+#include <ck_hp_fifo.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../../common.h"
+
+#ifndef ENTRIES
+#define ENTRIES 4096
+#endif
+
+#ifndef STEPS
+#define STEPS 40000
+#endif
+
+static ck_hp_fifo_t fifo;
+static ck_hp_t fifo_hp;
+
+int
+main(void)
+{
+ void *r;
+ uint64_t s, e, a;
+ unsigned int i;
+ unsigned int j;
+ ck_hp_fifo_entry_t hp_entry[ENTRIES];
+ ck_hp_fifo_entry_t hp_stub;
+ ck_hp_record_t record;
+
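+	/*
+	 * Single hazard-pointer domain with CK_HP_FIFO_SLOTS_COUNT slots per
+	 * record; no destructor is needed since this benchmark never retires
+	 * entries.
+	 */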
+ ck_hp_init(&fifo_hp, CK_HP_FIFO_SLOTS_COUNT, 1000000, NULL);
+
+ r = malloc(CK_HP_FIFO_SLOTS_SIZE);
+ if (r == NULL) {
+ ck_error("ERROR: Failed to allocate slots.\n");
+ }
+ ck_hp_register(&fifo_hp, &record, r);
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_hp_fifo_init(&fifo, &hp_stub);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_hp_fifo_enqueue_mpmc(&record, &fifo, hp_entry + j, NULL);
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf("ck_hp_fifo_enqueue_mpmc: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_hp_fifo_init(&fifo, &hp_stub);
+ for (j = 0; j < ENTRIES; j++)
+ ck_hp_fifo_enqueue_mpmc(&record, &fifo, hp_entry + j, NULL);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_hp_fifo_dequeue_mpmc(&record, &fifo, &r);
+ e = rdtsc();
+ a += e - s;
+ }
+ printf("ck_hp_fifo_dequeue_mpmc: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+
+ return 0;
+}
diff --git a/regressions/ck_hp/benchmark/stack_latency.c b/regressions/ck_hp/benchmark/stack_latency.c
new file mode 100644
index 0000000..c336de6
--- /dev/null
+++ b/regressions/ck_hp/benchmark/stack_latency.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_hp.h>
+#include <ck_hp_stack.h>
+#include <ck_stack.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../../common.h"
+
+#ifndef ENTRIES
+#define ENTRIES 4096
+#endif
+
+#ifndef STEPS
+#define STEPS 40000
+#endif
+
+static ck_stack_t stack;
+static ck_hp_t stack_hp;
+
+int
+main(void)
+{
+ ck_hp_record_t record;
+ ck_stack_entry_t entry[ENTRIES];
+ uint64_t s, e, a;
+ unsigned int i;
+ unsigned int j;
+ void *r;
+
+ ck_hp_init(&stack_hp, CK_HP_STACK_SLOTS_COUNT, 1000000, NULL);
+ r = malloc(CK_HP_STACK_SLOTS_SIZE);
+ if (r == NULL) {
+ ck_error("ERROR: Failed to allocate slots.\n");
+ }
+ ck_hp_register(&stack_hp, &record, (void *)r);
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_hp_stack_push_mpmc(&stack, entry + j);
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf("ck_hp_stack_push_mpmc: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ for (j = 0; j < ENTRIES; j++)
+ ck_hp_stack_push_mpmc(&stack, entry + j);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++) {
+ r = ck_hp_stack_pop_mpmc(&record, &stack);
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ printf(" ck_hp_stack_pop_mpmc: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+
+ return 0;
+}
diff --git a/regressions/ck_hp/validate/Makefile b/regressions/ck_hp/validate/Makefile
new file mode 100644
index 0000000..476b34f
--- /dev/null
+++ b/regressions/ck_hp/validate/Makefile
@@ -0,0 +1,33 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_hp_stack nbds_haz_test serial ck_hp_fifo ck_hp_fifo_donner
+
+all: $(OBJECTS)
+
+check: all
+ ./serial
+ ./ck_hp_stack $(CORES) 100 1
+ ./ck_hp_fifo $(CORES) 1 16384 100
+ ./nbds_haz_test $(CORES) 15 1
+ ./ck_hp_fifo_donner $(CORES) 16384
+
+ck_hp_stack: ../../../src/ck_hp.c ck_hp_stack.c ../../../include/ck_hp_stack.h
+ $(CC) $(CFLAGS) ../../../src/ck_hp.c -o ck_hp_stack ck_hp_stack.c
+
+ck_hp_fifo: ../../../src/ck_hp.c ck_hp_fifo.c ../../../include/ck_hp_fifo.h
+ $(CC) $(CFLAGS) ../../../src/ck_hp.c -o ck_hp_fifo ck_hp_fifo.c
+
+ck_hp_fifo_donner: ../../../src/ck_hp.c ck_hp_fifo_donner.c ../../../include/ck_hp_fifo.h
+ $(CC) $(CFLAGS) ../../../src/ck_hp.c -o ck_hp_fifo_donner ck_hp_fifo_donner.c
+
+serial: ../../../src/ck_hp.c serial.c ../../../include/ck_hp_stack.h
+ $(CC) $(CFLAGS) ../../../src/ck_hp.c -o serial serial.c
+
+nbds_haz_test: ../../../src/ck_hp.c nbds_haz_test.c
+ $(CC) $(CFLAGS) ../../../src/ck_hp.c -o nbds_haz_test nbds_haz_test.c
+
+clean:
+ rm -rf *~ *.o *.dSYM *.exe $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_hp/validate/ck_hp_fifo.c b/regressions/ck_hp/validate/ck_hp_fifo.c
new file mode 100644
index 0000000..4454283
--- /dev/null
+++ b/regressions/ck_hp/validate/ck_hp_fifo.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <ck_hp_fifo.h>
+
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+};
+
+struct entry {
+ int tid;
+ int value;
+};
+
+static ck_hp_fifo_t fifo;
+static ck_hp_t fifo_hp;
+static int nthr;
+
+static struct affinity a;
+static int size;
+static unsigned int barrier;
+static unsigned int e_barrier;
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ ck_hp_fifo_entry_t *fifo_entry;
+ ck_hp_record_t record;
+ int i, j;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_hp_register(&fifo_hp, &record, malloc(sizeof(void *) * 2));
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < (unsigned int)nthr);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ fifo_entry = malloc(sizeof(ck_hp_fifo_entry_t));
+ entry = malloc(sizeof(struct entry));
+ entry->tid = context->tid;
+ ck_hp_fifo_enqueue_mpmc(&record, &fifo, fifo_entry, entry);
+
+ ck_pr_barrier();
+
+ fifo_entry = ck_hp_fifo_dequeue_mpmc(&record, &fifo, &entry);
+ if (fifo_entry == NULL) {
+ ck_error("ERROR [%u] Queue should never be empty.\n", context->tid);
+ }
+
+ ck_pr_barrier();
+
+ if (entry->tid < 0 || entry->tid >= nthr) {
+ ck_error("ERROR [%u] Incorrect value in entry.\n", entry->tid);
+ }
+
+ ck_hp_free(&record, &fifo_entry->hazard, fifo_entry, fifo_entry);
+ }
+ }
+
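+	/* Phase two: the same workload using the try variants, spinning until they succeed. */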
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ fifo_entry = malloc(sizeof(ck_hp_fifo_entry_t));
+ entry = malloc(sizeof(struct entry));
+ entry->tid = context->tid;
+
+ while (ck_hp_fifo_tryenqueue_mpmc(&record, &fifo, fifo_entry, entry) == false)
+ ck_pr_stall();
+
+ while (fifo_entry = ck_hp_fifo_trydequeue_mpmc(&record, &fifo, &entry), fifo_entry == NULL)
+ ck_pr_stall();
+
+ if (entry->tid < 0 || entry->tid >= nthr) {
+ ck_error("ERROR [%u] Incorrect value in entry.\n", entry->tid);
+ }
+
+ ck_hp_free(&record, &fifo_entry->hazard, fifo_entry, fifo_entry);
+ }
+ }
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < (unsigned int)nthr);
+
+ return (NULL);
+}
+
+static void
+destructor(void *p)
+{
+
+ free(p);
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ struct context *context;
+ pthread_t *thread;
+ int threshold;
+
+ if (argc != 5) {
+ ck_error("Usage: validate <threads> <affinity delta> <size> <threshold>\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size > 0);
+
+ threshold = atoi(argv[4]);
+ assert(threshold > 0);
+
+ context = malloc(sizeof(*context) * nthr);
+ assert(context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
+ ck_hp_init(&fifo_hp, 2, threshold, destructor);
+ ck_hp_fifo_init(&fifo, malloc(sizeof(ck_hp_fifo_entry_t)));
+
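+	/* ck_hp_fifo_deinit must hand back the stub node supplied at initialization. */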
+ ck_hp_fifo_entry_t *entry;
+ ck_hp_fifo_deinit(&fifo, &entry);
+
+ if (entry == NULL)
+ ck_error("ERROR: Expected non-NULL stub node.\n");
+
+ free(entry);
+ ck_hp_fifo_init(&fifo, malloc(sizeof(ck_hp_fifo_entry_t)));
+
+ for (i = 0; i < nthr; i++) {
+ context[i].tid = i;
+ r = pthread_create(thread + i, NULL, test, context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ return (0);
+}
+
diff --git a/regressions/ck_hp/validate/ck_hp_fifo_donner.c b/regressions/ck_hp/validate/ck_hp_fifo_donner.c
new file mode 100644
index 0000000..1b52a37
--- /dev/null
+++ b/regressions/ck_hp/validate/ck_hp_fifo_donner.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2012 Hendrik Donner
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_hp.h>
+#include <ck_hp_fifo.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <sys/time.h>
+#include <assert.h>
+#include "../../common.h"
+
+/* FIFO queue */
+static ck_hp_fifo_t fifo;
+
+/* Hazard pointer global */
+static ck_hp_t fifo_hp;
+
+/* thread local element count */
+static unsigned long *count;
+
+static unsigned long thread_count;
+
+static unsigned int start_barrier;
+static unsigned int end_barrier;
+
+/* destructor for FIFO queue */
+static void
+destructor(void *p)
+{
+
+ free(p);
+ return;
+}
+
+/* entry struct for FIFO queue entries */
+struct entry {
+ unsigned long value;
+};
+
+/* function for thread */
+static void *
+queue_50_50(void *elements)
+{
+ struct entry *entry;
+ ck_hp_fifo_entry_t *fifo_entry;
+ ck_hp_record_t *record;
+ void *slots;
+ unsigned long j, element_count = *(unsigned long *)elements;
+ unsigned int seed;
+
+ record = malloc(sizeof(ck_hp_record_t));
+ assert(record);
+
+ slots = malloc(CK_HP_FIFO_SLOTS_SIZE);
+ assert(slots);
+
+	/* Fixed seed (per-thread seeding via pthread_self() is commented out). */
+ seed = 1337; /*(unsigned int) pthread_self(); */
+
+ /*
+ * This subscribes the thread to the fifo_hp state using the thread-owned
+ * record.
+ * FIFO queue needs 2 hazard pointers.
+ */
+ ck_hp_register(&fifo_hp, record, slots);
+
+ /* start barrier */
+ ck_pr_inc_uint(&start_barrier);
+ while (ck_pr_load_uint(&start_barrier) < thread_count + 1)
+ ck_pr_stall();
+
+ /* 50/50 enqueue-dequeue */
+ for(j = 0; j < element_count; j++) {
+ /* rand_r with thread local state should be thread safe */
+ if( 50 < (1+(int) (100.0*common_rand_r(&seed)/(RAND_MAX+1.0)))) {
+ /* This is the container for the enqueued data. */
+ fifo_entry = malloc(sizeof(ck_hp_fifo_entry_t));
+
+ if (fifo_entry == NULL) {
+ exit(EXIT_FAILURE);
+ }
+
+ /* This is the data. */
+ entry = malloc(sizeof(struct entry));
+ if (entry != NULL) {
+ entry->value = j;
+ }
+
+ /*
+ * Enqueue the value of the pointer entry into FIFO queue using the
+ * container fifo_entry.
+ */
+ ck_hp_fifo_enqueue_mpmc(record, &fifo, fifo_entry, entry);
+ } else {
+ /*
+			 * ck_hp_fifo_dequeue_mpmc returns a pointer to the node that can now be
+			 * retired and stores the dequeued value in entry.
+ */
+ fifo_entry = ck_hp_fifo_dequeue_mpmc(record, &fifo, &entry);
+ if (fifo_entry != NULL) {
+ /*
+ * Safely reclaim memory associated with fifo_entry.
+ * This inserts garbage into a local list. Once the list (plist) reaches
+ * a length of 100, ck_hp_free will attempt to reclaim all references
+ * to objects on the list.
+ */
+ ck_hp_free(record, &fifo_entry->hazard, fifo_entry, fifo_entry);
+ }
+ }
+ }
+
+ /* end barrier */
+ ck_pr_inc_uint(&end_barrier);
+ while (ck_pr_load_uint(&end_barrier) < thread_count + 1)
+ ck_pr_stall();
+
+ return NULL;
+}
+
+int
+main(int argc, char** argv)
+{
+ ck_hp_fifo_entry_t *stub;
+ unsigned long element_count, i;
+ pthread_t *thr;
+
+ if (argc != 3) {
+ ck_error("Usage: cktest <thread_count> <element_count>\n");
+ }
+
+ /* Get element count from argument */
+ element_count = atoi(argv[2]);
+
+	/* Get thread count from argument */
+ thread_count = atoi(argv[1]);
+
+ /* pthread handles */
+ thr = malloc(sizeof(pthread_t) * thread_count);
+
+ /* array for local operation count */
+	count = malloc(sizeof(unsigned long) * thread_count);
+
+ /*
+ * Initialize global hazard pointer safe memory reclamation to execute free()
+ * when a fifo_entry is safe to be deleted.
+	 * The hazard pointer scan routine will be called when the thread-local plist's
+	 * size exceeds 100 entries.
+ */
+
+ /* FIFO queue needs 2 hazard pointers */
+ ck_hp_init(&fifo_hp, CK_HP_FIFO_SLOTS_COUNT, 100, destructor);
+
+ /* The FIFO requires one stub entry on initialization. */
+ stub = malloc(sizeof(ck_hp_fifo_entry_t));
+
+ /* Behavior is undefined if stub is NULL. */
+ if (stub == NULL) {
+ exit(EXIT_FAILURE);
+ }
+
+ /* This is called once to initialize the fifo. */
+ ck_hp_fifo_init(&fifo, stub);
+
+ /* Create threads */
+ for (i = 0; i < thread_count; i++) {
+ count[i] = (element_count + i) / thread_count;
+ if (pthread_create(&thr[i], NULL, queue_50_50, (void *) &count[i]) != 0) {
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ /* start barrier */
+ ck_pr_inc_uint(&start_barrier);
+ while (ck_pr_load_uint(&start_barrier) < thread_count + 1);
+
+ /* end barrier */
+ ck_pr_inc_uint(&end_barrier);
+ while (ck_pr_load_uint(&end_barrier) < thread_count + 1);
+
+ /* Join threads */
+ for (i = 0; i < thread_count; i++)
+ pthread_join(thr[i], NULL);
+
+ return 0;
+}
+
diff --git a/regressions/ck_hp/validate/ck_hp_stack.c b/regressions/ck_hp/validate/ck_hp_stack.c
new file mode 100644
index 0000000..ad9b927
--- /dev/null
+++ b/regressions/ck_hp/validate/ck_hp_stack.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <ck_hp.h>
+#include <ck_stack.h>
+#include <ck_hp_stack.h>
+
+#include "../../common.h"
+
+static unsigned int threshold;
+static unsigned int n_threads;
+static unsigned int barrier;
+static unsigned int e_barrier;
+
+#ifndef PAIRS
+#define PAIRS 5000000
+#endif
+
+struct node {
+ unsigned int value;
+ ck_hp_hazard_t hazard;
+ ck_stack_entry_t stack_entry;
+};
+static ck_stack_t stack = {NULL, NULL};
+static ck_hp_t stack_hp;
+CK_STACK_CONTAINER(struct node, stack_entry, stack_container)
+static struct affinity a;
+
+static void *
+thread(void *unused CK_CC_UNUSED)
+{
+ struct node **entry, *e;
+ unsigned int i;
+ ck_hp_record_t record;
+ void **pointers;
+ ck_stack_entry_t *s;
+
+ unused = NULL;
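+	/* A single hazard pointer slot per thread suffices for the stack pop operation. */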
+ pointers = malloc(sizeof(void *));
+ ck_hp_register(&stack_hp, &record, pointers);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ entry = malloc(sizeof(struct node *) * PAIRS);
+ if (entry == NULL) {
+ ck_error("Failed allocation.\n");
+ }
+
+ for (i = 0; i < PAIRS; i++) {
+ entry[i] = malloc(sizeof(struct node));
+		if (entry[i] == NULL) {
+ ck_error("Failed individual allocation\n");
+ }
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads)
+ ck_pr_stall();
+
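+	/* Push/pop pairs; every popped node is retired through this thread's record. */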
+ for (i = 0; i < PAIRS; i++) {
+ ck_hp_stack_push_mpmc(&stack, &entry[i]->stack_entry);
+ s = ck_hp_stack_pop_mpmc(&record, &stack);
+ e = stack_container(s);
+ ck_hp_free(&record, &e->hazard, e, s);
+ }
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < n_threads)
+ ck_pr_stall();
+
+ fprintf(stderr, "Peak: %u (%2.2f%%)\nReclamations: %" PRIu64 "\n\n",
+ record.n_peak,
+ (double)record.n_peak / PAIRS * 100,
+ record.n_reclamations);
+
+ ck_hp_clear(&record);
+ ck_hp_purge(&record);
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < (n_threads << 1));
+
+ if (record.n_pending != 0) {
+ ck_error("ERROR: %u pending, expecting none.\n",
+ record.n_pending);
+ }
+
+ return (NULL);
+}
+
+static void
+destructor(void *p)
+{
+
+ free(p);
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int i;
+ pthread_t *threads;
+
+ if (argc != 4) {
+ ck_error("Usage: stack <threads> <threshold> <delta>\n");
+ }
+
+ n_threads = atoi(argv[1]);
+ threshold = atoi(argv[2]);
+ a.delta = atoi(argv[3]);
+ a.request = 0;
+
+ threads = malloc(sizeof(pthread_t) * n_threads);
+
+ ck_hp_init(&stack_hp, 1, threshold, destructor);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_create(threads + i, NULL, thread, NULL);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ return (0);
+}
diff --git a/regressions/ck_hp/validate/nbds_haz_test.c b/regressions/ck_hp/validate/nbds_haz_test.c
new file mode 100644
index 0000000..9b85e76
--- /dev/null
+++ b/regressions/ck_hp/validate/nbds_haz_test.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * This is a unit test similar to the implementation in John Dybnis's nbds
+ * test.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+
+#include <ck_backoff.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <ck_hp.h>
+
+#include "../../common.h"
+
+#define STACK_CONTAINER(T, M, N) CK_CC_CONTAINER(stack_entry_t, T, M, N)
+
+struct stack_entry {
+ struct stack_entry *next;
+} CK_CC_ALIGN(8);
+typedef struct stack_entry stack_entry_t;
+
+struct stack {
+ struct stack_entry *head;
+ char *generation;
+} CK_CC_PACKED CK_CC_ALIGN(16);
+typedef struct stack hp_stack_t;
+
+static unsigned int threshold;
+static unsigned int n_threads;
+static unsigned int barrier;
+static unsigned int e_barrier;
+static unsigned int global_tid;
+static unsigned int pops;
+static unsigned int pushs;
+
+#ifndef PAIRS
+#define PAIRS 1000000
+#endif
+
+struct node {
+ unsigned int value;
+ ck_hp_hazard_t hazard;
+ stack_entry_t stack_entry;
+};
+hp_stack_t stack = {NULL, NULL};
+ck_hp_t stack_hp;
+
+STACK_CONTAINER(struct node, stack_entry, stack_container)
+static struct affinity a;
+
+/*
+ * Stack producer operation safe for multiple unique producers and multiple consumers.
+ */
+CK_CC_INLINE static void
+stack_push_mpmc(struct stack *target, struct stack_entry *entry)
+{
+ struct stack_entry *lstack;
+ ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
+
+ lstack = ck_pr_load_ptr(&target->head);
+ ck_pr_store_ptr(&entry->next, lstack);
+ ck_pr_fence_store();
+
+ while (ck_pr_cas_ptr_value(&target->head, lstack, entry, &lstack) == false) {
+ ck_pr_store_ptr(&entry->next, lstack);
+ ck_pr_fence_store();
+ ck_backoff_eb(&backoff);
+ }
+
+ return;
+}
+
+/*
+ * Stack consumer operation safe for multiple unique producers and multiple consumers.
+ */
+CK_CC_INLINE static struct stack_entry *
+stack_pop_mpmc(ck_hp_record_t *record, struct stack *target)
+{
+ struct stack_entry *entry;
+ ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
+
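+	/* Publish a hazard pointer for the observed head, then re-read to validate it before use. */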
+ do {
+ entry = ck_pr_load_ptr(&target->head);
+ if (entry == NULL)
+ return (NULL);
+
+ ck_hp_set_fence(record, 0, entry);
+ } while (entry != ck_pr_load_ptr(&target->head));
+
+ while (ck_pr_cas_ptr_value(&target->head, entry, entry->next, &entry) == false) {
+ if (ck_pr_load_ptr(&entry) == NULL)
+ break;
+
+ ck_hp_set_fence(record, 0, entry);
+ if (entry != ck_pr_load_ptr(&target->head))
+ continue;
+
+ ck_backoff_eb(&backoff);
+ }
+
+ return (entry);
+}
+
+static void *
+thread(void *unused CK_CC_UNUSED)
+{
+ struct node *entry, *e;
+ unsigned int i;
+ ck_hp_record_t record;
+ void **pointers;
+ stack_entry_t *s;
+ unsigned int tid = ck_pr_faa_uint(&global_tid, 1) + 1;
+ unsigned int r = (unsigned int)(tid + 1) * 0x5bd1e995;
+
+ unused = NULL;
+ pointers = malloc(sizeof(void *));
+ ck_hp_register(&stack_hp, &record, pointers);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) < n_threads)
+ ck_pr_stall();
+
+ for (i = 0; i < PAIRS; i++) {
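+		/* xorshift PRNG step; bit 0x1000 of the state selects push versus pop. */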
+ r ^= r << 6; r ^= r >> 21; r ^= r << 7;
+
+ if (r & 0x1000) {
+ entry = malloc(sizeof(struct node));
+ assert(entry);
+ stack_push_mpmc(&stack, &entry->stack_entry);
+ ck_pr_inc_uint(&pushs);
+ } else {
+ s = stack_pop_mpmc(&record, &stack);
+ if (s == NULL)
+ continue;
+
+ e = stack_container(s);
+ ck_hp_free(&record, &e->hazard, e, s);
+ ck_pr_inc_uint(&pops);
+ }
+ }
+
+ ck_pr_inc_uint(&e_barrier);
+ while (ck_pr_load_uint(&e_barrier) < n_threads);
+
+ return (NULL);
+}
+
+static void
+destructor(void *p)
+{
+ free(p);
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int i;
+ pthread_t *threads;
+
+ if (argc != 4) {
+ ck_error("Usage: stack <threads> <threshold> <delta>\n");
+ }
+
+ n_threads = atoi(argv[1]);
+ threshold = atoi(argv[2]);
+ a.delta = atoi(argv[3]);
+ a.request = 0;
+
+ threads = malloc(sizeof(pthread_t) * n_threads);
+
+ ck_hp_init(&stack_hp, 1, threshold, destructor);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_create(threads + i, NULL, thread, NULL);
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ fprintf(stderr, "Push: %u\nPop: %u\n", pushs, pops);
+ return (0);
+}
diff --git a/regressions/ck_hp/validate/serial.c b/regressions/ck_hp/validate/serial.c
new file mode 100644
index 0000000..fd31581
--- /dev/null
+++ b/regressions/ck_hp/validate/serial.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2010-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <ck_hp.h>
+
+#include "../../common.h"
+
+struct entry {
+ unsigned int value;
+ ck_hp_hazard_t hazard;
+};
+
+static void
+destructor(void *pointer)
+{
+
+ fprintf(stderr, "Free %p\n", pointer);
+ free(pointer);
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ ck_hp_t state;
+ ck_hp_record_t record[2];
+ void **pointers;
+ struct entry *entry, *other;
+
+ (void)argc;
+ (void)argv;
+
+ ck_hp_init(&state, 1, 1, destructor);
+
+ pointers = malloc(sizeof(void *));
+ if (pointers == NULL) {
+ ck_error("ERROR: Failed to allocate slot.\n");
+ }
+ ck_hp_register(&state, &record[0], pointers);
+ ck_hp_reclaim(&record[0]);
+
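+	/* Basic cycle: protect the object, retire it, then clear the slot and reclaim. */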
+ entry = malloc(sizeof *entry);
+ ck_hp_set(&record[0], 0, entry);
+ ck_hp_reclaim(&record[0]);
+ ck_hp_free(&record[0], &entry->hazard, entry, entry);
+ ck_hp_reclaim(&record[0]);
+ ck_hp_set(&record[0], 0, NULL);
+ ck_hp_reclaim(&record[0]);
+
+ entry = malloc(sizeof *entry);
+ ck_hp_set(&record[0], 0, entry);
+ ck_hp_reclaim(&record[0]);
+ ck_hp_free(&record[0], &entry->hazard, entry, entry);
+ ck_hp_reclaim(&record[0]);
+ ck_hp_set(&record[0], 0, NULL);
+ ck_hp_reclaim(&record[0]);
+
+ pointers = malloc(sizeof(void *));
+ if (pointers == NULL) {
+ ck_error("ERROR: Failed to allocate slot.\n");
+ }
+ ck_hp_register(&state, &record[1], pointers);
+ ck_hp_reclaim(&record[1]);
+
+ entry = malloc(sizeof *entry);
+ ck_hp_set(&record[1], 0, entry);
+ ck_hp_reclaim(&record[1]);
+ ck_hp_free(&record[1], &entry->hazard, entry, entry);
+ ck_hp_reclaim(&record[1]);
+ ck_hp_set(&record[1], 0, NULL);
+ ck_hp_reclaim(&record[1]);
+
+ printf("Allocating entry and freeing in other HP record...\n");
+ entry = malloc(sizeof *entry);
+ entry->value = 42;
+ ck_hp_set(&record[0], 0, entry);
+ ck_hp_free(&record[1], &entry->hazard, entry, entry);
+ ck_pr_store_uint(&entry->value, 1);
+
+ other = malloc(sizeof *other);
+ other->value = 24;
+ ck_hp_set(&record[1], 0, other);
+ ck_hp_free(&record[0], &other->hazard, other, other);
+ ck_pr_store_uint(&other->value, 32);
+ ck_hp_set(&record[0], 0, NULL);
+ ck_hp_reclaim(&record[1]);
+ ck_hp_set(&record[1], 0, NULL);
+ ck_hp_reclaim(&record[0]);
+ ck_hp_reclaim(&record[1]);
+
+ return 0;
+}
diff --git a/regressions/ck_hs/benchmark/Makefile b/regressions/ck_hs/benchmark/Makefile
new file mode 100644
index 0000000..23b6745
--- /dev/null
+++ b/regressions/ck_hs/benchmark/Makefile
@@ -0,0 +1,23 @@
+.PHONY: clean distribution
+
+OBJECTS=serial parallel_bytestring parallel_bytestring.delete apply
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_hs.h ../../../src/ck_hs.c
+ $(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_hs.c
+
+apply: apply.c ../../../include/ck_hs.h ../../../src/ck_hs.c
+ $(CC) $(CFLAGS) -o apply apply.c ../../../src/ck_hs.c
+
+parallel_bytestring: parallel_bytestring.c ../../../include/ck_hs.h ../../../src/ck_hs.c ../../../src/ck_epoch.c
+ $(CC) $(PTHREAD_CFLAGS) $(CFLAGS) -o parallel_bytestring parallel_bytestring.c ../../../src/ck_hs.c ../../../src/ck_epoch.c
+
+parallel_bytestring.delete: parallel_bytestring.c ../../../include/ck_hs.h ../../../src/ck_hs.c ../../../src/ck_epoch.c
+ $(CC) $(PTHREAD_CFLAGS) $(CFLAGS) -DHS_DELETE -o parallel_bytestring.delete parallel_bytestring.c ../../../src/ck_hs.c ../../../src/ck_epoch.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_hs/benchmark/apply.c b/regressions/ck_hs/benchmark/apply.c
new file mode 100644
index 0000000..ca4a3da
--- /dev/null
+++ b/regressions/ck_hs/benchmark/apply.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright 2014 Samy Al Bahra.
+ * Copyright 2014 Backtrace I/O, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_hs.h>
+
+#include <assert.h>
+#include <ck_malloc.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include "../../common.h"
+#include "../../../src/ck_ht_hash.h"
+
+static ck_hs_t hs;
+static char **keys;
+static size_t keys_length = 0;
+static size_t keys_capacity = 128;
+static unsigned long global_seed;
+
+static void *
+hs_malloc(size_t r)
+{
+
+ return malloc(r);
+}
+
+static void
+hs_free(void *p, size_t b, bool r)
+{
+
+ (void)b;
+ (void)r;
+
+ free(p);
+
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = hs_malloc,
+ .free = hs_free
+};
+
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+ const char *c = object;
+ unsigned long h;
+
+ h = (unsigned long)MurmurHash64A(c, strlen(c), seed);
+ return h;
+}
+
+static bool
+hs_compare(const void *previous, const void *compare)
+{
+
+ return strcmp(previous, compare) == 0;
+}
+
+static void
+set_destroy(void)
+{
+
+ ck_hs_destroy(&hs);
+ return;
+}
+
+static void
+set_init(unsigned int size, unsigned int mode)
+{
+
+ if (ck_hs_init(&hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC | mode, hs_hash, hs_compare,
+ &my_allocator, size, global_seed) == false) {
+ perror("ck_hs_init");
+ exit(EXIT_FAILURE);
+ }
+
+ return;
+}
+
+static size_t
+set_count(void)
+{
+
+ return ck_hs_count(&hs);
+}
+
+static bool
+set_reset(void)
+{
+
+ return ck_hs_reset(&hs);
+}
+
+static void *
+test_apply(void *key, void *closure)
+{
+
+ (void)key;
+
+ return closure;
+}
+
+static void
+run_test(const char *file, size_t r, unsigned int size, unsigned int mode)
+{
+ FILE *fp;
+ char buffer[512];
+ size_t i, j;
+ unsigned int d = 0;
+ uint64_t s, e, a, gp, agp;
+ struct ck_hs_stat st;
+ char **t;
+
+ keys = malloc(sizeof(char *) * keys_capacity);
+ assert(keys != NULL);
+
+ fp = fopen(file, "r");
+ assert(fp != NULL);
+
+ while (fgets(buffer, sizeof(buffer), fp) != NULL) {
+ buffer[strlen(buffer) - 1] = '\0';
+ keys[keys_length++] = strdup(buffer);
+ assert(keys[keys_length - 1] != NULL);
+
+ if (keys_length == keys_capacity) {
+ t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
+ assert(t != NULL);
+ keys = t;
+ }
+ }
+
+ t = realloc(keys, sizeof(char *) * keys_length);
+ assert(t != NULL);
+ keys = t;
+
+ set_init(size, mode);
+ for (i = 0; i < keys_length; i++) {
+ unsigned long h = CK_HS_HASH(&hs, hs_hash, keys[i]);
+
+ if (ck_hs_get(&hs, h, keys[i]) == false) {
+ if (ck_hs_put(&hs, h, keys[i]) == false)
+ ck_error("ERROR: Failed get to put workload.\n");
+ } else {
+ d++;
+ }
+ }
+ ck_hs_stat(&hs, &st);
+
+ fprintf(stderr, "# %zu entries stored, %u duplicates, %u probe.\n",
+ set_count(), d, st.probe_maximum);
+
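+	/* Timed workload 1: insert-if-absent expressed as a get followed by a put. */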
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (set_reset() == false)
+ ck_error("ERROR: Failed to reset hash table.\n");
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ unsigned long h = CK_HS_HASH(&hs, hs_hash, keys[i]);
+
+ if (ck_hs_get(&hs, h, keys[i]) == false &&
+ ck_hs_put(&hs, h, keys[i]) == false) {
+ ck_error("ERROR: Failed get to put workload.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ gp = a / (r * keys_length);
+
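+	/* Timed workload 2: the same insert-if-absent driven through a single ck_hs_apply call. */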
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (set_reset() == false)
+ ck_error("ERROR: Failed to reset hash table.\n");
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ unsigned long h = CK_HS_HASH(&hs, hs_hash, keys[i]);
+
+ if (ck_hs_apply(&hs, h, keys[i], test_apply, (void *)keys[i]) == false)
+ ck_error("ERROR: Failed in apply workload.\n");
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ agp = a / (r * keys_length);
+
+ fclose(fp);
+
+ for (i = 0; i < keys_length; i++) {
+ free(keys[i]);
+ }
+
+ printf("Get to put: %" PRIu64 " ticks\n", gp);
+ printf(" Apply: %" PRIu64 " ticks\n", agp);
+
+ free(keys);
+ keys_length = 0;
+ set_destroy();
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int r, size;
+
+ common_srand48((long int)time(NULL));
+ if (argc < 2) {
+ ck_error("Usage: ck_hs <dictionary> [<repetitions> <initial size>]\n");
+ }
+
+ r = 16;
+ if (argc >= 3)
+ r = atoi(argv[2]);
+
+ size = 8;
+ if (argc >= 4)
+ size = atoi(argv[3]);
+
+ global_seed = common_lrand48();
+ run_test(argv[1], r, size, 0);
+
+ printf("\n==============================================\n"
+ "Delete mode\n"
+ "==============================================\n");
+ run_test(argv[1], r, size, CK_HS_MODE_DELETE);
+ return 0;
+}
+
diff --git a/regressions/ck_hs/benchmark/parallel_bytestring.c b/regressions/ck_hs/benchmark/parallel_bytestring.c
new file mode 100644
index 0000000..6d38379
--- /dev/null
+++ b/regressions/ck_hs/benchmark/parallel_bytestring.c
@@ -0,0 +1,602 @@
+/*
+ * Copyright 2012 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "../../common.h"
+#include <ck_hs.h>
+#include "../../../src/ck_ht_hash.h"
+#include <assert.h>
+#include <ck_epoch.h>
+#include <ck_malloc.h>
+#include <ck_pr.h>
+#include <ck_spinlock.h>
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+static ck_hs_t hs CK_CC_CACHELINE;
+static char **keys;
+static size_t keys_length = 0;
+static size_t keys_capacity = 128;
+static ck_epoch_t epoch_hs;
+static ck_epoch_record_t epoch_wr;
+static int n_threads;
+static bool next_stage;
+
+enum state {
+ HS_STATE_STOP = 0,
+ HS_STATE_GET,
+ HS_STATE_STRICT_REPLACEMENT,
+ HS_STATE_DELETION,
+ HS_STATE_REPLACEMENT,
+ HS_STATE_COUNT
+};
+
+static ck_spinlock_t mtx = CK_SPINLOCK_INITIALIZER;
+static struct affinity affinerator = AFFINITY_INITIALIZER;
+static uint64_t accumulator[HS_STATE_COUNT];
+static int barrier[HS_STATE_COUNT];
+static int state;
+
+struct hs_epoch {
+ ck_epoch_entry_t epoch_entry;
+};
+
+COMMON_ALARM_DECLARE_GLOBAL(hs_alarm, alarm_event, next_stage)
+
+static void
+alarm_handler(int s)
+{
+
+ (void)s;
+ next_stage = true;
+ return;
+}
+
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+ const char *c = object;
+ unsigned long h;
+
+ h = (unsigned long)MurmurHash64A(c, strlen(c), seed);
+ return h;
+}
+
+static bool
+hs_compare(const void *previous, const void *compare)
+{
+
+ return strcmp(previous, compare) == 0;
+}
+
+static void
+hs_destroy(ck_epoch_entry_t *e)
+{
+
+ free(e);
+ return;
+}
+
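+/*
+ * Allocations are prefixed with an epoch entry so that hs_free can defer
+ * destruction through ck_epoch_call when safe memory reclamation is required.
+ */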
+static void *
+hs_malloc(size_t r)
+{
+ ck_epoch_entry_t *b;
+
+ b = malloc(sizeof(*b) + r);
+ return b + 1;
+}
+
+static void
+hs_free(void *p, size_t b, bool r)
+{
+ struct hs_epoch *e = p;
+
+ (void)b;
+
+ if (r == true) {
+ /* Destruction requires safe memory reclamation. */
+ ck_epoch_call(&epoch_wr, &(--e)->epoch_entry, hs_destroy);
+ } else {
+ free(--e);
+ }
+
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = hs_malloc,
+ .free = hs_free
+};
+
+static void
+set_init(void)
+{
+ unsigned int mode = CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC;
+
+#ifdef HS_DELETE
+ mode |= CK_HS_MODE_DELETE;
+#endif
+
+ ck_epoch_init(&epoch_hs);
+ ck_epoch_register(&epoch_hs, &epoch_wr);
+ common_srand48((long int)time(NULL));
+ if (ck_hs_init(&hs, mode, hs_hash, hs_compare, &my_allocator, 65536, common_lrand48()) == false) {
+ perror("ck_hs_init");
+ exit(EXIT_FAILURE);
+ }
+
+ return;
+}
+
+static bool
+set_remove(const char *value)
+{
+ unsigned long h;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ return (bool)ck_hs_remove(&hs, h, value);
+}
+
+static bool
+set_replace(const char *value)
+{
+ unsigned long h;
+ void *previous;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ return ck_hs_set(&hs, h, value, &previous);
+}
+
+static bool
+set_swap(const char *value)
+{
+ unsigned long h;
+ void *previous;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ return ck_hs_fas(&hs, h, value, &previous);
+}
+
+static void *
+set_get(const char *value)
+{
+ unsigned long h;
+ void *v;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ v = ck_hs_get(&hs, h, value);
+ return v;
+}
+
+static bool
+set_insert(const char *value)
+{
+ unsigned long h;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ return ck_hs_put(&hs, h, value);
+}
+
+static size_t
+set_count(void)
+{
+
+ return ck_hs_count(&hs);
+}
+
+static bool
+set_reset(void)
+{
+
+ return ck_hs_reset(&hs);
+}
+
+static void *
+reader(void *unused)
+{
+ size_t i;
+ ck_epoch_record_t epoch_record;
+ int state_previous = HS_STATE_STOP;
+ int n_state = 0;
+ uint64_t s, j, a;
+
+ (void)unused;
+ if (aff_iterate(&affinerator) != 0)
+ perror("WARNING: Failed to affine thread");
+
+ s = j = a = 0;
+ ck_epoch_register(&epoch_hs, &epoch_record);
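+	/* Each pass walks the full key set inside an epoch-protected section and accumulates timings. */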
+ for (;;) {
+ j++;
+ ck_epoch_begin(&epoch_record, NULL);
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ char *r;
+
+ r = set_get(keys[i]);
+ if (r == NULL) {
+ if (n_state == HS_STATE_STRICT_REPLACEMENT) {
+ ck_error("ERROR: Did not find during replacement: %s\n", keys[i]);
+ }
+
+ continue;
+ }
+
+ if (strcmp(r, keys[i]) == 0)
+ continue;
+
+ ck_error("ERROR: Found invalid value: [%s] but expected [%s]\n", (char *)r, keys[i]);
+ }
+ a += rdtsc() - s;
+ ck_epoch_end(&epoch_record, NULL);
+
+ n_state = ck_pr_load_int(&state);
+ if (n_state != state_previous) {
+ ck_spinlock_lock(&mtx);
+ accumulator[state_previous] += a / (j * keys_length);
+ ck_spinlock_unlock(&mtx);
+
+ ck_pr_inc_int(&barrier[state_previous]);
+ while (ck_pr_load_int(&barrier[state_previous]) != n_threads + 1)
+ ck_pr_stall();
+
+ state_previous = n_state;
+ s = j = a = 0;
+ }
+ }
+
+ return NULL;
+}
+
+static uint64_t
+acc(size_t i)
+{
+ uint64_t r;
+
+ ck_spinlock_lock(&mtx);
+ r = accumulator[i];
+ ck_spinlock_unlock(&mtx);
+
+ return r;
+}
+
+int
+main(int argc, char *argv[])
+{
+ FILE *fp;
+ char buffer[512];
+ size_t i, j, r;
+ unsigned int d = 0;
+ uint64_t s, e, a, repeated;
+ char **t;
+ pthread_t *readers;
+ double p_r, p_d;
+
+ COMMON_ALARM_DECLARE_LOCAL(hs_alarm, alarm_event)
+
+ r = 20;
+ s = 8;
+ p_d = 0.5;
+ p_r = 0.5;
+ n_threads = CORES - 1;
+
+ if (argc < 2) {
+ ck_error("Usage: parallel <dictionary> [<interval length> <initial size> <readers>\n"
+ " <probability of replacement> <probability of deletion> <epoch threshold>]\n");
+ }
+
+ if (argc >= 3)
+ r = atoi(argv[2]);
+
+ if (argc >= 4)
+ s = (uint64_t)atoi(argv[3]);
+
+ if (argc >= 5) {
+ n_threads = atoi(argv[4]);
+ if (n_threads < 1) {
+ ck_error("ERROR: Number of readers must be >= 1.\n");
+ }
+ }
+
+ if (argc >= 6) {
+ p_r = atof(argv[5]) / 100.00;
+ if (p_r < 0) {
+ ck_error("ERROR: Probability of replacement must be >= 0 and <= 100.\n");
+ }
+ }
+
+ if (argc >= 7) {
+ p_d = atof(argv[6]) / 100.00;
+ if (p_d < 0) {
+ ck_error("ERROR: Probability of deletion must be >= 0 and <= 100.\n");
+ }
+ }
+
+ COMMON_ALARM_INIT(hs_alarm, alarm_event, r)
+
+ affinerator.delta = 1;
+ readers = malloc(sizeof(pthread_t) * n_threads);
+ assert(readers != NULL);
+
+ keys = malloc(sizeof(char *) * keys_capacity);
+ assert(keys != NULL);
+
+ fp = fopen(argv[1], "r");
+ assert(fp != NULL);
+
+ while (fgets(buffer, sizeof(buffer), fp) != NULL) {
+ buffer[strlen(buffer) - 1] = '\0';
+ keys[keys_length++] = strdup(buffer);
+ assert(keys[keys_length - 1] != NULL);
+
+ if (keys_length == keys_capacity) {
+ t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
+ assert(t != NULL);
+ keys = t;
+ }
+ }
+
+ t = realloc(keys, sizeof(char *) * keys_length);
+ assert(t != NULL);
+ keys = t;
+
+ set_init();
+
+ for (i = 0; i < (size_t)n_threads; i++) {
+ if (pthread_create(&readers[i], NULL, reader, NULL) != 0) {
+ ck_error("ERROR: Failed to create thread %zu.\n", i);
+ }
+ }
+
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+
+ fprintf(stderr, " [S] %d readers, 1 writer.\n", n_threads);
+ fprintf(stderr, " [S] %zu entries stored and %u duplicates.\n\n",
+ set_count(), d);
+
+ fprintf(stderr, " ,- BASIC TEST\n");
+ fprintf(stderr, " | Executing SMR test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (set_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing replacement test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing get test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (set_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ a = 0;
+ fprintf(stderr, " | Executing removal test...");
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing negative look-up test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ set_get("\x50\x03\x04\x05\x06\x10");
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ ck_epoch_record_t epoch_temporary = epoch_wr;
+ ck_epoch_synchronize(&epoch_wr);
+
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
+ "%u pending, %u peak, %lu reclamations\n\n",
+ epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
+ epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
+
+ fprintf(stderr, " ,- READER CONCURRENCY\n");
+ fprintf(stderr, " | Executing reader test...");
+
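+	/*
+	 * The writer advances the shared state machine: it publishes the next state,
+	 * waits for every reader to check in on the previous stage's barrier, and
+	 * then releases them.
+	 */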
+ ck_pr_store_int(&state, HS_STATE_GET);
+ while (ck_pr_load_int(&barrier[HS_STATE_STOP]) != n_threads)
+ ck_pr_stall();
+ ck_pr_inc_int(&barrier[HS_STATE_STOP]);
+ common_sleep(r);
+ ck_pr_store_int(&state, HS_STATE_STRICT_REPLACEMENT);
+ while (ck_pr_load_int(&barrier[HS_STATE_GET]) != n_threads)
+ ck_pr_stall();
+
+ fprintf(stderr, "done (reader = %" PRIu64 " ticks)\n",
+ acc(HS_STATE_GET) / n_threads);
+
+ fprintf(stderr, " | Executing strict replacement test...");
+
+ a = repeated = 0;
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ ck_pr_inc_int(&barrier[HS_STATE_GET]);
+ for (;;) {
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (i & 1) {
+ set_replace(keys[i]);
+ } else {
+ set_swap(keys[i]);
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+
+ ck_pr_store_int(&state, HS_STATE_DELETION);
+ while (ck_pr_load_int(&barrier[HS_STATE_STRICT_REPLACEMENT]) != n_threads)
+ ck_pr_stall();
+ set_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), acc(HS_STATE_STRICT_REPLACEMENT) / n_threads);
+
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ fprintf(stderr, " | Executing deletion test (%.2f)...", p_d * 100);
+ a = repeated = 0;
+ ck_pr_inc_int(&barrier[HS_STATE_STRICT_REPLACEMENT]);
+ for (;;) {
+ double delete;
+
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ set_insert(keys[i]);
+ if (p_d != 0.0) {
+ delete = common_drand48();
+ if (delete <= p_d)
+ set_remove(keys[i]);
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+ ck_pr_store_int(&state, HS_STATE_REPLACEMENT);
+ while (ck_pr_load_int(&barrier[HS_STATE_DELETION]) != n_threads)
+ ck_pr_stall();
+
+ set_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), acc(HS_STATE_DELETION) / n_threads);
+
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ fprintf(stderr, " | Executing replacement test (%.2f)...", p_r * 100);
+ a = repeated = 0;
+ ck_pr_inc_int(&barrier[HS_STATE_DELETION]);
+ for (;;) {
+ double delete, replace;
+
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ set_insert(keys[i]);
+ if (p_d != 0.0) {
+ delete = common_drand48();
+ if (delete <= p_d)
+ set_remove(keys[i]);
+ } else {
+ delete = 0.0;
+ }
+
+ if (p_r != 0.0) {
+ replace = common_drand48();
+ if (replace <= p_r) {
+ if ((i & 1) || (delete <= p_d)) {
+ set_replace(keys[i]);
+ } else {
+ set_swap(keys[i]);
+ }
+ }
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+ ck_pr_store_int(&state, HS_STATE_STOP);
+ while (ck_pr_load_int(&barrier[HS_STATE_REPLACEMENT]) != n_threads)
+ ck_pr_stall();
+ set_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), acc(HS_STATE_REPLACEMENT) / n_threads);
+
+ ck_pr_inc_int(&barrier[HS_STATE_REPLACEMENT]);
+ epoch_temporary = epoch_wr;
+ ck_epoch_synchronize(&epoch_wr);
+
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
+ "%u pending, %u peak, %lu reclamations\n\n",
+ epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
+ epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
+ return 0;
+}
+
diff --git a/regressions/ck_hs/benchmark/serial.c b/regressions/ck_hs/benchmark/serial.c
new file mode 100644
index 0000000..ac4caff
--- /dev/null
+++ b/regressions/ck_hs/benchmark/serial.c
@@ -0,0 +1,517 @@
+/*
+ * Copyright 2012 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_hs.h>
+
+#include <assert.h>
+#include <ck_malloc.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include "../../common.h"
+#include "../../../src/ck_ht_hash.h"
+
+static ck_hs_t hs;
+static char **keys;
+static size_t keys_length = 0;
+static size_t keys_capacity = 128;
+static unsigned long global_seed;
+
+static void *
+hs_malloc(size_t r)
+{
+
+ return malloc(r);
+}
+
+static void
+hs_free(void *p, size_t b, bool r)
+{
+
+ (void)b;
+ (void)r;
+
+ free(p);
+
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = hs_malloc,
+ .free = hs_free
+};
+
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+ const char *c = object;
+ unsigned long h;
+
+ h = (unsigned long)MurmurHash64A(c, strlen(c), seed);
+ return h;
+}
+
+static bool
+hs_compare(const void *previous, const void *compare)
+{
+
+ return strcmp(previous, compare) == 0;
+}
+
+static void
+set_destroy(void)
+{
+
+ ck_hs_destroy(&hs);
+ return;
+}
+
+static void
+set_init(unsigned int size, unsigned int mode)
+{
+
+ if (ck_hs_init(&hs, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC | mode, hs_hash, hs_compare,
+ &my_allocator, size, global_seed) == false) {
+ perror("ck_hs_init");
+ exit(EXIT_FAILURE);
+ }
+
+ return;
+}
+
+static bool
+set_remove(const char *value)
+{
+ unsigned long h;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ return ck_hs_remove(&hs, h, value) != NULL;
+}
+
+static bool
+set_swap(const char *value)
+{
+ unsigned long h;
+ void *previous;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ return ck_hs_fas(&hs, h, value, &previous);
+}
+
+static bool
+set_replace(const char *value)
+{
+ unsigned long h;
+ void *previous;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ ck_hs_set(&hs, h, value, &previous);
+ return previous == value;
+}
+
+static void *
+set_get(const char *value)
+{
+ unsigned long h;
+ void *v;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ v = ck_hs_get(&hs, h, value);
+ return v;
+}
+
+static bool
+set_insert(const char *value)
+{
+ unsigned long h;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ return ck_hs_put(&hs, h, value);
+}
+
+static bool
+set_insert_unique(const char *value)
+{
+ unsigned long h;
+
+ h = CK_HS_HASH(&hs, hs_hash, value);
+ return ck_hs_put_unique(&hs, h, value);
+}
+
+static size_t
+set_count(void)
+{
+
+ return ck_hs_count(&hs);
+}
+
+static bool
+set_reset(void)
+{
+
+ return ck_hs_reset(&hs);
+}
+
+static void
+set_gc(void)
+{
+
+ ck_hs_gc(&hs, 0, 0);
+ return;
+}
+
+static void
+set_rebuild(void)
+{
+
+ ck_hs_rebuild(&hs);
+ return;
+}
+
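+/* Shuffle the key array in place; used for the random-order insertion and get benchmarks. */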
+static void
+keys_shuffle(char **k)
+{
+ size_t i, j;
+ char *t;
+
+ for (i = keys_length; i > 1; i--) {
+ j = rand() % (i - 1);
+
+ if (j != i - 1) {
+ t = k[i - 1];
+ k[i - 1] = k[j];
+ k[j] = t;
+ }
+ }
+
+ return;
+}
+
+static void
+run_test(const char *file, size_t r, unsigned int size, unsigned int mode)
+{
+ FILE *fp;
+ char buffer[512];
+ size_t i, j;
+ unsigned int d = 0;
+ uint64_t s, e, a, ri, si, ai, sr, rg, sg, ag, sd, ng, ss, sts, su, sgc, sb;
+ struct ck_hs_stat st;
+ char **t;
+
+ keys = malloc(sizeof(char *) * keys_capacity);
+ assert(keys != NULL);
+
+ fp = fopen(file, "r");
+ assert(fp != NULL);
+
+ while (fgets(buffer, sizeof(buffer), fp) != NULL) {
+ buffer[strlen(buffer) - 1] = '\0';
+ keys[keys_length++] = strdup(buffer);
+ assert(keys[keys_length - 1] != NULL);
+
+ if (keys_length == keys_capacity) {
+ t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
+ assert(t != NULL);
+ keys = t;
+ }
+ }
+
+ t = realloc(keys, sizeof(char *) * keys_length);
+ assert(t != NULL);
+ keys = t;
+
+ set_init(size, mode);
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+ ck_hs_stat(&hs, &st);
+
+ fprintf(stderr, "# %zu entries stored, %u duplicates, %u probe.\n",
+ set_count(), d, st.probe_maximum);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (set_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = keys_length; i > 0; i--)
+ d += set_insert(keys[i - 1]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ ri = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (set_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ si = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ keys_shuffle(keys);
+
+ if (set_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ ai = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_swap(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ ss = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ sr = a / (r * keys_length);
+
+ set_reset();
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = keys_length; i > 0; i--) {
+ if (set_get(keys[i - 1]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ rg = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (set_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ sg = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ keys_shuffle(keys);
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (set_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ ag = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+ }
+ sd = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ set_get("\x50\x03\x04\x05\x06\x10");
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ ng = a / (r * keys_length);
+
+ set_reset();
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+ }
+ sts = a / (r * keys_length);
+
+ set_reset();
+
+ /* Prune duplicates. */
+ for (i = 0; i < keys_length; i++) {
+ if (set_insert(keys[i]) == true)
+ continue;
+
+ free(keys[i]);
+ keys[i] = keys[--keys_length];
+ }
+
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_insert_unique(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+ }
+ su = a / (r * keys_length);
+
+ for (i = 0; i < keys_length; i++)
+ set_insert_unique(keys[i]);
+
+ for (i = 0; i < keys_length / 2; i++)
+ set_remove(keys[i]);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ set_gc();
+ e = rdtsc();
+ a += e - s;
+ }
+ sgc = a / r;
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ set_rebuild();
+ e = rdtsc();
+ a += e - s;
+ }
+ sb = a / r;
+
+ printf("%zu "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 "\n",
+ keys_length, ri, si, ai, ss, sr, rg, sg, ag, sd, ng, sts, su, sgc, sb);
+
+ fclose(fp);
+
+ for (i = 0; i < keys_length; i++) {
+ free(keys[i]);
+ }
+
+ free(keys);
+ keys_length = 0;
+ set_destroy();
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int r, size;
+
+ common_srand48((long int)time(NULL));
+ if (argc < 2) {
+ ck_error("Usage: ck_hs <dictionary> [<repetitions> <initial size>]\n");
+ }
+
+ r = 16;
+ if (argc >= 3)
+ r = atoi(argv[2]);
+
+ size = 8;
+ if (argc >= 4)
+ size = atoi(argv[3]);
+
+ global_seed = common_lrand48();
+ run_test(argv[1], r, size, 0);
+ run_test(argv[1], r, size, CK_HS_MODE_DELETE);
+ fprintf(stderr, "# reverse_insertion serial_insertion random_insertion serial_swap "
+ "serial_replace reverse_get serial_get random_get serial_remove negative_get tombstone "
+ "set_unique gc rebuild\n\n");
+
+ return 0;
+}
+
diff --git a/regressions/ck_hs/validate/Makefile b/regressions/ck_hs/validate/Makefile
new file mode 100644
index 0000000..a96e652
--- /dev/null
+++ b/regressions/ck_hs/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=serial
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_hs.h ../../../src/ck_hs.c
+ $(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_hs.c
+
+check: all
+ ./serial
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_hs/validate/serial.c b/regressions/ck_hs/validate/serial.c
new file mode 100644
index 0000000..a16fc82
--- /dev/null
+++ b/regressions/ck_hs/validate/serial.c
@@ -0,0 +1,315 @@
+/*
+ * Copyright 2012 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_hs.h>
+
+#include <assert.h>
+#include <ck_malloc.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "../../common.h"
+
+static void *
+hs_malloc(size_t r)
+{
+
+ return malloc(r);
+}
+
+static void
+hs_free(void *p, size_t b, bool r)
+{
+
+ (void)b;
+ (void)r;
+ free(p);
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = hs_malloc,
+ .free = hs_free
+};
+
+const char *test[] = { "Samy", "Al", "Bahra", "dances", "in", "the", "wind.", "Once",
+ "upon", "a", "time", "his", "gypsy", "ate", "one", "itsy",
+ "bitsy", "spider.", "What", "goes", "up", "must",
+ "come", "down.", "What", "is", "down", "stays",
+ "down.", "A", "B", "C", "D", "E", "F", "G", "H",
+ "I", "J", "K", "L", "M", "N", "O", "P", "Q" };
+
+const char *negative = "negative";
+
+/* Purposefully crappy hash function. */
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+ const char *c = object;
+ unsigned long h;
+
+ (void)seed;
+ h = c[0];
+ return h;
+}
+
+static bool
+hs_compare(const void *previous, const void *compare)
+{
+
+ return strcmp(previous, compare) == 0;
+}
+
+static void *
+test_ip(void *key, void *closure)
+{
+ const char *a = key;
+ const char *b = closure;
+
+ if (strcmp(a, b) != 0)
+ ck_error("Mismatch: %s != %s\n", a, b);
+
+ return closure;
+}
+
+static void *
+test_negative(void *key, void *closure)
+{
+
+ (void)closure;
+ if (key != NULL)
+		ck_error("ERROR: Apply callback expects NULL argument instead of [%s]\n", (char *)key);
+
+ return NULL;
+}
+
+static void *
+test_unique(void *key, void *closure)
+{
+
+ if (key != NULL)
+		ck_error("ERROR: Apply callback expects NULL argument instead of [%s]\n", (char *)key);
+
+ return closure;
+}
+
+static void *
+test_remove(void *key, void *closure)
+{
+
+ (void)key;
+ (void)closure;
+
+ return NULL;
+}
+
+static void
+run_test(unsigned int is, unsigned int ad)
+{
+ ck_hs_t hs[16];
+ const size_t size = sizeof(hs) / sizeof(*hs);
+ size_t i, j;
+ const char *blob = "#blobs";
+ unsigned long h;
+
+ if (ck_hs_init(&hs[0], CK_HS_MODE_SPMC | CK_HS_MODE_OBJECT | ad, hs_hash, hs_compare, &my_allocator, is, 6602834) == false)
+ ck_error("ck_hs_init\n");
+
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ h = test[i][0];
+ if (ck_hs_get(&hs[j], h, test[i]) != NULL) {
+ continue;
+ }
+
+ if (i & 1) {
+ if (ck_hs_put_unique(&hs[j], h, test[i]) == false)
+ ck_error("ERROR [%zu]: Failed to insert unique (%s)\n", j, test[i]);
+ } else if (ck_hs_apply(&hs[j], h, test[i], test_unique, (void *)(uintptr_t)test[i]) == false) {
+ ck_error("ERROR: Failed to apply for insertion.\n");
+ }
+
+ if (i & 1) {
+ if (ck_hs_remove(&hs[j], h, test[i]) == false)
+ ck_error("ERROR [%zu]: Failed to remove unique (%s)\n", j, test[i]);
+ } else if (ck_hs_apply(&hs[j], h, test[i], test_remove, NULL) == false) {
+ ck_error("ERROR: Failed to remove apply.\n");
+ }
+
+ if (ck_hs_apply(&hs[j], h, test[i], test_negative, (char *)(uintptr_t)test[i]) == false)
+ ck_error("ERROR: Failed to apply.\n");
+
+ break;
+ }
+
+ if (ck_hs_gc(&hs[j], 0, 0) == false)
+ ck_error("ERROR: Failed to GC empty set.\n");
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ h = test[i][0];
+ ck_hs_put(&hs[j], h, test[i]);
+ if (ck_hs_put(&hs[j], h, test[i]) == true) {
+ ck_error("ERROR [%u] [1]: put must fail on collision (%s).\n", is, test[i]);
+ }
+ if (ck_hs_get(&hs[j], h, test[i]) == NULL) {
+ ck_error("ERROR [%u]: get must not fail after put\n", is);
+ }
+ }
+
+ /* Test grow semantics. */
+ ck_hs_grow(&hs[j], 128);
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ h = test[i][0];
+ if (ck_hs_put(&hs[j], h, test[i]) == true) {
+ ck_error("ERROR [%u] [2]: put must fail on collision.\n", is);
+ }
+
+ if (ck_hs_get(&hs[j], h, test[i]) == NULL) {
+ ck_error("ERROR [%u]: get must not fail\n", is);
+ }
+ }
+
+ h = blob[0];
+ if (ck_hs_get(&hs[j], h, blob) == NULL) {
+ if (j > 0)
+ ck_error("ERROR [%u]: Blob must always exist after first.\n", is);
+
+ if (ck_hs_put(&hs[j], h, blob) == false) {
+ ck_error("ERROR [%u]: A unique blob put failed.\n", is);
+ }
+ } else {
+ if (ck_hs_put(&hs[j], h, blob) == true) {
+ ck_error("ERROR [%u]: Duplicate blob put succeeded.\n", is);
+ }
+ }
+
+ /* Grow set and check get semantics. */
+ ck_hs_grow(&hs[j], 512);
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ h = test[i][0];
+ if (ck_hs_get(&hs[j], h, test[i]) == NULL) {
+ ck_error("ERROR [%u]: get must not fail\n", is);
+ }
+ }
+
+ /* Delete and check negative membership. */
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ void *r;
+
+ h = test[i][0];
+ if (ck_hs_get(&hs[j], h, test[i]) == NULL)
+ continue;
+
+ if (r = ck_hs_remove(&hs[j], h, test[i]), r == NULL) {
+ ck_error("ERROR [%u]: remove must not fail\n", is);
+ }
+
+ if (strcmp(r, test[i]) != 0) {
+			ck_error("ERROR [%u]: Removed incorrect node (%s != %s)\n", is, (char *)r, test[i]);
+ }
+ }
+
+ /* Test replacement semantics. */
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ void *r;
+ bool d;
+
+ h = test[i][0];
+ d = ck_hs_get(&hs[j], h, test[i]) != NULL;
+ if (ck_hs_set(&hs[j], h, test[i], &r) == false) {
+ ck_error("ERROR [%u]: Failed to set\n", is);
+ }
+
+ /* Expected replacement. */
+ if (d == true && (r == NULL || strcmp(r, test[i]) != 0)) {
+ ck_error("ERROR [%u]: Incorrect previous value: %s != %s\n",
+ is, test[i], (char *)r);
+ }
+
+ /* Replacement should succeed. */
+ if (ck_hs_fas(&hs[j], h, test[i], &r) == false)
+ ck_error("ERROR [%u]: ck_hs_fas must succeed.\n", is);
+
+ if (strcmp(r, test[i]) != 0) {
+ ck_error("ERROR [%u]: Incorrect replaced value: %s != %s\n",
+ is, test[i], (char *)r);
+ }
+
+ if (ck_hs_fas(&hs[j], h, negative, &r) == true)
+ ck_error("ERROR [%u]: Replacement of negative should fail.\n", is);
+
+ if (ck_hs_set(&hs[j], h, test[i], &r) == false) {
+ ck_error("ERROR [%u]: Failed to set [1]\n", is);
+ }
+
+ if (strcmp(r, test[i]) != 0) {
+ ck_error("ERROR [%u]: Invalid &hs[j]: %s != %s\n", is, test[i], (char *)r);
+ }
+
+ /* Attempt in-place mutation. */
+ if (ck_hs_apply(&hs[j], h, test[i], test_ip, (void *)(uintptr_t)test[i]) == false)
+ ck_error("ERROR [%u]: Failed to apply: %s != %s\n", is, (char *)r, test[i]);
+
+ d = ck_hs_get(&hs[j], h, test[i]) != NULL;
+ if (d == false)
+ ck_error("ERROR [%u]: Expected [%s] to exist.\n", is, test[i]);
+ }
+
+ if (j == size - 1)
+ break;
+
+ if (ck_hs_move(&hs[j + 1], &hs[j], hs_hash, hs_compare, &my_allocator) == false)
+ ck_error("Failed to move hash table");
+
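+		/* Alternate between an unbounded GC pass (0, 0) and one limited by cycle count and seed after each move. */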
+ if (j & 1) {
+ ck_hs_gc(&hs[j + 1], 0, 0);
+ } else {
+ ck_hs_gc(&hs[j + 1], 26, 26);
+ }
+
+ if (ck_hs_rebuild(&hs[j + 1]) == false)
+ ck_error("Failed to rebuild");
+ }
+
+ return;
+}
+
+int
+main(void)
+{
+ unsigned int k;
+
+ for (k = 16; k <= 64; k <<= 1) {
+ run_test(k, 0);
+ run_test(k, CK_HS_MODE_DELETE);
+ break;
+ }
+
+ return 0;
+}
+
diff --git a/regressions/ck_ht/benchmark/Makefile b/regressions/ck_ht/benchmark/Makefile
new file mode 100644
index 0000000..fa31274
--- /dev/null
+++ b/regressions/ck_ht/benchmark/Makefile
@@ -0,0 +1,27 @@
+.PHONY: clean distribution
+
+OBJECTS=serial serial.delete parallel_bytestring parallel_bytestring.delete parallel_direct
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_ht.h ../../../src/ck_ht.c
+ $(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_ht.c
+
+serial.delete: serial.c ../../../include/ck_ht.h ../../../src/ck_ht.c
+ $(CC) $(CFLAGS) -DHT_DELETE -o serial.delete serial.c ../../../src/ck_ht.c
+
+parallel_bytestring.delete: parallel_bytestring.c ../../../include/ck_ht.h ../../../src/ck_ht.c ../../../src/ck_epoch.c
+ $(CC) $(PTHREAD_CFLAGS) $(CFLAGS) -DHT_DELETE -o parallel_bytestring.delete parallel_bytestring.c ../../../src/ck_ht.c ../../../src/ck_epoch.c
+
+parallel_bytestring: parallel_bytestring.c ../../../include/ck_ht.h ../../../src/ck_ht.c ../../../src/ck_epoch.c
+ $(CC) $(PTHREAD_CFLAGS) $(CFLAGS) -o parallel_bytestring parallel_bytestring.c ../../../src/ck_ht.c ../../../src/ck_epoch.c
+
+parallel_direct: parallel_direct.c ../../../include/ck_ht.h ../../../src/ck_ht.c ../../../src/ck_epoch.c
+ $(CC) $(PTHREAD_CFLAGS) $(CFLAGS) -o parallel_direct parallel_direct.c ../../../src/ck_ht.c ../../../src/ck_epoch.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
+
diff --git a/regressions/ck_ht/benchmark/parallel_bytestring.c b/regressions/ck_ht/benchmark/parallel_bytestring.c
new file mode 100644
index 0000000..f3d3854
--- /dev/null
+++ b/regressions/ck_ht/benchmark/parallel_bytestring.c
@@ -0,0 +1,559 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_ht.h>
+
+#include <assert.h>
+#include <ck_epoch.h>
+#include <ck_malloc.h>
+#include <ck_pr.h>
+#include <ck_spinlock.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+static ck_ht_t ht CK_CC_CACHELINE;
+static char **keys;
+static size_t keys_length = 0;
+static size_t keys_capacity = 128;
+static ck_epoch_t epoch_ht;
+static ck_epoch_record_t epoch_wr;
+static int n_threads;
+static bool next_stage;
+
+enum state {
+ HT_STATE_STOP = 0,
+ HT_STATE_GET,
+ HT_STATE_STRICT_REPLACEMENT,
+ HT_STATE_DELETION,
+ HT_STATE_REPLACEMENT,
+ HT_STATE_COUNT
+};
+
+static struct affinity affinerator = AFFINITY_INITIALIZER;
+static uint64_t accumulator[HT_STATE_COUNT];
+static ck_spinlock_t accumulator_mutex = CK_SPINLOCK_INITIALIZER;
+static int barrier[HT_STATE_COUNT];
+static int state;
+
+struct ht_epoch {
+ ck_epoch_entry_t epoch_entry;
+};
+
+COMMON_ALARM_DECLARE_GLOBAL(ht_alarm, alarm_event, next_stage)
+
+static void
+alarm_handler(int s)
+{
+
+ (void)s;
+ next_stage = true;
+ return;
+}
+
+static void
+ht_destroy(ck_epoch_entry_t *e)
+{
+
+ free(e);
+ return;
+}
+
+static void *
+ht_malloc(size_t r)
+{
+ ck_epoch_entry_t *b;
+
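+	/* Prepend an epoch entry header to every allocation; ht_free() steps back to it before reclaiming. */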
+ b = malloc(sizeof(*b) + r);
+ return b + 1;
+}
+
+static void
+ht_free(void *p, size_t b, bool r)
+{
+ struct ht_epoch *e = p;
+
+ (void)b;
+
+ if (r == true) {
+ /* Destruction requires safe memory reclamation. */
+ ck_epoch_call(&epoch_wr, &(--e)->epoch_entry, ht_destroy);
+ } else {
+ free(--e);
+ }
+
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = ht_malloc,
+ .free = ht_free
+};
+
+static void
+table_init(void)
+{
+ unsigned int mode = CK_HT_MODE_BYTESTRING;
+
+#ifdef HT_DELETE
+ mode |= CK_HT_WORKLOAD_DELETE;
+#endif
+
+ ck_epoch_init(&epoch_ht);
+ ck_epoch_register(&epoch_ht, &epoch_wr);
+ common_srand48((long int)time(NULL));
+ if (ck_ht_init(&ht, mode, NULL, &my_allocator, 8, common_lrand48()) == false) {
+ perror("ck_ht_init");
+ exit(EXIT_FAILURE);
+ }
+
+ return;
+}
+
+static bool
+table_remove(const char *value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ size_t l = strlen(value);
+
+ ck_ht_hash(&h, &ht, value, l);
+ ck_ht_entry_key_set(&entry, value, l);
+ return ck_ht_remove_spmc(&ht, h, &entry);
+}
+
+static bool
+table_replace(const char *value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ size_t l = strlen(value);
+
+ ck_ht_hash(&h, &ht, value, l);
+ ck_ht_entry_set(&entry, h, value, l, "REPLACED");
+ return ck_ht_set_spmc(&ht, h, &entry);
+}
+
+static void *
+table_get(const char *value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ size_t l = strlen(value);
+
+ ck_ht_hash(&h, &ht, value, l);
+ ck_ht_entry_key_set(&entry, value, l);
+ if (ck_ht_get_spmc(&ht, h, &entry) == true)
+ return ck_ht_entry_value(&entry);
+
+ return NULL;
+}
+
+static bool
+table_insert(const char *value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ size_t l = strlen(value);
+
+ ck_ht_hash(&h, &ht, value, l);
+ ck_ht_entry_set(&entry, h, value, l, value);
+ return ck_ht_put_spmc(&ht, h, &entry);
+}
+
+static size_t
+table_count(void)
+{
+
+ return ck_ht_count(&ht);
+}
+
+static bool
+table_reset(void)
+{
+
+ return ck_ht_reset_spmc(&ht);
+}
+
+static void *
+reader(void *unused)
+{
+ size_t i;
+ ck_epoch_record_t epoch_record;
+ int state_previous = HT_STATE_STOP;
+ int n_state;
+ uint64_t s, j, a;
+
+ (void)unused;
+ if (aff_iterate(&affinerator) != 0)
+ perror("WARNING: Failed to affine thread");
+
+ s = j = a = 0;
+ ck_epoch_register(&epoch_ht, &epoch_record);
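+	/* Each pass times a full scan of the key set; the per-key average is published when the writer advances the benchmark state. */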
+ for (;;) {
+ j++;
+ ck_epoch_begin(&epoch_record, NULL);
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ char *r;
+
+ r = table_get(keys[i]);
+ if (r == NULL)
+ continue;
+
+ if (strcmp(r, "REPLACED") == 0)
+ continue;
+
+ if (strcmp(r, keys[i]) == 0)
+ continue;
+
+ ck_error("ERROR: Found invalid value: [%s] but expected [%s]\n", r, keys[i]);
+ }
+ a += rdtsc() - s;
+ ck_epoch_end(&epoch_record, NULL);
+
+ n_state = ck_pr_load_int(&state);
+ if (n_state != state_previous) {
+ ck_spinlock_lock(&accumulator_mutex);
+ accumulator[state_previous] += a / (j * keys_length);
+ ck_spinlock_unlock(&accumulator_mutex);
+ ck_pr_inc_int(&barrier[state_previous]);
+ while (ck_pr_load_int(&barrier[state_previous]) != n_threads + 1)
+ ck_pr_stall();
+
+ state_previous = n_state;
+ s = j = a = 0;
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ FILE *fp;
+ char buffer[512];
+ size_t i, j, r;
+ unsigned int d = 0;
+ uint64_t s, e, a, repeated;
+ char **t;
+ pthread_t *readers;
+ double p_r, p_d;
+
+ COMMON_ALARM_DECLARE_LOCAL(ht_alarm, alarm_event)
+
+ r = 20;
+ s = 8;
+ p_d = 0.5;
+ p_r = 0.5;
+ n_threads = CORES - 1;
+
+ if (argc < 2) {
+ ck_error("Usage: parallel <dictionary> [<interval length> <initial size> <readers>\n"
+ " <probability of replacement> <probability of deletion> <epoch threshold>]\n");
+ }
+
+ if (argc >= 3)
+ r = atoi(argv[2]);
+
+ if (argc >= 4)
+ s = (uint64_t)atoi(argv[3]);
+
+ if (argc >= 5) {
+ n_threads = atoi(argv[4]);
+ if (n_threads < 1) {
+ ck_error("ERROR: Number of readers must be >= 1.\n");
+ }
+ }
+
+ if (argc >= 6) {
+ p_r = atof(argv[5]) / 100.00;
+ if (p_r < 0) {
+ ck_error("ERROR: Probability of replacement must be >= 0 and <= 100.\n");
+ }
+ }
+
+ if (argc >= 7) {
+ p_d = atof(argv[6]) / 100.00;
+ if (p_d < 0) {
+ ck_error("ERROR: Probability of deletion must be >= 0 and <= 100.\n");
+ }
+ }
+
+ COMMON_ALARM_INIT(ht_alarm, alarm_event, r)
+
+ affinerator.delta = 1;
+ readers = malloc(sizeof(pthread_t) * n_threads);
+ assert(readers != NULL);
+
+ keys = malloc(sizeof(char *) * keys_capacity);
+ assert(keys != NULL);
+
+ fp = fopen(argv[1], "r");
+ assert(fp != NULL);
+
+ while (fgets(buffer, sizeof(buffer), fp) != NULL) {
+ buffer[strlen(buffer) - 1] = '\0';
+ keys[keys_length++] = strdup(buffer);
+ assert(keys[keys_length - 1] != NULL);
+
+ if (keys_length == keys_capacity) {
+ t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
+ assert(t != NULL);
+ keys = t;
+ }
+ }
+
+ t = realloc(keys, sizeof(char *) * keys_length);
+ assert(t != NULL);
+ keys = t;
+
+ table_init();
+
+ for (i = 0; i < (size_t)n_threads; i++) {
+ if (pthread_create(&readers[i], NULL, reader, NULL) != 0) {
+ ck_error("ERROR: Failed to create thread %zu.\n", i);
+ }
+ }
+
+ for (i = 0; i < keys_length; i++)
+ d += table_insert(keys[i]) == false;
+
+ fprintf(stderr, " [S] %d readers, 1 writer.\n", n_threads);
+ fprintf(stderr, " [S] %zu entries stored and %u duplicates.\n\n",
+ table_count(), d);
+
+ fprintf(stderr, " ,- BASIC TEST\n");
+ fprintf(stderr, " | Executing SMR test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (table_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += table_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing replacement test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ table_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing get test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (table_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ a = 0;
+ fprintf(stderr, " | Executing removal test...");
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ table_remove(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ table_insert(keys[i]);
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing negative look-up test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ table_get("\x50\x03\x04\x05\x06\x10");
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ ck_epoch_record_t epoch_temporary = epoch_wr;
+ ck_epoch_synchronize(&epoch_wr);
+
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
+ "%u pending, %u peak, %lu reclamations\n\n",
+ epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
+ epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
+
+ fprintf(stderr, " ,- READER CONCURRENCY\n");
+ fprintf(stderr, " | Executing reader test...");
+
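+	/* The writer advances the shared state, waits for every reader to check in on the previous phase, then bumps that barrier to release them. */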
+ ck_pr_store_int(&state, HT_STATE_GET);
+ while (ck_pr_load_int(&barrier[HT_STATE_STOP]) != n_threads)
+ ck_pr_stall();
+ ck_pr_inc_int(&barrier[HT_STATE_STOP]);
+ common_sleep(r);
+ ck_pr_store_int(&state, HT_STATE_STRICT_REPLACEMENT);
+ while (ck_pr_load_int(&barrier[HT_STATE_GET]) != n_threads)
+ ck_pr_stall();
+ fprintf(stderr, "done (reader = %" PRIu64 " ticks)\n",
+ accumulator[HT_STATE_GET] / n_threads);
+
+ fprintf(stderr, " | Executing strict replacement test...");
+
+ a = repeated = 0;
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ ck_pr_inc_int(&barrier[HT_STATE_GET]);
+ for (;;) {
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ table_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+
+ ck_pr_store_int(&state, HT_STATE_DELETION);
+ while (ck_pr_load_int(&barrier[HT_STATE_STRICT_REPLACEMENT]) != n_threads)
+ ck_pr_stall();
+ table_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), accumulator[HT_STATE_STRICT_REPLACEMENT] / n_threads);
+
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ fprintf(stderr, " | Executing deletion test (%.2f)...", p_d * 100);
+ a = repeated = 0;
+ ck_pr_inc_int(&barrier[HT_STATE_STRICT_REPLACEMENT]);
+ for (;;) {
+ double delete;
+
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ table_insert(keys[i]);
+ if (p_d != 0.0) {
+ delete = common_drand48();
+ if (delete <= p_d)
+ table_remove(keys[i]);
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+ ck_pr_store_int(&state, HT_STATE_REPLACEMENT);
+ while (ck_pr_load_int(&barrier[HT_STATE_DELETION]) != n_threads)
+ ck_pr_stall();
+
+ table_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), accumulator[HT_STATE_DELETION] / n_threads);
+
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ fprintf(stderr, " | Executing replacement test (%.2f)...", p_r * 100);
+ a = repeated = 0;
+ ck_pr_inc_int(&barrier[HT_STATE_DELETION]);
+ for (;;) {
+ double replace, delete;
+
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ table_insert(keys[i]);
+ if (p_d != 0.0) {
+ delete = common_drand48();
+ if (delete <= p_d)
+ table_remove(keys[i]);
+ }
+ if (p_r != 0.0) {
+ replace = common_drand48();
+ if (replace <= p_r)
+ table_replace(keys[i]);
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+ ck_pr_store_int(&state, HT_STATE_STOP);
+ while (ck_pr_load_int(&barrier[HT_STATE_REPLACEMENT]) != n_threads)
+ ck_pr_stall();
+ table_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), accumulator[HT_STATE_REPLACEMENT] / n_threads);
+
+ ck_pr_inc_int(&barrier[HT_STATE_REPLACEMENT]);
+ epoch_temporary = epoch_wr;
+ ck_epoch_synchronize(&epoch_wr);
+
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
+ "%u pending, %u peak, %lu reclamations\n\n",
+ epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
+ epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
+ return 0;
+}
diff --git a/regressions/ck_ht/benchmark/parallel_direct.c b/regressions/ck_ht/benchmark/parallel_direct.c
new file mode 100644
index 0000000..195bb25
--- /dev/null
+++ b/regressions/ck_ht/benchmark/parallel_direct.c
@@ -0,0 +1,545 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_ht.h>
+
+#include <assert.h>
+#include <ck_epoch.h>
+#include <ck_malloc.h>
+#include <ck_pr.h>
+#include <ck_spinlock.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+static ck_ht_t ht CK_CC_CACHELINE;
+static uintptr_t *keys;
+static size_t keys_length = 0;
+static ck_epoch_t epoch_ht;
+static ck_epoch_record_t epoch_wr;
+static int n_threads;
+static bool next_stage;
+
+enum state {
+ HT_STATE_STOP = 0,
+ HT_STATE_GET,
+ HT_STATE_STRICT_REPLACEMENT,
+ HT_STATE_DELETION,
+ HT_STATE_REPLACEMENT,
+ HT_STATE_COUNT
+};
+
+static struct affinity affinerator = AFFINITY_INITIALIZER;
+static uint64_t accumulator[HT_STATE_COUNT];
+static ck_spinlock_t accumulator_mutex = CK_SPINLOCK_INITIALIZER;
+static int barrier[HT_STATE_COUNT];
+static int state;
+
+struct ht_epoch {
+ ck_epoch_entry_t epoch_entry;
+};
+
+COMMON_ALARM_DECLARE_GLOBAL(ht_alarm, alarm_event, next_stage)
+
+static void
+alarm_handler(int s)
+{
+
+ (void)s;
+ next_stage = true;
+ return;
+}
+
+static void
+ht_destroy(ck_epoch_entry_t *e)
+{
+
+ free(e);
+ return;
+}
+
+static void *
+ht_malloc(size_t r)
+{
+ ck_epoch_entry_t *b;
+
+ b = malloc(sizeof(*b) + r);
+ return b + 1;
+}
+
+static void
+ht_free(void *p, size_t b, bool r)
+{
+ struct ht_epoch *e = p;
+
+ (void)b;
+
+ if (r == true) {
+ /* Destruction requires safe memory reclamation. */
+ ck_epoch_call(&epoch_wr, &(--e)->epoch_entry, ht_destroy);
+ } else {
+ free(--e);
+ }
+
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = ht_malloc,
+ .free = ht_free
+};
+
+static void
+hash_function(ck_ht_hash_t *h, const void *key, size_t key_length, uint64_t seed)
+{
+ const uintptr_t *value = key;
+
+ (void)key_length;
+ (void)seed;
+ h->value = *value;
+ return;
+}
+
+static void
+table_init(void)
+{
+
+ ck_epoch_init(&epoch_ht);
+ ck_epoch_register(&epoch_ht, &epoch_wr);
+ common_srand48((long int)time(NULL));
+ if (ck_ht_init(&ht, CK_HT_MODE_DIRECT, hash_function, &my_allocator, 8, common_lrand48()) == false) {
+ perror("ck_ht_init");
+ exit(EXIT_FAILURE);
+ }
+
+ return;
+}
+
+static bool
+table_remove(uintptr_t value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+
+ ck_ht_hash_direct(&h, &ht, value);
+ ck_ht_entry_key_set_direct(&entry, value);
+ return ck_ht_remove_spmc(&ht, h, &entry);
+}
+
+static bool
+table_replace(uintptr_t value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+
+ ck_ht_hash_direct(&h, &ht, value);
+ ck_ht_entry_set_direct(&entry, h, value, 6605241);
+ return ck_ht_set_spmc(&ht, h, &entry);
+}
+
+static uintptr_t
+table_get(uintptr_t value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+
+ ck_ht_hash_direct(&h, &ht, value);
+ ck_ht_entry_key_set_direct(&entry, value);
+ if (ck_ht_get_spmc(&ht, h, &entry) == true)
+ return ck_ht_entry_value_direct(&entry);
+
+ return 0;
+}
+
+static bool
+table_insert(uintptr_t value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+
+ ck_ht_hash_direct(&h, &ht, value);
+ ck_ht_entry_set_direct(&entry, h, value, value);
+ return ck_ht_put_spmc(&ht, h, &entry);
+}
+
+static size_t
+table_count(void)
+{
+
+ return ck_ht_count(&ht);
+}
+
+static bool
+table_reset(void)
+{
+
+ return ck_ht_reset_spmc(&ht);
+}
+
+static void *
+ht_reader(void *unused)
+{
+ size_t i;
+ ck_epoch_record_t epoch_record;
+ int state_previous = HT_STATE_STOP;
+ int n_state;
+ uint64_t s, j, a;
+
+ (void)unused;
+ if (aff_iterate(&affinerator) != 0)
+ perror("WARNING: Failed to affine thread");
+
+ s = j = a = 0;
+ ck_epoch_register(&epoch_ht, &epoch_record);
+ for (;;) {
+ j++;
+ ck_epoch_begin(&epoch_record, NULL);
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ uintptr_t r;
+
+ r = table_get(keys[i]);
+ if (r == 0)
+ continue;
+
+ if (r == 6605241)
+ continue;
+
+ if (r == keys[i])
+ continue;
+
+ ck_error("ERROR: Found invalid value: [%ju]\n",
+ (uintmax_t)r);
+ }
+ a += rdtsc() - s;
+ ck_epoch_end(&epoch_record, NULL);
+
+ n_state = ck_pr_load_int(&state);
+ if (n_state != state_previous) {
+ ck_spinlock_lock(&accumulator_mutex);
+ accumulator[state_previous] += a / (j * keys_length);
+ ck_spinlock_unlock(&accumulator_mutex);
+ ck_pr_inc_int(&barrier[state_previous]);
+ while (ck_pr_load_int(&barrier[state_previous]) != n_threads + 1)
+ ck_pr_stall();
+
+ state_previous = n_state;
+ s = j = a = 0;
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ size_t i, j, r;
+ unsigned int d = 0;
+ uint64_t s, e, a, repeated;
+ pthread_t *readers;
+ double p_r, p_d;
+
+ COMMON_ALARM_DECLARE_LOCAL(ht_alarm, alarm_event)
+
+ r = 20;
+ s = 8;
+ p_d = 0.5;
+ p_r = 0.5;
+ n_threads = CORES - 1;
+
+ if (argc < 2) {
+ fprintf(stderr, "Usage: parallel <#entries> [<interval length> <initial size> <readers>\n"
+ " <probability of replacement> <probability of deletion> <epoch threshold>]\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (argc >= 3)
+ r = atoi(argv[2]);
+
+ if (argc >= 4)
+ s = (uint64_t)atoi(argv[3]);
+
+ if (argc >= 5) {
+ n_threads = atoi(argv[4]);
+ if (n_threads < 1) {
+ ck_error("ERROR: Number of readers must be >= 1.\n");
+ }
+ }
+
+ if (argc >= 6) {
+ p_r = atof(argv[5]) / 100.00;
+ if (p_r < 0) {
+ ck_error("ERROR: Probability of replacement must be >= 0 and <= 100.\n");
+ }
+ }
+
+ if (argc >= 7) {
+ p_d = atof(argv[6]) / 100.00;
+ if (p_d < 0) {
+ ck_error("ERROR: Probability of deletion must be >= 0 and <= 100.\n");
+ }
+ }
+
+ COMMON_ALARM_INIT(ht_alarm, alarm_event, r)
+
+ affinerator.delta = 1;
+ readers = malloc(sizeof(pthread_t) * n_threads);
+ assert(readers != NULL);
+
+ keys_length = (size_t)atoi(argv[1]);
+ keys = malloc(sizeof(uintptr_t) * keys_length);
+ assert(keys != NULL);
+
+ table_init();
+
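+	/* The value 2 is reserved for the negative look-up test below, so never generate it as a key. */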
+ for (i = 0; i < keys_length; i++) {
+ keys[i] = (uintptr_t)common_lrand48();
+ while (keys[i] == 2)
+ keys[i] = (uintptr_t)common_lrand48();
+ }
+
+ for (i = 0; i < (size_t)n_threads; i++) {
+ if (pthread_create(&readers[i], NULL, ht_reader, NULL) != 0) {
+ ck_error("ERROR: Failed to create thread %zu.\n", i);
+ }
+ }
+
+ for (i = 0; i < keys_length; i++)
+ d += table_insert(keys[i]) == false;
+
+ fprintf(stderr, " [S] %zu entries stored and %u duplicates.\n\n",
+ table_count(), d);
+
+ fprintf(stderr, " ,- BASIC TEST\n");
+ fprintf(stderr, " | Executing SMR test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (table_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += table_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing replacement test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ table_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing get test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (table_get(keys[i]) == 0) {
+ ck_error("ERROR: Unexpected 0 value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ a = 0;
+ fprintf(stderr, " | Executing removal test...");
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ table_remove(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ table_insert(keys[i]);
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing negative look-up test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ table_get(2);
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ ck_epoch_record_t epoch_temporary = epoch_wr;
+ ck_epoch_synchronize(&epoch_wr);
+
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
+ "%u pending, %u peak, %lu reclamations\n\n",
+ epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
+ epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
+
+ fprintf(stderr, " ,- READER CONCURRENCY\n");
+ fprintf(stderr, " | Executing reader test...");
+
+ ck_pr_store_int(&state, HT_STATE_GET);
+ while (ck_pr_load_int(&barrier[HT_STATE_STOP]) != n_threads)
+ ck_pr_stall();
+ ck_pr_inc_int(&barrier[HT_STATE_STOP]);
+ common_sleep(r);
+ ck_pr_store_int(&state, HT_STATE_STRICT_REPLACEMENT);
+ while (ck_pr_load_int(&barrier[HT_STATE_GET]) != n_threads)
+ ck_pr_stall();
+ fprintf(stderr, "done (reader = %" PRIu64 " ticks)\n",
+ accumulator[HT_STATE_GET] / n_threads);
+
+ fprintf(stderr, " | Executing strict replacement test...");
+
+ a = repeated = 0;
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ ck_pr_inc_int(&barrier[HT_STATE_GET]);
+ for (;;) {
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ table_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+
+ ck_pr_store_int(&state, HT_STATE_DELETION);
+ while (ck_pr_load_int(&barrier[HT_STATE_STRICT_REPLACEMENT]) != n_threads)
+ ck_pr_stall();
+ table_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), accumulator[HT_STATE_STRICT_REPLACEMENT] / n_threads);
+
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ fprintf(stderr, " | Executing deletion test (%.2f)...", p_d * 100);
+ a = repeated = 0;
+ ck_pr_inc_int(&barrier[HT_STATE_STRICT_REPLACEMENT]);
+ for (;;) {
+ double delete;
+
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ table_insert(keys[i]);
+ if (p_d != 0.0) {
+ delete = common_drand48();
+ if (delete <= p_d)
+ table_remove(keys[i]);
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+ ck_pr_store_int(&state, HT_STATE_REPLACEMENT);
+ while (ck_pr_load_int(&barrier[HT_STATE_DELETION]) != n_threads)
+ ck_pr_stall();
+
+ table_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), accumulator[HT_STATE_DELETION] / n_threads);
+
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ fprintf(stderr, " | Executing replacement test (%.2f)...", p_r * 100);
+ a = repeated = 0;
+ ck_pr_inc_int(&barrier[HT_STATE_DELETION]);
+ for (;;) {
+ double replace, delete;
+
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ table_insert(keys[i]);
+ if (p_d != 0.0) {
+ delete = common_drand48();
+ if (delete <= p_d)
+ table_remove(keys[i]);
+ }
+ if (p_r != 0.0) {
+ replace = common_drand48();
+ if (replace <= p_r)
+ table_replace(keys[i]);
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+ ck_pr_store_int(&state, HT_STATE_STOP);
+ while (ck_pr_load_int(&barrier[HT_STATE_REPLACEMENT]) != n_threads)
+ ck_pr_stall();
+ table_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), accumulator[HT_STATE_REPLACEMENT] / n_threads);
+
+ ck_pr_inc_int(&barrier[HT_STATE_REPLACEMENT]);
+ epoch_temporary = epoch_wr;
+ ck_epoch_synchronize(&epoch_wr);
+
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
+ "%u pending, %u peak, %lu reclamations\n\n",
+ epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
+ epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
+ return 0;
+}
diff --git a/regressions/ck_ht/benchmark/serial.c b/regressions/ck_ht/benchmark/serial.c
new file mode 100644
index 0000000..0daa45c
--- /dev/null
+++ b/regressions/ck_ht/benchmark/serial.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_ht.h>
+
+#include <assert.h>
+#include <ck_malloc.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include "../../common.h"
+
+static ck_ht_t ht;
+static char **keys;
+static size_t keys_length = 0;
+static size_t keys_capacity = 128;
+
+static void *
+ht_malloc(size_t r)
+{
+
+ return malloc(r);
+}
+
+static void
+ht_free(void *p, size_t b, bool r)
+{
+
+ (void)b;
+ (void)r;
+
+ free(p);
+
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = ht_malloc,
+ .free = ht_free
+};
+
+static void
+table_init(void)
+{
+ unsigned int mode = CK_HT_MODE_BYTESTRING;
+
+#ifdef HT_DELETE
+ mode |= CK_HT_WORKLOAD_DELETE;
+#endif
+
+ common_srand48((long int)time(NULL));
+ if (ck_ht_init(&ht, mode, NULL, &my_allocator, 8, common_lrand48()) == false) {
+ perror("ck_ht_init");
+ exit(EXIT_FAILURE);
+ }
+
+ return;
+}
+
+static bool
+table_remove(const char *value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ size_t l = strlen(value);
+
+ ck_ht_hash(&h, &ht, value, l);
+ ck_ht_entry_key_set(&entry, value, l);
+ return ck_ht_remove_spmc(&ht, h, &entry);
+}
+
+static bool
+table_replace(const char *value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ size_t l = strlen(value);
+
+ ck_ht_hash(&h, &ht, value, l);
+ ck_ht_entry_set(&entry, h, value, l, "REPLACED");
+ return ck_ht_set_spmc(&ht, h, &entry);
+}
+
+static void *
+table_get(const char *value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ size_t l = strlen(value);
+ void *v = NULL;
+
+ ck_ht_hash(&h, &ht, value, l);
+ ck_ht_entry_key_set(&entry, value, l);
+
+ if (ck_ht_get_spmc(&ht, h, &entry) == true) {
+ v = ck_ht_entry_value(&entry);
+ }
+ return v;
+}
+
+static bool
+table_insert(const char *value)
+{
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ size_t l = strlen(value);
+
+ ck_ht_hash(&h, &ht, value, l);
+ ck_ht_entry_set(&entry, h, value, l, "VALUE");
+ return ck_ht_put_spmc(&ht, h, &entry);
+}
+
+static size_t
+table_count(void)
+{
+
+ return ck_ht_count(&ht);
+}
+
+static bool
+table_gc(void)
+{
+
+ return ck_ht_gc(&ht, 0, common_lrand48());
+}
+
+static bool
+table_reset(void)
+{
+
+ return ck_ht_reset_spmc(&ht);
+}
+
+static void
+keys_shuffle(char **k)
+{
+ size_t i, j;
+ char *t;
+
+ for (i = keys_length; i > 1; i--) {
+ j = rand() % (i - 1);
+
+ if (j != i - 1) {
+ t = k[i - 1];
+ k[i - 1] = k[j];
+ k[j] = t;
+ }
+ }
+
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ FILE *fp;
+ char buffer[512];
+ size_t i, j, r;
+ unsigned int d = 0;
+ uint64_t s, e, a, ri, si, ai, sr, rg, sg, ag, sd, ng, gg;
+ char **t;
+ struct ck_ht_stat st;
+
+ r = 20;
+ s = 8;
+ srand(time(NULL));
+
+ if (argc < 2) {
+ ck_error("Usage: ck_ht <dictionary> [<repetitions> <initial size>]\n");
+ }
+
+ if (argc >= 3)
+ r = atoi(argv[2]);
+
+ if (argc >= 4)
+ s = (uint64_t)atoi(argv[3]);
+
+ keys = malloc(sizeof(char *) * keys_capacity);
+ assert(keys != NULL);
+
+ fp = fopen(argv[1], "r");
+ assert(fp != NULL);
+
+ while (fgets(buffer, sizeof(buffer), fp) != NULL) {
+ buffer[strlen(buffer) - 1] = '\0';
+ keys[keys_length++] = strdup(buffer);
+ assert(keys[keys_length - 1] != NULL);
+
+ if (keys_length == keys_capacity) {
+ t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
+ assert(t != NULL);
+ keys = t;
+ }
+ }
+
+ t = realloc(keys, sizeof(char *) * keys_length);
+ assert(t != NULL);
+ keys = t;
+
+ table_init();
+
+ for (i = 0; i < keys_length; i++)
+ d += table_insert(keys[i]) == false;
+ ck_ht_stat(&ht, &st);
+
+ fprintf(stderr, "# %zu entries stored, %u duplicates, %" PRIu64 " probe.\n",
+ table_count(), d, st.probe_maximum);
+
+ fprintf(stderr, "# reverse_insertion serial_insertion random_insertion serial_replace reverse_get serial_get random_get serial_remove negative_get garbage_collect\n\n");
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (table_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = keys_length; i > 0; i--)
+ d += table_insert(keys[i - 1]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ ri = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (table_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += table_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ si = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ keys_shuffle(keys);
+
+ if (table_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += table_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ ai = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ table_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ sr = a / (r * keys_length);
+
+ table_reset();
+ for (i = 0; i < keys_length; i++)
+ table_insert(keys[i]);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = keys_length; i > 0; i--) {
+ if (table_get(keys[i - 1]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ rg = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (table_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ sg = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ keys_shuffle(keys);
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (table_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ ag = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ table_remove(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ table_insert(keys[i]);
+ }
+ sd = a / (r * keys_length);
+
+ for (i = 0; i < keys_length / 2; i++)
+ table_remove(keys[i]);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ table_gc();
+ e = rdtsc();
+ a += e - s;
+ }
+ gg = a / r;
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ table_get("\x50\x03\x04\x05\x06\x10");
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ ng = a / (r * keys_length);
+
+ printf("%zu "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 "\n",
+ keys_length, ri, si, ai, sr, rg, sg, ag, sd, ng, gg);
+
+ return 0;
+}
diff --git a/regressions/ck_ht/validate/Makefile b/regressions/ck_ht/validate/Makefile
new file mode 100644
index 0000000..cb5682c
--- /dev/null
+++ b/regressions/ck_ht/validate/Makefile
@@ -0,0 +1,21 @@
+.PHONY: check clean distribution
+
+OBJECTS=serial serial.delete
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_ht.h ../../../src/ck_ht.c
+ $(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_ht.c
+
+serial.delete: serial.c ../../../include/ck_ht.h ../../../src/ck_ht.c
+ $(CC) $(CFLAGS) -DHT_DELETE -o serial.delete serial.c ../../../src/ck_ht.c
+
+check: all
+ ./serial
+ ./serial.delete
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_ht/validate/serial.c b/regressions/ck_ht/validate/serial.c
new file mode 100644
index 0000000..9a85c2f
--- /dev/null
+++ b/regressions/ck_ht/validate/serial.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_ht.h>
+
+#include <assert.h>
+#include <ck_malloc.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "../../common.h"
+#include "../../../src/ck_ht_hash.h"
+
+static size_t hash_times_called = 0;
+
+static void *
+ht_malloc(size_t r)
+{
+
+ return malloc(r);
+}
+
+static void
+ht_free(void *p, size_t b, bool r)
+{
+
+ (void)b;
+ (void)r;
+ free(p);
+ return;
+}
+
+static void
+ht_hash_wrapper(struct ck_ht_hash *h,
+ const void *key,
+ size_t length,
+ uint64_t seed)
+{
+ hash_times_called++;
+
+ h->value = (unsigned long)MurmurHash64A(key, length, seed);
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = ht_malloc,
+ .free = ht_free
+};
+
+const char *test[] = {"Samy", "Al", "Bahra", "dances", "in", "the", "wind.", "Once",
+ "upon", "a", "time", "his", "gypsy", "ate", "one", "itsy",
+ "bitsy", "spider.", "What", "goes", "up", "must",
+ "come", "down.", "What", "is", "down", "stays",
+ "down.", "A", "B", "C", "D", "E", "F", "G", "H",
+ "I", "J", "K", "L", "M", "N", "O"};
+
+static uintptr_t direct[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 1, 2, 3, 4, 5, 9 };
+
+const char *negative = "negative";
+
+int
+main(void)
+{
+ size_t i, l;
+ ck_ht_t ht;
+ ck_ht_entry_t entry;
+ ck_ht_hash_t h;
+ ck_ht_iterator_t iterator = CK_HT_ITERATOR_INITIALIZER;
+ ck_ht_entry_t *cursor;
+ unsigned int mode = CK_HT_MODE_BYTESTRING;
+
+#ifdef HT_DELETE
+ mode |= CK_HT_WORKLOAD_DELETE;
+#endif
+
+ if (ck_ht_init(&ht, mode, ht_hash_wrapper, &my_allocator, 2, 6602834) == false) {
+ perror("ck_ht_init");
+ exit(EXIT_FAILURE);
+ }
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ l = strlen(test[i]);
+ ck_ht_hash(&h, &ht, test[i], l);
+ ck_ht_entry_set(&entry, h, test[i], l, test[i]);
+ ck_ht_put_spmc(&ht, h, &entry);
+ }
+
+ l = strlen(test[0]);
+ ck_ht_hash(&h, &ht, test[0], l);
+ ck_ht_entry_set(&entry, h, test[0], l, test[0]);
+ ck_ht_put_spmc(&ht, h, &entry);
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ l = strlen(test[i]);
+ ck_ht_hash(&h, &ht, test[i], l);
+ ck_ht_entry_key_set(&entry, test[i], l);
+ if (ck_ht_get_spmc(&ht, h, &entry) == false) {
+ ck_error("ERROR (put): Failed to find [%s]\n", test[i]);
+ } else {
+ void *k, *v;
+
+ k = ck_ht_entry_key(&entry);
+ v = ck_ht_entry_value(&entry);
+
+ if (strcmp(k, test[i]) || strcmp(v, test[i])) {
+ ck_error("ERROR: Mismatch: (%s, %s) != (%s, %s)\n",
+ (char *)k, (char *)v, test[i], test[i]);
+ }
+ }
+ }
+
+ ck_ht_hash(&h, &ht, negative, strlen(negative));
+ ck_ht_entry_key_set(&entry, negative, strlen(negative));
+ if (ck_ht_get_spmc(&ht, h, &entry) == true) {
+ ck_error("ERROR: Found non-existing entry.\n");
+ }
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ l = strlen(test[i]);
+ ck_ht_hash(&h, &ht, test[i], l);
+ ck_ht_entry_key_set(&entry, test[i], l);
+
+ if (ck_ht_get_spmc(&ht, h, &entry) == false)
+ continue;
+
+ if (ck_ht_remove_spmc(&ht, h, &entry) == false) {
+ ck_error("ERROR: Failed to delete existing entry\n");
+ }
+
+ if (ck_ht_get_spmc(&ht, h, &entry) == true)
+ ck_error("ERROR: Able to find [%s] after delete\n", test[i]);
+
+ ck_ht_entry_set(&entry, h, test[i], l, test[i]);
+ if (ck_ht_put_spmc(&ht, h, &entry) == false)
+ ck_error("ERROR: Failed to insert [%s]\n", test[i]);
+
+ if (ck_ht_remove_spmc(&ht, h, &entry) == false) {
+ ck_error("ERROR: Failed to delete existing entry\n");
+ }
+ }
+
+ ck_ht_reset_spmc(&ht);
+ if (ck_ht_count(&ht) != 0) {
+ ck_error("ERROR: Map was not reset.\n");
+ }
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ l = strlen(test[i]);
+ ck_ht_hash(&h, &ht, test[i], l);
+ ck_ht_entry_set(&entry, h, test[i], l, test[i]);
+ ck_ht_put_spmc(&ht, h, &entry);
+ }
+
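+	/* The 44-entry test array contains two duplicate keys ("What" and "down."), leaving 42 unique entries. */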
+ for (i = 0; ck_ht_next(&ht, &iterator, &cursor) == true; i++);
+ if (i != 42) {
+ ck_error("ERROR: Incorrect number of entries in table.\n");
+ }
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ l = strlen(test[i]);
+ ck_ht_hash(&h, &ht, test[i], l);
+ ck_ht_entry_set(&entry, h, test[i], l, test[i]);
+ ck_ht_set_spmc(&ht, h, &entry);
+ }
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ l = strlen(test[i]);
+ ck_ht_hash(&h, &ht, test[i], l);
+ ck_ht_entry_key_set(&entry, test[i], l);
+ if (ck_ht_get_spmc(&ht, h, &entry) == false) {
+ ck_error("ERROR (set): Failed to find [%s]\n", test[i]);
+ } else {
+ void *k, *v;
+
+ k = ck_ht_entry_key(&entry);
+ v = ck_ht_entry_value(&entry);
+
+ if (strcmp(k, test[i]) || strcmp(v, test[i])) {
+ ck_error("ERROR: Mismatch: (%s, %s) != (%s, %s)\n",
+ (char *)k, (char *)v, test[i], test[i]);
+ }
+ }
+ }
+
+ if (ck_ht_gc(&ht, 0, 27) == false) {
+ ck_error("ck_ht_gc\n");
+ }
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ l = strlen(test[i]);
+ ck_ht_hash(&h, &ht, test[i], l);
+ ck_ht_entry_set(&entry, h, test[i], l, "REPLACED");
+ ck_ht_set_spmc(&ht, h, &entry);
+
+ if (strcmp(test[i], "What") == 0)
+ continue;
+
+ if (strcmp(test[i], "down.") == 0)
+ continue;
+
+ if (strcmp(ck_ht_entry_value(&entry), test[i]) != 0) {
+ ck_error("Mismatch detected: %s, expected %s\n",
+ (char *)ck_ht_entry_value(&entry),
+ test[i]);
+ }
+ }
+
+ ck_ht_iterator_init(&iterator);
+ while (ck_ht_next(&ht, &iterator, &cursor) == true) {
+ if (strcmp(ck_ht_entry_value(cursor), "REPLACED") != 0) {
+ ck_error("Mismatch detected: %s, expected REPLACED\n",
+ (char *)ck_ht_entry_value(cursor));
+ }
+ }
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ l = strlen(test[i]);
+ ck_ht_hash(&h, &ht, test[i], l);
+ ck_ht_entry_key_set(&entry, test[i], l);
+
+ if (ck_ht_get_spmc(&ht, h, &entry) == false)
+ continue;
+
+ if (ck_ht_remove_spmc(&ht, h, &entry) == false) {
+ ck_error("ERROR: Failed to delete existing entry\n");
+ }
+
+ if (ck_ht_get_spmc(&ht, h, &entry) == true)
+ ck_error("ERROR: Able to find [%s] after delete\n", test[i]);
+
+ ck_ht_entry_set(&entry, h, test[i], l, test[i]);
+ if (ck_ht_put_spmc(&ht, h, &entry) == false)
+ ck_error("ERROR: Failed to insert [%s]\n", test[i]);
+
+ if (ck_ht_remove_spmc(&ht, h, &entry) == false) {
+ ck_error("ERROR: Failed to delete existing entry\n");
+ }
+ }
+
+ ck_ht_destroy(&ht);
+
+ if (hash_times_called == 0) {
+ ck_error("ERROR: Our hash function was not called!\n");
+ }
+
+ hash_times_called = 0;
+
+ if (ck_ht_init(&ht, CK_HT_MODE_DIRECT, ht_hash_wrapper, &my_allocator, 8, 6602834) == false) {
+ perror("ck_ht_init");
+ exit(EXIT_FAILURE);
+ }
+
+ l = 0;
+ for (i = 0; i < sizeof(direct) / sizeof(*direct); i++) {
+ ck_ht_hash_direct(&h, &ht, direct[i]);
+ ck_ht_entry_set_direct(&entry, h, direct[i], (uintptr_t)test[i]);
+ l += ck_ht_put_spmc(&ht, h, &entry) == false;
+ }
+
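+	/* direct[] holds 16 keys of which 7 are repeats, so exactly 7 put operations must fail. */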
+ if (l != 7) {
+ ck_error("ERROR: Got %zu failures rather than 7\n", l);
+ }
+
+ for (i = 0; i < sizeof(direct) / sizeof(*direct); i++) {
+ ck_ht_hash_direct(&h, &ht, direct[i]);
+ ck_ht_entry_set_direct(&entry, h, direct[i], (uintptr_t)"REPLACED");
+ l += ck_ht_set_spmc(&ht, h, &entry) == false;
+ }
+
+ ck_ht_iterator_init(&iterator);
+ while (ck_ht_next(&ht, &iterator, &cursor) == true) {
+ if (strcmp(ck_ht_entry_value(cursor), "REPLACED") != 0) {
+ ck_error("Mismatch detected: %s, expected REPLACED\n",
+ (char *)ck_ht_entry_value(cursor));
+ }
+ }
+
+ ck_ht_destroy(&ht);
+
+ if (hash_times_called == 0) {
+ ck_error("ERROR: Our hash function was not called!\n");
+ }
+
+ return 0;
+}
diff --git a/regressions/ck_pflock/benchmark/Makefile b/regressions/ck_pflock/benchmark/Makefile
new file mode 100644
index 0000000..6f739d9
--- /dev/null
+++ b/regressions/ck_pflock/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: clean distribution
+
+OBJECTS=latency throughput
+
+all: $(OBJECTS)
+
+latency: latency.c ../../../include/ck_pflock.h
+ $(CC) $(CFLAGS) -o latency latency.c
+
+throughput: throughput.c ../../../include/ck_pflock.h
+ $(CC) $(CFLAGS) -o throughput throughput.c
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_pflock/benchmark/latency.c b/regressions/ck_pflock/benchmark/latency.c
new file mode 100644
index 0000000..a28c9dd
--- /dev/null
+++ b/regressions/ck_pflock/benchmark/latency.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2013 John Wittrock.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_pflock.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+int
+main(void)
+{
+ uint64_t s_b, e_b, i;
+ ck_pflock_t pflock = CK_PFLOCK_INITIALIZER;
+
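+	/* Untimed warm-up pass before the measured write-lock loop. */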
+ for (i = 0; i < STEPS; i++) {
+ ck_pflock_write_lock(&pflock);
+ ck_pflock_write_unlock(&pflock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_pflock_write_lock(&pflock);
+ ck_pflock_write_unlock(&pflock);
+ }
+ e_b = rdtsc();
+ printf("WRITE: pflock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ }
+ e_b = rdtsc();
+ printf("READ: pflock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ return 0;
+}
+
diff --git a/regressions/ck_pflock/benchmark/throughput.c b/regressions/ck_pflock/benchmark/throughput.c
new file mode 100644
index 0000000..429465f
--- /dev/null
+++ b/regressions/ck_pflock/benchmark/throughput.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2013 John Wittrock.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_pflock.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+static int barrier;
+static int threads;
+static unsigned int flag CK_CC_CACHELINE;
+static ck_pflock_t pflock = CK_PFLOCK_INITIALIZER;
+static struct affinity affinity;
+
+static void *
+thread_pflock(void *pun)
+{
+ uint64_t s_b, e_b, a, i;
+ uint64_t *value = pun;
+
+ if (aff_iterate(&affinity) != 0) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads)
+ ck_pr_stall();
+
+ for (i = 1, a = 0;; i++) {
+ s_b = rdtsc();
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ ck_pflock_read_lock(&pflock);
+ ck_pflock_read_unlock(&pflock);
+ e_b = rdtsc();
+
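+		/* Sixteen read lock/unlock pairs were timed above; the shift by four averages the cost per pair. */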
+ a += (e_b - s_b) >> 4;
+
+ if (ck_pr_load_uint(&flag) == 1)
+ break;
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads * 2)
+ ck_pr_stall();
+
+ *value = (a / i);
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int t;
+ pthread_t *p;
+ uint64_t *latency;
+
+ if (argc != 3) {
+ ck_error("Usage: throughput <delta> <threads>\n");
+ }
+
+ threads = atoi(argv[2]);
+ if (threads <= 0) {
+ ck_error("ERROR: Threads must be a value > 0.\n");
+ }
+
+ p = malloc(sizeof(pthread_t) * threads);
+ if (p == NULL) {
+		ck_error("ERROR: Failed to allocate thread structures.\n");
+ }
+
+ latency = malloc(sizeof(uint64_t) * threads);
+ if (latency == NULL) {
+ ck_error("ERROR: Failed to create latency buffer.\n");
+ }
+
+ affinity.delta = atoi(argv[1]);
+ affinity.request = 0;
+
+ fprintf(stderr, "Creating threads (pflock)...");
+ for (t = 0; t < threads; t++) {
+ if (pthread_create(&p[t], NULL, thread_pflock, latency + t) != 0) {
+ ck_error("ERROR: Could not create thread %d\n", t);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ common_sleep(10);
+ ck_pr_store_uint(&flag, 1);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (t = 0; t < threads; t++)
+ pthread_join(p[t], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (t = 1; t <= threads; t++)
+		printf("%10d %20" PRIu64 "\n", t, latency[t - 1]);
+
+ return 0;
+}
+
diff --git a/regressions/ck_pflock/validate/Makefile b/regressions/ck_pflock/validate/Makefile
new file mode 100644
index 0000000..eea9d02
--- /dev/null
+++ b/regressions/ck_pflock/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=validate
+
+all: $(OBJECTS)
+
+validate: validate.c ../../../include/ck_pflock.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate $(CORES) 1
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_pflock/validate/validate.c b/regressions/ck_pflock/validate/validate.c
new file mode 100644
index 0000000..2551755
--- /dev/null
+++ b/regressions/ck_pflock/validate/validate.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra, John Wittrock.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_pflock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+static struct affinity a;
+static unsigned int locked;
+static int nthr;
+static ck_pflock_t lock = CK_PFLOCK_INITIALIZER;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ int i = ITERATE;
+ unsigned int l;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ ck_pflock_write_lock(&lock);
+ {
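+			/* While the write lock is held the counter is raised to 8 and returned to 0; any other observed value means mutual exclusion was violated. */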
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_pflock_write_unlock(&lock);
+
+ ck_pflock_read_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_pflock_read_unlock(&lock);
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ int i;
+
+ if (argc != 3) {
+ ck_error("Usage: validate <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ fprintf(stderr, "Creating threads (mutual exclusion)...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return 0;
+}
+
diff --git a/regressions/ck_pr/benchmark/Makefile b/regressions/ck_pr/benchmark/Makefile
new file mode 100644
index 0000000..55183d8
--- /dev/null
+++ b/regressions/ck_pr/benchmark/Makefile
@@ -0,0 +1,31 @@
+.PHONY: clean
+
+all: ck_pr_cas_64 ck_pr_fas_64 ck_pr_cas_64_2 ck_pr_add_64 ck_pr_faa_64 ck_pr_neg_64 fp
+
+fp: fp.c
+ $(CC) $(CFLAGS) -o fp fp.c
+
+ck_pr_cas_64_2: ck_pr_cas_64_2.c
+ $(CC) $(CFLAGS) -o ck_pr_cas_64_2 ck_pr_cas_64_2.c -lm
+
+ck_pr_cas_64: ck_pr_cas_64.c
+ $(CC) $(CFLAGS) -o ck_pr_cas_64 ck_pr_cas_64.c -lm
+
+ck_pr_fas_64: ck_pr_fas_64.c
+ $(CC) $(CFLAGS) -o ck_pr_fas_64 ck_pr_fas_64.c -lm
+
+ck_pr_add_64: ck_pr_add_64.c
+ $(CC) $(CFLAGS) -o ck_pr_add_64 ck_pr_add_64.c -lm
+
+ck_pr_faa_64: ck_pr_faa_64.c
+ $(CC) $(CFLAGS) -o ck_pr_faa_64 ck_pr_faa_64.c -lm
+
+ck_pr_neg_64: ck_pr_neg_64.c
+ $(CC) $(CFLAGS) -o ck_pr_neg_64 ck_pr_neg_64.c -lm
+
+clean:
+	rm -rf ck_pr_cas_64 ck_pr_fas_64 ck_pr_cas_64_2 ck_pr_add_64 \
+		ck_pr_faa_64 ck_pr_neg_64 fp *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_pr/benchmark/benchmark.h b/regressions/ck_pr/benchmark/benchmark.h
new file mode 100644
index 0000000..f9e4ed2
--- /dev/null
+++ b/regressions/ck_pr/benchmark/benchmark.h
@@ -0,0 +1,130 @@
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+
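+/*
+ * Each benchmark translation unit defines ATOMIC and ATOMIC_STRING before
+ * including this header; fairness() issues that operation in a tight loop
+ * and records a per-thread completion count.
+ */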
+/* 8! = 40320, so this workload divides evenly across 1 to 8 processors. */
+#define WORKLOAD (40320 * 2056)
+
+struct block {
+ unsigned int tid;
+};
+
+static struct affinity a;
+static unsigned int ready;
+static uint64_t *count;
+static uint64_t nthr;
+
+static uint64_t object[2] CK_CC_CACHELINE;
+
+static void *
+fairness(void *null)
+{
+ struct block *context = null;
+ unsigned int i = context->tid;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (ck_pr_load_uint(&ready) == 0);
+ while (ck_pr_load_uint(&ready)) {
+ ATOMIC;
+ ATOMIC;
+ ATOMIC;
+ ATOMIC;
+ ck_pr_store_64(count + i, count[i] + 1);
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ uint64_t v, d;
+ unsigned int i;
+ pthread_t *threads;
+ struct block *context;
+
+ if (argc != 3) {
+ ck_error("Usage: " ATOMIC_STRING " <number of threads> <affinity delta>\n");
+ exit(EXIT_FAILURE);
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ exit(EXIT_FAILURE);
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ exit(EXIT_FAILURE);
+ }
+
+ context = malloc(sizeof(struct block) * nthr);
+ if (context == NULL) {
+ ck_error("ERROR: Could not allocate thread contexts\n");
+ exit(EXIT_FAILURE);
+ }
+
+ a.delta = atoi(argv[2]);
+ a.request = 0;
+
+ count = malloc(sizeof(uint64_t) * nthr);
+ if (count == NULL) {
+ ck_error("ERROR: Could not create acquisition buffer\n");
+ exit(EXIT_FAILURE);
+ }
+ memset(count, 0, sizeof(uint64_t) * nthr);
+
+ fprintf(stderr, "Creating threads (fairness)...");
+ for (i = 0; i < nthr; i++) {
+ context[i].tid = i;
+ if (pthread_create(&threads[i], NULL, fairness, context + i)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ exit(EXIT_FAILURE);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ ck_pr_store_uint(&ready, 1);
+ common_sleep(10);
+ ck_pr_store_uint(&ready, 0);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (i = 0, v = 0; i < nthr; i++) {
+		printf("%u %15" PRIu64 "\n", i, count[i]);
+ v += count[i];
+ }
+
+ printf("\n# total : %15" PRIu64 "\n", v);
+ printf("# throughput : %15" PRIu64 " a/s\n", (v /= nthr) / 10);
+
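+	/* v now holds the per-thread mean; accumulate squared deviations for the standard-deviation report below. */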
+ for (i = 0, d = 0; i < nthr; i++)
+ d += (count[i] - v) * (count[i] - v);
+
+ printf("# average : %15" PRIu64 "\n", v);
+ printf("# deviation : %.2f (%.2f%%)\n\n", sqrt(d / nthr), (sqrt(d / nthr) / v) * 100.00);
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/benchmark/ck_pr_add_64.c b/regressions/ck_pr/benchmark/ck_pr_add_64.c
new file mode 100644
index 0000000..9c4d51f
--- /dev/null
+++ b/regressions/ck_pr/benchmark/ck_pr_add_64.c
@@ -0,0 +1,16 @@
+#include <ck_pr.h>
+
+#ifdef CK_F_PR_ADD_64
+#define ATOMIC ck_pr_add_64(object, 1)
+#define ATOMIC_STRING "ck_pr_add_64"
+#include "benchmark.h"
+#else
+#warning Did not find ADD_64 implementation.
+#include <stdlib.h>
+
+int
+main(void)
+{
+ exit(EXIT_FAILURE);
+}
+#endif
diff --git a/regressions/ck_pr/benchmark/ck_pr_cas_64.c b/regressions/ck_pr/benchmark/ck_pr_cas_64.c
new file mode 100644
index 0000000..90dcb64
--- /dev/null
+++ b/regressions/ck_pr/benchmark/ck_pr_cas_64.c
@@ -0,0 +1,16 @@
+#include <ck_pr.h>
+
+#ifdef CK_F_PR_CAS_64
+#define ATOMIC ck_pr_cas_64(object, 1, 1)
+#define ATOMIC_STRING "ck_pr_cas_64"
+#include "benchmark.h"
+#else
+#warning Did not find CAS_64 implementation.
+#include <stdlib.h>
+
+int
+main(void)
+{
+ exit(EXIT_FAILURE);
+}
+#endif
diff --git a/regressions/ck_pr/benchmark/ck_pr_cas_64_2.c b/regressions/ck_pr/benchmark/ck_pr_cas_64_2.c
new file mode 100644
index 0000000..e959b39
--- /dev/null
+++ b/regressions/ck_pr/benchmark/ck_pr_cas_64_2.c
@@ -0,0 +1,17 @@
+#include <ck_pr.h>
+
+#ifdef CK_F_PR_CAS_64_2
+#define ATOMIC { uint64_t z[2] = {1, 2}; ck_pr_cas_64_2(object, z, z); }
+#define ATOMIC_STRING "ck_pr_cas_64_2"
+#include "benchmark.h"
+#else
+#include <stdio.h>
+#include <stdlib.h>
+
+int
+main(void)
+{
+ fprintf(stderr, "Unsupported.\n");
+ return 0;
+}
+#endif
diff --git a/regressions/ck_pr/benchmark/ck_pr_faa_64.c b/regressions/ck_pr/benchmark/ck_pr_faa_64.c
new file mode 100644
index 0000000..9bdc87d
--- /dev/null
+++ b/regressions/ck_pr/benchmark/ck_pr_faa_64.c
@@ -0,0 +1,16 @@
+#include <ck_pr.h>
+
+#ifdef CK_F_PR_FAA_64
+#define ATOMIC ck_pr_faa_64(object, 1)
+#define ATOMIC_STRING "ck_pr_faa_64"
+#include "benchmark.h"
+#else
+#warning Did not find FAA_64 implementation.
+#include <stdlib.h>
+
+int
+main(void)
+{
+ exit(EXIT_FAILURE);
+}
+#endif
diff --git a/regressions/ck_pr/benchmark/ck_pr_fas_64.c b/regressions/ck_pr/benchmark/ck_pr_fas_64.c
new file mode 100644
index 0000000..facd759
--- /dev/null
+++ b/regressions/ck_pr/benchmark/ck_pr_fas_64.c
@@ -0,0 +1,17 @@
+#include <ck_pr.h>
+
+#ifdef CK_F_PR_FAS_64
+#define ATOMIC ck_pr_fas_64(object, 1)
+#define ATOMIC_STRING "ck_pr_fas_64"
+#include "benchmark.h"
+#else
+#warning Did not find FAS_64 implementation.
+#include <stdlib.h>
+
+int
+main(void)
+{
+
+ return 0;
+}
+#endif
diff --git a/regressions/ck_pr/benchmark/ck_pr_neg_64.c b/regressions/ck_pr/benchmark/ck_pr_neg_64.c
new file mode 100644
index 0000000..d4e0ad9
--- /dev/null
+++ b/regressions/ck_pr/benchmark/ck_pr_neg_64.c
@@ -0,0 +1,16 @@
+#include <ck_pr.h>
+
+#ifdef CK_F_PR_NEG_64
+#define ATOMIC ck_pr_neg_64(object)
+#define ATOMIC_STRING "ck_pr_neg_64"
+#include "benchmark.h"
+#else
+#warning Did not find NEG_64 implementation.
+#include <stdlib.h>
+
+int
+main(void)
+{
+ exit(EXIT_FAILURE);
+}
+#endif
diff --git a/regressions/ck_pr/benchmark/fp.c b/regressions/ck_pr/benchmark/fp.c
new file mode 100644
index 0000000..f7aa157
--- /dev/null
+++ b/regressions/ck_pr/benchmark/fp.c
@@ -0,0 +1,66 @@
+#include <ck_pr.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <stdint.h>
+
+#include "../../common.h"
+
+#ifndef IR
+#define IR 3000000
+#endif /* IR */
+
+static int a CK_CC_CACHELINE;
+static int b CK_CC_CACHELINE;
+
+int
+main(void)
+{
+ uint64_t s, e;
+ unsigned int i;
+
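+	/* Each timed loop reports the average cycle cost of a pair of memory operations separated by the named fence (or, in the final case, an atomic read-modify-write). */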
+ s = rdtsc();
+ for (i = 0; i < IR; i++) {
+ ck_pr_load_int(&a);
+ ck_pr_fence_strict_load();
+ ck_pr_load_int(&b);
+ }
+ e = rdtsc();
+ printf("[A] fence_load: %" PRIu64 "\n", (e - s) / IR);
+
+ s = rdtsc();
+ for (i = 0; i < IR; i++) {
+ if (ck_pr_load_int(&a) == 0)
+ ck_pr_barrier();
+ ck_pr_fence_strict_lock();
+ ck_pr_load_int(&b);
+ }
+ e = rdtsc();
+ printf("[A] fence_lock: %" PRIu64 "\n", (e - s) / IR);
+
+ s = rdtsc();
+ for (i = 0; i < IR; i++) {
+ ck_pr_store_int(&a, 0);
+ ck_pr_fence_strict_store();
+ ck_pr_store_int(&b, 0);
+ }
+ e = rdtsc();
+ printf("[B] fence_store: %" PRIu64 "\n", (e - s) / IR);
+
+ s = rdtsc();
+ for (i = 0; i < IR; i++) {
+ ck_pr_store_int(&a, 0);
+ ck_pr_fence_strict_memory();
+ ck_pr_load_int(&b);
+ }
+ e = rdtsc();
+ printf("[C] fence_memory: %" PRIu64 "\n", (e - s) / IR);
+
+ s = rdtsc();
+ for (i = 0; i < IR; i++) {
+ ck_pr_store_int(&a, 0);
+ ck_pr_faa_int(&a, 0);
+ ck_pr_load_int(&b);
+ }
+ e = rdtsc();
+ printf("[C] atomic: %" PRIu64 "\n", (e - s) / IR);
+ return 0;
+}
diff --git a/regressions/ck_pr/validate/Makefile b/regressions/ck_pr/validate/Makefile
new file mode 100644
index 0000000..9e4a82d
--- /dev/null
+++ b/regressions/ck_pr/validate/Makefile
@@ -0,0 +1,84 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_pr_cas ck_pr_faa ck_pr_inc ck_pr_dec ck_pr_bts \
+ ck_pr_btr ck_pr_btc ck_pr_load ck_pr_store \
+ ck_pr_and ck_pr_or ck_pr_xor ck_pr_add ck_pr_sub \
+ ck_pr_fas ck_pr_bin ck_pr_btx ck_pr_fax ck_pr_n \
+ ck_pr_unary
+
+all: $(OBJECTS)
+
+check: all
+ for d in $(OBJECTS) ; do \
+ echo $$d; \
+ ./$$d || exit 1; \
+ done;
+
+ck_pr_cas: ck_pr_cas.c
+ $(CC) $(CFLAGS) -o ck_pr_cas ck_pr_cas.c
+
+ck_pr_inc: ck_pr_inc.c
+ $(CC) $(CFLAGS) -o ck_pr_inc ck_pr_inc.c
+
+ck_pr_dec: ck_pr_dec.c
+ $(CC) $(CFLAGS) -o ck_pr_dec ck_pr_dec.c
+
+ck_pr_faa: ck_pr_faa.c
+ $(CC) $(CFLAGS) -o ck_pr_faa ck_pr_faa.c
+
+ck_pr_btc: ck_pr_btc.c
+ $(CC) $(CFLAGS) -o ck_pr_btc ck_pr_btc.c
+
+ck_pr_btr: ck_pr_btr.c
+ $(CC) $(CFLAGS) -o ck_pr_btr ck_pr_btr.c
+
+ck_pr_bts: ck_pr_bts.c
+ $(CC) $(CFLAGS) -o ck_pr_bts ck_pr_bts.c
+
+ck_pr_load: ck_pr_load.c
+ $(CC) $(CFLAGS) -o ck_pr_load ck_pr_load.c
+
+ck_pr_store: ck_pr_store.c
+ $(CC) $(CFLAGS) -o ck_pr_store ck_pr_store.c
+
+ck_pr_and: ck_pr_and.c
+ $(CC) $(CFLAGS) -o ck_pr_and ck_pr_and.c
+
+ck_pr_or: ck_pr_or.c
+ $(CC) $(CFLAGS) -o ck_pr_or ck_pr_or.c
+
+ck_pr_xor: ck_pr_xor.c
+ $(CC) $(CFLAGS) -o ck_pr_xor ck_pr_xor.c
+
+ck_pr_add: ck_pr_add.c
+ $(CC) $(CFLAGS) -o ck_pr_add ck_pr_add.c
+
+ck_pr_sub: ck_pr_sub.c
+ $(CC) $(CFLAGS) -o ck_pr_sub ck_pr_sub.c
+
+ck_pr_fas: ck_pr_fas.c
+ $(CC) $(CFLAGS) -o ck_pr_fas ck_pr_fas.c
+
+ck_tp: ck_tp.c
+ $(CC) $(CFLAGS) -o ck_tp ck_tp.c
+
+ck_pr_bin: ck_pr_bin.c
+ $(CC) $(CFLAGS) -o ck_pr_bin ck_pr_bin.c
+
+ck_pr_btx: ck_pr_btx.c
+ $(CC) $(CFLAGS) -o ck_pr_btx ck_pr_btx.c
+
+ck_pr_fax: ck_pr_fax.c
+ $(CC) $(CFLAGS) -o ck_pr_fax ck_pr_fax.c
+
+ck_pr_n: ck_pr_n.c
+ $(CC) $(CFLAGS) -o ck_pr_n ck_pr_n.c
+
+ck_pr_unary: ck_pr_unary.c
+ $(CC) $(CFLAGS) -o ck_pr_unary ck_pr_unary.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_pr/validate/ck_pr_add.c b/regressions/ck_pr/validate/ck_pr_add.c
new file mode 100644
index 0000000..31f1893
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_add.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define CK_PR_ADD_T(w, v, d) \
+ { \
+ uint##w##_t t = v; \
+ ck_pr_add_##w(&t, d); \
+ if (t != (uint##w##_t)(v + d)) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " (%" PRIu##w ") -> %" PRIu##w "]\n",\
+ (uint##w##_t)v, d, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_ADD_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_add_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = common_rand() % ((uint##w##_t)-1 / 2); \
+ uint##w##_t b = common_rand() % ((uint##w##_t)-1 / 2); \
+ CK_PR_ADD_T(w, a, b); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_ADD_W(m, w) \
+ { \
+ uint##m##_t t = -1, r = -1 & ~(uint##m##_t)(uint##w##_t)-1; \
+ ck_pr_add_##w((uint##w##_t *)(void *)&t, 1); \
+ if (t != r) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, r);\
+ exit(EXIT_FAILURE); \
+ } \
+ t = 0, r = (uint##m##_t)(uint##w##_t)-1; \
+ ck_pr_add_##w((uint##w##_t *)(void *)&t, -1); \
+ if (t != r) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, r);\
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+ /* Other architectures are bi-endian. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_ADD_64
+ if (m == 64) {
+#if defined(CK_F_PR_ADD_32)
+ CK_PR_ADD_W(64, 32);
+#endif
+#if defined(CK_PR_ADD_16)
+ CK_PR_ADD_W(64, 16);
+#endif
+#if defined(CK_PR_ADD_8)
+ CK_PR_ADD_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_ADD_64 */
+
+#ifdef CK_F_PR_ADD_32
+ if (m == 32) {
+#if defined(CK_F_PR_ADD_16)
+ CK_PR_ADD_W(32, 16);
+#endif
+#if defined(CK_PR_ADD_8)
+ CK_PR_ADD_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_ADD_32 */
+
+#if defined(CK_F_PR_ADD_16) && defined(CK_PR_ADD_8)
+ if (m == 16) {
+ CK_PR_ADD_W(16, 8);
+ }
+#endif /* CK_PR_ADD_16 && CK_PR_ADD_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_ADD_64
+ CK_PR_ADD_B(64);
+#endif
+
+#ifdef CK_F_PR_ADD_32
+ CK_PR_ADD_B(32);
+#endif
+
+#ifdef CK_F_PR_ADD_16
+ CK_PR_ADD_B(16);
+#endif
+
+#ifdef CK_F_PR_ADD_8
+ CK_PR_ADD_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_and.c b/regressions/ck_pr/validate/ck_pr_and.c
new file mode 100644
index 0000000..4c569bb
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_and.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define BM(m, w) ((uint##m##_t)-1 << (w))
+
+#define CK_PR_AND_T(w, v, d) \
+ { \
+ uint##w##_t t = v; \
+ ck_pr_and_##w(&t, d); \
+ if (t != (uint##w##_t)(v & d)) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " (%" PRIu##w ") -> %" PRIu##w "]\n",\
+ (uint##w##_t)v, d, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_AND_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_and_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = (uint##w##_t)common_rand(); \
+ uint##w##_t b = (uint##w##_t)common_rand(); \
+ CK_PR_AND_T(w, a, b); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_AND_W(m, w) \
+ { \
+ uint##m##_t t = -1; \
+ ck_pr_and_##w((uint##w##_t *)(void *)&t, 0); \
+ if (t != BM(m, w)) { \
+ printf(" FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, BM(m, w)); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+ /* Other architectures are bi-endian. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_AND_64
+ if (m == 64) {
+#if defined(CK_F_PR_AND_32)
+ CK_PR_AND_W(64, 32);
+#endif
+#if defined(CK_PR_AND_16)
+ CK_PR_AND_W(64, 16);
+#endif
+#if defined(CK_PR_AND_8)
+ CK_PR_AND_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_AND_64 */
+
+#ifdef CK_F_PR_AND_32
+ if (m == 32) {
+#if defined(CK_F_PR_AND_16)
+ CK_PR_AND_W(32, 16);
+#endif
+#if defined(CK_PR_AND_8)
+ CK_PR_AND_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_AND_32 */
+
+#if defined(CK_F_PR_AND_16) && defined(CK_PR_AND_8)
+ if (m == 16) {
+ CK_PR_AND_W(16, 8);
+ }
+#endif /* CK_PR_AND_16 && CK_PR_AND_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_AND_64
+ CK_PR_AND_B(64);
+#endif
+
+#ifdef CK_F_PR_AND_32
+ CK_PR_AND_B(32);
+#endif
+
+#ifdef CK_F_PR_AND_16
+ CK_PR_AND_B(16);
+#endif
+
+#ifdef CK_F_PR_AND_8
+ CK_PR_AND_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_bin.c b/regressions/ck_pr/validate/ck_pr_bin.c
new file mode 100644
index 0000000..31868f4
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_bin.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_pr.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "../../common.h"
+#define REPEAT 2000000
+
+#define TEST_BINARY(K, S, T, P, D) \
+ static void \
+ run_test_##K##_##S(void) \
+ { \
+ int i, r; \
+ T serial_result = 65535; \
+ T ck_result = 65535; \
+ \
+ puts("***TESTING ck_pr_" #K "_" #S "***"); \
+ common_srand((unsigned int)getpid()); \
+ for (i = 0; i < REPEAT; ++i) { \
+ r = common_rand(); \
+ serial_result = serial_result P r; \
+ ck_pr_##K##_##S(&ck_result, r); \
+ } \
+ \
+ printf("Value of operation " #K " on 2000000 " \
+ "random numbers\n\tusing " #P ": %" #D ",\n" \
+ "\tusing ck_pr_"#K"_"#S": %" #D "\n", \
+ serial_result, ck_result); \
+ (serial_result == ck_result) ? puts("SUCCESS.") \
+ : puts("FAILURE."); \
+ \
+ return; \
+ } \
+
+#define GENERATE_TEST(K, P) \
+ TEST_BINARY(K, int, int, P, d) \
+ TEST_BINARY(K, uint, unsigned int, P, u) \
+ static void \
+ run_test_##K(void) \
+ { \
+ run_test_##K##_int(); \
+ run_test_##K##_uint(); \
+ \
+ return; \
+ }
+
+GENERATE_TEST(add, +)
+GENERATE_TEST(sub, -)
+GENERATE_TEST(and, &)
+GENERATE_TEST(or, |)
+GENERATE_TEST(xor, ^)
+
+#undef GENERATE_TEST
+#undef TEST_BINARY
+
+int
+main(void)
+{
+ run_test_add();
+ run_test_sub();
+ run_test_and();
+ run_test_or();
+ run_test_xor();
+
+ return (0);
+}
+
+
diff --git a/regressions/ck_pr/validate/ck_pr_btc.c b/regressions/ck_pr/validate/ck_pr_btc.c
new file mode 100644
index 0000000..0edec98
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_btc.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+/*
+ * Bit selector.
+ */
+#define BM(v, b) (((v) >> (b)) & 1)
+
+#define CK_PR_BTC_T(w, v) \
+ { \
+ unsigned int j; \
+ uint##w##_t r = v; \
+ bool t; \
+ for (j = 0; j < (w); j++) { \
+ t = ck_pr_btc_##w(&r, j); \
+ if ((t && !BM(v, j)) || ((BM(v, j) + BM(r, j)) != 1)) { \
+ printf("FAIL [%" PRIx##w ":%u]\n", r, j); \
+ exit(EXIT_FAILURE); \
+ } \
+ } \
+ }
+
+#define CK_PR_BTC_B(w) \
+ { \
+ uint##w##_t o; \
+ unsigned int i; \
+ printf("ck_pr_btc_" #w ": "); \
+ for (i = 0; i < R_REPEAT; i++) { \
+ o = (uint##w##_t)common_rand(); \
+ CK_PR_BTC_T(w, o); \
+ } \
+ printf(" SUCCESS\n"); \
+ }
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_BTC_64
+ CK_PR_BTC_B(64);
+#endif
+
+#ifdef CK_F_PR_BTC_32
+ CK_PR_BTC_B(32);
+#endif
+
+#ifdef CK_F_PR_BTC_16
+ CK_PR_BTC_B(16);
+#endif
+
+#ifdef CK_F_PR_BTC_8
+ CK_PR_BTC_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_btr.c b/regressions/ck_pr/validate/ck_pr_btr.c
new file mode 100644
index 0000000..91abb30
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_btr.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+/*
+ * Bit selector.
+ */
+#define BM(v, b) (((v) >> (b)) & 1)
+
+#define CK_PR_BTR_T(w, v) \
+ { \
+ unsigned int j; \
+ uint##w##_t r = v, c = v; \
+ bool t; \
+ for (j = 0; j < (w); j++) { \
+			c &= (uint##w##_t)-1 ^ ((uint##w##_t)1 << j);		\
+ t = ck_pr_btr_##w(&r, j); \
+ if ((t && !BM(v, j)) || (r != c)) { \
+ printf("FAIL [%" PRIx##w ":%u != %" PRIx##w ":%u]\n", r, j, c, j); \
+ exit(EXIT_FAILURE); \
+ } \
+ } \
+ }
+
+#define CK_PR_BTR_B(w) \
+ { \
+ uint##w##_t o; \
+ unsigned int i; \
+ printf("ck_pr_btr_" #w ": "); \
+ for (i = 0; i < R_REPEAT; i++) { \
+ o = (uint##w##_t)common_rand(); \
+ CK_PR_BTR_T(w, o); \
+ } \
+ printf(" SUCCESS\n"); \
+ }
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_BTR_64
+ CK_PR_BTR_B(64);
+#endif
+
+#ifdef CK_F_PR_BTR_32
+ CK_PR_BTR_B(32);
+#endif
+
+#ifdef CK_F_PR_BTR_16
+ CK_PR_BTR_B(16);
+#endif
+
+#ifdef CK_F_PR_BTR_8
+ CK_PR_BTR_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_bts.c b/regressions/ck_pr/validate/ck_pr_bts.c
new file mode 100644
index 0000000..1e62165
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_bts.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+/*
+ * Bit selector.
+ */
+#define BM(v, b) (((v) >> (b)) & 1)
+
+#define CK_PR_BTS_T(w, v) \
+ { \
+ unsigned int j; \
+ uint##w##_t r = v, c = v; \
+ bool t; \
+ for (j = 0; j < (w); j++) { \
+ c |= (uint##w##_t)1 << j; \
+ t = ck_pr_bts_##w(&r, j); \
+ if ((t && !BM(v, j)) || (r != c)) { \
+ printf("FAIL [%" PRIx##w ":%u != %" PRIx##w ":%u]\n", r, j, c, j); \
+ exit(EXIT_FAILURE); \
+ } \
+ } \
+ }
+
+#define CK_PR_BTS_B(w) \
+ { \
+ uint##w##_t o; \
+ unsigned int i; \
+ printf("ck_pr_bts_" #w ": "); \
+ for (i = 0; i < R_REPEAT; i++) { \
+ o = (uint##w##_t)common_rand(); \
+ CK_PR_BTS_T(w, o); \
+ } \
+ printf(" SUCCESS\n"); \
+ }
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_BTS_64
+ CK_PR_BTS_B(64);
+#endif
+
+#ifdef CK_F_PR_BTS_32
+ CK_PR_BTS_B(32);
+#endif
+
+#ifdef CK_F_PR_BTS_16
+ CK_PR_BTS_B(16);
+#endif
+
+#ifdef CK_F_PR_BTS_8
+ CK_PR_BTS_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_btx.c b/regressions/ck_pr/validate/ck_pr_btx.c
new file mode 100644
index 0000000..2bb3964
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_btx.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <ck_pr.h>
+
+#include "../../common.h"
+#define REPEAT 2000000
+
+#define TEST_BTX(K, S, M, T, L, P, D, R) \
+ static bool \
+ test_##K##_##S(M *target, int offset) \
+ { \
+ T previous; \
+ const L change = R (0x01 << offset); \
+ \
+ previous = (T)*target; \
+ *target = previous P change; \
+ return ((previous >> offset) & 0x01); \
+ } \
+ static void \
+ run_test_##K##_##S(void) \
+ { \
+ int i, offset, m; \
+ bool serial_t, ck_pr_t; \
+ T x = 65535, y = 65535; \
+ \
+ common_srand((unsigned int)getpid()); \
+ m = sizeof(T) * 8; \
+ \
+ puts("***TESTING ck_pr_"#K"_"#S"***"); \
+ for (i = 0; i < REPEAT; ++i) { \
+ offset = common_rand() % m; \
+ serial_t = test_##K##_##S(&x, offset); \
+ ck_pr_t = ck_pr_##K##_##S(&y, offset); \
+ \
+ if (serial_t != ck_pr_t || x != y ) { \
+ printf("Serial(%"#D") and ck_pr(%"#D")" \
+ #K"_"#S " do not match.\n" \
+ "FAILURE.\n", \
+ serial_t, ck_pr_t); \
+ \
+ return; \
+ } \
+ } \
+ printf("\tserial_"#K"_"#S": %"#D"\n" \
+ "\tck_pr_"#K"_"#S": %"#D"\n" \
+ "SUCCESS.\n", \
+ x, y); \
+ \
+ return; \
+ }
+
+#define TEST_BTX_S(K, S, T, P, D, R) TEST_BTX(K, S, T, T, T, P, D, R)
+
+#define GENERATE_TEST(K, P, R) \
+ TEST_BTX_S(K, int, int, P, d, R) \
+ TEST_BTX_S(K, uint, unsigned int, P, u, R) \
+ static void \
+ run_test_##K(void) \
+ { \
+ run_test_##K##_int(); \
+ run_test_##K##_uint(); \
+ \
+ return; \
+ }
+
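+/*
+ * R is pasted in front of the bit mask: "~" inverts it so btr's "&" clears
+ * the target bit, while "0+" is an arithmetic identity that leaves the mask
+ * unchanged for btc's "^" and bts's "|".
+ */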
+GENERATE_TEST(btc, ^, 0+)
+GENERATE_TEST(btr, &, ~)
+GENERATE_TEST(bts, |, 0+)
+
+#undef GENERATE_TEST
+#undef TEST_BTX_S
+#undef TEST_BTX
+
+int
+main(void)
+{
+ run_test_btc();
+ run_test_btr();
+ run_test_bts();
+
+ return (0);
+}
+
+
diff --git a/regressions/ck_pr/validate/ck_pr_cas.c b/regressions/ck_pr/validate/ck_pr_cas.c
new file mode 100644
index 0000000..132d1e5
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_cas.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define W(w, x) (uint##w##_t)((x) & (uint##w##_t)~0)
+
+#define CK_PR_CAS_T(w, v, c, s) \
+ { \
+ uint##w##_t t = v; \
+ bool r; \
+ r = ck_pr_cas_##w(&t, c, s); \
+ if (((c == v) && (r == false)) || ((c != v) && (r == true)) || \
+ ((r == true) && (W(w, s) != t))) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " (%" PRIu##w " -> %" PRIu##w ")" \
+ " -> %" PRIu##w "]\n", \
+ (uint##w##_t)(v), (uint##w##_t)(c), W(w, s), (uint##w##_t)(t)); \
+ } \
+ }
+
+#define CK_PR_CAS_B(w) \
+ { \
+ unsigned int __ck_i; \
+ printf("ck_pr_cas_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = common_rand() % (uint##w##_t)-1; \
+ CK_PR_CAS_T(w, a, a + 1, (a - 1)); \
+ CK_PR_CAS_T(w, a, a, (a - 1)); \
+ CK_PR_CAS_T(w, a, a + 1, a); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_CAS_W(m, w) \
+ { \
+ uint##m##_t t = -1, r = -1 & ~(uint##m##_t)(uint##w##_t)-1; \
+ ck_pr_cas_##w((uint##w##_t *)(void *)&t, (uint##w##_t)t, 0); \
+ if (t != r) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", \
+ (uint##m##_t)t, (uint##m##_t)r); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+ /* Other architectures are bi-endian. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_CAS_64
+ if (m == 64) {
+#if defined(CK_F_PR_CAS_32)
+ CK_PR_CAS_W(64, 32);
+#endif
+#if defined(CK_PR_CAS_16)
+ CK_PR_CAS_W(64, 16);
+#endif
+#if defined(CK_PR_CAS_8)
+ CK_PR_CAS_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_CAS_64 */
+
+#ifdef CK_F_PR_CAS_32
+ if (m == 32) {
+#if defined(CK_F_PR_CAS_16)
+ CK_PR_CAS_W(32, 16);
+#endif
+#if defined(CK_PR_CAS_8)
+ CK_PR_CAS_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_CAS_32 */
+
+#if defined(CK_F_PR_CAS_16) && defined(CK_PR_CAS_8)
+ if (m == 16) {
+ CK_PR_CAS_W(16, 8);
+ }
+#endif /* CK_PR_CAS_16 && CK_PR_CAS_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_CAS_64
+ CK_PR_CAS_B(64);
+#endif
+
+#ifdef CK_F_PR_CAS_32
+ CK_PR_CAS_B(32);
+#endif
+
+#ifdef CK_F_PR_CAS_16
+ CK_PR_CAS_B(16);
+#endif
+
+#ifdef CK_F_PR_CAS_8
+ CK_PR_CAS_B(8);
+#endif
+
+#ifdef CK_F_PR_CAS_64_VALUE
+ uint64_t a = 0xffffffffaaaaaaaa, b = 0x8888888800000000;
+
+ printf("%" PRIx64 " (%" PRIx64 ") -> ", b, a);
+ ck_pr_cas_64_value(&a, a, b, &b);
+ printf("%" PRIx64 " (%" PRIx64 ")\n", b, a);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_dec.c b/regressions/ck_pr/validate/ck_pr_dec.c
new file mode 100644
index 0000000..86ce088
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_dec.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define CK_PR_DEC_T(w, v) \
+ { \
+ uint##w##_t t = v; \
+ ck_pr_dec_##w(&t); \
+ if ((t != (uint##w##_t)(v - 1))) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " -> %" PRIu##w "]\n", (uint##w##_t)v, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_DEC_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_dec_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = common_rand() % ((uint##w##_t)-1); \
+ CK_PR_DEC_T(w, a); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_DEC_W(m, w) \
+ { \
+ uint##m##_t t = 0, r = (uint##w##_t)-1; \
+ ck_pr_dec_##w((uint##w##_t *)(void *)&t); \
+ if (t != r) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, r);\
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+ /* Other architectures are bi-endian. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_DEC_64
+ if (m == 64) {
+#if defined(CK_F_PR_DEC_32)
+ CK_PR_DEC_W(64, 32);
+#endif
+#if defined(CK_PR_DEC_16)
+ CK_PR_DEC_W(64, 16);
+#endif
+#if defined(CK_PR_DEC_8)
+ CK_PR_DEC_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_DEC_64 */
+
+#ifdef CK_F_PR_DEC_32
+ if (m == 32) {
+#if defined(CK_F_PR_DEC_16)
+ CK_PR_DEC_W(32, 16);
+#endif
+#if defined(CK_PR_DEC_8)
+ CK_PR_DEC_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_DEC_32 */
+
+#if defined(CK_F_PR_DEC_16) && defined(CK_PR_DEC_8)
+ if (m == 16) {
+ CK_PR_DEC_W(16, 8);
+ }
+#endif /* CK_PR_DEC_16 && CK_PR_DEC_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_DEC_64
+ CK_PR_DEC_B(64);
+#endif
+
+#ifdef CK_F_PR_DEC_32
+ CK_PR_DEC_B(32);
+#endif
+
+#ifdef CK_F_PR_DEC_16
+ CK_PR_DEC_B(16);
+#endif
+
+#ifdef CK_F_PR_DEC_8
+ CK_PR_DEC_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_faa.c b/regressions/ck_pr/validate/ck_pr_faa.c
new file mode 100644
index 0000000..1d10bb9
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_faa.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define CK_PR_FAA_T(w, v, d) \
+ { \
+ uint##w##_t r, t = v; \
+ r = ck_pr_faa_##w(&t, d); \
+ if ((t != (uint##w##_t)(v + d)) || (r != v)) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " (%" PRIu##w ") -> %" PRIu##w \
+ " (%" PRIu##w ")]\n", \
+ (uint##w##_t)v, d, t, r); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_FAA_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_faa_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = common_rand() % ((uint##w##_t)-1 / 2); \
+ uint##w##_t b = common_rand() % ((uint##w##_t)-1 / 2); \
+ CK_PR_FAA_T(w, a, b); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_FAA_W(m, w) \
+ { \
+ uint##m##_t t = -1, r = -1 & ~(uint##m##_t)(uint##w##_t)-1; \
+ ck_pr_faa_##w((uint##w##_t *)(void *)&t, 1); \
+ if (t != r) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, r);\
+ exit(EXIT_FAILURE); \
+ } \
+ t = 0, r = (uint##m##_t)(uint##w##_t)-1; \
+ ck_pr_faa_##w((uint##w##_t *)(void *)&t, -1); \
+ if (t != r) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, r);\
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+ /* Other architectures are bi-endian. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_FAA_64
+ if (m == 64) {
+#if defined(CK_F_PR_FAA_32)
+ CK_PR_FAA_W(64, 32);
+#endif
+#if defined(CK_PR_FAA_16)
+ CK_PR_FAA_W(64, 16);
+#endif
+#if defined(CK_PR_FAA_8)
+ CK_PR_FAA_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_FAA_64 */
+
+#ifdef CK_F_PR_FAA_32
+ if (m == 32) {
+#if defined(CK_F_PR_FAA_16)
+ CK_PR_FAA_W(32, 16);
+#endif
+#if defined(CK_PR_FAA_8)
+ CK_PR_FAA_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_FAA_32 */
+
+#if defined(CK_F_PR_FAA_16) && defined(CK_PR_FAA_8)
+ if (m == 16) {
+ CK_PR_FAA_W(16, 8);
+ }
+#endif /* CK_PR_FAA_16 && CK_PR_FAA_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_FAA_64
+ CK_PR_FAA_B(64);
+#endif
+
+#ifdef CK_F_PR_FAA_32
+ CK_PR_FAA_B(32);
+#endif
+
+#ifdef CK_F_PR_FAA_16
+ CK_PR_FAA_B(16);
+#endif
+
+#ifdef CK_F_PR_FAA_8
+ CK_PR_FAA_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_fas.c b/regressions/ck_pr/validate/ck_pr_fas.c
new file mode 100644
index 0000000..00cef4e
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_fas.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define BM(m, w) ((uint##m##_t)(uint##w##_t)(-1))
+
+#define CK_PR_FAS_T(w, v, d) \
+ { \
+ uint##w##_t r, t = v; \
+ r = ck_pr_fas_##w(&t, d); \
+ if ((t != d) || (r != v)) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " (%" PRIu##w ") -> %" PRIu##w \
+ " (%" PRIu##w ")]\n", \
+ (uint##w##_t)v, d, t, r); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_FAS_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_fas_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = common_rand(); \
+ uint##w##_t b = common_rand(); \
+ CK_PR_FAS_T(w, a, b); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_FAS_W(m, w) \
+ { \
+ uint##m##_t t = 0; \
+ ck_pr_fas_##w((uint##w##_t *)(void *)&t, -1); \
+ if (t != BM(m, w)) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, BM(m, w)); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+ /* Other architectures are bi-endian. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_FAS_64
+ if (m == 64) {
+#if defined(CK_F_PR_FAS_32)
+ CK_PR_FAS_W(64, 32);
+#endif
+#if defined(CK_PR_FAS_16)
+ CK_PR_FAS_W(64, 16);
+#endif
+#if defined(CK_PR_FAS_8)
+ CK_PR_FAS_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_FAS_64 */
+
+#ifdef CK_F_PR_FAS_32
+ if (m == 32) {
+#if defined(CK_F_PR_FAS_16)
+ CK_PR_FAS_W(32, 16);
+#endif
+#if defined(CK_PR_FAS_8)
+ CK_PR_FAS_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_FAS_32 */
+
+#if defined(CK_F_PR_FAS_16) && defined(CK_PR_FAS_8)
+ if (m == 16) {
+ CK_PR_FAS_W(16, 8);
+ }
+#endif /* CK_PR_FAS_16 && CK_PR_FAS_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_FAS_64
+ CK_PR_FAS_B(64);
+#endif
+
+#ifdef CK_F_PR_FAS_32
+ CK_PR_FAS_B(32);
+#endif
+
+#ifdef CK_F_PR_FAS_16
+ CK_PR_FAS_B(16);
+#endif
+
+#ifdef CK_F_PR_FAS_8
+ CK_PR_FAS_B(8);
+#endif
+
+ return (0);
+}
+
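CK_PR_FAS_T above captures the fetch-and-store contract in one place: after the call, the target holds the new value and the return value is the old contents. A minimal sketch of that contract, assuming the int-width variant (CK_F_PR_FAS_INT) is available on the build target:

	#include <stdio.h>
	#include <stdlib.h>

	#include <ck_pr.h>

	int
	main(void)
	{
	#ifdef CK_F_PR_FAS_INT
		int target = 42;
		int previous;

		/* Atomically swap in 7; the old contents come back to the caller. */
		previous = ck_pr_fas_int(&target, 7);

		if (previous != 42 || target != 7) {
			puts("FAIL");
			exit(EXIT_FAILURE);
		}
		puts("SUCCESS");
	#endif
		return 0;
	}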
diff --git a/regressions/ck_pr/validate/ck_pr_fax.c b/regressions/ck_pr/validate/ck_pr_fax.c
new file mode 100644
index 0000000..9d8c94f
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_fax.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <ck_pr.h>
+
+#include "../../common.h"
+#define REPEAT 2000000
+
+#define TEST_FAX_FN(S, T, M) \
+ static T \
+ test_faa_##S(M *target, T delta) \
+ { \
+ T previous = (T)*target; \
+ *target = (T)*target + delta; \
+ \
+ return (previous); \
+ } \
+ static T \
+ test_fas_##S(M *target, T update) \
+ { \
+ T previous = *target; \
+ *target = update; \
+ \
+ return (previous); \
+ }
+
+#define TEST_FAX_FN_S(S, T) TEST_FAX_FN(S, T, T)
+
+TEST_FAX_FN_S(int, int)
+TEST_FAX_FN_S(uint, unsigned int)
+
+#undef TEST_FAX_FN_S
+#undef TEST_FAX_FN
+
+#define TEST_FAX(K, S, T, D) \
+ static void \
+ run_test_##K##_##S(void) \
+ { \
+ int i, r; \
+ T x = 0, y = 0, x_b, y_b; \
+ \
+ puts("***TESTING ck_pr_"#K"_"#S"***"); \
+ common_srand((unsigned int)getpid()); \
+ for (i = 0; i < REPEAT; ++i) { \
+ r = common_rand(); \
+ x_b = test_##K##_##S(&x, r); \
+ y_b = ck_pr_##K##_##S(&y, r); \
+ \
+ if (x_b != y_b) { \
+ printf("Serial fetch does not match ck_pr fetch.\n" \
+ "\tSerial: %"#D"\n" \
+ "\tck_pr: %"#D"\n", \
+ x_b, y_b); \
+ \
+ return; \
+ } \
+ } \
+ \
+ printf("Final result:\n" \
+ "\tSerial: %"#D"\n" \
+ "\tck_pr: %"#D"\n", \
+ x, y); \
+ (x == y) ? puts("SUCCESS.") \
+ : puts("FAILURE."); \
+ \
+ return; \
+ } \
+
+
+#define GENERATE_TEST(K) \
+ TEST_FAX(K, int, int, d) \
+ TEST_FAX(K, uint, unsigned int, u) \
+ static void \
+ run_test_##K(void) \
+ { \
+ run_test_##K##_int(); \
+ run_test_##K##_uint(); \
+ }
+
+GENERATE_TEST(faa)
+GENERATE_TEST(fas)
+
+#undef GENERATE_TEST
+#undef TEST_FAX
+
+int
+main(void)
+{
+ run_test_faa();
+ run_test_fas();
+
+ return (0);
+}
+
+
diff --git a/regressions/ck_pr/validate/ck_pr_inc.c b/regressions/ck_pr/validate/ck_pr_inc.c
new file mode 100644
index 0000000..e8524a5
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_inc.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define CK_PR_INC_T(w, v) \
+ { \
+ uint##w##_t t = v; \
+ ck_pr_inc_##w(&t); \
+ if ((t != (uint##w##_t)(v + 1))) { \
+ printf("FAIL [%" PRIu##w " -> %" PRIu##w "]\n", \
+ (uint##w##_t)v, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_INC_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_inc_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = common_rand() % ((uint##w##_t)-1); \
+ CK_PR_INC_T(w, a); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_INC_W(m, w) \
+ { \
+ uint##m##_t t = -1, r = -1 & ~(uint##m##_t)(uint##w##_t)-1; \
+ ck_pr_inc_##w((uint##w##_t *)(void *)&t); \
+ if (t != r) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, r);\
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+	/* Mixed-width checks assume x86/x86-64 byte ordering; other architectures may be bi-endian, so skip them. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_INC_64
+ if (m == 64) {
+#if defined(CK_F_PR_INC_32)
+ CK_PR_INC_W(64, 32);
+#endif
+#if defined(CK_PR_INC_16)
+ CK_PR_INC_W(64, 16);
+#endif
+#if defined(CK_PR_INC_8)
+ CK_PR_INC_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_INC_64 */
+
+#ifdef CK_F_PR_INC_32
+ if (m == 32) {
+#if defined(CK_F_PR_INC_16)
+ CK_PR_INC_W(32, 16);
+#endif
+#if defined(CK_PR_INC_8)
+ CK_PR_INC_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_INC_32 */
+
+#if defined(CK_F_PR_INC_16) && defined(CK_PR_INC_8)
+ if (m == 16) {
+ CK_PR_INC_W(16, 8);
+ }
+#endif /* CK_PR_INC_16 && CK_PR_INC_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_INC_64
+ CK_PR_INC_B(64);
+#endif
+
+#ifdef CK_F_PR_INC_32
+ CK_PR_INC_B(32);
+#endif
+
+#ifdef CK_F_PR_INC_16
+ CK_PR_INC_B(16);
+#endif
+
+#ifdef CK_F_PR_INC_8
+ CK_PR_INC_B(8);
+#endif
+
+ return (0);
+}
+
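To make the expected value in CK_PR_INC_W concrete: with m = 32 and w = 16, t starts out as 0xffffffff and r is computed as -1 & ~(uint32_t)(uint16_t)-1 = 0xffff0000. Incrementing only the low 16-bit lane wraps 0xffff around to 0x0000, so after ck_pr_inc_16 on the aliased pointer the full word must read exactly 0xffff0000; any other value means the narrower increment leaked into the neighbouring bytes.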
diff --git a/regressions/ck_pr/validate/ck_pr_load.c b/regressions/ck_pr/validate/ck_pr_load.c
new file mode 100644
index 0000000..a15acd0
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_load.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define CK_PR_LOAD_B(w) \
+ { \
+ uint##w##_t t = (uint##w##_t)-1, a = 0; \
+ unsigned int i; \
+ printf("ck_pr_load_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ a = ck_pr_load_##w(&t); \
+ if (a != t) { \
+ printf("FAIL [%#" PRIx##w " != %#" PRIx##w "]\n", a, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ for (i = 0; i < R_REPEAT; i++) { \
+ t = (uint##w##_t)common_rand(); \
+ a = ck_pr_load_##w(&t); \
+ if (a != t) { \
+ printf("FAIL [%#" PRIx##w " != %#" PRIx##w "]\n", a, t);\
+ exit(EXIT_FAILURE); \
+ } \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_LOAD_W(m, w) \
+ { \
+ uint##m##_t f = 0; \
+ uint##w##_t j = (uint##w##_t)-1; \
+ f = ck_pr_load_##w(&j); \
+ if (f != j) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##w "]\n", f, j);\
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+	/* Mixed-width checks assume x86/x86-64 byte ordering; other architectures may be bi-endian, so skip them. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_LOAD_64
+ if (m == 64) {
+#if defined(CK_F_PR_LOAD_32)
+ CK_PR_LOAD_W(64, 32);
+#endif
+#if defined(CK_PR_LOAD_16)
+ CK_PR_LOAD_W(64, 16);
+#endif
+#if defined(CK_PR_LOAD_8)
+ CK_PR_LOAD_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_LOAD_64 */
+
+#ifdef CK_F_PR_LOAD_32
+ if (m == 32) {
+#if defined(CK_F_PR_LOAD_16)
+ CK_PR_LOAD_W(32, 16);
+#endif
+#if defined(CK_PR_LOAD_8)
+ CK_PR_LOAD_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_LOAD_32 */
+
+#if defined(CK_F_PR_LOAD_16) && defined(CK_PR_LOAD_8)
+ if (m == 16)
+ CK_PR_LOAD_W(16, 8);
+#endif /* CK_PR_LOAD_16 && CK_PR_LOAD_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_LOAD_64
+ CK_PR_LOAD_B(64);
+#endif
+
+#ifdef CK_F_PR_LOAD_32
+ CK_PR_LOAD_B(32);
+#endif
+
+#ifdef CK_F_PR_LOAD_16
+ CK_PR_LOAD_B(16);
+#endif
+
+#ifdef CK_F_PR_LOAD_8
+ CK_PR_LOAD_B(8);
+#endif
+
+#if 0
+ uint64_t a[2] = {0, 0}, b[2] = {0x1111111144444444, 0x2222222266666666};
+ printf("%" PRIx64 ":%" PRIx64 " -> ", a[0], a[1]);
+ ck_pr_load_64_2(&b, &a);
+ printf("%" PRIx64 ":%" PRIx64 "\n", a[0], a[1]);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_n.c b/regressions/ck_pr/validate/ck_pr_n.c
new file mode 100644
index 0000000..81e3639
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_n.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <ck_pr.h>
+
+#include "../../common.h"
+#define REPEAT 2000000
+
+#define TEST_N(K, S, T, P, D) \
+ static void \
+ run_test_##K##_##S(void) \
+ { \
+ int i, r; \
+ T x = 0, y = 0; \
+ \
+ puts("***TESTING ck_pr_"#K"_"#S"***"); \
+ common_srand((unsigned int)getpid()); \
+ for (i = 0; i < REPEAT; ++i) { \
+ r = common_rand(); \
+ x += r; \
+ x = P x; \
+ y += r; \
+ ck_pr_##K##_##S(&y); \
+ } \
+ \
+ printf("Value of operation "#K" on 2000000 " \
+ "random numbers\n" \
+ "\tusing "#P": %"#D",\n" \
+ "\tusing ck_pr_"#K"_"#S": %"#D",\n", \
+ x, y); \
+ (x == y) ? puts("SUCCESS.") \
+ : puts("FAILURE."); \
+ \
+ return; \
+ }
+
+#define GENERATE_TEST(K, P) \
+ TEST_N(K, int, int, P, d) \
+ TEST_N(K, uint, unsigned int, P, u) \
+ static void \
+ run_test_##K(void) \
+ { \
+ run_test_##K##_int(); \
+ run_test_##K##_uint(); \
+ \
+ return; \
+ }
+
+GENERATE_TEST(not, ~)
+GENERATE_TEST(neg, -)
+
+#undef GENERATE_TEST
+#undef TEST_N
+
+int
+main(void)
+{
+ run_test_not();
+ run_test_neg();
+
+ return (0);
+}
+
+
diff --git a/regressions/ck_pr/validate/ck_pr_or.c b/regressions/ck_pr/validate/ck_pr_or.c
new file mode 100644
index 0000000..27580c3
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_or.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define BM(m, w) (uint##m##_t)(uint##w##_t)-1
+
+#define CK_PR_OR_T(w, v, d) \
+ { \
+ uint##w##_t t; \
+ ck_pr_or_##w(&t, 1ULL << (w - 1)); \
+ t = v; \
+ ck_pr_or_##w(&t, d); \
+ if (t != (uint##w##_t)(v | d)) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " (%" PRIu##w ") -> %" PRIu##w "]\n",\
+ (uint##w##_t)v, d, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_OR_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_or_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = (uint##w##_t)common_rand(); \
+ uint##w##_t b = (uint##w##_t)common_rand(); \
+ CK_PR_OR_T(w, a, b); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_OR_W(m, w) \
+ { \
+ uint##m##_t t = 0; \
+ ck_pr_or_##w((uint##w##_t *)(void *)&t, -1); \
+ if (t != BM(m, w)) { \
+ printf(" FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, BM(m, w)); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+	/* Mixed-width checks assume x86/x86-64 byte ordering; other architectures may be bi-endian, so skip them. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_OR_64
+ if (m == 64) {
+#if defined(CK_F_PR_OR_32)
+ CK_PR_OR_W(64, 32);
+#endif
+#if defined(CK_PR_OR_16)
+ CK_PR_OR_W(64, 16);
+#endif
+#if defined(CK_PR_OR_8)
+ CK_PR_OR_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_OR_64 */
+
+#ifdef CK_F_PR_OR_32
+ if (m == 32) {
+#if defined(CK_F_PR_OR_16)
+ CK_PR_OR_W(32, 16);
+#endif
+#if defined(CK_PR_OR_8)
+ CK_PR_OR_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_OR_32 */
+
+#if defined(CK_F_PR_OR_16) && defined(CK_PR_OR_8)
+ if (m == 16) {
+ CK_PR_OR_W(16, 8);
+ }
+#endif /* CK_PR_OR_16 && CK_PR_OR_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_OR_64
+ CK_PR_OR_B(64);
+#endif
+
+#ifdef CK_F_PR_OR_32
+ CK_PR_OR_B(32);
+#endif
+
+#ifdef CK_F_PR_OR_16
+ CK_PR_OR_B(16);
+#endif
+
+#ifdef CK_F_PR_OR_8
+ CK_PR_OR_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_store.c b/regressions/ck_pr/validate/ck_pr_store.c
new file mode 100644
index 0000000..e4b852b
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_store.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "../../common.h"
+#include <ck_pr.h>
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define CK_PR_STORE_B(w) \
+ { \
+ uint##w##_t t = (uint##w##_t)-1, a = 0, b; \
+ ck_pr_store_##w(&b, 1ULL << (w - 1)); \
+ unsigned int i; \
+ printf("ck_pr_store_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ ck_pr_store_##w(&a, t); \
+ if (a != t) { \
+ printf("FAIL [%#" PRIx##w " != %#" PRIx##w "]\n", a, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ for (i = 0; i < R_REPEAT; i++) { \
+ t = (uint##w##_t)common_rand(); \
+ ck_pr_store_##w(&a, t); \
+ if (a != t) { \
+ printf("FAIL [%#" PRIx##w " != %#" PRIx##w "]\n", a, t);\
+ exit(EXIT_FAILURE); \
+ } \
+ } \
+ rg_width(w); \
+ printf("SUCCESS\n"); \
+ }
+
+#define CK_PR_STORE_W(m, w) \
+ { \
+ uint##m##_t f = 0; \
+ uint##w##_t j = (uint##w##_t)-1; \
+ ck_pr_store_##w((uint##w##_t *)(void *)&f, j); \
+ if (f != j) { \
+ printf("FAIL [%#" PRIx##m " != %#" PRIx##w "]\n", f, j);\
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+	/* Mixed-width checks assume x86/x86-64 byte ordering; other architectures may be bi-endian, so skip them. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_STORE_64
+ if (m == 64) {
+#if defined(CK_F_PR_STORE_32)
+ CK_PR_STORE_W(64, 32);
+#endif
+#if defined(CK_PR_STORE_16)
+ CK_PR_STORE_W(64, 16);
+#endif
+#if defined(CK_PR_STORE_8)
+ CK_PR_STORE_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_STORE_64 */
+
+#ifdef CK_F_PR_STORE_32
+ if (m == 32) {
+#if defined(CK_F_PR_STORE_16)
+ CK_PR_STORE_W(32, 16);
+#endif
+#if defined(CK_PR_STORE_8)
+ CK_PR_STORE_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_STORE_32 */
+
+#if defined(CK_F_PR_STORE_16) && defined(CK_PR_STORE_8)
+ if (m == 16)
+ CK_PR_STORE_W(16, 8);
+#endif /* CK_PR_STORE_16 && CK_PR_STORE_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+#if defined(CK_F_PR_STORE_DOUBLE) && defined(CK_F_PR_LOAD_DOUBLE)
+ double d;
+
+ ck_pr_store_double(&d, 0.0);
+ if (ck_pr_load_double(&d) != 0.0) {
+ ck_error("Stored 0 in double, did not find 0.\n");
+ }
+#endif
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_STORE_64
+ CK_PR_STORE_B(64);
+#endif
+
+#ifdef CK_F_PR_STORE_32
+ CK_PR_STORE_B(32);
+#endif
+
+#ifdef CK_F_PR_STORE_16
+ CK_PR_STORE_B(16);
+#endif
+
+#ifdef CK_F_PR_STORE_8
+ CK_PR_STORE_B(8);
+#endif
+
+ return (0);
+}
diff --git a/regressions/ck_pr/validate/ck_pr_sub.c b/regressions/ck_pr/validate/ck_pr_sub.c
new file mode 100644
index 0000000..f515914
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_sub.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define CK_PR_SUB_T(w, v, d) \
+ { \
+ uint##w##_t t = v; \
+ ck_pr_sub_##w(&t, d); \
+ if (t != (uint##w##_t)(v - d)) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " (%" PRIu##w ") -> %" PRIu##w "]\n", \
+ (uint##w##_t)v, d, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_SUB_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_sub_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = common_rand() % ((uint##w##_t)-1 / 2); \
+ uint##w##_t b = common_rand() % ((uint##w##_t)-1 / 2); \
+ CK_PR_SUB_T(w, a, b); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_SUB_W(m, w) \
+ { \
+ uint##m##_t t = 0, r = (uint##m##_t)(uint##w##_t)-1; \
+ ck_pr_sub_##w((uint##w##_t *)(void *)&t, 1); \
+ if (t != r) { \
+ printf(" FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, r); \
+ exit(EXIT_FAILURE); \
+ } \
+ t = 0; \
+ ck_pr_sub_##w((uint##w##_t *)(void *)&t, -1); \
+ if (t != 1) { \
+ printf(" FAIL [%#" PRIx##m " != 1]\n", t); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+	/* Mixed-width checks assume x86/x86-64 byte ordering; other architectures may be bi-endian, so skip them. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_SUB_64
+ if (m == 64) {
+#if defined(CK_F_PR_SUB_32)
+ CK_PR_SUB_W(64, 32);
+#endif
+#if defined(CK_PR_SUB_16)
+ CK_PR_SUB_W(64, 16);
+#endif
+#if defined(CK_PR_SUB_8)
+ CK_PR_SUB_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_SUB_64 */
+
+#ifdef CK_F_PR_SUB_32
+ if (m == 32) {
+#if defined(CK_F_PR_SUB_16)
+ CK_PR_SUB_W(32, 16);
+#endif
+#if defined(CK_PR_SUB_8)
+ CK_PR_SUB_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_SUB_32 */
+
+#if defined(CK_F_PR_SUB_16) && defined(CK_PR_SUB_8)
+ if (m == 16) {
+ CK_PR_SUB_W(16, 8);
+ }
+#endif /* CK_PR_SUB_16 && CK_PR_SUB_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_SUB_64
+ CK_PR_SUB_B(64);
+#endif
+
+#ifdef CK_F_PR_SUB_32
+ CK_PR_SUB_B(32);
+#endif
+
+#ifdef CK_F_PR_SUB_16
+ CK_PR_SUB_B(16);
+#endif
+
+#ifdef CK_F_PR_SUB_8
+ CK_PR_SUB_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_pr/validate/ck_pr_unary.c b/regressions/ck_pr/validate/ck_pr_unary.c
new file mode 100644
index 0000000..b2300cd
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_unary.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <ck_pr.h>
+
+#define REPEAT 2000000
+
+#define TEST_UNARY(K, S, M, T, P, D, H) \
+ static void \
+ test_##K##_##S(M *target) \
+ { \
+ *target = *target P 1; \
+ \
+ return; \
+ } \
+ static void \
+ test_##K##_##S##_zero(M *target, bool *zero) \
+ { \
+ *zero = *target == H; \
+ *target = *target P 1; \
+ \
+ return; \
+ } \
+ static void \
+ run_test_##K##_##S(bool use_zero) \
+ { \
+ int i; \
+ T x = 1, y = 1; \
+ bool zero_x = false, zero_y = false; \
+ \
+ use_zero ? puts("***TESTING ck_pr_"#K"_"#S"_zero***") \
+ : puts("***TESTING ck_pr_"#K"_"#S"***"); \
+ for (i = 0; i < REPEAT; ++i) { \
+ if (use_zero) { \
+ test_##K##_##S##_zero(&x, &zero_x); \
+ ck_pr_##K##_##S##_zero(&y, &zero_y); \
+ } \
+ else { \
+ test_##K##_##S(&x); \
+ ck_pr_##K##_##S(&y); \
+ } \
+ \
+ if (x != y || zero_x != zero_y) { \
+ printf("Serial(%"#D") and ck_pr(%"#D")" \
+ #K"_"#S" do not match.\n" \
+ "FAILURE.\n", \
+ x, y); \
+ \
+ return; \
+ } \
+ \
+ if (zero_x) \
+ printf("Variables are zero at iteration %d\n", i); \
+ } \
+ \
+ \
+ printf("\tserial_"#K"_"#S": %"#D"\n" \
+ "\tck_pr_"#K"_"#S": %"#D"\n" \
+ "SUCCESS.\n", \
+ x, y); \
+ \
+ return; \
+ }
+
+#define GENERATE_TEST(K, P, Y, Z) \
+ TEST_UNARY(K, int, int, int, P, d, Y) \
+ TEST_UNARY(K, uint, unsigned int, unsigned int, P, u, Z) \
+ static void \
+ run_test_##K(void) \
+ { \
+ run_test_##K##_int(false); \
+ run_test_##K##_int(true); \
+ run_test_##K##_uint(false); \
+ run_test_##K##_uint(true); \
+ }
+
+GENERATE_TEST(inc, +, -1, UINT_MAX)
+GENERATE_TEST(dec, -, 1, 1)
+
+#undef GENERATE_TEST
+#undef TEST_UNARY
+
+int
+main(void)
+{
+ run_test_inc();
+ run_test_dec();
+
+ return (0);
+}
+
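The _zero variants exercised above report, through their bool out-parameter, whether the increment or decrement left the target at zero, which is what the serial models (comparing against -1, UINT_MAX or 1 before applying the operation) encode. A small sketch of the usual reference-counting idiom built on that behaviour, using ck_pr_dec_int_zero exactly as the test does:

	#include <stdbool.h>
	#include <stdio.h>

	#include <ck_pr.h>

	int
	main(void)
	{
		int refs = 1;
		bool zero = false;

		/* Drop the last reference; zero reports that the counter reached 0. */
		ck_pr_dec_int_zero(&refs, &zero);

		if (zero == true)
			puts("last reference released");

		return 0;
	}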
diff --git a/regressions/ck_pr/validate/ck_pr_xor.c b/regressions/ck_pr/validate/ck_pr_xor.c
new file mode 100644
index 0000000..4515cc4
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_xor.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <ck_pr.h>
+
+#include "../../common.h"
+#ifndef R_REPEAT
+#define R_REPEAT 200000
+#endif
+
+#define BM(m, w) ((uint##m##_t)-1 << (w))
+
+#define CK_PR_XOR_T(w, v, d) \
+ { \
+ uint##w##_t t = v; \
+ ck_pr_xor_##w(&t, d); \
+ if (t != (uint##w##_t)(v ^ d)) { \
+ printf("FAIL ["); \
+ printf("%" PRIu##w " (%" PRIu##w ") -> %" PRIu##w "]\n",\
+ (uint##w##_t)v, d, t); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+#define CK_PR_XOR_B(w) \
+ { \
+ unsigned int __ck_i = 0; \
+ printf("ck_pr_xor_" #w ": "); \
+ if (w < 10) \
+ printf(" "); \
+ for (__ck_i = 0; __ck_i < R_REPEAT; __ck_i++) { \
+ uint##w##_t a = (uint##w##_t)common_rand(); \
+ uint##w##_t b = (uint##w##_t)common_rand(); \
+ CK_PR_XOR_T(w, a, b); \
+ } \
+ rg_width(w); \
+ printf(" SUCCESS\n"); \
+ }
+
+#define CK_PR_XOR_W(m, w) \
+ { \
+ uint##m##_t t = -1; \
+ ck_pr_xor_##w((uint##w##_t *)(void *)&t, -1); \
+ if (t != BM(m, w)) { \
+ printf(" FAIL [%#" PRIx##m " != %#" PRIx##m "]\n", t, BM(m, w)); \
+ exit(EXIT_FAILURE); \
+ } \
+ }
+
+static void
+rg_width(int m)
+{
+
+	/* Mixed-width checks assume x86/x86-64 byte ordering; other architectures may be bi-endian, so skip them. */
+#if !defined(__x86__) && !defined(__x86_64__)
+ return;
+#endif
+
+#ifdef CK_F_PR_XOR_64
+ if (m == 64) {
+#if defined(CK_F_PR_XOR_32)
+ CK_PR_XOR_W(64, 32);
+#endif
+#if defined(CK_PR_XOR_16)
+ CK_PR_XOR_W(64, 16);
+#endif
+#if defined(CK_PR_XOR_8)
+ CK_PR_XOR_W(64, 8);
+#endif
+ }
+#endif /* CK_PR_XOR_64 */
+
+#ifdef CK_F_PR_XOR_32
+ if (m == 32) {
+#if defined(CK_F_PR_XOR_16)
+ CK_PR_XOR_W(32, 16);
+#endif
+#if defined(CK_PR_XOR_8)
+ CK_PR_XOR_W(32, 8);
+#endif
+ }
+#endif /* CK_PR_XOR_32 */
+
+#if defined(CK_F_PR_XOR_16) && defined(CK_PR_XOR_8)
+ if (m == 16) {
+ CK_PR_XOR_W(16, 8);
+ }
+#endif /* CK_PR_XOR_16 && CK_PR_XOR_8 */
+
+ return;
+}
+
+int
+main(void)
+{
+
+ common_srand((unsigned int)getpid());
+
+#ifdef CK_F_PR_XOR_64
+ CK_PR_XOR_B(64);
+#endif
+
+#ifdef CK_F_PR_XOR_32
+ CK_PR_XOR_B(32);
+#endif
+
+#ifdef CK_F_PR_XOR_16
+ CK_PR_XOR_B(16);
+#endif
+
+#ifdef CK_F_PR_XOR_8
+ CK_PR_XOR_B(8);
+#endif
+
+ return (0);
+}
+
diff --git a/regressions/ck_queue/validate/Makefile b/regressions/ck_queue/validate/Makefile
new file mode 100644
index 0000000..d6be3dc
--- /dev/null
+++ b/regressions/ck_queue/validate/Makefile
@@ -0,0 +1,26 @@
+.PHONY: check clean distribution
+
+HEADER=../../../include/ck_queue.h
+OBJECTS=ck_list ck_slist ck_stailq
+
+all: $(OBJECTS)
+
+check: all
+ ./ck_list $(CORES) 5
+ ./ck_slist $(CORES) 5
+ ./ck_stailq $(CORES) 1000000
+
+ck_list: $(HEADER) ck_list.c
+ $(CC) $(CFLAGS) -o ck_list ck_list.c
+
+ck_slist: $(HEADER) ck_slist.c
+ $(CC) $(CFLAGS) -o ck_slist ck_slist.c
+
+ck_stailq: $(HEADER) ck_stailq.c
+ $(CC) $(CFLAGS) -o ck_stailq ck_stailq.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
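As a usage sketch (the thread count of 8 below is only an example), each validator takes a thread count and an entry count, mirroring the check target above; the check target itself expects CORES to be defined, for example as make check CORES=8:

	$ make all
	$ ./ck_list 8 5
	$ ./ck_slist 8 5
	$ ./ck_stailq 8 1000000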
diff --git a/regressions/ck_queue/validate/ck_list.c b/regressions/ck_queue/validate/ck_list.c
new file mode 100644
index 0000000..daa48b1
--- /dev/null
+++ b/regressions/ck_queue/validate/ck_list.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <ck_queue.h>
+
+#include "../../common.h"
+
+struct test {
+ int value;
+ CK_LIST_ENTRY(test) list_entry;
+};
+static CK_LIST_HEAD(test_list, test) head = CK_LIST_HEAD_INITIALIZER(head);
+
+static int goal;
+
+static void
+test_foreach(void)
+{
+ struct test *n, *next, *safe;
+ int i, s = 0, j = 0, k = 0;
+
+ for (i = goal; i != 0; i = goal) {
+ s = 0;
+
+ CK_LIST_FOREACH(n, &head, list_entry) {
+ j++;
+ if (s == 0)
+ s = n->value;
+ else
+ s = s - 1;
+
+ if (n->value != s) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, n->value);
+ }
+
+ next = CK_LIST_NEXT(n, list_entry);
+ if (next != NULL && next->value != s - 1) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, next->value);
+ }
+
+ i--;
+ }
+
+ if (i == 0)
+ break;
+
+ s = 0;
+ CK_LIST_FOREACH_SAFE(n, &head, list_entry, safe) {
+ k++;
+
+ if (s == 0)
+ s = n->value;
+ else
+ s = s - 1;
+
+ if (n->value != s) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, n->value);
+ }
+
+ next = CK_LIST_NEXT(n, list_entry);
+ if (next != NULL && next->value != s - 1) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, next->value);
+ }
+
+ i--;
+ }
+
+ if (i == 0 || CK_LIST_EMPTY(&head) == true)
+ break;
+ }
+
+ fprintf(stderr, "(%d, %d) ", j, k);
+ return;
+}
+
+static void *
+execute(void *c)
+{
+
+ (void)c;
+ test_foreach();
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *thread;
+ struct test *n, a, b;
+ struct test_list target;
+ int n_threads, i;
+
+ if (argc != 3) {
+ ck_error("Usage: %s <number of threads> <number of list entries>\n", argv[0]);
+ }
+
+ n_threads = atoi(argv[1]);
+ if (n_threads < 1) {
+ ck_error("ERROR: Number of threads must be >= 1.\n");
+ }
+
+ thread = malloc(sizeof(pthread_t) * n_threads);
+ assert(thread != NULL);
+
+ goal = atoi(argv[2]);
+ if (goal < 4) {
+ ck_error("ERROR: Number of entries must be >= 4.\n");
+ }
+
+ fprintf(stderr, "Beginning serial test...");
+ CK_LIST_INIT(&head);
+
+ for (i = 1; i <= goal; i++) {
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = i;
+ CK_LIST_INSERT_HEAD(&head, n, list_entry);
+ }
+
+ test_foreach();
+
+ for (i = 1; i <= goal; i++) {
+ n = CK_LIST_FIRST(&head);
+ CK_LIST_REMOVE(n, list_entry);
+ free(n);
+ }
+
+ CK_LIST_INSERT_HEAD(&head, &a, list_entry);
+ CK_LIST_INSERT_HEAD(&head, &b, list_entry);
+ CK_LIST_REMOVE(&a, list_entry);
+ if (CK_LIST_FIRST(&head) != &b)
+ ck_error("List is in invalid state.\n");
+ CK_LIST_REMOVE(&b, list_entry);
+
+ if (CK_LIST_EMPTY(&head) == false) {
+ ck_error("List is not empty after bulk removal.\n");
+ }
+
+ CK_LIST_INSERT_HEAD(&head, &a, list_entry);
+ CK_LIST_INSERT_AFTER(&a, &b, list_entry);
+
+ if (CK_LIST_NEXT(&b, list_entry) != NULL)
+		ck_error("Inserted item after last; it should have no next pointer.\n");
+
+ CK_LIST_INIT(&head);
+
+ CK_LIST_INSERT_HEAD(&head, &a, list_entry);
+ CK_LIST_INSERT_BEFORE(&a, &b, list_entry);
+
+ if (CK_LIST_NEXT(&b, list_entry) != &a)
+		ck_error("Inserted item before last; it should point to the last item.\n");
+
+ CK_LIST_INIT(&head);
+ fprintf(stderr, "done (success)\n");
+
+ fprintf(stderr, "Beginning parallel traversal...");
+
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = 1;
+ CK_LIST_INSERT_HEAD(&head, n, list_entry);
+
+ for (i = 0; i < n_threads; i++) {
+ int r = pthread_create(&thread[i], NULL, execute, NULL);
+ assert(r == 0);
+ }
+
+ for (i = 2; i <= goal; i++) {
+ volatile int j;
+
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = i;
+ CK_LIST_INSERT_HEAD(&head, n, list_entry);
+ for (j = 0; j <= 1000; j++);
+ }
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(thread[i], NULL);
+
+ for (i = 0; i < n_threads; i++) {
+ int r = pthread_create(&thread[i], NULL, execute, NULL);
+ assert(r == 0);
+ }
+
+ CK_LIST_MOVE(&target, &head, list_entry);
+
+ for (i = 1; i <= goal; i++) {
+ volatile int j;
+
+ if (CK_LIST_EMPTY(&target) == false) {
+ struct test *r = CK_LIST_FIRST(&target);
+ CK_LIST_REMOVE(r, list_entry);
+ }
+
+ for (j = 0; j <= 1000; j++);
+ }
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, "done (success)\n");
+ return (0);
+}
+
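The serial half of this test walks the CK_LIST API one macro at a time; for orientation, a minimal single-threaded sketch assembled from the same macros (struct and field names here are illustrative):

	#include <stdio.h>
	#include <ck_queue.h>

	struct node {
		int value;
		CK_LIST_ENTRY(node) entry;
	};

	static CK_LIST_HEAD(node_list, node) list = CK_LIST_HEAD_INITIALIZER(list);

	int
	main(void)
	{
		struct node a, b;
		struct node *cursor;

		a.value = 1;
		b.value = 2;

		CK_LIST_INSERT_HEAD(&list, &a, entry);
		CK_LIST_INSERT_AFTER(&a, &b, entry);

		/* A single writer may mutate while readers traverse concurrently. */
		CK_LIST_FOREACH(cursor, &list, entry)
			printf("%d\n", cursor->value);

		CK_LIST_REMOVE(&b, entry);
		CK_LIST_REMOVE(&a, entry);
		return 0;
	}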
diff --git a/regressions/ck_queue/validate/ck_slist.c b/regressions/ck_queue/validate/ck_slist.c
new file mode 100644
index 0000000..7adf2ef
--- /dev/null
+++ b/regressions/ck_queue/validate/ck_slist.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <ck_queue.h>
+
+#include "../../common.h"
+
+struct test {
+ int value;
+ CK_SLIST_ENTRY(test) list_entry;
+};
+static CK_SLIST_HEAD(test_list, test) head = CK_SLIST_HEAD_INITIALIZER(head);
+
+static int goal;
+
+static void
+test_foreach(void)
+{
+ struct test *n, *next, *safe;
+ int i, s = 0, j = 0, k = 0;
+
+ for (i = goal; i != 0; i = goal) {
+ s = 0;
+
+ CK_SLIST_FOREACH(n, &head, list_entry) {
+ j++;
+ if (s == 0)
+ s = n->value;
+ else
+ s = s - 1;
+
+ if (n->value != s) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, n->value);
+ }
+
+ next = CK_SLIST_NEXT(n, list_entry);
+ if (next != NULL && next->value != s - 1) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, next->value);
+ }
+
+ i--;
+ }
+
+ if (i == 0)
+ break;
+
+ s = 0;
+ CK_SLIST_FOREACH_SAFE(n, &head, list_entry, safe) {
+ k++;
+
+ if (s == 0)
+ s = n->value;
+ else
+ s = s - 1;
+
+ if (n->value != s) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, n->value);
+ }
+
+ next = CK_SLIST_NEXT(n, list_entry);
+ if (next != NULL && next->value != s - 1) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, next->value);
+ }
+
+ i--;
+ }
+
+ if (i == 0 || CK_SLIST_EMPTY(&head) == true)
+ break;
+ }
+
+ fprintf(stderr, "(%d, %d) ", j, k);
+ return;
+}
+
+static void *
+execute(void *c)
+{
+
+ (void)c;
+ test_foreach();
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *thread;
+ struct test *n;
+ struct test_list target;
+ int n_threads, i;
+
+ if (argc != 3) {
+ ck_error("Usage: %s <number of threads> <number of list entries>\n", argv[0]);
+ }
+
+ n_threads = atoi(argv[1]);
+ if (n_threads < 1) {
+ ck_error("ERROR: Number of threads must be >= 1.\n");
+ }
+
+ thread = malloc(sizeof(pthread_t) * n_threads);
+ assert(thread != NULL);
+
+ goal = atoi(argv[2]);
+ if (goal < 4) {
+ ck_error("ERROR: Number of entries must be >= 4.\n");
+ }
+
+ fprintf(stderr, "Beginning serial test...");
+ CK_SLIST_INIT(&head);
+
+ for (i = 1; i <= goal; i++) {
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = i;
+ CK_SLIST_INSERT_HEAD(&head, n, list_entry);
+ }
+
+ test_foreach();
+
+ for (i = 1; i <= goal; i++) {
+ n = CK_SLIST_FIRST(&head);
+ CK_SLIST_REMOVE_HEAD(&head, list_entry);
+ free(n);
+ }
+
+ if (CK_SLIST_EMPTY(&head) == false) {
+ ck_error("List is not empty after bulk removal.\n");
+ }
+
+ fprintf(stderr, "done (success)\n");
+
+ fprintf(stderr, "Beginning parallel traversal...");
+
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = 1;
+ CK_SLIST_INSERT_HEAD(&head, n, list_entry);
+
+ for (i = 0; i < n_threads; i++) {
+ int r = pthread_create(&thread[i], NULL, execute, NULL);
+ assert(r == 0);
+ }
+
+ for (i = 2; i <= goal; i++) {
+ volatile int j;
+
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = i;
+ CK_SLIST_INSERT_HEAD(&head, n, list_entry);
+ for (j = 0; j <= 1000; j++);
+ }
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(thread[i], NULL);
+
+ for (i = 0; i < n_threads; i++) {
+ int r = pthread_create(&thread[i], NULL, execute, NULL);
+ assert(r == 0);
+ }
+
+ CK_SLIST_MOVE(&target, &head, list_entry);
+
+ for (i = 1; i <= goal; i++) {
+ volatile int j;
+
+ if (CK_SLIST_EMPTY(&target) == false)
+ CK_SLIST_REMOVE_HEAD(&target, list_entry);
+
+ for (j = 0; j <= 1000; j++);
+
+ if (CK_SLIST_EMPTY(&target) == false) {
+ struct test *r = CK_SLIST_FIRST(&target);
+ CK_SLIST_REMOVE(&target, r, test, list_entry);
+ }
+ }
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, "done (success)\n");
+ return (0);
+}
+
diff --git a/regressions/ck_queue/validate/ck_stailq.c b/regressions/ck_queue/validate/ck_stailq.c
new file mode 100644
index 0000000..219e93f
--- /dev/null
+++ b/regressions/ck_queue/validate/ck_stailq.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2012-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <ck_queue.h>
+#include "../../common.h"
+
+struct test {
+ int value;
+ CK_STAILQ_ENTRY(test) list_entry;
+};
+static CK_STAILQ_HEAD(test_list, test) head = CK_STAILQ_HEAD_INITIALIZER(head);
+
+static int goal;
+
+static void
+test_foreach(void)
+{
+ struct test *n, *next, *safe;
+ int i, s = 0, j = 0, k = 0;
+
+ for (i = goal; i != 0; i = goal) {
+ s = 0;
+
+ CK_STAILQ_FOREACH(n, &head, list_entry) {
+ j++;
+ if (s == 0)
+ s = n->value;
+ else
+ s = s - 1;
+
+ if (n->value != s) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, n->value);
+ }
+
+ next = CK_STAILQ_NEXT(n, list_entry);
+ if (next != NULL && next->value != s - 1) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, next->value);
+ }
+
+ i--;
+ }
+
+ if (i == 0)
+ break;
+
+ s = 0;
+ CK_STAILQ_FOREACH_SAFE(n, &head, list_entry, safe) {
+ k++;
+
+ if (s == 0)
+ s = n->value;
+ else
+ s = s - 1;
+
+ if (n->value != s) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, n->value);
+ }
+
+ next = CK_STAILQ_NEXT(n, list_entry);
+ if (next != NULL && next->value != s - 1) {
+ ck_error("\nExpected %d, but got %d.\n",
+ s, next->value);
+ }
+
+ i--;
+ }
+
+ if (i == 0 || CK_STAILQ_EMPTY(&head) == true)
+ break;
+ }
+
+ fprintf(stderr, "(%d, %d) ", j, k);
+ return;
+}
+
+static void *
+execute(void *c)
+{
+
+ (void)c;
+ test_foreach();
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *thread;
+ struct test *n, a, b;
+ struct test_list target;
+ int n_threads, i;
+
+ if (argc != 3) {
+ ck_error("Usage: %s <number of threads> <number of list entries>\n", argv[0]);
+ }
+
+ n_threads = atoi(argv[1]);
+ if (n_threads < 1) {
+ ck_error("ERROR: Number of threads must be >= 1.\n");
+ }
+
+ thread = malloc(sizeof(pthread_t) * n_threads);
+ assert(thread != NULL);
+
+ goal = atoi(argv[2]);
+ if (goal < 4) {
+ ck_error("ERROR: Number of entries must be >= 4.\n");
+ }
+
+ fprintf(stderr, "Beginning serial test...");
+ CK_STAILQ_INIT(&head);
+
+ for (i = 1; i <= goal; i++) {
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = i;
+ CK_STAILQ_INSERT_HEAD(&head, n, list_entry);
+ }
+
+ test_foreach();
+
+ for (i = 1; i <= goal; i++) {
+ n = CK_STAILQ_FIRST(&head);
+ CK_STAILQ_REMOVE(&head, n, test, list_entry);
+ free(n);
+ }
+
+ if (CK_STAILQ_EMPTY(&head) == false) {
+ ck_error("List is not empty after bulk removal.\n");
+ }
+
+ for (i = 1; i <= goal; i++) {
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = goal - i;
+ CK_STAILQ_INSERT_TAIL(&head, n, list_entry);
+ }
+
+ test_foreach();
+
+ for (i = 1; i <= goal; i++) {
+ n = CK_STAILQ_FIRST(&head);
+ CK_STAILQ_REMOVE(&head, n, test, list_entry);
+ free(n);
+ }
+
+ if (CK_STAILQ_EMPTY(&head) == false) {
+ ck_error("List is not empty after bulk removal.\n");
+ }
+
+ CK_STAILQ_INSERT_HEAD(&head, &a, list_entry);
+ CK_STAILQ_INSERT_HEAD(&head, &b, list_entry);
+ CK_STAILQ_REMOVE(&head, &a, test, list_entry);
+ if (CK_STAILQ_FIRST(&head) != &b)
+ ck_error("List is in invalid state.\n");
+ CK_STAILQ_REMOVE(&head, &b, test, list_entry);
+
+ if (CK_STAILQ_EMPTY(&head) == false) {
+ ck_error("List is not empty after bulk removal.\n");
+ }
+
+ CK_STAILQ_INSERT_HEAD(&head, &a, list_entry);
+ CK_STAILQ_INSERT_AFTER(&head, &a, &b, list_entry);
+
+ if (CK_STAILQ_NEXT(&b, list_entry) != NULL)
+		ck_error("Inserted item after last; it should have no next pointer.\n");
+
+ CK_STAILQ_INIT(&head);
+
+ CK_STAILQ_INSERT_HEAD(&head, &a, list_entry);
+ if (CK_STAILQ_NEXT(&a, list_entry) != NULL)
+		ck_error("Inserted item as last, but it contains a next pointer.\n");
+
+ CK_STAILQ_INIT(&head);
+ fprintf(stderr, "done (success)\n");
+
+ fprintf(stderr, "Beginning parallel traversal...");
+
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = 1;
+ CK_STAILQ_INSERT_HEAD(&head, n, list_entry);
+
+ for (i = 0; i < n_threads; i++) {
+ int r = pthread_create(&thread[i], NULL, execute, NULL);
+ assert(r == 0);
+ }
+
+ for (i = 2; i <= goal; i++) {
+ volatile int j;
+
+ n = malloc(sizeof *n);
+ assert(n != NULL);
+ n->value = i;
+ CK_STAILQ_INSERT_HEAD(&head, n, list_entry);
+ for (j = 0; j <= 1000; j++);
+ }
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(thread[i], NULL);
+
+ for (i = 0; i < n_threads; i++) {
+ int r = pthread_create(&thread[i], NULL, execute, NULL);
+ assert(r == 0);
+ }
+
+ CK_STAILQ_MOVE(&target, &head, list_entry);
+
+ for (i = 1; i <= goal; i++) {
+ volatile int j;
+
+ if (CK_STAILQ_EMPTY(&target) == false) {
+ struct test *r = CK_STAILQ_FIRST(&target);
+ CK_STAILQ_REMOVE(&target, r, test, list_entry);
+ }
+
+ for (j = 0; j <= 1000; j++);
+ }
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, "done (success)\n");
+ return (0);
+}
+
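CK_STAILQ is the singly-linked tail-queue counterpart, which is why this test also covers tail insertion; a minimal sketch using the same macros (names again illustrative):

	#include <stdio.h>
	#include <ck_queue.h>

	struct node {
		int value;
		CK_STAILQ_ENTRY(node) entry;
	};

	static CK_STAILQ_HEAD(node_queue, node) queue = CK_STAILQ_HEAD_INITIALIZER(queue);

	int
	main(void)
	{
		struct node a, b;
		struct node *cursor;

		a.value = 1;
		b.value = 2;

		/* Tail insertion preserves FIFO order for traversal. */
		CK_STAILQ_INSERT_TAIL(&queue, &a, entry);
		CK_STAILQ_INSERT_TAIL(&queue, &b, entry);

		CK_STAILQ_FOREACH(cursor, &queue, entry)
			printf("%d\n", cursor->value);

		CK_STAILQ_REMOVE(&queue, &a, node, entry);
		CK_STAILQ_REMOVE(&queue, &b, node, entry);
		return 0;
	}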
diff --git a/regressions/ck_rhs/benchmark/Makefile b/regressions/ck_rhs/benchmark/Makefile
new file mode 100644
index 0000000..e997993
--- /dev/null
+++ b/regressions/ck_rhs/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: clean distribution
+
+OBJECTS=serial parallel_bytestring
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_rhs.h ../../../src/ck_rhs.c
+ $(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_rhs.c
+
+parallel_bytestring: parallel_bytestring.c ../../../include/ck_rhs.h ../../../src/ck_rhs.c ../../../src/ck_epoch.c
+ $(CC) $(PTHREAD_CFLAGS) $(CFLAGS) -o parallel_bytestring parallel_bytestring.c ../../../src/ck_rhs.c ../../../src/ck_epoch.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_rhs/benchmark/parallel_bytestring.c b/regressions/ck_rhs/benchmark/parallel_bytestring.c
new file mode 100644
index 0000000..a95d940
--- /dev/null
+++ b/regressions/ck_rhs/benchmark/parallel_bytestring.c
@@ -0,0 +1,599 @@
+/*
+ * Copyright 2012 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "../../common.h"
+#include <ck_rhs.h>
+#include "../../../src/ck_ht_hash.h"
+#include <assert.h>
+#include <ck_epoch.h>
+#include <ck_malloc.h>
+#include <ck_pr.h>
+#include <ck_spinlock.h>
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+static ck_rhs_t hs CK_CC_CACHELINE;
+static char **keys;
+static size_t keys_length = 0;
+static size_t keys_capacity = 128;
+static ck_epoch_t epoch_hs;
+static ck_epoch_record_t epoch_wr;
+static int n_threads;
+static bool next_stage;
+
+enum state {
+ HS_STATE_STOP = 0,
+ HS_STATE_GET,
+ HS_STATE_STRICT_REPLACEMENT,
+ HS_STATE_DELETION,
+ HS_STATE_REPLACEMENT,
+ HS_STATE_COUNT
+};
+
+static ck_spinlock_t mtx = CK_SPINLOCK_INITIALIZER;
+static struct affinity affinerator = AFFINITY_INITIALIZER;
+static uint64_t accumulator[HS_STATE_COUNT];
+static int barrier[HS_STATE_COUNT];
+static int state;
+
+struct hs_epoch {
+ ck_epoch_entry_t epoch_entry;
+};
+
+COMMON_ALARM_DECLARE_GLOBAL(hs_alarm, alarm_event, next_stage)
+
+static void
+alarm_handler(int s)
+{
+
+ (void)s;
+ next_stage = true;
+ return;
+}
+
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+ const char *c = object;
+ unsigned long h;
+
+ h = (unsigned long)MurmurHash64A(c, strlen(c), seed);
+ return h;
+}
+
+static bool
+hs_compare(const void *previous, const void *compare)
+{
+
+ return strcmp(previous, compare) == 0;
+}
+
+static void
+hs_destroy(ck_epoch_entry_t *e)
+{
+
+ free(e);
+ return;
+}
+
+static void *
+hs_malloc(size_t r)
+{
+ ck_epoch_entry_t *b;
+
+ b = malloc(sizeof(*b) + r);
+ return b + 1;
+}
+
+static void
+hs_free(void *p, size_t b, bool r)
+{
+ struct hs_epoch *e = p;
+
+ (void)b;
+
+ if (r == true) {
+ /* Destruction requires safe memory reclamation. */
+ ck_epoch_call(&epoch_wr, &(--e)->epoch_entry, hs_destroy);
+ } else {
+ free(--e);
+ }
+
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = hs_malloc,
+ .free = hs_free
+};
+
+static void
+set_init(void)
+{
+ unsigned int mode = CK_RHS_MODE_OBJECT | CK_RHS_MODE_SPMC;
+
+
+ ck_epoch_init(&epoch_hs);
+ ck_epoch_register(&epoch_hs, &epoch_wr);
+ common_srand48((long int)time(NULL));
+ if (ck_rhs_init(&hs, mode, hs_hash, hs_compare, &my_allocator, 65536, common_lrand48()) == false) {
+ perror("ck_rhs_init");
+ exit(EXIT_FAILURE);
+ }
+
+ return;
+}
+
+static bool
+set_remove(const char *value)
+{
+ unsigned long h;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ return (bool)ck_rhs_remove(&hs, h, value);
+}
+
+static bool
+set_replace(const char *value)
+{
+ unsigned long h;
+ void *previous;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ return ck_rhs_set(&hs, h, value, &previous);
+}
+
+static bool
+set_swap(const char *value)
+{
+ unsigned long h;
+ void *previous;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ return ck_rhs_fas(&hs, h, value, &previous);
+}
+
+static void *
+set_get(const char *value)
+{
+ unsigned long h;
+ void *v;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ v = ck_rhs_get(&hs, h, value);
+ return v;
+}
+
+static bool
+set_insert(const char *value)
+{
+ unsigned long h;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ return ck_rhs_put(&hs, h, value);
+}
+
+static size_t
+set_count(void)
+{
+
+ return ck_rhs_count(&hs);
+}
+
+static bool
+set_reset(void)
+{
+
+ return ck_rhs_reset(&hs);
+}
+
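+/*
+ * Reader threads repeatedly look up every key inside an epoch-protected
+ * section and accumulate the average per-key lookup cost in ticks. When the
+ * shared state variable changes, the accumulated cost for the previous stage
+ * is published under the spinlock and the thread synchronizes with the
+ * writer through the per-stage barrier counters.
+ */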
+static void *
+reader(void *unused)
+{
+ size_t i;
+ ck_epoch_record_t epoch_record;
+ int state_previous = HS_STATE_STOP;
+ int n_state = 0;
+ uint64_t s, j, a;
+
+ (void)unused;
+ if (aff_iterate(&affinerator) != 0)
+ perror("WARNING: Failed to affine thread");
+
+ s = j = a = 0;
+ ck_epoch_register(&epoch_hs, &epoch_record);
+ for (;;) {
+ j++;
+ ck_epoch_begin(&epoch_record, NULL);
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ char *r;
+
+ r = set_get(keys[i]);
+ if (r == NULL) {
+ if (n_state == HS_STATE_STRICT_REPLACEMENT) {
+ ck_error("ERROR: Did not find during replacement: %s\n", keys[i]);
+ }
+
+ continue;
+ }
+
+ if (strcmp(r, keys[i]) == 0)
+ continue;
+
+ ck_error("ERROR: Found invalid value: [%s] but expected [%s]\n", (char *)r, keys[i]);
+ }
+ a += rdtsc() - s;
+ ck_epoch_end(&epoch_record, NULL);
+
+ n_state = ck_pr_load_int(&state);
+ if (n_state != state_previous) {
+ ck_spinlock_lock(&mtx);
+ accumulator[state_previous] += a / (j * keys_length);
+ ck_spinlock_unlock(&mtx);
+
+ ck_pr_inc_int(&barrier[state_previous]);
+ while (ck_pr_load_int(&barrier[state_previous]) != n_threads + 1)
+ ck_pr_stall();
+
+ state_previous = n_state;
+ s = j = a = 0;
+ }
+ }
+
+ return NULL;
+}
+
+static uint64_t
+acc(size_t i)
+{
+ uint64_t r;
+
+ ck_spinlock_lock(&mtx);
+ r = accumulator[i];
+ ck_spinlock_unlock(&mtx);
+
+ return r;
+}
+
+int
+main(int argc, char *argv[])
+{
+ FILE *fp;
+ char buffer[512];
+ size_t i, j, r;
+ unsigned int d = 0;
+ uint64_t s, e, a, repeated;
+ char **t;
+ pthread_t *readers;
+ double p_r, p_d;
+
+ COMMON_ALARM_DECLARE_LOCAL(hs_alarm, alarm_event)
+
+ r = 20;
+ s = 8;
+ p_d = 0.5;
+ p_r = 0.5;
+ n_threads = CORES - 1;
+
+ if (argc < 2) {
+ ck_error("Usage: parallel <dictionary> [<interval length> <initial size> <readers>\n"
+ " <probability of replacement> <probability of deletion> <epoch threshold>]\n");
+ }
+
+ if (argc >= 3)
+ r = atoi(argv[2]);
+
+ if (argc >= 4)
+ s = (uint64_t)atoi(argv[3]);
+
+ if (argc >= 5) {
+ n_threads = atoi(argv[4]);
+ if (n_threads < 1) {
+ ck_error("ERROR: Number of readers must be >= 1.\n");
+ }
+ }
+
+ if (argc >= 6) {
+ p_r = atof(argv[5]) / 100.00;
+ if (p_r < 0) {
+ ck_error("ERROR: Probability of replacement must be >= 0 and <= 100.\n");
+ }
+ }
+
+ if (argc >= 7) {
+ p_d = atof(argv[6]) / 100.00;
+ if (p_d < 0) {
+ ck_error("ERROR: Probability of deletion must be >= 0 and <= 100.\n");
+ }
+ }
+
+ COMMON_ALARM_INIT(hs_alarm, alarm_event, r)
+
+ affinerator.delta = 1;
+ readers = malloc(sizeof(pthread_t) * n_threads);
+ assert(readers != NULL);
+
+ keys = malloc(sizeof(char *) * keys_capacity);
+ assert(keys != NULL);
+
+ fp = fopen(argv[1], "r");
+ assert(fp != NULL);
+
+ while (fgets(buffer, sizeof(buffer), fp) != NULL) {
+ buffer[strlen(buffer) - 1] = '\0';
+ keys[keys_length++] = strdup(buffer);
+ assert(keys[keys_length - 1] != NULL);
+
+ if (keys_length == keys_capacity) {
+ t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
+ assert(t != NULL);
+ keys = t;
+ }
+ }
+
+ t = realloc(keys, sizeof(char *) * keys_length);
+ assert(t != NULL);
+ keys = t;
+
+ set_init();
+
+ for (i = 0; i < (size_t)n_threads; i++) {
+ if (pthread_create(&readers[i], NULL, reader, NULL) != 0) {
+ ck_error("ERROR: Failed to create thread %zu.\n", i);
+ }
+ }
+
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+
+ fprintf(stderr, " [S] %d readers, 1 writer.\n", n_threads);
+ fprintf(stderr, " [S] %zu entries stored and %u duplicates.\n\n",
+ set_count(), d);
+
+ fprintf(stderr, " ,- BASIC TEST\n");
+ fprintf(stderr, " | Executing SMR test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (set_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing replacement test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing get test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (set_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ a = 0;
+ fprintf(stderr, " | Executing removal test...");
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ fprintf(stderr, " | Executing negative look-up test...");
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ set_get("\x50\x03\x04\x05\x06\x10");
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ fprintf(stderr, "done (%" PRIu64 " ticks)\n", a / (r * keys_length));
+
+ ck_epoch_record_t epoch_temporary = epoch_wr;
+ ck_epoch_synchronize(&epoch_wr);
+
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
+ "%u pending, %u peak, %lu reclamations\n\n",
+ epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
+ epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
+
+ fprintf(stderr, " ,- READER CONCURRENCY\n");
+ fprintf(stderr, " | Executing reader test...");
+
+ ck_pr_store_int(&state, HS_STATE_GET);
+ while (ck_pr_load_int(&barrier[HS_STATE_STOP]) != n_threads)
+ ck_pr_stall();
+ ck_pr_inc_int(&barrier[HS_STATE_STOP]);
+ common_sleep(r);
+ ck_pr_store_int(&state, HS_STATE_STRICT_REPLACEMENT);
+ while (ck_pr_load_int(&barrier[HS_STATE_GET]) != n_threads)
+ ck_pr_stall();
+
+ fprintf(stderr, "done (reader = %" PRIu64 " ticks)\n",
+ acc(HS_STATE_GET) / n_threads);
+
+ fprintf(stderr, " | Executing strict replacement test...");
+
+ a = repeated = 0;
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ ck_pr_inc_int(&barrier[HS_STATE_GET]);
+ for (;;) {
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (i & 1) {
+ set_replace(keys[i]);
+ } else {
+ set_swap(keys[i]);
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+
+ ck_pr_store_int(&state, HS_STATE_DELETION);
+ while (ck_pr_load_int(&barrier[HS_STATE_STRICT_REPLACEMENT]) != n_threads)
+ ck_pr_stall();
+ set_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), acc(HS_STATE_STRICT_REPLACEMENT) / n_threads);
+
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ fprintf(stderr, " | Executing deletion test (%.2f)...", p_d * 100);
+ a = repeated = 0;
+ ck_pr_inc_int(&barrier[HS_STATE_STRICT_REPLACEMENT]);
+ for (;;) {
+ double delete;
+
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ set_insert(keys[i]);
+ if (p_d != 0.0) {
+ delete = common_drand48();
+ if (delete <= p_d)
+ set_remove(keys[i]);
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+ ck_pr_store_int(&state, HS_STATE_REPLACEMENT);
+ while (ck_pr_load_int(&barrier[HS_STATE_DELETION]) != n_threads)
+ ck_pr_stall();
+
+ set_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), acc(HS_STATE_DELETION) / n_threads);
+
+ common_alarm(alarm_handler, &alarm_event, r);
+
+ fprintf(stderr, " | Executing replacement test (%.2f)...", p_r * 100);
+ a = repeated = 0;
+ ck_pr_inc_int(&barrier[HS_STATE_DELETION]);
+ for (;;) {
+ double delete, replace;
+
+ repeated++;
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ set_insert(keys[i]);
+ if (p_d != 0.0) {
+ delete = common_drand48();
+ if (delete <= p_d)
+ set_remove(keys[i]);
+ } else {
+ delete = 0.0;
+ }
+
+ if (p_r != 0.0) {
+ replace = common_drand48();
+ if (replace <= p_r) {
+ if ((i & 1) || (delete <= p_d)) {
+ set_replace(keys[i]);
+ } else {
+ set_swap(keys[i]);
+ }
+ }
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+
+ if (next_stage == true) {
+ next_stage = false;
+ break;
+ }
+ }
+ ck_pr_store_int(&state, HS_STATE_STOP);
+ while (ck_pr_load_int(&barrier[HS_STATE_REPLACEMENT]) != n_threads)
+ ck_pr_stall();
+ set_reset();
+ ck_epoch_synchronize(&epoch_wr);
+ fprintf(stderr, "done (writer = %" PRIu64 " ticks, reader = %" PRIu64 " ticks)\n",
+ a / (repeated * keys_length), acc(HS_STATE_REPLACEMENT) / n_threads);
+
+ ck_pr_inc_int(&barrier[HS_STATE_REPLACEMENT]);
+ epoch_temporary = epoch_wr;
+ ck_epoch_synchronize(&epoch_wr);
+
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
+ "%u pending, %u peak, %lu reclamations\n\n",
+ epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
+ epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
+ return 0;
+}
+
diff --git a/regressions/ck_rhs/benchmark/serial.c b/regressions/ck_rhs/benchmark/serial.c
new file mode 100644
index 0000000..18fa892
--- /dev/null
+++ b/regressions/ck_rhs/benchmark/serial.c
@@ -0,0 +1,517 @@
+/*
+ * Copyright 2012 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_rhs.h>
+
+#include <assert.h>
+#include <ck_malloc.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include "../../common.h"
+#include "../../../src/ck_ht_hash.h"
+
+static ck_rhs_t hs;
+static char **keys;
+static size_t keys_length = 0;
+static size_t keys_capacity = 128;
+static unsigned long global_seed;
+
+static void *
+hs_malloc(size_t r)
+{
+
+ return malloc(r);
+}
+
+static void
+hs_free(void *p, size_t b, bool r)
+{
+
+ (void)b;
+ (void)r;
+
+ free(p);
+
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = hs_malloc,
+ .free = hs_free
+};
+
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+ const char *c = object;
+ unsigned long h;
+
+ h = (unsigned long)MurmurHash64A(c, strlen(c), seed);
+ return h;
+}
+
+static bool
+hs_compare(const void *previous, const void *compare)
+{
+
+ return strcmp(previous, compare) == 0;
+}
+
+static void
+set_destroy(void)
+{
+
+ ck_rhs_destroy(&hs);
+ return;
+}
+
+static void
+set_init(unsigned int size, unsigned int mode)
+{
+
+ if (ck_rhs_init(&hs, CK_RHS_MODE_OBJECT | CK_RHS_MODE_SPMC | mode, hs_hash, hs_compare,
+ &my_allocator, size, global_seed) == false) {
+ perror("ck_rhs_init");
+ exit(EXIT_FAILURE);
+ }
+
+ return;
+}
+
+static bool
+set_remove(const char *value)
+{
+ unsigned long h;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ return ck_rhs_remove(&hs, h, value) != NULL;
+}
+
+static bool
+set_swap(const char *value)
+{
+ unsigned long h;
+ void *previous;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ return ck_rhs_fas(&hs, h, value, &previous);
+}
+
+static bool
+set_replace(const char *value)
+{
+ unsigned long h;
+ void *previous;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ ck_rhs_set(&hs, h, value, &previous);
+ return previous != NULL;
+}
+
+static void *
+set_get(const char *value)
+{
+ unsigned long h;
+ void *v;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ v = ck_rhs_get(&hs, h, value);
+ return v;
+}
+
+static bool
+set_insert(const char *value)
+{
+ unsigned long h;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ return ck_rhs_put(&hs, h, value);
+}
+
+static bool
+set_insert_unique(const char *value)
+{
+ unsigned long h;
+
+ h = CK_RHS_HASH(&hs, hs_hash, value);
+ return ck_rhs_put_unique(&hs, h, value);
+}
+
+static size_t
+set_count(void)
+{
+
+ return ck_rhs_count(&hs);
+}
+
+static bool
+set_reset(void)
+{
+
+ return ck_rhs_reset(&hs);
+}
+
+static void
+set_gc(void)
+{
+
+ ck_rhs_gc(&hs);
+ return;
+}
+
+static void
+set_rebuild(void)
+{
+
+ ck_rhs_rebuild(&hs);
+ return;
+}
+
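+/* Shuffle the key array in place so later passes exercise a random access order. */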
+static void
+keys_shuffle(char **k)
+{
+ size_t i, j;
+ char *t;
+
+ for (i = keys_length; i > 1; i--) {
+ j = rand() % (i - 1);
+
+ if (j != i - 1) {
+ t = k[i - 1];
+ k[i - 1] = k[j];
+ k[j] = t;
+ }
+ }
+
+ return;
+}
+
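+/*
+ * Measure the average rdtsc cost of each access pattern (reverse, serial and
+ * random insertion, swap, replace, reverse/serial/random get, removal,
+ * negative get, tombstone reuse, unique insertion, gc and rebuild) and emit
+ * the results as a single row; the column header printed by main() documents
+ * the order.
+ */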
+static void
+run_test(const char *file, size_t r, unsigned int size, unsigned int mode)
+{
+ FILE *fp;
+ char buffer[512];
+ size_t i, j;
+ unsigned int d = 0;
+ uint64_t s, e, a, ri, si, ai, sr, rg, sg, ag, sd, ng, ss, sts, su, sgc, sb;
+ struct ck_rhs_stat st;
+ char **t;
+
+ keys = malloc(sizeof(char *) * keys_capacity);
+ assert(keys != NULL);
+
+ fp = fopen(file, "r");
+ assert(fp != NULL);
+
+ while (fgets(buffer, sizeof(buffer), fp) != NULL) {
+ buffer[strlen(buffer) - 1] = '\0';
+ keys[keys_length++] = strdup(buffer);
+ assert(keys[keys_length - 1] != NULL);
+
+ if (keys_length == keys_capacity) {
+ t = realloc(keys, sizeof(char *) * (keys_capacity *= 2));
+ assert(t != NULL);
+ keys = t;
+ }
+ }
+
+ t = realloc(keys, sizeof(char *) * keys_length);
+ assert(t != NULL);
+ keys = t;
+
+ set_init(size, mode);
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+ ck_rhs_stat(&hs, &st);
+
+ fprintf(stderr, "# %zu entries stored, %u duplicates, %u probe.\n",
+ set_count(), d, st.probe_maximum);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (set_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = keys_length; i > 0; i--)
+ d += set_insert(keys[i - 1]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ ri = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ if (set_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ si = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ keys_shuffle(keys);
+
+ if (set_reset() == false) {
+ ck_error("ERROR: Failed to reset hash table.\n");
+ }
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ d += set_insert(keys[i]) == false;
+ e = rdtsc();
+ a += e - s;
+ }
+ ai = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_swap(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ ss = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_replace(keys[i]);
+ e = rdtsc();
+ a += e - s;
+ }
+ sr = a / (r * keys_length);
+
+ set_reset();
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = keys_length; i > 0; i--) {
+ if (set_get(keys[i - 1]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ rg = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (set_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ sg = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ keys_shuffle(keys);
+
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ if (set_get(keys[i]) == NULL) {
+ ck_error("ERROR: Unexpected NULL value.\n");
+ }
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ ag = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+ }
+ sd = a / (r * keys_length);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++) {
+ set_get("\x50\x03\x04\x05\x06\x10");
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ ng = a / (r * keys_length);
+
+ set_reset();
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_insert(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+ }
+ sts = a / (r * keys_length);
+
+ set_reset();
+
+ /* Prune duplicates. */
+ for (i = 0; i < keys_length; i++) {
+ if (set_insert(keys[i]) == true)
+ continue;
+
+ free(keys[i]);
+ keys[i] = keys[--keys_length];
+ }
+
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ for (i = 0; i < keys_length; i++)
+ set_insert_unique(keys[i]);
+ e = rdtsc();
+ a += e - s;
+
+ for (i = 0; i < keys_length; i++)
+ set_remove(keys[i]);
+ }
+ su = a / (r * keys_length);
+
+ for (i = 0; i < keys_length; i++)
+ set_insert_unique(keys[i]);
+
+ for (i = 0; i < keys_length / 2; i++)
+ set_remove(keys[i]);
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ set_gc();
+ e = rdtsc();
+ a += e - s;
+ }
+ sgc = a / r;
+
+ a = 0;
+ for (j = 0; j < r; j++) {
+ s = rdtsc();
+ set_rebuild();
+ e = rdtsc();
+ a += e - s;
+ }
+ sb = a / r;
+
+ printf("%zu "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 " "
+ "%" PRIu64 "\n",
+ keys_length, ri, si, ai, ss, sr, rg, sg, ag, sd, ng, sts, su, sgc, sb);
+
+ fclose(fp);
+
+ for (i = 0; i < keys_length; i++) {
+ free(keys[i]);
+ }
+
+ free(keys);
+ keys_length = 0;
+ set_destroy();
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int r, size;
+
+ common_srand48((long int)time(NULL));
+ if (argc < 2) {
+ ck_error("Usage: ck_rhs <dictionary> [<repetitions> <initial size>]\n");
+ }
+
+ r = 16;
+ if (argc >= 3)
+ r = atoi(argv[2]);
+
+ size = 8;
+ if (argc >= 4)
+ size = atoi(argv[3]);
+
+ global_seed = common_lrand48();
+ run_test(argv[1], r, size, 0);
+ run_test(argv[1], r, size, CK_RHS_MODE_READ_MOSTLY);
+ fprintf(stderr, "# reverse_insertion serial_insertion random_insertion serial_swap "
+ "serial_replace reverse_get serial_get random_get serial_remove negative_get tombstone "
+ "set_unique gc rebuild\n\n");
+
+ return 0;
+}
+
diff --git a/regressions/ck_rhs/validate/Makefile b/regressions/ck_rhs/validate/Makefile
new file mode 100644
index 0000000..5987395
--- /dev/null
+++ b/regressions/ck_rhs/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=serial
+
+all: $(OBJECTS)
+
+serial: serial.c ../../../include/ck_rhs.h ../../../src/ck_rhs.c
+ $(CC) $(CFLAGS) -o serial serial.c ../../../src/ck_rhs.c
+
+check: all
+ ./serial
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_rhs/validate/serial.c b/regressions/ck_rhs/validate/serial.c
new file mode 100644
index 0000000..ef9365f
--- /dev/null
+++ b/regressions/ck_rhs/validate/serial.c
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2012 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_rhs.h>
+
+#include <assert.h>
+#include <ck_malloc.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "../../common.h"
+
+static void *
+hs_malloc(size_t r)
+{
+
+ return malloc(r);
+}
+
+static void
+hs_free(void *p, size_t b, bool r)
+{
+
+ (void)b;
+ (void)r;
+ free(p);
+ return;
+}
+
+static struct ck_malloc my_allocator = {
+ .malloc = hs_malloc,
+ .free = hs_free
+};
+
+const char *test[] = { "Samy", "Al", "Bahra", "dances", "in", "the", "wind.", "Once",
+ "upon", "a", "time", "his", "gypsy", "ate", "one", "itsy",
+ "bitsy", "spider.", "What", "goes", "up", "must",
+ "come", "down.", "What", "is", "down", "stays",
+ "down.", "A", "B", "C", "D", "E", "F", "G", "H",
+ "I", "J", "K", "L", "M", "N", "O", "P", "Q" };
+
+const char *negative = "negative";
+
+/* Purposefully crappy hash function. */
+static unsigned long
+hs_hash(const void *object, unsigned long seed)
+{
+ const char *c = object;
+ unsigned long h;
+
+ (void)seed;
+ h = c[0];
+ return h;
+}
+
+static bool
+hs_compare(const void *previous, const void *compare)
+{
+
+ return strcmp(previous, compare) == 0;
+}
+
+static void *
+test_ip(void *key, void *closure)
+{
+ const char *a = key;
+ const char *b = closure;
+
+ if (strcmp(a, b) != 0)
+ ck_error("Mismatch: %s != %s\n", a, b);
+
+ return closure;
+}
+
+static void *
+test_negative(void *key, void *closure)
+{
+
+ (void)closure;
+ if (key != NULL)
+ ck_error("ERROR: Apply callback expects NULL argument instead of [%s]\n", key);
+
+ return NULL;
+}
+
+static void *
+test_unique(void *key, void *closure)
+{
+
+ if (key != NULL)
+ ck_error("ERROR: Apply callback expects NULL argument instead of [%s]\n", key);
+
+ return closure;
+}
+
+static void *
+test_remove(void *key, void *closure)
+{
+
+ (void)key;
+ (void)closure;
+
+ return NULL;
+}
+
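+/*
+ * Exercise a chain of hash sets: each iteration validates insertion, growth,
+ * removal, replacement and apply semantics against hs[j], then migrates the
+ * contents into hs[j + 1] with ck_rhs_move before garbage-collecting and
+ * rebuilding the destination.
+ */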
+static void
+run_test(unsigned int is, unsigned int ad)
+{
+ ck_rhs_t hs[16];
+ const size_t size = sizeof(hs) / sizeof(*hs);
+ size_t i, j;
+ const char *blob = "#blobs";
+ unsigned long h;
+
+ if (ck_rhs_init(&hs[0], CK_RHS_MODE_SPMC | CK_RHS_MODE_OBJECT | ad, hs_hash, hs_compare, &my_allocator, is, 6602834) == false)
+ ck_error("ck_rhs_init\n");
+
+ for (j = 0; j < size; j++) {
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ h = test[i][0];
+ if (ck_rhs_get(&hs[j], h, test[i]) != NULL) {
+ continue;
+ }
+
+ if (i & 1) {
+ if (ck_rhs_put_unique(&hs[j], h, test[i]) == false)
+ ck_error("ERROR [%zu]: Failed to insert unique (%s)\n", j, test[i]);
+ } else if (ck_rhs_apply(&hs[j], h, test[i], test_unique,
+ (void *)(uintptr_t)test[i]) == false) {
+ ck_error("ERROR: Failed to apply for insertion.\n");
+ }
+
+ if (i & 1) {
+ if (ck_rhs_remove(&hs[j], h, test[i]) == false)
+ ck_error("ERROR [%zu]: Failed to remove unique (%s)\n", j, test[i]);
+ } else if (ck_rhs_apply(&hs[j], h, test[i], test_remove, NULL) == false) {
+ ck_error("ERROR: Failed to remove apply.\n");
+ }
+
+ if (ck_rhs_apply(&hs[j], h, test[i], test_negative,
+ (void *)(uintptr_t)test[i]) == false)
+ ck_error("ERROR: Failed to apply.\n");
+
+ break;
+ }
+
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ h = test[i][0];
+ ck_rhs_put(&hs[j], h, test[i]);
+ if (ck_rhs_put(&hs[j], h, test[i]) == true) {
+ ck_error("ERROR [%u] [1]: put must fail on collision (%s).\n", is, test[i]);
+ }
+ if (ck_rhs_get(&hs[j], h, test[i]) == NULL) {
+ ck_error("ERROR [%u]: get must not fail after put\n", is);
+ }
+ }
+
+ /* Test grow semantics. */
+ ck_rhs_grow(&hs[j], 128);
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ h = test[i][0];
+ if (ck_rhs_put(&hs[j], h, test[i]) == true) {
+ ck_error("ERROR [%u] [2]: put must fail on collision.\n", is);
+ }
+
+ if (ck_rhs_get(&hs[j], h, test[i]) == NULL) {
+ ck_error("ERROR [%u]: get must not fail\n", is);
+ }
+ }
+
+ h = blob[0];
+ if (ck_rhs_get(&hs[j], h, blob) == NULL) {
+ if (j > 0)
+ ck_error("ERROR [%u]: Blob must always exist after first.\n", is);
+
+ if (ck_rhs_put(&hs[j], h, blob) == false) {
+ ck_error("ERROR [%u]: A unique blob put failed.\n", is);
+ }
+ } else {
+ if (ck_rhs_put(&hs[j], h, blob) == true) {
+ ck_error("ERROR [%u]: Duplicate blob put succeeded.\n", is);
+ }
+ }
+
+ /* Grow set and check get semantics. */
+ ck_rhs_grow(&hs[j], 512);
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ h = test[i][0];
+ if (ck_rhs_get(&hs[j], h, test[i]) == NULL) {
+ ck_error("ERROR [%u]: get must not fail\n", is);
+ }
+ }
+
+ /* Delete and check negative membership. */
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ void *r;
+
+ h = test[i][0];
+ if (ck_rhs_get(&hs[j], h, test[i]) == NULL)
+ continue;
+
+ if (r = ck_rhs_remove(&hs[j], h, test[i]), r == NULL) {
+ ck_error("ERROR [%u]: remove must not fail\n", is);
+ }
+
+ if (strcmp(r, test[i]) != 0) {
+				ck_error("ERROR [%u]: Removed incorrect node (%s != %s)\n", is, (char *)r, test[i]);
+ }
+ }
+
+ /* Test replacement semantics. */
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ void *r;
+ bool d;
+
+ h = test[i][0];
+ d = ck_rhs_get(&hs[j], h, test[i]) != NULL;
+ if (ck_rhs_set(&hs[j], h, test[i], &r) == false) {
+ ck_error("ERROR [%u]: Failed to set\n", is);
+ }
+
+ /* Expected replacement. */
+ if (d == true && (r == NULL || strcmp(r, test[i]) != 0)) {
+ ck_error("ERROR [%u]: Incorrect previous value: %s != %s\n",
+ is, test[i], (char *)r);
+ }
+
+ /* Replacement should succeed. */
+ if (ck_rhs_fas(&hs[j], h, test[i], &r) == false)
+ ck_error("ERROR [%u]: ck_rhs_fas must succeed.\n", is);
+
+ if (strcmp(r, test[i]) != 0) {
+ ck_error("ERROR [%u]: Incorrect replaced value: %s != %s\n",
+ is, test[i], (char *)r);
+ }
+
+ if (ck_rhs_fas(&hs[j], h, negative, &r) == true)
+ ck_error("ERROR [%u]: Replacement of negative should fail.\n", is);
+
+ if (ck_rhs_set(&hs[j], h, test[i], &r) == false) {
+ ck_error("ERROR [%u]: Failed to set [1]\n", is);
+ }
+
+ if (strcmp(r, test[i]) != 0) {
+				ck_error("ERROR [%u]: Invalid &hs[j]: %s != %s\n", is, (char *)r, test[i]);
+ }
+ /* Attempt in-place mutation. */
+ if (ck_rhs_apply(&hs[j], h, test[i], test_ip,
+ (void *)(uintptr_t)test[i]) == false) {
+ ck_error("ERROR [%u]: Failed to apply: %s != %s\n", is, (char *)r, test[i]);
+ }
+
+ d = ck_rhs_get(&hs[j], h, test[i]) != NULL;
+ if (d == false)
+ ck_error("ERROR [%u]: Expected [%s] to exist.\n", is, test[i]);
+ }
+
+ if (j == size - 1)
+ break;
+
+ if (ck_rhs_move(&hs[j + 1], &hs[j], hs_hash, hs_compare, &my_allocator) == false)
+ ck_error("Failed to move hash table");
+
+ ck_rhs_gc(&hs[j + 1]);
+
+ if (ck_rhs_rebuild(&hs[j + 1]) == false)
+ ck_error("Failed to rebuild");
+ }
+
+ return;
+}
+
+int
+main(void)
+{
+ unsigned int k;
+
+ for (k = 16; k <= 64; k <<= 1) {
+ run_test(k, 0);
+ break;
+ }
+
+ return 0;
+}
+
diff --git a/regressions/ck_ring/benchmark/Makefile b/regressions/ck_ring/benchmark/Makefile
new file mode 100644
index 0000000..4087ed1
--- /dev/null
+++ b/regressions/ck_ring/benchmark/Makefile
@@ -0,0 +1,14 @@
+.PHONY: clean distribution
+
+OBJECTS=latency
+
+all: $(OBJECTS)
+
+latency: latency.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o latency latency.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_ring/benchmark/latency.c b/regressions/ck_ring/benchmark/latency.c
new file mode 100644
index 0000000..657be4d
--- /dev/null
+++ b/regressions/ck_ring/benchmark/latency.c
@@ -0,0 +1,142 @@
+#include <ck_ring.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS (128000)
+#endif
+
+struct entry {
+ int tid;
+ int value;
+};
+
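+/*
+ * Time enqueue and dequeue operations in batches of four, dividing the rdtsc
+ * delta by four to approximate per-operation latency, and print the results
+ * for the spsc, spmc, mpsc and mpmc ring variants.
+ */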
+int
+main(int argc, char *argv[])
+{
+ int i, r, size;
+ uint64_t s, e, e_a, d_a;
+ struct entry entry = {0, 0};
+ ck_ring_buffer_t *buf;
+ ck_ring_t ring;
+
+ if (argc != 2) {
+ ck_error("Usage: latency <size>\n");
+ }
+
+ size = atoi(argv[1]);
+ if (size <= 4 || (size & (size - 1))) {
+ ck_error("ERROR: Size must be a power of 2 greater than 4.\n");
+ }
+
+ buf = malloc(sizeof(ck_ring_buffer_t) * size);
+ if (buf == NULL) {
+ ck_error("ERROR: Failed to allocate buffer\n");
+ }
+
+ ck_ring_init(&ring, size);
+
+ e_a = d_a = s = e = 0;
+ for (r = 0; r < ITERATIONS; r++) {
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_enqueue_spsc(&ring, buf, &entry);
+ ck_ring_enqueue_spsc(&ring, buf, &entry);
+ ck_ring_enqueue_spsc(&ring, buf, &entry);
+ ck_ring_enqueue_spsc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ e_a += (e - s) / 4;
+
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_dequeue_spsc(&ring, buf, &entry);
+ ck_ring_dequeue_spsc(&ring, buf, &entry);
+ ck_ring_dequeue_spsc(&ring, buf, &entry);
+ ck_ring_dequeue_spsc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ d_a += (e - s) / 4;
+ }
+
+ printf("spsc %10d %16" PRIu64 " %16" PRIu64 "\n", size, e_a / ITERATIONS, d_a / ITERATIONS);
+
+ e_a = d_a = s = e = 0;
+ for (r = 0; r < ITERATIONS; r++) {
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_enqueue_spmc(&ring, buf, &entry);
+ ck_ring_enqueue_spmc(&ring, buf, &entry);
+ ck_ring_enqueue_spmc(&ring, buf, &entry);
+ ck_ring_enqueue_spmc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ e_a += (e - s) / 4;
+
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_dequeue_spmc(&ring, buf, &entry);
+ ck_ring_dequeue_spmc(&ring, buf, &entry);
+ ck_ring_dequeue_spmc(&ring, buf, &entry);
+ ck_ring_dequeue_spmc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ d_a += (e - s) / 4;
+ }
+
+ printf("spmc %10d %16" PRIu64 " %16" PRIu64 "\n", size, e_a / ITERATIONS, d_a / ITERATIONS);
+
+ ck_ring_init(&ring, size);
+ e_a = d_a = s = e = 0;
+ for (r = 0; r < ITERATIONS; r++) {
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_enqueue_mpsc(&ring, buf, &entry);
+ ck_ring_enqueue_mpsc(&ring, buf, &entry);
+ ck_ring_enqueue_mpsc(&ring, buf, &entry);
+ ck_ring_enqueue_mpsc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ e_a += (e - s) / 4;
+
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_dequeue_mpsc(&ring, buf, &entry);
+ ck_ring_dequeue_mpsc(&ring, buf, &entry);
+ ck_ring_dequeue_mpsc(&ring, buf, &entry);
+ ck_ring_dequeue_mpsc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ d_a += (e - s) / 4;
+ }
+ printf("mpsc %10d %16" PRIu64 " %16" PRIu64 "\n", size, e_a / ITERATIONS, d_a / ITERATIONS);
+ ck_ring_init(&ring, size);
+ e_a = d_a = s = e = 0;
+ for (r = 0; r < ITERATIONS; r++) {
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_enqueue_mpmc(&ring, buf, &entry);
+ ck_ring_enqueue_mpmc(&ring, buf, &entry);
+ ck_ring_enqueue_mpmc(&ring, buf, &entry);
+ ck_ring_enqueue_mpmc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ e_a += (e - s) / 4;
+
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_dequeue_mpmc(&ring, buf, &entry);
+ ck_ring_dequeue_mpmc(&ring, buf, &entry);
+ ck_ring_dequeue_mpmc(&ring, buf, &entry);
+ ck_ring_dequeue_mpmc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ d_a += (e - s) / 4;
+ }
+ printf("mpmc %10d %16" PRIu64 " %16" PRIu64 "\n", size, e_a / ITERATIONS, d_a / ITERATIONS);
+ return (0);
+}
diff --git a/regressions/ck_ring/validate/Makefile b/regressions/ck_ring/validate/Makefile
new file mode 100644
index 0000000..0b68fad
--- /dev/null
+++ b/regressions/ck_ring/validate/Makefile
@@ -0,0 +1,40 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_ring_spsc ck_ring_spmc ck_ring_spmc_template ck_ring_mpmc \
+ ck_ring_mpmc_template
+SIZE=16384
+
+all: $(OBJECTS)
+
+check: all
+ ./ck_ring_spsc $(CORES) 1 $(SIZE)
+ ./ck_ring_spmc $(CORES) 1 $(SIZE)
+ ./ck_ring_spmc_template $(CORES) 1 $(SIZE)
+ ./ck_ring_mpmc $(CORES) 1 $(SIZE)
+ ./ck_ring_mpmc_template $(CORES) 1 $(SIZE)
+
+ck_ring_spsc: ck_ring_spsc.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o ck_ring_spsc ck_ring_spsc.c \
+ ../../../src/ck_barrier_centralized.c
+
+ck_ring_spmc: ck_ring_spmc.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o ck_ring_spmc ck_ring_spmc.c \
+ ../../../src/ck_barrier_centralized.c
+
+ck_ring_mpmc: ck_ring_mpmc.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o ck_ring_mpmc ck_ring_mpmc.c \
+ ../../../src/ck_barrier_centralized.c
+
+ck_ring_mpmc_template: ck_ring_mpmc_template.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o ck_ring_mpmc_template ck_ring_mpmc_template.c \
+ ../../../src/ck_barrier_centralized.c
+
+ck_ring_spmc_template: ck_ring_spmc_template.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o ck_ring_spmc_template ck_ring_spmc_template.c \
+ ../../../src/ck_barrier_centralized.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_ring/validate/ck_ring_mpmc.c b/regressions/ck_ring/validate/ck_ring_mpmc.c
new file mode 100644
index 0000000..66d7f39
--- /dev/null
+++ b/regressions/ck_ring/validate/ck_ring_mpmc.c
@@ -0,0 +1,448 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <ck_barrier.h>
+#include <ck_ring.h>
+#include <ck_spinlock.h>
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+ ck_ring_buffer_t *buffer;
+};
+
+struct entry {
+ unsigned long value_long;
+ unsigned int magic;
+ unsigned int ref;
+ int tid;
+ int value;
+};
+
+static int nthr;
+static ck_ring_t *ring;
+static ck_ring_t ring_mpmc CK_CC_CACHELINE;
+static ck_ring_t ring_mw CK_CC_CACHELINE;
+static struct affinity a;
+static int size;
+static int eb;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static struct context *_context;
+
+static unsigned int global_counter;
+
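+/*
+ * Multi-producer/multi-consumer worker: each thread alternates between
+ * (try)dequeueing from the shared ring and enqueueing freshly allocated
+ * entries tagged with a globally increasing sequence number. The per-producer
+ * sequence cache (csp) verifies that values from any single producer are
+ * observed in increasing order, and the reference counter catches double
+ * dequeues.
+ */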
+static void *
+test_mpmc(void *c)
+{
+ unsigned int observed = 0;
+ unsigned int enqueue = 0;
+ unsigned int seed;
+ int i, k, j, tid;
+ struct context *context = c;
+ ck_ring_buffer_t *buffer;
+	unsigned long *csp;
+
+ csp = malloc(sizeof(*csp) * nthr);
+ assert(csp != NULL);
+
+ memset(csp, 0, sizeof(*csp) * nthr);
+
+ buffer = context->buffer;
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_int(&eb, 1);
+ ck_pr_fence_memory();
+ while (ck_pr_load_int(&eb) != nthr - 1);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ struct entry *o = NULL;
+ int spin;
+
+ /* Keep trying until we encounter at least one node. */
+ if (j & 1) {
+ if (ck_ring_dequeue_mpmc(&ring_mw, buffer, &o) == false)
+ o = NULL;
+ } else {
+ if (ck_ring_trydequeue_mpmc(&ring_mw, buffer, &o) == false)
+ o = NULL;
+ }
+
+ if (o == NULL) {
+ o = malloc(sizeof(*o));
+ if (o == NULL)
+ continue;
+
+ o->value_long = (unsigned long)ck_pr_faa_uint(&global_counter, 1) + 1;
+
+ o->magic = 0xdead;
+ o->ref = 0;
+ o->tid = tid;
+
+ if (ck_ring_enqueue_mpmc(&ring_mw, buffer, o) == false) {
+ free(o);
+ } else {
+ enqueue++;
+ }
+
+ continue;
+ }
+
+ observed++;
+
+ if (o->magic != 0xdead) {
+ ck_error("[%p] (%x)\n",
+ (void *)o, o->magic);
+ }
+
+ o->magic = 0xbeef;
+
+ if (csp[o->tid] >= o->value_long)
+ ck_error("queue semantics violated: %lu <= %lu\n", o->value_long, csp[o->tid]);
+
+ csp[o->tid] = o->value_long;
+
+ if (ck_pr_faa_uint(&o->ref, 1) != 0) {
+ ck_error("[%p] We dequeued twice.\n", (void *)o);
+ }
+
+ if ((i % 4) == 0) {
+ spin = common_rand_r(&seed) % 16384;
+ for (k = 0; k < spin; k++) {
+ ck_pr_stall();
+ }
+ }
+
+ free(o);
+ }
+ }
+
+ fprintf(stderr, "[%d] dequeue=%u enqueue=%u\n", tid, observed, enqueue);
+ return NULL;
+}
+
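+/*
+ * Consumer for the single-producer test: drains entries published by the
+ * main thread, checking that sequence numbers increase monotonically and
+ * that no entry is dequeued twice.
+ */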
+static void *
+test_spmc(void *c)
+{
+ unsigned int observed = 0;
+ unsigned long previous = 0;
+ unsigned int seed;
+ int i, k, j, tid;
+ struct context *context = c;
+ ck_ring_buffer_t *buffer;
+
+ buffer = context->buffer;
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_int(&eb, 1);
+ ck_pr_fence_memory();
+ while (ck_pr_load_int(&eb) != nthr - 1);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ struct entry *o;
+ int spin;
+
+ /* Keep trying until we encounter at least one node. */
+ if (j & 1) {
+ while (ck_ring_dequeue_mpmc(&ring_mpmc, buffer,
+ &o) == false);
+ } else {
+ while (ck_ring_trydequeue_mpmc(&ring_mpmc, buffer,
+ &o) == false);
+ }
+
+ observed++;
+ if (o->value < 0
+ || o->value != o->tid
+ || o->magic != 0xdead
+ || (previous != 0 && previous >= o->value_long)) {
+ ck_error("[0x%p] (%x) (%d, %d) >< (0, %d)\n",
+ (void *)o, o->magic, o->tid, o->value, size);
+ }
+
+ o->magic = 0xbeef;
+ o->value = -31337;
+ o->tid = -31338;
+ previous = o->value_long;
+
+ if (ck_pr_faa_uint(&o->ref, 1) != 0) {
+ ck_error("[%p] We dequeued twice.\n", (void *)o);
+ }
+
+ if ((i % 4) == 0) {
+ spin = common_rand_r(&seed) % 16384;
+ for (k = 0; k < spin; k++) {
+ ck_pr_stall();
+ }
+ }
+
+ free(o);
+ }
+ }
+
+ fprintf(stderr, "[%d] Observed %u\n", tid, observed);
+ return NULL;
+}
+
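+/*
+ * Ring-of-rings test: thread 0 seeds its own ring, then every thread loops
+ * dequeueing from its predecessor's ring and enqueueing the entry into its
+ * own, validating ownership (tid) and value bounds along the way.
+ */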
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ unsigned int s;
+ int i, j;
+ bool r;
+ ck_ring_buffer_t *buffer = context->buffer;
+ ck_barrier_centralized_state_t sense =
+ CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ if (context->tid == 0) {
+ struct entry *entries;
+
+ entries = malloc(sizeof(struct entry) * size);
+ assert(entries != NULL);
+
+ if (ck_ring_size(ring) != 0) {
+ ck_error("More entries than expected: %u > 0\n",
+ ck_ring_size(ring));
+ }
+
+ for (i = 0; i < size; i++) {
+ entries[i].value = i;
+ entries[i].tid = 0;
+
+ if (true) {
+ r = ck_ring_enqueue_mpmc(ring, buffer,
+ entries + i);
+ } else {
+ r = ck_ring_enqueue_mpmc_size(ring, buffer,
+ entries + i, &s);
+
+ if ((int)s != i) {
+ ck_error("Size is %u, expected %d.\n",
+					    s, i);
+ }
+ }
+
+ assert(r != false);
+ }
+
+ if (ck_ring_size(ring) != (unsigned int)size) {
+			ck_error("Fewer entries than expected: %u < %d\n",
+ ck_ring_size(ring), size);
+ }
+
+ if (ck_ring_capacity(ring) != ck_ring_size(ring) + 1) {
+ ck_error("Capacity less than expected: %u < %u\n",
+ ck_ring_size(ring), ck_ring_capacity(ring));
+ }
+ }
+
+ /*
+ * Wait for all threads. The idea here is to maximize the contention.
+ */
+ ck_barrier_centralized(&barrier, &sense, nthr);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ buffer = _context[context->previous].buffer;
+ while (ck_ring_dequeue_mpmc(ring + context->previous,
+ buffer, &entry) == false);
+
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("[%u:%p] %u != %u\n",
+ context->tid, (void *)entry, entry->tid, context->previous);
+ }
+
+ if (entry->value < 0 || entry->value >= size) {
+ ck_error("[%u:%p] %u </> %u\n",
+ context->tid, (void *)entry, entry->tid, context->previous);
+ }
+
+ entry->tid = context->tid;
+ buffer = context->buffer;
+
+ if (true) {
+ r = ck_ring_enqueue_mpmc(ring + context->tid,
+ buffer, entry);
+ } else {
+ r = ck_ring_enqueue_mpmc_size(ring + context->tid,
+ buffer, entry, &s);
+
+ if ((int)s >= size) {
+ ck_error("Size %u out of range of %d\n",
+ s, size);
+ }
+ }
+ assert(r == true);
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ unsigned long l;
+ pthread_t *thread;
+ ck_ring_buffer_t *buffer;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size >= 4 && (size & size - 1) == 0);
+ size -= 1;
+
+ ring = malloc(sizeof(ck_ring_t) * nthr);
+ assert(ring);
+
+ _context = malloc(sizeof(*_context) * nthr);
+ assert(_context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+ fprintf(stderr, "SPSC test:");
+ for (i = 0; i < nthr; i++) {
+ _context[i].tid = i;
+ if (i == 0) {
+ _context[i].previous = nthr - 1;
+ _context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ _context[i].next = 0;
+ _context[i].previous = i - 1;
+ } else {
+ _context[i].next = i + 1;
+ _context[i].previous = i - 1;
+ }
+
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(ck_ring_buffer_t) * (size + 1));
+ _context[i].buffer = buffer;
+ ck_ring_init(ring + i, size + 1);
+ r = pthread_create(thread + i, NULL, test, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, " done\n");
+
+ fprintf(stderr, "SPMC test:\n");
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(void *) * (size + 1));
+ ck_ring_init(&ring_mpmc, size + 1);
+ for (i = 0; i < nthr - 1; i++) {
+ _context[i].buffer = buffer;
+ r = pthread_create(thread + i, NULL, test_spmc, _context + i);
+ assert(r == 0);
+ }
+
+ for (l = 0; l < (unsigned long)size * ITERATIONS * (nthr - 1) ; l++) {
+ struct entry *entry = malloc(sizeof *entry);
+
+ assert(entry != NULL);
+ entry->value_long = l;
+ entry->value = (int)l;
+ entry->tid = (int)l;
+ entry->magic = 0xdead;
+ entry->ref = 0;
+
+ /* Wait until queue is not full. */
+ if (l & 1) {
+ while (ck_ring_enqueue_mpmc(&ring_mpmc,
+ buffer,
+ entry) == false)
+ ck_pr_stall();
+ } else {
+ unsigned int s;
+
+ while (ck_ring_enqueue_mpmc_size(&ring_mpmc,
+ buffer, entry, &s) == false) {
+ ck_pr_stall();
+ }
+
+ if ((int)s >= (size * ITERATIONS * (nthr - 1))) {
+ ck_error("MPMC: Unexpected size of %u\n", s);
+ }
+ }
+ }
+
+ for (i = 0; i < nthr - 1; i++)
+ pthread_join(thread[i], NULL);
+ ck_pr_store_int(&eb, 0);
+ fprintf(stderr, "MPMC test:\n");
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(void *) * (size + 1));
+ ck_ring_init(&ring_mw, size + 1);
+ for (i = 0; i < nthr - 1; i++) {
+ _context[i].buffer = buffer;
+ r = pthread_create(thread + i, NULL, test_mpmc, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr - 1; i++)
+ pthread_join(thread[i], NULL);
+
+ return (0);
+}
diff --git a/regressions/ck_ring/validate/ck_ring_mpmc_template.c b/regressions/ck_ring/validate/ck_ring_mpmc_template.c
new file mode 100644
index 0000000..f076e9a
--- /dev/null
+++ b/regressions/ck_ring/validate/ck_ring_mpmc_template.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <ck_barrier.h>
+#include <ck_ring.h>
+#include <ck_spinlock.h>
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+ struct entry **buffer;
+};
+
+struct entry {
+ unsigned long value_long;
+ unsigned int magic;
+ unsigned int ref;
+ int tid;
+ int value;
+};
+
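+/*
+ * Generate the type-safe CK_RING_*(entry, ...) wrappers used below for rings
+ * of struct entry pointers.
+ */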
+CK_RING_PROTOTYPE(entry, entry *)
+
+static int nthr;
+static ck_ring_t *ring;
+static ck_ring_t ring_spmc CK_CC_CACHELINE;
+static struct affinity a;
+static int size;
+static int eb;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static struct context *_context;
+
+static void *
+test_spmc(void *c)
+{
+ unsigned int observed = 0;
+ unsigned long previous = 0;
+ unsigned int seed;
+ int i, k, j, tid;
+ struct context *context = c;
+ struct entry **buffer;
+
+ buffer = context->buffer;
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_int(&eb, 1);
+ ck_pr_fence_memory();
+ while (ck_pr_load_int(&eb) != nthr - 1);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ struct entry *o;
+ int spin;
+
+ /* Keep trying until we encounter at least one node. */
+ if (j & 1) {
+ while (CK_RING_DEQUEUE_MPMC(entry,
+ &ring_spmc, buffer, &o) == false);
+ } else {
+ while (CK_RING_TRYDEQUEUE_MPMC(entry,
+ &ring_spmc, buffer, &o) == false);
+ }
+
+ observed++;
+ if (o->value < 0
+ || o->value != o->tid
+ || o->magic != 0xdead
+ || (previous != 0 && previous >= o->value_long)) {
+ ck_error("[0x%p] (%x) (%d, %d) >< (0, %d)\n",
+ (void *)o, o->magic, o->tid, o->value, size);
+ }
+
+ o->magic = 0xbeef;
+ o->value = -31337;
+ o->tid = -31338;
+ previous = o->value_long;
+
+ if (ck_pr_faa_uint(&o->ref, 1) != 0) {
+ ck_error("[%p] We dequeued twice.\n", (void *)o);
+ }
+
+ if ((i % 4) == 0) {
+ spin = common_rand_r(&seed) % 16384;
+ for (k = 0; k < spin; k++) {
+ ck_pr_stall();
+ }
+ }
+
+ free(o);
+ }
+ }
+
+ fprintf(stderr, "[%d] Observed %u\n", tid, observed);
+ return NULL;
+}
+
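+/*
+ * Same ring-of-rings exercise as ck_ring_mpmc.c, but driven through the
+ * generated CK_RING_*(entry, ...) macros instead of the generic
+ * ck_ring_*_mpmc functions.
+ */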
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ unsigned int s;
+ int i, j;
+ bool r;
+ struct entry **buffer = context->buffer;
+ ck_barrier_centralized_state_t sense =
+ CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ if (context->tid == 0) {
+ struct entry **entries;
+
+ entries = malloc(sizeof(struct entry *) * size);
+ assert(entries != NULL);
+
+ if (ck_ring_size(ring) != 0) {
+ ck_error("More entries than expected: %u > 0\n",
+ ck_ring_size(ring));
+ }
+
+ for (i = 0; i < size; i++) {
+ entries[i] = malloc(sizeof(struct entry));
+ assert(entries[i] != NULL);
+
+ entries[i]->value = i;
+ entries[i]->tid = 0;
+
+ if (i & 1) {
+ r = CK_RING_ENQUEUE_MPMC(entry, ring, buffer,
+ &entries[i]);
+ } else {
+ r = CK_RING_ENQUEUE_MPMC_SIZE(entry, ring,
+ buffer, &entries[i], &s);
+
+ if ((int)s != i) {
+ ck_error("Size is %u, expected %d.\n",
+					    s, i);
+ }
+ }
+
+ assert(r != false);
+ }
+
+ if (ck_ring_size(ring) != (unsigned int)size) {
+			ck_error("Fewer entries than expected: %u < %d\n",
+ ck_ring_size(ring), size);
+ }
+
+ if (ck_ring_capacity(ring) != ck_ring_size(ring) + 1) {
+ ck_error("Capacity less than expected: %u < %u\n",
+ ck_ring_size(ring), ck_ring_capacity(ring));
+ }
+ }
+
+ /*
+ * Wait for all threads. The idea here is to maximize the contention.
+ */
+ ck_barrier_centralized(&barrier, &sense, nthr);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ buffer = _context[context->previous].buffer;
+ while (CK_RING_DEQUEUE_MPMC(entry,
+ ring + context->previous,
+ buffer, &entry) == false);
+
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("[%u:%p] %u != %u\n",
+ context->tid, (void *)entry,
+ entry->tid, context->previous);
+ }
+
+ if (entry->value < 0 || entry->value >= size) {
+ ck_error("[%u:%p] %u </> %u\n",
+ context->tid, (void *)entry,
+ entry->tid, context->previous);
+ }
+
+ entry->tid = context->tid;
+ buffer = context->buffer;
+
+ if (i & 1) {
+ r = CK_RING_ENQUEUE_MPMC(entry,
+ ring + context->tid,
+ buffer, &entry);
+ } else {
+ r = CK_RING_ENQUEUE_MPMC_SIZE(entry,
+ ring + context->tid,
+ buffer, &entry, &s);
+
+ if ((int)s >= size) {
+ ck_error("Size %u out of range of %d\n",
+ s, size);
+ }
+ }
+ assert(r == true);
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ unsigned long l;
+ pthread_t *thread;
+ struct entry **buffer;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size >= 4 && (size & size - 1) == 0);
+ size -= 1;
+
+ ring = malloc(sizeof(ck_ring_t) * nthr);
+ assert(ring);
+
+ _context = malloc(sizeof(*_context) * nthr);
+ assert(_context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
+ fprintf(stderr, "SPSC test:");
+ for (i = 0; i < nthr; i++) {
+ _context[i].tid = i;
+ if (i == 0) {
+ _context[i].previous = nthr - 1;
+ _context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ _context[i].next = 0;
+ _context[i].previous = i - 1;
+ } else {
+ _context[i].next = i + 1;
+ _context[i].previous = i - 1;
+ }
+
+ buffer = malloc(sizeof(struct entry *) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(struct entry *) * (size + 1));
+ _context[i].buffer = buffer;
+ ck_ring_init(ring + i, size + 1);
+ r = pthread_create(thread + i, NULL, test, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, " done\n");
+
+ fprintf(stderr, "MPMC test:\n");
+ buffer = malloc(sizeof(struct entry *) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(struct entry *) * (size + 1));
+ ck_ring_init(&ring_spmc, size + 1);
+ for (i = 0; i < nthr - 1; i++) {
+ _context[i].buffer = buffer;
+ r = pthread_create(thread + i, NULL, test_spmc, _context + i);
+ assert(r == 0);
+ }
+
+ for (l = 0; l < (unsigned long)size * ITERATIONS * (nthr - 1) ; l++) {
+ struct entry *entry = malloc(sizeof *entry);
+
+ assert(entry != NULL);
+ entry->value_long = l;
+ entry->value = (int)l;
+ entry->tid = (int)l;
+ entry->magic = 0xdead;
+ entry->ref = 0;
+
+ /* Wait until queue is not full. */
+ if (l & 1) {
+ while (CK_RING_ENQUEUE_MPMC(entry, &ring_spmc,
+ buffer, &entry) == false) {
+ ck_pr_stall();
+ }
+ } else {
+ unsigned int s;
+
+ while (CK_RING_ENQUEUE_MPMC_SIZE(entry, &ring_spmc,
+ buffer, &entry, &s) == false) {
+ ck_pr_stall();
+ }
+
+ if ((int)s >= (size * ITERATIONS * (nthr - 1))) {
+ ck_error("MPMC: Unexpected size of %u\n", s);
+ }
+ }
+ }
+
+ for (i = 0; i < nthr - 1; i++)
+ pthread_join(thread[i], NULL);
+
+ return 0;
+}
diff --git a/regressions/ck_ring/validate/ck_ring_spmc.c b/regressions/ck_ring/validate/ck_ring_spmc.c
new file mode 100644
index 0000000..161c0d8
--- /dev/null
+++ b/regressions/ck_ring/validate/ck_ring_spmc.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <ck_barrier.h>
+#include <ck_ring.h>
+#include <ck_spinlock.h>
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+ ck_ring_buffer_t *buffer;
+};
+
+struct entry {
+ unsigned long value_long;
+ unsigned int magic;
+ unsigned int ref;
+ int tid;
+ int value;
+};
+
+static int nthr;
+static ck_ring_t *ring;
+static ck_ring_t ring_spmc CK_CC_CACHELINE;
+static struct affinity a;
+static int size;
+static int eb;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static struct context *_context;
+
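+/*
+ * Consumer threads for the SPMC ring: entries are produced by the main
+ * thread and must be observed with strictly increasing sequence numbers,
+ * each exactly once.
+ */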
+static void *
+test_spmc(void *c)
+{
+ unsigned int observed = 0;
+ unsigned long previous = 0;
+ unsigned int seed;
+ int i, k, j, tid;
+ struct context *context = c;
+ ck_ring_buffer_t *buffer;
+
+ buffer = context->buffer;
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_int(&eb, 1);
+ ck_pr_fence_memory();
+ while (ck_pr_load_int(&eb) != nthr - 1);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ struct entry *o;
+ int spin;
+
+ /* Keep trying until we encounter at least one node. */
+ if (j & 1) {
+ while (ck_ring_dequeue_spmc(&ring_spmc, buffer,
+ &o) == false);
+ } else {
+ while (ck_ring_trydequeue_spmc(&ring_spmc, buffer,
+ &o) == false);
+ }
+
+ observed++;
+ if (o->value < 0
+ || o->value != o->tid
+ || o->magic != 0xdead
+ || (previous != 0 && previous >= o->value_long)) {
+ ck_error("[0x%p] (%x) (%d, %d) >< (0, %d)\n",
+ (void *)o, o->magic, o->tid, o->value, size);
+ }
+
+ o->magic = 0xbeef;
+ o->value = -31337;
+ o->tid = -31338;
+ previous = o->value_long;
+
+ if (ck_pr_faa_uint(&o->ref, 1) != 0) {
+ ck_error("[%p] We dequeued twice.\n", (void *)o);
+ }
+
+ if ((i % 4) == 0) {
+ spin = common_rand_r(&seed) % 16384;
+ for (k = 0; k < spin; k++) {
+ ck_pr_stall();
+ }
+ }
+
+ free(o);
+ }
+ }
+
+ fprintf(stderr, "[%d] Observed %u\n", tid, observed);
+ return NULL;
+}
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ unsigned int s;
+ int i, j;
+ bool r;
+ ck_ring_buffer_t *buffer = context->buffer;
+ ck_barrier_centralized_state_t sense =
+ CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ if (context->tid == 0) {
+ struct entry *entries;
+
+ entries = malloc(sizeof(struct entry) * size);
+ assert(entries != NULL);
+
+ if (ck_ring_size(ring) != 0) {
+ ck_error("More entries than expected: %u > 0\n",
+ ck_ring_size(ring));
+ }
+
+ for (i = 0; i < size; i++) {
+ entries[i].value = i;
+ entries[i].tid = 0;
+
+ if (i & 1) {
+ r = ck_ring_enqueue_spmc(ring, buffer,
+ entries + i);
+ } else {
+ r = ck_ring_enqueue_spmc_size(ring, buffer,
+ entries + i, &s);
+
+ if ((int)s != i) {
+ ck_error("Size is %u, expected %d.\n",
+					    s, i);
+ }
+ }
+
+ assert(r != false);
+ }
+
+ if (ck_ring_size(ring) != (unsigned int)size) {
+ ck_error("Less entries than expected: %u < %d\n",
+ ck_ring_size(ring), size);
+ }
+
+ if (ck_ring_capacity(ring) != ck_ring_size(ring) + 1) {
+ ck_error("Capacity less than expected: %u < %u\n",
+ ck_ring_size(ring), ck_ring_capacity(ring));
+ }
+ }
+
+ /*
+ * Wait for all threads. The idea here is to maximize the contention.
+ */
+ ck_barrier_centralized(&barrier, &sense, nthr);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ buffer = _context[context->previous].buffer;
+ while (ck_ring_dequeue_spmc(ring + context->previous,
+ buffer, &entry) == false);
+
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("[%u:%p] %u != %u\n",
+ context->tid, (void *)entry, entry->tid, context->previous);
+ }
+
+ if (entry->value < 0 || entry->value >= size) {
+				ck_error("[%u:%p] %d </> %d\n",
+				    context->tid, (void *)entry, entry->value, size);
+ }
+
+ entry->tid = context->tid;
+ buffer = context->buffer;
+
+ if (i & 1) {
+ r = ck_ring_enqueue_spmc(ring + context->tid,
+ buffer, entry);
+ } else {
+ r = ck_ring_enqueue_spmc_size(ring + context->tid,
+ buffer, entry, &s);
+
+ if ((int)s >= size) {
+ ck_error("Size %u out of range of %d\n",
+ s, size);
+ }
+ }
+ assert(r == true);
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ unsigned long l;
+ pthread_t *thread;
+ ck_ring_buffer_t *buffer;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size >= 4 && (size & size - 1) == 0);
+ size -= 1;
+
+ ring = malloc(sizeof(ck_ring_t) * nthr);
+ assert(ring);
+
+ _context = malloc(sizeof(*_context) * nthr);
+ assert(_context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
+ fprintf(stderr, "SPSC test:");
+ for (i = 0; i < nthr; i++) {
+ _context[i].tid = i;
+ if (i == 0) {
+ _context[i].previous = nthr - 1;
+ _context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ _context[i].next = 0;
+ _context[i].previous = i - 1;
+ } else {
+ _context[i].next = i + 1;
+ _context[i].previous = i - 1;
+ }
+
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(ck_ring_buffer_t) * (size + 1));
+ _context[i].buffer = buffer;
+ ck_ring_init(ring + i, size + 1);
+ r = pthread_create(thread + i, NULL, test, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, " done\n");
+
+ fprintf(stderr, "SPMC test:\n");
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(void *) * (size + 1));
+ ck_ring_init(&ring_spmc, size + 1);
+ for (i = 0; i < nthr - 1; i++) {
+ _context[i].buffer = buffer;
+ r = pthread_create(thread + i, NULL, test_spmc, _context + i);
+ assert(r == 0);
+ }
+
+ for (l = 0; l < (unsigned long)size * ITERATIONS * (nthr - 1) ; l++) {
+ struct entry *entry = malloc(sizeof *entry);
+
+ assert(entry != NULL);
+ entry->value_long = l;
+ entry->value = (int)l;
+ entry->tid = (int)l;
+ entry->magic = 0xdead;
+ entry->ref = 0;
+
+ /* Wait until queue is not full. */
+ if (l & 1) {
+ while (ck_ring_enqueue_spmc(&ring_spmc,
+ buffer,
+ entry) == false)
+ ck_pr_stall();
+ } else {
+ unsigned int s;
+
+ while (ck_ring_enqueue_spmc_size(&ring_spmc,
+ buffer, entry, &s) == false) {
+ ck_pr_stall();
+ }
+
+ if ((int)s >= (size * ITERATIONS * (nthr - 1))) {
+				ck_error("SPMC: Unexpected size of %u\n", s);
+ }
+ }
+ }
+
+ for (i = 0; i < nthr - 1; i++)
+ pthread_join(thread[i], NULL);
+
+ return (0);
+}
+
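
A minimal standalone sketch of the pointer-based SPMC calls exercised by the test above, reduced to a single thread for brevity. Only the ck_ring_* calls themselves come from the test; the ring size of 8, the integer payload and the simple drain loop are illustrative choices.

#include <assert.h>
#include <stdio.h>
#include <string.h>

#include <ck_ring.h>

int
main(void)
{
	ck_ring_t ring;
	ck_ring_buffer_t buffer[8];
	int values[4] = { 1, 2, 3, 4 };
	int *out;
	unsigned int i;

	/* A ring of size n (power of two) stores at most n - 1 entries. */
	memset(buffer, 0, sizeof buffer);
	ck_ring_init(&ring, 8);

	for (i = 0; i < 4; i++) {
		if (ck_ring_enqueue_spmc(&ring, buffer, &values[i]) == false)
			return 1;
	}

	/*
	 * trydequeue_spmc fails fast under contention; dequeue_spmc only
	 * returns false once the ring is empty.
	 */
	while (ck_ring_dequeue_spmc(&ring, buffer, &out) == true)
		printf("dequeued %d\n", *out);

	assert(ck_ring_size(&ring) == 0);
	return 0;
}
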
diff --git a/regressions/ck_ring/validate/ck_ring_spmc_template.c b/regressions/ck_ring/validate/ck_ring_spmc_template.c
new file mode 100644
index 0000000..bbc75c1
--- /dev/null
+++ b/regressions/ck_ring/validate/ck_ring_spmc_template.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <ck_barrier.h>
+#include <ck_ring.h>
+#include <ck_spinlock.h>
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+ struct entry **buffer;
+};
+
+struct entry {
+ unsigned long value_long;
+ unsigned int magic;
+ unsigned int ref;
+ int tid;
+ int value;
+};
+
+CK_RING_PROTOTYPE(entry, entry *)
+
+static int nthr;
+static ck_ring_t *ring;
+static ck_ring_t ring_spmc CK_CC_CACHELINE;
+static struct affinity a;
+static int size;
+static int eb;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static struct context *_context;
+
+static void *
+test_spmc(void *c)
+{
+ unsigned int observed = 0;
+ unsigned long previous = 0;
+ unsigned int seed;
+ int i, k, j, tid;
+ struct context *context = c;
+ struct entry **buffer;
+
+ buffer = context->buffer;
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_int(&eb, 1);
+ ck_pr_fence_memory();
+ while (ck_pr_load_int(&eb) != nthr - 1);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ struct entry *o;
+ int spin;
+
+ /* Keep trying until we encounter at least one node. */
+ if (j & 1) {
+ while (CK_RING_DEQUEUE_SPMC(entry,
+ &ring_spmc, buffer, &o) == false);
+ } else {
+ while (CK_RING_TRYDEQUEUE_SPMC(entry,
+ &ring_spmc, buffer, &o) == false);
+ }
+
+ observed++;
+ if (o->value < 0
+ || o->value != o->tid
+ || o->magic != 0xdead
+ || (previous != 0 && previous >= o->value_long)) {
+ ck_error("[0x%p] (%x) (%d, %d) >< (0, %d)\n",
+ (void *)o, o->magic, o->tid, o->value, size);
+ }
+
+ o->magic = 0xbeef;
+ o->value = -31337;
+ o->tid = -31338;
+ previous = o->value_long;
+
+ if (ck_pr_faa_uint(&o->ref, 1) != 0) {
+ ck_error("[%p] We dequeued twice.\n", (void *)o);
+ }
+
+ if ((i % 4) == 0) {
+ spin = common_rand_r(&seed) % 16384;
+ for (k = 0; k < spin; k++) {
+ ck_pr_stall();
+ }
+ }
+
+ free(o);
+ }
+ }
+
+ fprintf(stderr, "[%d] Observed %u\n", tid, observed);
+ return NULL;
+}
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ unsigned int s;
+ int i, j;
+ bool r;
+ struct entry **buffer = context->buffer;
+ ck_barrier_centralized_state_t sense =
+ CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ if (context->tid == 0) {
+ struct entry **entries;
+
+ entries = malloc(sizeof(struct entry *) * size);
+ assert(entries != NULL);
+
+ if (ck_ring_size(ring) != 0) {
+ ck_error("More entries than expected: %u > 0\n",
+ ck_ring_size(ring));
+ }
+
+ for (i = 0; i < size; i++) {
+ entries[i] = malloc(sizeof(struct entry));
+ assert(entries[i] != NULL);
+
+ entries[i]->value = i;
+ entries[i]->tid = 0;
+
+ if (i & 1) {
+ r = CK_RING_ENQUEUE_SPMC(entry, ring, buffer,
+ &entries[i]);
+ } else {
+ r = CK_RING_ENQUEUE_SPMC_SIZE(entry, ring,
+ buffer, &entries[i], &s);
+
+ if ((int)s != i) {
+ ck_error("Size is %u, expected %d.\n",
+					    s, i);
+ }
+ }
+
+ assert(r != false);
+ }
+
+ if (ck_ring_size(ring) != (unsigned int)size) {
+ ck_error("Less entries than expected: %u < %d\n",
+ ck_ring_size(ring), size);
+ }
+
+ if (ck_ring_capacity(ring) != ck_ring_size(ring) + 1) {
+ ck_error("Capacity less than expected: %u < %u\n",
+ ck_ring_size(ring), ck_ring_capacity(ring));
+ }
+ }
+
+ /*
+ * Wait for all threads. The idea here is to maximize the contention.
+ */
+ ck_barrier_centralized(&barrier, &sense, nthr);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ buffer = _context[context->previous].buffer;
+ while (CK_RING_DEQUEUE_SPMC(entry,
+ ring + context->previous,
+ buffer, &entry) == false);
+
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("[%u:%p] %u != %u\n",
+ context->tid, (void *)entry,
+ entry->tid, context->previous);
+ }
+
+ if (entry->value < 0 || entry->value >= size) {
+				ck_error("[%u:%p] %d </> %d\n",
+				    context->tid, (void *)entry,
+				    entry->value, size);
+ }
+
+ entry->tid = context->tid;
+ buffer = context->buffer;
+
+ if (i & 1) {
+ r = CK_RING_ENQUEUE_SPMC(entry,
+ ring + context->tid,
+ buffer, &entry);
+ } else {
+ r = CK_RING_ENQUEUE_SPMC_SIZE(entry,
+ ring + context->tid,
+ buffer, &entry, &s);
+
+ if ((int)s >= size) {
+ ck_error("Size %u out of range of %d\n",
+ s, size);
+ }
+ }
+ assert(r == true);
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ unsigned long l;
+ pthread_t *thread;
+ struct entry **buffer;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size >= 4 && (size & size - 1) == 0);
+ size -= 1;
+
+ ring = malloc(sizeof(ck_ring_t) * nthr);
+ assert(ring);
+
+ _context = malloc(sizeof(*_context) * nthr);
+ assert(_context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
+ fprintf(stderr, "SPSC test:");
+ for (i = 0; i < nthr; i++) {
+ _context[i].tid = i;
+ if (i == 0) {
+ _context[i].previous = nthr - 1;
+ _context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ _context[i].next = 0;
+ _context[i].previous = i - 1;
+ } else {
+ _context[i].next = i + 1;
+ _context[i].previous = i - 1;
+ }
+
+ buffer = malloc(sizeof(struct entry *) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(struct entry *) * (size + 1));
+ _context[i].buffer = buffer;
+ ck_ring_init(ring + i, size + 1);
+ r = pthread_create(thread + i, NULL, test, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, " done\n");
+
+ fprintf(stderr, "SPMC test:\n");
+ buffer = malloc(sizeof(struct entry *) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(struct entry *) * (size + 1));
+ ck_ring_init(&ring_spmc, size + 1);
+ for (i = 0; i < nthr - 1; i++) {
+ _context[i].buffer = buffer;
+ r = pthread_create(thread + i, NULL, test_spmc, _context + i);
+ assert(r == 0);
+ }
+
+ for (l = 0; l < (unsigned long)size * ITERATIONS * (nthr - 1) ; l++) {
+ struct entry *entry = malloc(sizeof *entry);
+
+ assert(entry != NULL);
+ entry->value_long = l;
+ entry->value = (int)l;
+ entry->tid = (int)l;
+ entry->magic = 0xdead;
+ entry->ref = 0;
+
+ /* Wait until queue is not full. */
+ if (l & 1) {
+ while (CK_RING_ENQUEUE_SPMC(entry, &ring_spmc,
+ buffer, &entry) == false) {
+ ck_pr_stall();
+ }
+ } else {
+ unsigned int s;
+
+ while (CK_RING_ENQUEUE_SPMC_SIZE(entry, &ring_spmc,
+ buffer, &entry, &s) == false) {
+ ck_pr_stall();
+ }
+
+ if ((int)s >= (size * ITERATIONS * (nthr - 1))) {
+				ck_error("SPMC: Unexpected size of %u\n", s);
+ }
+ }
+ }
+
+ for (i = 0; i < nthr - 1; i++)
+ pthread_join(thread[i], NULL);
+
+ return 0;
+}
+
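
A condensed, single-threaded sketch of the typed ck_ring macros the template test above declares. The struct entry element type and every CK_RING_* invocation mirror the test; the driver flow and the literal values are simplifications added here.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#include <ck_ring.h>

struct entry {
	int value;
};

/* Generates typed variants that traffic in struct entry * elements. */
CK_RING_PROTOTYPE(entry, entry *)

int
main(void)
{
	ck_ring_t ring;
	struct entry **buffer;
	struct entry node = { .value = 42 };
	struct entry *in = &node, *out;
	unsigned int s;

	buffer = calloc(8, sizeof(struct entry *));
	assert(buffer != NULL);
	ck_ring_init(&ring, 8);

	/* The _SIZE variant also reports the occupancy seen at enqueue time. */
	if (CK_RING_ENQUEUE_SPMC_SIZE(entry, &ring, buffer, &in, &s) == false)
		return 1;

	if (CK_RING_DEQUEUE_SPMC(entry, &ring, buffer, &out) == false)
		return 1;

	printf("occupancy %u, dequeued value %d\n", s, out->value);
	free(buffer);
	return 0;
}
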
diff --git a/regressions/ck_ring/validate/ck_ring_spsc.c b/regressions/ck_ring/validate/ck_ring_spsc.c
new file mode 100644
index 0000000..910f7e6
--- /dev/null
+++ b/regressions/ck_ring/validate/ck_ring_spsc.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+
+#include <ck_barrier.h>
+#include <ck_ring.h>
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+ void *buffer;
+};
+
+struct entry {
+ int tid;
+ int value;
+};
+
+static int nthr;
+static ck_ring_t *ring;
+static struct affinity a;
+static int size;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static struct context *_context;
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ unsigned int s;
+ int i, j;
+ bool r;
+ ck_barrier_centralized_state_t sense =
+ CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+ ck_ring_buffer_t *buffer;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ buffer = context->buffer;
+ if (context->tid == 0) {
+ struct entry *entries;
+
+ entries = malloc(sizeof(struct entry) * size);
+ assert(entries != NULL);
+
+ if (ck_ring_size(ring) != 0) {
+ ck_error("More entries than expected: %u > 0\n",
+ ck_ring_size(ring));
+ }
+
+ for (i = 0; i < size; i++) {
+ entries[i].value = i;
+ entries[i].tid = 0;
+
+ if (i & 1) {
+ r = ck_ring_enqueue_spsc(ring, buffer,
+ entries + i);
+ } else {
+ r = ck_ring_enqueue_spsc_size(ring,
+ buffer, entries + i, &s);
+
+ if ((int)s != i) {
+ ck_error("Size is %u, expected %d\n",
+					    s, i);
+ }
+ }
+
+ assert(r != false);
+ }
+
+ if (ck_ring_size(ring) != (unsigned int)size) {
+ ck_error("Less entries than expected: %u < %d\n",
+ ck_ring_size(ring), size);
+ }
+
+ if (ck_ring_capacity(ring) != ck_ring_size(ring) + 1) {
+ ck_error("Capacity less than expected: %u < %u\n",
+ ck_ring_size(ring), ck_ring_capacity(ring));
+ }
+ }
+
+ ck_barrier_centralized(&barrier, &sense, nthr);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ buffer = _context[context->previous].buffer;
+ while (ck_ring_dequeue_spsc(ring + context->previous,
+ buffer, &entry) == false);
+
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("[%u:%p] %u != %u\n",
+ context->tid, (void *)entry, entry->tid, context->previous);
+ }
+
+ if (entry->value != j) {
+				ck_error("[%u:%p] %d != %d\n",
+				    context->tid, (void *)entry, entry->value, j);
+ }
+
+ entry->tid = context->tid;
+ buffer = context->buffer;
+ if (i & 1) {
+ r = ck_ring_enqueue_spsc(ring + context->tid,
+ buffer, entry);
+ } else {
+ r = ck_ring_enqueue_spsc_size(ring +
+ context->tid, buffer, entry, &s);
+
+ if ((int)s >= size) {
+ ck_error("Size %u is out of range %d\n",
+ s, size);
+ }
+ }
+ assert(r == true);
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ ck_ring_buffer_t *buffer;
+ pthread_t *thread;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size >= 4 && (size & size - 1) == 0);
+ size -= 1;
+
+ ring = malloc(sizeof(ck_ring_t) * nthr);
+ assert(ring);
+
+ _context = malloc(sizeof(*_context) * nthr);
+ assert(_context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
+ for (i = 0; i < nthr; i++) {
+ _context[i].tid = i;
+ if (i == 0) {
+ _context[i].previous = nthr - 1;
+ _context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ _context[i].next = 0;
+ _context[i].previous = i - 1;
+ } else {
+ _context[i].next = i + 1;
+ _context[i].previous = i - 1;
+ }
+
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ _context[i].buffer = buffer;
+ ck_ring_init(ring + i, size + 1);
+ r = pthread_create(thread + i, NULL, test, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ return (0);
+}
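
A minimal sketch of the SPSC calls used above, illustrating the capacity invariant the test asserts: a ring initialized with power-of-two size n reports a capacity of n but holds at most n - 1 entries, which is why the test subtracts one from the requested size. The size of 8 and the integer payload are arbitrary choices for illustration.

#include <assert.h>
#include <stdio.h>
#include <string.h>

#include <ck_ring.h>

int
main(void)
{
	ck_ring_t ring;
	ck_ring_buffer_t buffer[8];
	int values[8];
	int *out;
	unsigned int n = 0;

	memset(buffer, 0, sizeof buffer);
	ck_ring_init(&ring, 8);

	/* Enqueue until the ring reports full: exactly 7 slots are usable. */
	while (ck_ring_enqueue_spsc(&ring, buffer, &values[n]) == true)
		n++;

	assert(n == ck_ring_capacity(&ring) - 1);
	assert(ck_ring_size(&ring) == n);

	/* Drain the ring; dequeue_spsc returns false once it is empty. */
	while (ck_ring_dequeue_spsc(&ring, buffer, &out) == true)
		(void)out;

	printf("usable slots: %u of capacity %u\n", n, ck_ring_capacity(&ring));
	return 0;
}
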
diff --git a/regressions/ck_rwcohort/benchmark/Makefile b/regressions/ck_rwcohort/benchmark/Makefile
new file mode 100644
index 0000000..054c85c
--- /dev/null
+++ b/regressions/ck_rwcohort/benchmark/Makefile
@@ -0,0 +1,32 @@
+.PHONY: clean distribution
+
+OBJECTS=latency throughput
+OBJECTS=ck_neutral.THROUGHPUT ck_neutral.LATENCY \
+ ck_rp.THROUGHPUT ck_rp.LATENCY \
+ ck_wp.THROUGHPUT ck_wp.LATENCY
+
+all: $(OBJECTS)
+
+ck_neutral.THROUGHPUT: ck_neutral.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_neutral.THROUGHPUT ck_neutral.c
+
+ck_neutral.LATENCY: ck_neutral.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_neutral.LATENCY ck_neutral.c
+
+ck_rp.THROUGHPUT: ck_rp.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_rp.THROUGHPUT ck_rp.c
+
+ck_rp.LATENCY: ck_rp.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_rp.LATENCY ck_rp.c
+
+ck_wp.THROUGHPUT: ck_wp.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_wp.THROUGHPUT ck_wp.c
+
+ck_wp.LATENCY: ck_wp.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_wp.LATENCY ck_wp.c
+
+clean:
+ rm -rf *.dSYM *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_rwcohort/benchmark/ck_neutral.c b/regressions/ck_rwcohort/benchmark/ck_neutral.c
new file mode 100644
index 0000000..9fb85db
--- /dev/null
+++ b/regressions/ck_rwcohort/benchmark/ck_neutral.c
@@ -0,0 +1,7 @@
+#include "../ck_neutral.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_rwcohort/benchmark/ck_rp.c b/regressions/ck_rwcohort/benchmark/ck_rp.c
new file mode 100644
index 0000000..798e578
--- /dev/null
+++ b/regressions/ck_rwcohort/benchmark/ck_rp.c
@@ -0,0 +1,7 @@
+#include "../ck_rp.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_rwcohort/benchmark/ck_wp.c b/regressions/ck_rwcohort/benchmark/ck_wp.c
new file mode 100644
index 0000000..07b0cce
--- /dev/null
+++ b/regressions/ck_rwcohort/benchmark/ck_wp.c
@@ -0,0 +1,7 @@
+#include "../ck_wp.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_rwcohort/benchmark/latency.h b/regressions/ck_rwcohort/benchmark/latency.h
new file mode 100644
index 0000000..027a8b2
--- /dev/null
+++ b/regressions/ck_rwcohort/benchmark/latency.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * Copyright 2013 Brendon Scheinman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_rwcohort.h>
+#include <ck_spinlock.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+static void
+ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_fas_lock(lock);
+}
+
+static void
+ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_fas_unlock(lock);
+}
+
+static bool
+ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ return ck_spinlock_fas_locked(lock);
+}
+
+CK_COHORT_PROTOTYPE(fas_fas,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context)
+LOCK_PROTOTYPE(fas_fas)
+
+int
+main(void)
+{
+ uint64_t s_b, e_b, i;
+ ck_spinlock_fas_t global_lock = CK_SPINLOCK_FAS_INITIALIZER;
+ ck_spinlock_fas_t local_lock = CK_SPINLOCK_FAS_INITIALIZER;
+ CK_COHORT_INSTANCE(fas_fas) cohort = CK_COHORT_INITIALIZER;
+ LOCK_INSTANCE(fas_fas) rw_cohort = LOCK_INITIALIZER;
+
+ CK_COHORT_INIT(fas_fas, &cohort, &global_lock, &local_lock,
+ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
+ LOCK_INIT(fas_fas, &rw_cohort, CK_RWCOHORT_WP_DEFAULT_WAIT_LIMIT);
+
+ for (i = 0; i < STEPS; i++) {
+ WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL);
+ WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ WRITE_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL);
+ WRITE_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL);
+ }
+ e_b = rdtsc();
+ printf("WRITE: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ READ_LOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, &cohort, NULL, NULL);
+ }
+ e_b = rdtsc();
+ printf("READ: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ return (0);
+}
+
diff --git a/regressions/ck_rwcohort/benchmark/throughput.h b/regressions/ck_rwcohort/benchmark/throughput.h
new file mode 100644
index 0000000..2870855
--- /dev/null
+++ b/regressions/ck_rwcohort/benchmark/throughput.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * Copyright 2013 Brendon Scheinman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_cohort.h>
+#include <ck_rwcohort.h>
+#include <ck_spinlock.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#define max(x, y) (((x) > (y)) ? (x) : (y))
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+static unsigned int barrier;
+static unsigned int flag CK_CC_CACHELINE;
+static struct affinity affinity;
+static unsigned int nthr;
+
+static void
+ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+
+ (void)context;
+ ck_spinlock_fas_lock(lock);
+ return;
+}
+
+static void
+ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+
+ (void)context;
+ ck_spinlock_fas_unlock(lock);
+ return;
+}
+
+static bool
+ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+
+ (void)context;
+ return ck_spinlock_fas_locked(lock);
+}
+
+CK_COHORT_PROTOTYPE(fas_fas,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context)
+LOCK_PROTOTYPE(fas_fas)
+
+struct cohort_record {
+ CK_COHORT_INSTANCE(fas_fas) cohort;
+} CK_CC_CACHELINE;
+static struct cohort_record *cohorts;
+
+static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER;
+static LOCK_INSTANCE(fas_fas) rw_cohort = LOCK_INITIALIZER;
+static unsigned int n_cohorts;
+
+struct block {
+ unsigned int tid;
+};
+
+static void *
+thread_rwlock(void *pun)
+{
+ uint64_t s_b, e_b, a, i;
+ uint64_t *value = pun;
+ CK_COHORT_INSTANCE(fas_fas) *cohort;
+ unsigned int core;
+
+ if (aff_iterate_core(&affinity, &core) != 0) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ cohort = &((cohorts + (core / (int)(affinity.delta)) % n_cohorts)->cohort);
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) != nthr)
+ ck_pr_stall();
+
+ for (i = 1, a = 0;; i++) {
+ s_b = rdtsc();
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ e_b = rdtsc();
+
+ a += (e_b - s_b) >> 4;
+
+ if (ck_pr_load_uint(&flag) == 1)
+ break;
+ }
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) != nthr * 2)
+ ck_pr_stall();
+
+ *value = (a / i);
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int i;
+ pthread_t *threads;
+ uint64_t *latency;
+ struct block *context;
+ ck_spinlock_fas_t *local_lock;
+
+ if (argc != 4) {
+ ck_error("Usage: throughput <number of cohorts> <threads per cohort> <affinity delta>\n");
+ }
+
+ n_cohorts = atoi(argv[1]);
+ if (n_cohorts <= 0) {
+ ck_error("ERROR: Number of cohorts must be greater than 0\n");
+ }
+
+ nthr = n_cohorts * atoi(argv[2]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ cohorts = malloc(sizeof(struct cohort_record) * n_cohorts);
+ if (cohorts == NULL) {
+ ck_error("ERROR: Could not allocate cohort structures\n");
+ }
+
+ context = malloc(sizeof(struct block) * nthr);
+ if (context == NULL) {
+ ck_error("ERROR: Could not allocate thread contexts\n");
+ }
+
+ affinity.delta = atoi(argv[3]);
+ affinity.request = 0;
+
+ latency = malloc(sizeof(*latency) * nthr);
+ if (latency == NULL) {
+ ck_error("ERROR: Could not create latency buffer\n");
+ }
+ memset(latency, 0, sizeof(*latency) * nthr);
+
+ fprintf(stderr, "Creating cohorts...");
+ for (i = 0 ; i < n_cohorts ; i++) {
+ local_lock = malloc(max(CK_MD_CACHELINE, sizeof(ck_spinlock_fas_t)));
+ if (local_lock == NULL) {
+ ck_error("ERROR: Could not allocate local lock\n");
+ }
+ CK_COHORT_INIT(fas_fas, &((cohorts + i)->cohort), &global_lock, local_lock,
+ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
+ local_lock = NULL;
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Creating threads (rwlock)...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread_rwlock, latency + i) != 0) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ common_sleep(10);
+ ck_pr_store_uint(&flag, 1);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (i = 1; i <= nthr; i++)
+ printf("%10u %20" PRIu64 "\n", i, latency[i - 1]);
+
+ return (0);
+}
+
diff --git a/regressions/ck_rwcohort/ck_neutral.h b/regressions/ck_rwcohort/ck_neutral.h
new file mode 100644
index 0000000..dbbda9d
--- /dev/null
+++ b/regressions/ck_rwcohort/ck_neutral.h
@@ -0,0 +1,8 @@
+#define LOCK_PROTOTYPE CK_RWCOHORT_NEUTRAL_PROTOTYPE
+#define LOCK_INSTANCE CK_RWCOHORT_NEUTRAL_INSTANCE
+#define LOCK_INITIALIZER CK_RWCOHORT_NEUTRAL_INITIALIZER
+#define LOCK_INIT(N, C, W) CK_RWCOHORT_NEUTRAL_INIT(N, C)
+#define READ_LOCK CK_RWCOHORT_NEUTRAL_READ_LOCK
+#define WRITE_LOCK CK_RWCOHORT_NEUTRAL_WRITE_LOCK
+#define READ_UNLOCK CK_RWCOHORT_NEUTRAL_READ_UNLOCK
+#define WRITE_UNLOCK CK_RWCOHORT_NEUTRAL_WRITE_UNLOCK
diff --git a/regressions/ck_rwcohort/ck_rp.h b/regressions/ck_rwcohort/ck_rp.h
new file mode 100644
index 0000000..e20f3d2
--- /dev/null
+++ b/regressions/ck_rwcohort/ck_rp.h
@@ -0,0 +1,8 @@
+#define LOCK_PROTOTYPE CK_RWCOHORT_RP_PROTOTYPE
+#define LOCK_INSTANCE CK_RWCOHORT_RP_INSTANCE
+#define LOCK_INITIALIZER CK_RWCOHORT_RP_INITIALIZER
+#define LOCK_INIT CK_RWCOHORT_RP_INIT
+#define READ_LOCK CK_RWCOHORT_RP_READ_LOCK
+#define READ_UNLOCK CK_RWCOHORT_RP_READ_UNLOCK
+#define WRITE_LOCK CK_RWCOHORT_RP_WRITE_LOCK
+#define WRITE_UNLOCK CK_RWCOHORT_RP_WRITE_UNLOCK
diff --git a/regressions/ck_rwcohort/ck_wp.h b/regressions/ck_rwcohort/ck_wp.h
new file mode 100644
index 0000000..556c7df
--- /dev/null
+++ b/regressions/ck_rwcohort/ck_wp.h
@@ -0,0 +1,8 @@
+#define LOCK_PROTOTYPE CK_RWCOHORT_WP_PROTOTYPE
+#define LOCK_INSTANCE CK_RWCOHORT_WP_INSTANCE
+#define LOCK_INITIALIZER CK_RWCOHORT_WP_INITIALIZER
+#define LOCK_INIT CK_RWCOHORT_WP_INIT
+#define READ_LOCK CK_RWCOHORT_WP_READ_LOCK
+#define WRITE_LOCK CK_RWCOHORT_WP_WRITE_LOCK
+#define READ_UNLOCK CK_RWCOHORT_WP_READ_UNLOCK
+#define WRITE_UNLOCK CK_RWCOHORT_WP_WRITE_UNLOCK
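
For reference, a standalone sketch of what the generic LOCK_PROTOTYPE/LOCK_INIT/READ_LOCK/WRITE_LOCK names above resolve to for the write-preferring flavor. The cohort construction and the context-taking spinlock wrappers follow the fas_fas pattern used throughout these regressions (the wrappers are merely renamed here); the trailing NULL arguments are the unused global- and local-lock contexts.

#include <ck_cohort.h>
#include <ck_rwcohort.h>
#include <ck_spinlock.h>

static void
fas_lock(ck_spinlock_fas_t *lock, void *context)
{
	(void)context;
	ck_spinlock_fas_lock(lock);
}

static void
fas_unlock(ck_spinlock_fas_t *lock, void *context)
{
	(void)context;
	ck_spinlock_fas_unlock(lock);
}

static bool
fas_locked(ck_spinlock_fas_t *lock, void *context)
{
	(void)context;
	return ck_spinlock_fas_locked(lock);
}

CK_COHORT_PROTOTYPE(fas_fas,
    fas_lock, fas_unlock, fas_locked,
    fas_lock, fas_unlock, fas_locked)
CK_RWCOHORT_WP_PROTOTYPE(fas_fas)

int
main(void)
{
	ck_spinlock_fas_t global = CK_SPINLOCK_FAS_INITIALIZER;
	ck_spinlock_fas_t local = CK_SPINLOCK_FAS_INITIALIZER;
	CK_COHORT_INSTANCE(fas_fas) cohort = CK_COHORT_INITIALIZER;
	CK_RWCOHORT_WP_INSTANCE(fas_fas) rw = CK_RWCOHORT_WP_INITIALIZER;

	CK_COHORT_INIT(fas_fas, &cohort, &global, &local,
	    CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
	CK_RWCOHORT_WP_INIT(fas_fas, &rw, CK_RWCOHORT_WP_DEFAULT_WAIT_LIMIT);

	CK_RWCOHORT_WP_WRITE_LOCK(fas_fas, &rw, &cohort, NULL, NULL);
	/* ... exclusive section ... */
	CK_RWCOHORT_WP_WRITE_UNLOCK(fas_fas, &rw, &cohort, NULL, NULL);

	CK_RWCOHORT_WP_READ_LOCK(fas_fas, &rw, &cohort, NULL, NULL);
	/* ... shared section ... */
	CK_RWCOHORT_WP_READ_UNLOCK(fas_fas, &rw, &cohort, NULL, NULL);

	return 0;
}
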
diff --git a/regressions/ck_rwcohort/validate/Makefile b/regressions/ck_rwcohort/validate/Makefile
new file mode 100644
index 0000000..33e3a29
--- /dev/null
+++ b/regressions/ck_rwcohort/validate/Makefile
@@ -0,0 +1,25 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_neutral ck_rp ck_wp
+
+all: $(OBJECTS)
+
+ck_neutral: ck_neutral.c ../../../include/ck_rwcohort.h
+ $(CC) $(CFLAGS) -o ck_neutral ck_neutral.c
+
+ck_rp: ck_rp.c ../../../include/ck_rwcohort.h
+ $(CC) $(CFLAGS) -o ck_rp ck_rp.c
+
+ck_wp: ck_wp.c ../../../include/ck_rwcohort.h
+ $(CC) $(CFLAGS) -o ck_wp ck_wp.c
+
+check: all
+ ./ck_neutral `expr $(CORES) / 2` 2 1
+ ./ck_rp `expr $(CORES) / 2` 2 1
+ ./ck_wp `expr $(CORES) / 2` 2 1
+
+clean:
+ rm -rf *.dSYM *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_rwcohort/validate/ck_neutral.c b/regressions/ck_rwcohort/validate/ck_neutral.c
new file mode 100644
index 0000000..7884dc5
--- /dev/null
+++ b/regressions/ck_rwcohort/validate/ck_neutral.c
@@ -0,0 +1,2 @@
+#include "../ck_neutral.h"
+#include "validate.h"
diff --git a/regressions/ck_rwcohort/validate/ck_rp.c b/regressions/ck_rwcohort/validate/ck_rp.c
new file mode 100644
index 0000000..d63e9d5
--- /dev/null
+++ b/regressions/ck_rwcohort/validate/ck_rp.c
@@ -0,0 +1,2 @@
+#include "../ck_rp.h"
+#include "validate.h"
diff --git a/regressions/ck_rwcohort/validate/ck_wp.c b/regressions/ck_rwcohort/validate/ck_wp.c
new file mode 100644
index 0000000..f89be35
--- /dev/null
+++ b/regressions/ck_rwcohort/validate/ck_wp.c
@@ -0,0 +1,2 @@
+#include "../ck_wp.h"
+#include "validate.h"
diff --git a/regressions/ck_rwcohort/validate/validate.h b/regressions/ck_rwcohort/validate/validate.h
new file mode 100644
index 0000000..8bc9a88
--- /dev/null
+++ b/regressions/ck_rwcohort/validate/validate.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * Copyright 2013 Brendon Scheinman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_rwcohort.h>
+#include <ck_spinlock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+
+static struct affinity a;
+static unsigned int locked;
+static int nthr;
+static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER;
+
+static void
+ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_fas_lock(lock);
+}
+
+static void
+ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_fas_unlock(lock);
+}
+
+static bool
+ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ return ck_spinlock_fas_locked(lock);
+}
+
+CK_COHORT_PROTOTYPE(fas_fas,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context)
+LOCK_PROTOTYPE(fas_fas)
+
+static CK_COHORT_INSTANCE(fas_fas) *cohorts;
+static LOCK_INSTANCE(fas_fas) rw_cohort = LOCK_INITIALIZER;
+static int n_cohorts;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ int i = ITERATE;
+ unsigned int l;
+ unsigned int core;
+ CK_COHORT_INSTANCE(fas_fas) *cohort;
+
+ if (aff_iterate_core(&a, &core)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ cohort = cohorts + (core / (int)(a.delta)) % n_cohorts;
+
+ while (i--) {
+ WRITE_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ WRITE_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+
+ READ_LOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ READ_UNLOCK(fas_fas, &rw_cohort, cohort, NULL, NULL);
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ int threads_per_cohort;
+ ck_spinlock_fas_t *local_lock;
+ int i;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <number of cohorts> <threads per cohort> <affinity delta>\n");
+ }
+
+ n_cohorts = atoi(argv[1]);
+ if (n_cohorts <= 0) {
+ ck_error("ERROR: Number of cohorts must be greater than 0\n");
+ }
+
+ threads_per_cohort = atoi(argv[2]);
+ if (threads_per_cohort <= 0) {
+ ck_error("ERROR: Threads per cohort must be greater than 0\n");
+ }
+
+ nthr = n_cohorts * threads_per_cohort;
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[3]);
+
+ fprintf(stderr, "Creating cohorts...");
+ cohorts = malloc(sizeof(CK_COHORT_INSTANCE(fas_fas)) * n_cohorts);
+ if (cohorts == NULL) {
+ ck_error("ERROR: Could not allocate base cohort structures\n");
+ }
+ for (i = 0 ; i < n_cohorts ; i++) {
+ local_lock = malloc(sizeof(ck_spinlock_fas_t));
+ CK_COHORT_INIT(fas_fas, cohorts + i, &global_fas_lock, local_lock,
+ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Creating threads...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return (0);
+}
+
diff --git a/regressions/ck_rwlock/benchmark/Makefile b/regressions/ck_rwlock/benchmark/Makefile
new file mode 100644
index 0000000..ed63504
--- /dev/null
+++ b/regressions/ck_rwlock/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: clean distribution
+
+OBJECTS=latency throughput
+
+all: $(OBJECTS)
+
+latency: latency.c ../../../include/ck_rwlock.h ../../../include/ck_elide.h
+ $(CC) $(CFLAGS) -o latency latency.c
+
+throughput: throughput.c ../../../include/ck_rwlock.h ../../../include/ck_elide.h
+ $(CC) $(CFLAGS) -o throughput throughput.c
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_rwlock/benchmark/latency.c b/regressions/ck_rwlock/benchmark/latency.c
new file mode 100644
index 0000000..18213c6
--- /dev/null
+++ b/regressions/ck_rwlock/benchmark/latency.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_rwlock.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#define CK_F_PR_RTM
+
+#ifndef STEPS
+#define STEPS 2000000
+#endif
+
+int
+main(void)
+{
+ uint64_t s_b, e_b, i;
+ ck_rwlock_t rwlock = CK_RWLOCK_INITIALIZER;
+
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_write_lock(&rwlock);
+ ck_rwlock_write_unlock(&rwlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_write_lock(&rwlock);
+ ck_rwlock_write_unlock(&rwlock);
+ }
+ e_b = rdtsc();
+ printf(" WRITE: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+#ifdef CK_F_PR_RTM
+ struct ck_elide_config config = CK_ELIDE_CONFIG_DEFAULT_INITIALIZER;
+ struct ck_elide_stat st = CK_ELIDE_STAT_INITIALIZER;
+
+ for (i = 0; i < STEPS; i++) {
+ CK_ELIDE_LOCK(ck_rwlock_write, &rwlock);
+ CK_ELIDE_UNLOCK(ck_rwlock_write, &rwlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ CK_ELIDE_LOCK(ck_rwlock_write, &rwlock);
+ CK_ELIDE_UNLOCK(ck_rwlock_write, &rwlock);
+ }
+ e_b = rdtsc();
+ printf(" (rtm) WRITE: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ CK_ELIDE_LOCK_ADAPTIVE(ck_rwlock_write, &st, &config, &rwlock);
+ CK_ELIDE_UNLOCK_ADAPTIVE(ck_rwlock_write, &st, &rwlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ CK_ELIDE_LOCK_ADAPTIVE(ck_rwlock_write, &st, &config, &rwlock);
+ CK_ELIDE_UNLOCK_ADAPTIVE(ck_rwlock_write, &st, &rwlock);
+ }
+ e_b = rdtsc();
+ printf(" (rtm-adaptive) WRITE: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+#endif /* CK_F_PR_RTM */
+
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_read_lock(&rwlock);
+ ck_rwlock_read_unlock(&rwlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_rwlock_read_lock(&rwlock);
+ ck_rwlock_read_unlock(&rwlock);
+ }
+ e_b = rdtsc();
+ printf(" READ: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+#ifdef CK_F_PR_RTM
+ ck_elide_stat_init(&st);
+
+ for (i = 0; i < STEPS; i++) {
+ CK_ELIDE_LOCK(ck_rwlock_read, &rwlock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rwlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ CK_ELIDE_LOCK(ck_rwlock_read, &rwlock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rwlock);
+ }
+ e_b = rdtsc();
+ printf(" (rtm) READ: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ CK_ELIDE_LOCK_ADAPTIVE(ck_rwlock_read, &st, &config, &rwlock);
+ CK_ELIDE_UNLOCK_ADAPTIVE(ck_rwlock_read, &st, &rwlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ CK_ELIDE_LOCK_ADAPTIVE(ck_rwlock_read, &st, &config, &rwlock);
+ CK_ELIDE_UNLOCK_ADAPTIVE(ck_rwlock_read, &st, &rwlock);
+ }
+ e_b = rdtsc();
+ printf(" (rtm-adaptive) READ: rwlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+#endif /* CK_F_PR_RTM */
+
+ return 0;
+}
+
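
The acquire/release pairs the latency benchmark above times, shown once without the rdtsc harness. The elided variants are guarded the same way the benchmark guards them; that they fall back to the plain lock when a hardware transaction is unavailable or aborts is an assumption about ck_elide's behavior, not something this benchmark itself demonstrates.

#include <ck_rwlock.h>

int
main(void)
{
	ck_rwlock_t rwlock = CK_RWLOCK_INITIALIZER;

	ck_rwlock_write_lock(&rwlock);
	/* ... exclusive section ... */
	ck_rwlock_write_unlock(&rwlock);

	ck_rwlock_read_lock(&rwlock);
	/* ... shared section ... */
	ck_rwlock_read_unlock(&rwlock);

#ifdef CK_F_PR_RTM
	/* Elided variants: attempt a transaction, fall back to the lock. */
	CK_ELIDE_LOCK(ck_rwlock_write, &rwlock);
	CK_ELIDE_UNLOCK(ck_rwlock_write, &rwlock);

	CK_ELIDE_LOCK(ck_rwlock_read, &rwlock);
	CK_ELIDE_UNLOCK(ck_rwlock_read, &rwlock);
#endif

	return 0;
}
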
diff --git a/regressions/ck_rwlock/benchmark/throughput.c b/regressions/ck_rwlock/benchmark/throughput.c
new file mode 100644
index 0000000..f57fbd8
--- /dev/null
+++ b/regressions/ck_rwlock/benchmark/throughput.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_rwlock.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+static int barrier;
+static int threads;
+static unsigned int flag CK_CC_CACHELINE;
+static struct {
+ ck_rwlock_t lock;
+} rw CK_CC_CACHELINE = {
+ .lock = CK_RWLOCK_INITIALIZER
+};
+
+static struct affinity affinity;
+
+#ifdef CK_F_PR_RTM
+static void *
+thread_lock_rtm(void *pun)
+{
+ uint64_t s_b, e_b, a, i;
+ uint64_t *value = pun;
+
+ if (aff_iterate(&affinity) != 0) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads)
+ ck_pr_stall();
+
+ for (i = 1, a = 0;; i++) {
+ s_b = rdtsc();
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
+ e_b = rdtsc();
+
+ a += (e_b - s_b) >> 4;
+
+ if (ck_pr_load_uint(&flag) == 1)
+ break;
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads * 2)
+ ck_pr_stall();
+
+ *value = (a / i);
+ return NULL;
+}
+#endif /* CK_F_PR_RTM */
+
+static void *
+thread_lock(void *pun)
+{
+ uint64_t s_b, e_b, a, i;
+ uint64_t *value = pun;
+
+ if (aff_iterate(&affinity) != 0) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads)
+ ck_pr_stall();
+
+ for (i = 1, a = 0;; i++) {
+ s_b = rdtsc();
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ ck_rwlock_read_lock(&rw.lock);
+ ck_rwlock_read_unlock(&rw.lock);
+ e_b = rdtsc();
+
+ a += (e_b - s_b) >> 4;
+
+ if (ck_pr_load_uint(&flag) == 1)
+ break;
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads * 2)
+ ck_pr_stall();
+
+ *value = (a / i);
+ return NULL;
+}
+
+static void
+rwlock_test(pthread_t *p, int d, uint64_t *latency, void *(*f)(void *), const char *label)
+{
+ int t;
+
+ ck_pr_store_int(&barrier, 0);
+ ck_pr_store_uint(&flag, 0);
+
+ affinity.delta = d;
+ affinity.request = 0;
+
+ fprintf(stderr, "Creating threads (%s)...", label);
+ for (t = 0; t < threads; t++) {
+ if (pthread_create(&p[t], NULL, f, latency + t) != 0) {
+ ck_error("ERROR: Could not create thread %d\n", t);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ common_sleep(10);
+ ck_pr_store_uint(&flag, 1);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (t = 0; t < threads; t++)
+ pthread_join(p[t], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (t = 1; t <= threads; t++)
+ printf("%10u %20" PRIu64 "\n", t, latency[t - 1]);
+
+ fprintf(stderr, "\n");
+ return;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+ int d;
+ pthread_t *p;
+ uint64_t *latency;
+
+ if (argc != 3) {
+ ck_error("Usage: throughput <delta> <threads>\n");
+ }
+
+ threads = atoi(argv[2]);
+ if (threads <= 0) {
+ ck_error("ERROR: Threads must be a value > 0.\n");
+ }
+
+ p = malloc(sizeof(pthread_t) * threads);
+ if (p == NULL) {
+ ck_error("ERROR: Failed to initialize thread.\n");
+ }
+
+ latency = malloc(sizeof(uint64_t) * threads);
+ if (latency == NULL) {
+ ck_error("ERROR: Failed to create latency buffer.\n");
+ }
+
+ d = atoi(argv[1]);
+ rwlock_test(p, d, latency, thread_lock, "rwlock");
+
+#ifdef CK_F_PR_RTM
+ rwlock_test(p, d, latency, thread_lock_rtm, "rwlock, rtm");
+#endif /* CK_F_PR_RTM */
+
+ return 0;
+}
+
diff --git a/regressions/ck_rwlock/validate/Makefile b/regressions/ck_rwlock/validate/Makefile
new file mode 100644
index 0000000..2c2116b
--- /dev/null
+++ b/regressions/ck_rwlock/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=validate
+
+all: $(OBJECTS)
+
+validate: validate.c ../../../include/ck_rwlock.h ../../../include/ck_elide.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate $(CORES) 1
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_rwlock/validate/validate.c b/regressions/ck_rwlock/validate/validate.c
new file mode 100644
index 0000000..8a32e08
--- /dev/null
+++ b/regressions/ck_rwlock/validate/validate.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_rwlock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+static struct affinity a;
+static unsigned int locked;
+static unsigned int tid = 2;
+static int nthr;
+static ck_rwlock_t lock = CK_RWLOCK_INITIALIZER;
+static ck_rwlock_recursive_t r_lock = CK_RWLOCK_RECURSIVE_INITIALIZER;
+
+static void *
+thread_recursive(void *null CK_CC_UNUSED)
+{
+ int i = ITERATE;
+ unsigned int l;
+ unsigned int t = ck_pr_faa_uint(&tid, 1);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
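+		/*
+		 * The write side is acquired four times here (one trylock plus
+		 * three recursive locks), matched by the four unlocks below.
+		 */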
+ while (ck_rwlock_recursive_write_trylock(&r_lock, t) == false)
+ ck_pr_stall();
+
+ ck_rwlock_recursive_write_lock(&r_lock, t);
+ ck_rwlock_recursive_write_lock(&r_lock, t);
+ ck_rwlock_recursive_write_lock(&r_lock, t);
+
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_rwlock_recursive_write_unlock(&r_lock);
+ ck_rwlock_recursive_write_unlock(&r_lock);
+ ck_rwlock_recursive_write_unlock(&r_lock);
+ ck_rwlock_recursive_write_unlock(&r_lock);
+
+ ck_rwlock_recursive_read_lock(&r_lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_rwlock_recursive_read_unlock(&r_lock);
+ }
+
+ return (NULL);
+}
+
+#ifdef CK_F_PR_RTM
+static void *
+thread_rtm_adaptive(void *null CK_CC_UNUSED)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+ struct ck_elide_config config = CK_ELIDE_CONFIG_DEFAULT_INITIALIZER;
+ struct ck_elide_stat st = CK_ELIDE_STAT_INITIALIZER;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
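+		/* Adaptive elision: the statistics in st decide when to stop eliding and take the rwlock for real. */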
+ CK_ELIDE_LOCK_ADAPTIVE(ck_rwlock_write, &st, &config, &lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_ELIDE_UNLOCK_ADAPTIVE(ck_rwlock_write, &st, &lock);
+
+ CK_ELIDE_LOCK(ck_rwlock_read, &lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &lock);
+ }
+
+ return NULL;
+}
+
+static void *
+thread_rtm_mix(void *null CK_CC_UNUSED)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
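+		/* Alternate between elided and conventional acquire/release on every other iteration. */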
+ if (i & 1) {
+ CK_ELIDE_LOCK(ck_rwlock_write, &lock);
+ } else {
+ ck_rwlock_write_lock(&lock);
+ }
+
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+
+ if (i & 1) {
+ CK_ELIDE_UNLOCK(ck_rwlock_write, &lock);
+ } else {
+ ck_rwlock_write_unlock(&lock);
+ }
+
+ if (i & 1) {
+ CK_ELIDE_LOCK(ck_rwlock_read, &lock);
+ } else {
+ ck_rwlock_read_lock(&lock);
+ }
+
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+
+ if (i & 1) {
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &lock);
+ } else {
+ ck_rwlock_read_unlock(&lock);
+ }
+ }
+
+ return (NULL);
+}
+
+static void *
+thread_rtm(void *null CK_CC_UNUSED)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ CK_ELIDE_LOCK(ck_rwlock_write, &lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_ELIDE_UNLOCK(ck_rwlock_write, &lock);
+
+ CK_ELIDE_LOCK(ck_rwlock_read, &lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_ELIDE_UNLOCK(ck_rwlock_read, &lock);
+ }
+
+ return (NULL);
+}
+#endif /* CK_F_PR_RTM */
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ ck_rwlock_write_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_rwlock_write_unlock(&lock);
+
+ ck_rwlock_read_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_rwlock_read_unlock(&lock);
+ }
+
+ return (NULL);
+}
+
+static void
+rwlock_test(pthread_t *threads, void *(*f)(void *), const char *test)
+{
+ int i;
+
+ fprintf(stderr, "Creating threads (%s)...", test);
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, f, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, ".");
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+
+ if (argc != 3) {
+ ck_error("Usage: validate <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ rwlock_test(threads, thread, "regular");
+#ifdef CK_F_PR_RTM
+ rwlock_test(threads, thread_rtm, "rtm");
+ rwlock_test(threads, thread_rtm_mix, "rtm-mix");
+ rwlock_test(threads, thread_rtm_adaptive, "rtm-adaptive");
+#endif
+ rwlock_test(threads, thread_recursive, "recursive");
+ return 0;
+}
+
diff --git a/regressions/ck_sequence/benchmark/Makefile b/regressions/ck_sequence/benchmark/Makefile
new file mode 100644
index 0000000..5803a4d
--- /dev/null
+++ b/regressions/ck_sequence/benchmark/Makefile
@@ -0,0 +1,18 @@
+.PHONY: clean distribution
+
+OBJECTS=ck_sequence
+
+all: $(OBJECTS)
+
+ck_sequence: ck_sequence.c ../../../include/ck_sequence.h
+ $(CC) $(CFLAGS) -o ck_sequence ck_sequence.c
+
+check: all
+ ./ck_sequence $(CORES) 1
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
+
diff --git a/regressions/ck_sequence/benchmark/ck_sequence.c b/regressions/ck_sequence/benchmark/ck_sequence.c
new file mode 100644
index 0000000..f720c31
--- /dev/null
+++ b/regressions/ck_sequence/benchmark/ck_sequence.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_cc.h>
+#include <ck_sequence.h>
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS (65536 * 64)
+#endif
+
+static ck_sequence_t seqlock CK_CC_CACHELINE = CK_SEQUENCE_INITIALIZER;
+
+int
+main(void)
+{
+ unsigned int i = 0;
+ unsigned int version;
+ uint64_t a, s;
+
+ /* Read-side latency. */
+ a = 0;
+ for (i = 0; i < STEPS / 4; i++) {
+ s = rdtsc();
+ ck_sequence_read_retry(&seqlock, ck_sequence_read_begin(&seqlock));
+ ck_sequence_read_retry(&seqlock, ck_sequence_read_begin(&seqlock));
+ ck_sequence_read_retry(&seqlock, ck_sequence_read_begin(&seqlock));
+ ck_sequence_read_retry(&seqlock, ck_sequence_read_begin(&seqlock));
+ a += rdtsc() - s;
+ }
+ printf("read: %" PRIu64 "\n", a / STEPS);
+
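+	/* Same read-side measurement through the CK_SEQUENCE_READ convenience macro. */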
+ a = 0;
+ for (i = 0; i < STEPS / 4; i++) {
+ s = rdtsc();
+ CK_SEQUENCE_READ(&seqlock, &version);
+ CK_SEQUENCE_READ(&seqlock, &version);
+ CK_SEQUENCE_READ(&seqlock, &version);
+ CK_SEQUENCE_READ(&seqlock, &version);
+ a += rdtsc() - s;
+ }
+	printf("READ: %" PRIu64 "\n", a / STEPS);
+
+ /* Write-side latency. */
+ a = 0;
+ for (i = 0; i < STEPS / 4; i++) {
+ s = rdtsc();
+ ck_sequence_write_begin(&seqlock);
+ ck_sequence_write_end(&seqlock);
+ ck_sequence_write_begin(&seqlock);
+ ck_sequence_write_end(&seqlock);
+ ck_sequence_write_begin(&seqlock);
+ ck_sequence_write_end(&seqlock);
+ ck_sequence_write_begin(&seqlock);
+ ck_sequence_write_end(&seqlock);
+ a += rdtsc() - s;
+ }
+ printf("write: %" PRIu64 "\n", a / STEPS);
+
+ return 0;
+}
+
diff --git a/regressions/ck_sequence/validate/Makefile b/regressions/ck_sequence/validate/Makefile
new file mode 100644
index 0000000..bc2e5be
--- /dev/null
+++ b/regressions/ck_sequence/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_sequence
+
+all: $(OBJECTS)
+
+ck_sequence: ck_sequence.c ../../../include/ck_sequence.h
+ $(CC) $(CFLAGS) -o ck_sequence ck_sequence.c
+
+check: all
+ ./ck_sequence $(CORES) 1
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_sequence/validate/ck_sequence.c b/regressions/ck_sequence/validate/ck_sequence.c
new file mode 100644
index 0000000..e0bc700
--- /dev/null
+++ b/regressions/ck_sequence/validate/ck_sequence.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_cc.h>
+#include <ck_sequence.h>
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+struct example {
+ unsigned int a;
+ unsigned int b;
+ unsigned int c;
+};
+
+static struct example global CK_CC_CACHELINE;
+static ck_sequence_t seqlock CK_CC_CACHELINE = CK_SEQUENCE_INITIALIZER;
+static unsigned int barrier;
+static struct affinity affinerator;
+
+static void
+validate(struct example *copy)
+{
+
+ if (copy->b != copy->a + 1000) {
+ ck_error("ERROR: Failed regression: copy->b (%u != %u + %u / %u)\n",
+ copy->b, copy->a, 1000, copy->a + 1000);
+ }
+
+ if (copy->c != copy->a + copy->b) {
+ ck_error("ERROR: Failed regression: copy->c (%u != %u + %u / %u)\n",
+ copy->c, copy->a, copy->b, copy->a + copy->b);
+ }
+
+ return;
+}
+
+static void *
+consumer(void *unused CK_CC_UNUSED)
+{
+ struct example copy;
+ uint32_t version;
+ unsigned int retries = 0;
+ unsigned int i;
+
+ unused = NULL;
+ if (aff_iterate(&affinerator)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (ck_pr_load_uint(&barrier) == 0);
+ for (i = 0; i < STEPS; i++) {
+ /*
+ * Attempt a read of the data structure. If the structure
+ * has been modified between ck_sequence_read_begin and
+ * ck_sequence_read_retry then attempt another read since
+ * the data may be in an inconsistent state.
+ */
+ do {
+ version = ck_sequence_read_begin(&seqlock);
+ copy.a = ck_pr_load_uint(&global.a);
+ copy.b = ck_pr_load_uint(&global.b);
+ copy.c = ck_pr_load_uint(&global.c);
+ retries++;
+ } while (ck_sequence_read_retry(&seqlock, version) == true);
+ validate(&copy);
+
+ CK_SEQUENCE_READ(&seqlock, &version) {
+ copy.a = ck_pr_load_uint(&global.a);
+ copy.b = ck_pr_load_uint(&global.b);
+ copy.c = ck_pr_load_uint(&global.c);
+ retries++;
+ }
+ validate(&copy);
+ }
+
+ fprintf(stderr, "%u retries.\n", retries - STEPS);
+ ck_pr_dec_uint(&barrier);
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ unsigned int counter = 0;
+ bool first = true;
+ int n_threads, i;
+
+ if (argc != 3) {
+ ck_error("Usage: ck_sequence <number of threads> <affinity delta>\n");
+ }
+
+ n_threads = atoi(argv[1]);
+ if (n_threads <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * n_threads);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate memory for threads\n");
+ }
+
+ affinerator.delta = atoi(argv[2]);
+ affinerator.request = 0;
+
+ for (i = 0; i < n_threads; i++) {
+ if (pthread_create(&threads[i], NULL, consumer, NULL)) {
+ ck_error("ERROR: Failed to create thread %d\n", i);
+ }
+ }
+
+ for (;;) {
+ /*
+ * Update the shared data in a non-blocking fashion.
+ * If the data is modified by multiple writers then
+ * ck_sequence_write_begin must be called after acquiring
+ * the associated lock and ck_sequence_write_end must be
+ * called before relinquishing the lock.
+ */
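+		/*
+		 * A minimal multi-writer sketch (writer_mutex is a hypothetical
+		 * ck_spinlock_t protecting the update path):
+		 *
+		 *     ck_spinlock_lock(&writer_mutex);
+		 *     ck_sequence_write_begin(&seqlock);
+		 *     ...update the shared object...
+		 *     ck_sequence_write_end(&seqlock);
+		 *     ck_spinlock_unlock(&writer_mutex);
+		 */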
+ ck_sequence_write_begin(&seqlock);
+ global.a = counter++;
+ global.b = global.a + 1000;
+ global.c = global.b + global.a;
+ ck_sequence_write_end(&seqlock);
+
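+		/* Release the consumers only after the first update so they never observe the zero-initialized structure. */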
+ if (first == true) {
+ ck_pr_store_uint(&barrier, n_threads);
+ first = false;
+ }
+
+ counter++;
+ if (ck_pr_load_uint(&barrier) == 0)
+ break;
+ }
+
+ printf("%u updates made.\n", counter);
+ return (0);
+}
+
diff --git a/regressions/ck_spinlock/benchmark/Makefile b/regressions/ck_spinlock/benchmark/Makefile
new file mode 100644
index 0000000..ca3e1cf
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/Makefile
@@ -0,0 +1,87 @@
+.PHONY: all clean
+
+OBJECTS=ck_ticket.THROUGHPUT ck_ticket.LATENCY \
+ ck_mcs.THROUGHPUT ck_mcs.LATENCY \
+ ck_dec.THROUGHPUT ck_dec.LATENCY \
+ ck_cas.THROUGHPUT ck_cas.LATENCY \
+ ck_fas.THROUGHPUT ck_fas.LATENCY \
+ ck_clh.THROUGHPUT ck_clh.LATENCY \
+ linux_spinlock.THROUGHPUT linux_spinlock.LATENCY \
+ ck_ticket_pb.THROUGHPUT ck_ticket_pb.LATENCY \
+ ck_anderson.THROUGHPUT ck_anderson.LATENCY \
+ ck_spinlock.THROUGHPUT ck_spinlock.LATENCY \
+ ck_hclh.THROUGHPUT ck_hclh.LATENCY
+
+all: $(OBJECTS)
+
+ck_spinlock.THROUGHPUT: ck_spinlock.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_spinlock.THROUGHPUT ck_spinlock.c -lm
+
+ck_spinlock.LATENCY: ck_spinlock.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_spinlock.LATENCY ck_spinlock.c -lm
+
+ck_ticket.THROUGHPUT: ck_ticket.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_ticket.THROUGHPUT ck_ticket.c -lm
+
+ck_ticket.LATENCY: ck_ticket.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_ticket.LATENCY ck_ticket.c -lm
+
+ck_mcs.THROUGHPUT: ck_mcs.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_mcs.THROUGHPUT ck_mcs.c -lm
+
+ck_mcs.LATENCY: ck_mcs.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_mcs.LATENCY ck_mcs.c -lm
+
+ck_dec.THROUGHPUT: ck_dec.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_dec.THROUGHPUT ck_dec.c -lm
+
+ck_dec.LATENCY: ck_dec.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_dec.LATENCY ck_dec.c -lm
+
+ck_cas.THROUGHPUT: ck_cas.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_cas.THROUGHPUT ck_cas.c -lm
+
+ck_cas.LATENCY: ck_cas.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_cas.LATENCY ck_cas.c -lm
+
+ck_fas.THROUGHPUT: ck_fas.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_fas.THROUGHPUT ck_fas.c -lm
+
+ck_fas.LATENCY: ck_fas.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_fas.LATENCY ck_fas.c -lm
+
+ck_clh.THROUGHPUT: ck_clh.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_clh.THROUGHPUT ck_clh.c -lm
+
+ck_clh.LATENCY: ck_clh.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_clh.LATENCY ck_clh.c -lm
+
+ck_hclh.THROUGHPUT: ck_hclh.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_hclh.THROUGHPUT ck_hclh.c -lm
+
+ck_hclh.LATENCY: ck_hclh.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_hclh.LATENCY ck_hclh.c -lm
+
+linux_spinlock.THROUGHPUT: linux_spinlock.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o linux_spinlock.THROUGHPUT linux_spinlock.c -lm
+
+linux_spinlock.LATENCY: linux_spinlock.c
+ $(CC) -DLATENCY $(CFLAGS) -o linux_spinlock.LATENCY linux_spinlock.c -lm
+
+ck_ticket_pb.THROUGHPUT: ck_ticket_pb.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_ticket_pb.THROUGHPUT ck_ticket_pb.c -lm
+
+ck_ticket_pb.LATENCY: ck_ticket_pb.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_ticket_pb.LATENCY ck_ticket_pb.c -lm
+
+ck_anderson.THROUGHPUT: ck_anderson.c
+ $(CC) -DTHROUGHPUT $(CFLAGS) -o ck_anderson.THROUGHPUT ck_anderson.c -lm
+
+ck_anderson.LATENCY: ck_anderson.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_anderson.LATENCY ck_anderson.c -lm
+
+clean:
+ rm -rf *.dSYM *.exe $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_spinlock/benchmark/ck_anderson.c b/regressions/ck_spinlock/benchmark/ck_anderson.c
new file mode 100644
index 0000000..2f1aecd
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_anderson.c
@@ -0,0 +1,8 @@
+#include "../ck_anderson.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
+
diff --git a/regressions/ck_spinlock/benchmark/ck_cas.c b/regressions/ck_spinlock/benchmark/ck_cas.c
new file mode 100644
index 0000000..96bd9d8
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_cas.c
@@ -0,0 +1,8 @@
+#include "../ck_cas.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
+
diff --git a/regressions/ck_spinlock/benchmark/ck_clh.c b/regressions/ck_spinlock/benchmark/ck_clh.c
new file mode 100644
index 0000000..da71d5e
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_clh.c
@@ -0,0 +1,7 @@
+#include "../ck_clh.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_spinlock/benchmark/ck_dec.c b/regressions/ck_spinlock/benchmark/ck_dec.c
new file mode 100644
index 0000000..115c116
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_dec.c
@@ -0,0 +1,7 @@
+#include "../ck_dec.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_spinlock/benchmark/ck_fas.c b/regressions/ck_spinlock/benchmark/ck_fas.c
new file mode 100644
index 0000000..c76c964
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_fas.c
@@ -0,0 +1,7 @@
+#include "../ck_fas.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_spinlock/benchmark/ck_hclh.c b/regressions/ck_spinlock/benchmark/ck_hclh.c
new file mode 100644
index 0000000..9ae443e
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_hclh.c
@@ -0,0 +1,7 @@
+#include "../ck_hclh.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_spinlock/benchmark/ck_mcs.c b/regressions/ck_spinlock/benchmark/ck_mcs.c
new file mode 100644
index 0000000..c2e95de
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_mcs.c
@@ -0,0 +1,7 @@
+#include "../ck_mcs.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_spinlock/benchmark/ck_spinlock.c b/regressions/ck_spinlock/benchmark/ck_spinlock.c
new file mode 100644
index 0000000..138541e
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_spinlock.c
@@ -0,0 +1,7 @@
+#include "../ck_spinlock.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_spinlock/benchmark/ck_ticket.c b/regressions/ck_spinlock/benchmark/ck_ticket.c
new file mode 100644
index 0000000..09c9193
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_ticket.c
@@ -0,0 +1,8 @@
+#include "../ck_ticket.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
+
diff --git a/regressions/ck_spinlock/benchmark/ck_ticket_pb.c b/regressions/ck_spinlock/benchmark/ck_ticket_pb.c
new file mode 100644
index 0000000..6122d6a
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/ck_ticket_pb.c
@@ -0,0 +1,7 @@
+#include "../ck_ticket_pb.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_spinlock/benchmark/latency.h b/regressions/ck_spinlock/benchmark/latency.h
new file mode 100644
index 0000000..afadcd2
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/latency.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * Copyright 2011 David Joseph.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_bytelock.h>
+#include <ck_spinlock.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 30000000
+#endif
+
+LOCK_DEFINE;
+
+int
+main(void)
+{
+ CK_CC_UNUSED unsigned int nthr = 1;
+
+ #ifdef LOCK_INIT
+ LOCK_INIT;
+ #endif
+
+ #ifdef LOCK_STATE
+ LOCK_STATE;
+ #endif
+
+ uint64_t s_b, e_b, i;
+ CK_CC_UNUSED int core = 0;
+
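+	/* Uncontended latency: time four acquire/release pairs per iteration and average them out below. */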
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; ++i) {
+ #ifdef LOCK
+ LOCK;
+ UNLOCK;
+ LOCK;
+ UNLOCK;
+ LOCK;
+ UNLOCK;
+ LOCK;
+ UNLOCK;
+ #endif
+ }
+ e_b = rdtsc();
+ printf("%15" PRIu64 "\n", (e_b - s_b) / 4 / STEPS);
+
+ return (0);
+}
+
diff --git a/regressions/ck_spinlock/benchmark/linux_spinlock.c b/regressions/ck_spinlock/benchmark/linux_spinlock.c
new file mode 100644
index 0000000..954019b
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/linux_spinlock.c
@@ -0,0 +1,7 @@
+#include "../linux_spinlock.h"
+
+#ifdef THROUGHPUT
+#include "throughput.h"
+#elif defined(LATENCY)
+#include "latency.h"
+#endif
diff --git a/regressions/ck_spinlock/benchmark/throughput.h b/regressions/ck_spinlock/benchmark/throughput.h
new file mode 100644
index 0000000..7851c50
--- /dev/null
+++ b/regressions/ck_spinlock/benchmark/throughput.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright 2008-2012 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_spinlock.h>
+
+#include "../../common.h"
+
+/* 8! = 40320, so the workload divides evenly across 1..8 processors. */
+#define WORKLOAD (40320 * 2056)
+
+#ifndef ITERATE
+#define ITERATE 65536
+#endif
+
+struct block {
+ unsigned int tid;
+};
+
+static struct affinity a;
+static unsigned int ready;
+
+struct counters {
+ uint64_t value;
+} CK_CC_CACHELINE;
+
+static struct counters *count;
+static uint64_t nthr;
+static unsigned int barrier;
+
+int critical __attribute__((aligned(64)));
+
+LOCK_DEFINE;
+
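+/*
+ * gen_lock and gen_unlock expand the LOCK/UNLOCK macros in isolation;
+ * CK_CC_USED keeps the compiler from discarding or warning about them
+ * even though fairness() invokes the macros directly.
+ */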
+CK_CC_USED static void
+gen_lock(void)
+{
+ CK_CC_UNUSED int core = 0;
+#ifdef LOCK_STATE
+ LOCK_STATE;
+#endif
+
+#ifdef LOCK
+ LOCK;
+#endif
+}
+
+CK_CC_USED static void
+gen_unlock(void)
+{
+#ifdef LOCK_STATE
+ LOCK_STATE;
+#endif
+
+#ifdef UNLOCK
+ UNLOCK;
+#endif
+}
+
+static void *
+fairness(void *null)
+{
+#ifdef LOCK_STATE
+ LOCK_STATE;
+#endif
+ struct block *context = null;
+ unsigned int i = context->tid;
+ volatile int j;
+ long int base;
+ unsigned int core;
+
+ if (aff_iterate_core(&a, &core)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (ck_pr_load_uint(&ready) == 0);
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) != nthr);
+
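+	/* Acquire the lock repeatedly until the main thread clears ready; per-thread counts expose fairness. */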
+ while (ready) {
+ LOCK;
+
+ count[i].value++;
+ if (critical) {
+ base = common_lrand48() % critical;
+ for (j = 0; j < base; j++);
+ }
+
+ UNLOCK;
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ uint64_t v, d;
+ unsigned int i;
+ pthread_t *threads;
+ struct block *context;
+
+ if (argc != 4) {
+ ck_error("Usage: " LOCK_NAME " <number of threads> <affinity delta> <critical section>\n");
+ exit(EXIT_FAILURE);
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ exit(EXIT_FAILURE);
+ }
+
+#ifdef LOCK_INIT
+ LOCK_INIT;
+#endif
+
+ critical = atoi(argv[3]);
+ if (critical < 0) {
+ ck_error("ERROR: critical section cannot be negative\n");
+ exit(EXIT_FAILURE);
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ exit(EXIT_FAILURE);
+ }
+
+ context = malloc(sizeof(struct block) * nthr);
+ if (context == NULL) {
+ ck_error("ERROR: Could not allocate thread contexts\n");
+ exit(EXIT_FAILURE);
+ }
+
+ a.delta = atoi(argv[2]);
+ a.request = 0;
+
+ count = malloc(sizeof(*count) * nthr);
+ if (count == NULL) {
+ ck_error("ERROR: Could not create acquisition buffer\n");
+ exit(EXIT_FAILURE);
+ }
+ memset(count, 0, sizeof(*count) * nthr);
+
+ fprintf(stderr, "Creating threads (fairness)...");
+ for (i = 0; i < nthr; i++) {
+ context[i].tid = i;
+ if (pthread_create(&threads[i], NULL, fairness, context + i)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ exit(EXIT_FAILURE);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ ck_pr_store_uint(&ready, 1);
+ common_sleep(10);
+ ck_pr_store_uint(&ready, 0);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (i = 0, v = 0; i < nthr; i++) {
+ printf("%d %15" PRIu64 "\n", i, count[i].value);
+ v += count[i].value;
+ }
+
+ printf("\n# total : %15" PRIu64 "\n", v);
+ printf("# throughput : %15" PRIu64 " a/s\n", (v /= nthr) / 10);
+
+ for (i = 0, d = 0; i < nthr; i++)
+ d += (count[i].value - v) * (count[i].value - v);
+
+ printf("# average : %15" PRIu64 "\n", v);
+ printf("# deviation : %.2f (%.2f%%)\n\n", sqrt(d / nthr), (sqrt(d / nthr) / v) * 100.00);
+
+ return (0);
+}
+
diff --git a/regressions/ck_spinlock/ck_anderson.h b/regressions/ck_spinlock/ck_anderson.h
new file mode 100644
index 0000000..7dc8e6e
--- /dev/null
+++ b/regressions/ck_spinlock/ck_anderson.h
@@ -0,0 +1,11 @@
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#define LOCK_NAME "ck_anderson"
+#define LOCK_DEFINE static ck_spinlock_anderson_t lock CK_CC_CACHELINE
+#define LOCK_STATE ck_spinlock_anderson_thread_t *nad = NULL
+#define LOCK ck_spinlock_anderson_lock(&lock, &nad)
+#define UNLOCK ck_spinlock_anderson_unlock(&lock, nad)
+#define LOCK_INIT ck_spinlock_anderson_init(&lock, malloc(MAX(64,sizeof(ck_spinlock_anderson_thread_t)) * nthr), nthr)
+#define LOCKED ck_spinlock_anderson_locked(&lock)
+
+#define NO_LOCAL
+
diff --git a/regressions/ck_spinlock/ck_cas.h b/regressions/ck_spinlock/ck_cas.h
new file mode 100644
index 0000000..bd4ae13
--- /dev/null
+++ b/regressions/ck_spinlock/ck_cas.h
@@ -0,0 +1,6 @@
+#define LOCK_NAME "ck_cas"
+#define LOCK_DEFINE static ck_spinlock_cas_t CK_CC_CACHELINE lock = CK_SPINLOCK_CAS_INITIALIZER
+#define LOCK ck_spinlock_cas_lock_eb(&lock)
+#define UNLOCK ck_spinlock_cas_unlock(&lock)
+#define LOCKED ck_spinlock_cas_locked(&lock)
+
diff --git a/regressions/ck_spinlock/ck_clh.h b/regressions/ck_spinlock/ck_clh.h
new file mode 100644
index 0000000..df7e49f
--- /dev/null
+++ b/regressions/ck_spinlock/ck_clh.h
@@ -0,0 +1,9 @@
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#define LOCK_NAME "ck_clh"
+#define LOCK_DEFINE static ck_spinlock_clh_t CK_CC_CACHELINE *lock = NULL
+#define LOCK_STATE ck_spinlock_clh_t *na = malloc(MAX(sizeof(ck_spinlock_clh_t), 64))
+#define LOCK ck_spinlock_clh_lock(&lock, na)
+#define UNLOCK ck_spinlock_clh_unlock(&na)
+#define LOCK_INIT ck_spinlock_clh_init(&lock, malloc(MAX(sizeof(ck_spinlock_clh_t), 64)))
+#define LOCKED ck_spinlock_clh_locked(&lock)
+
diff --git a/regressions/ck_spinlock/ck_dec.h b/regressions/ck_spinlock/ck_dec.h
new file mode 100644
index 0000000..c21a390
--- /dev/null
+++ b/regressions/ck_spinlock/ck_dec.h
@@ -0,0 +1,6 @@
+#define LOCK_NAME "ck_dec"
+#define LOCK_DEFINE static ck_spinlock_dec_t CK_CC_CACHELINE lock = CK_SPINLOCK_DEC_INITIALIZER
+#define LOCK ck_spinlock_dec_lock_eb(&lock)
+#define UNLOCK ck_spinlock_dec_unlock(&lock)
+#define LOCKED ck_spinlock_dec_locked(&lock)
+
diff --git a/regressions/ck_spinlock/ck_fas.h b/regressions/ck_spinlock/ck_fas.h
new file mode 100644
index 0000000..e244746
--- /dev/null
+++ b/regressions/ck_spinlock/ck_fas.h
@@ -0,0 +1,6 @@
+#define LOCK_NAME "ck_fas"
+#define LOCK_DEFINE static ck_spinlock_fas_t CK_CC_CACHELINE lock = CK_SPINLOCK_FAS_INITIALIZER
+#define LOCK ck_spinlock_fas_lock_eb(&lock)
+#define UNLOCK ck_spinlock_fas_unlock(&lock)
+#define LOCKED ck_spinlock_fas_locked(&lock)
+
diff --git a/regressions/ck_spinlock/ck_hclh.h b/regressions/ck_spinlock/ck_hclh.h
new file mode 100644
index 0000000..eb2e6eb
--- /dev/null
+++ b/regressions/ck_spinlock/ck_hclh.h
@@ -0,0 +1,16 @@
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#define LOCK_NAME "ck_hclh"
+#define LOCK_DEFINE static ck_spinlock_hclh_t CK_CC_CACHELINE *glob_lock; \
+ static ck_spinlock_hclh_t CK_CC_CACHELINE *local_lock[CORES / 2]
+#define LOCK_STATE ck_spinlock_hclh_t *na = malloc(MAX(sizeof(ck_spinlock_hclh_t), 64))
+#define LOCK ck_spinlock_hclh_lock(&glob_lock, &local_lock[(core % CORES) / 2], na)
+#define UNLOCK ck_spinlock_hclh_unlock(&na)
+#define LOCK_INIT do { \
+ int _i; \
+ ck_spinlock_hclh_init(&glob_lock, malloc(MAX(sizeof(ck_spinlock_hclh_t), 64)), -1); \
+ for (_i = 0; _i < CORES / 2; _i++) { \
+ ck_spinlock_hclh_init(&local_lock[_i], malloc(MAX(sizeof(ck_spinlock_hclh_t), 64)), _i); } \
+} while (0)
+
+#define LOCKED ck_spinlock_hclh_locked(&glob_lock)
+
diff --git a/regressions/ck_spinlock/ck_mcs.h b/regressions/ck_spinlock/ck_mcs.h
new file mode 100644
index 0000000..dd127df
--- /dev/null
+++ b/regressions/ck_spinlock/ck_mcs.h
@@ -0,0 +1,7 @@
+#define LOCK_NAME "ck_mcs"
+#define LOCK_DEFINE static ck_spinlock_mcs_t CK_CC_CACHELINE lock = NULL
+#define LOCK_STATE ck_spinlock_mcs_context_t node CK_CC_CACHELINE;
+#define LOCK ck_spinlock_mcs_lock(&lock, &node)
+#define UNLOCK ck_spinlock_mcs_unlock(&lock, &node)
+#define LOCKED ck_spinlock_mcs_locked(&lock)
+
diff --git a/regressions/ck_spinlock/ck_spinlock.h b/regressions/ck_spinlock/ck_spinlock.h
new file mode 100644
index 0000000..938e1ce
--- /dev/null
+++ b/regressions/ck_spinlock/ck_spinlock.h
@@ -0,0 +1,6 @@
+#define LOCK_NAME "ck_spinlock"
+#define LOCK_DEFINE static ck_spinlock_t CK_CC_CACHELINE lock = CK_SPINLOCK_INITIALIZER
+#define LOCK ck_spinlock_lock_eb(&lock)
+#define UNLOCK ck_spinlock_unlock(&lock)
+#define LOCKED ck_spinlock_locked(&lock)
+
diff --git a/regressions/ck_spinlock/ck_ticket.h b/regressions/ck_spinlock/ck_ticket.h
new file mode 100644
index 0000000..39054a6
--- /dev/null
+++ b/regressions/ck_spinlock/ck_ticket.h
@@ -0,0 +1,11 @@
+#include <ck_spinlock.h>
+
+#define LOCK_NAME "ck_ticket"
+#define LOCK_DEFINE static ck_spinlock_ticket_t CK_CC_CACHELINE lock = CK_SPINLOCK_TICKET_INITIALIZER
+#define LOCK ck_spinlock_ticket_lock(&lock)
+#define UNLOCK ck_spinlock_ticket_unlock(&lock)
+#ifdef CK_F_SPINLOCK_TICKET_TRYLOCK
+#define TRYLOCK ck_spinlock_ticket_trylock(&lock)
+#endif
+#define LOCKED ck_spinlock_ticket_locked(&lock)
+
diff --git a/regressions/ck_spinlock/ck_ticket_pb.h b/regressions/ck_spinlock/ck_ticket_pb.h
new file mode 100644
index 0000000..b8a7a84
--- /dev/null
+++ b/regressions/ck_spinlock/ck_ticket_pb.h
@@ -0,0 +1,6 @@
+#define LOCK_NAME "ck_ticket_pb"
+#define LOCK_DEFINE static ck_spinlock_ticket_t CK_CC_CACHELINE lock = CK_SPINLOCK_TICKET_INITIALIZER
+#define LOCK ck_spinlock_ticket_lock_pb(&lock, 0)
+#define UNLOCK ck_spinlock_ticket_unlock(&lock)
+#define LOCKED ck_spinlock_ticket_locked(&lock)
+
diff --git a/regressions/ck_spinlock/linux_spinlock.h b/regressions/ck_spinlock/linux_spinlock.h
new file mode 100644
index 0000000..5fe1f3e
--- /dev/null
+++ b/regressions/ck_spinlock/linux_spinlock.h
@@ -0,0 +1,39 @@
+#include <ck_cc.h>
+
+CK_CC_INLINE static void
+spin_lock(volatile unsigned int *lock)
+{
+#ifdef __x86_64__
+ __asm__ __volatile__(
+ "\n1:\t"
+ "lock ; decl %0\n\t"
+ "jns 2f\n"
+ "3:\n"
+ "rep;nop\n\t"
+ "cmpl $0,%0\n\t"
+ "jle 3b\n\t"
+ "jmp 1b\n"
+ "2:\t" : "=m" (*lock) : : "memory");
+#else
+ *lock = 1;
+#endif
+
+ return;
+}
+
+CK_CC_INLINE static void
+spin_unlock(volatile unsigned int *lock)
+{
+#ifdef __x86_64__
+ __asm__ __volatile__("movl $1,%0" :"=m" (*lock) :: "memory");
+#else
+ *lock = 0;
+ return;
+#endif
+}
+
+#define LOCK_NAME "linux_spinlock"
+#define LOCK_DEFINE volatile unsigned int lock = 1
+#define LOCK spin_lock(&lock)
+#define UNLOCK spin_unlock(&lock)
+
diff --git a/regressions/ck_spinlock/validate/Makefile b/regressions/ck_spinlock/validate/Makefile
new file mode 100644
index 0000000..b1d7cba
--- /dev/null
+++ b/regressions/ck_spinlock/validate/Makefile
@@ -0,0 +1,57 @@
+.PHONY: check clean
+
+all: ck_ticket ck_mcs ck_dec ck_cas ck_fas ck_clh linux_spinlock \
+ ck_ticket_pb ck_anderson ck_spinlock ck_hclh
+
+check: all
+ ./ck_ticket $(CORES) 1
+ ./ck_mcs $(CORES) 1
+ ./ck_dec $(CORES) 1
+ ./ck_cas $(CORES) 1
+ ./ck_fas $(CORES) 1
+ ./ck_clh $(CORES) 1
+ ./ck_hclh $(CORES) 1
+ ./linux_spinlock $(CORES) 1
+ ./ck_ticket_pb $(CORES) 1
+ ./ck_anderson $(CORES) 1
+ ./ck_spinlock $(CORES) 1
+
+linux_spinlock: linux_spinlock.c
+ $(CC) $(CFLAGS) -o linux_spinlock linux_spinlock.c
+
+ck_spinlock: ck_spinlock.c
+ $(CC) $(CFLAGS) -o ck_spinlock ck_spinlock.c
+
+ck_ticket_pb: ck_ticket_pb.c
+ $(CC) $(CFLAGS) -o ck_ticket_pb ck_ticket_pb.c
+
+ck_clh: ck_clh.c
+ $(CC) $(CFLAGS) -o ck_clh ck_clh.c
+
+ck_hclh: ck_hclh.c
+ $(CC) $(CFLAGS) -o ck_hclh ck_hclh.c
+
+ck_anderson: ck_anderson.c
+ $(CC) $(CFLAGS) -o ck_anderson ck_anderson.c
+
+ck_fas: ck_fas.c
+ $(CC) $(CFLAGS) -o ck_fas ck_fas.c
+
+ck_ticket: ck_ticket.c
+ $(CC) $(CFLAGS) -o ck_ticket ck_ticket.c
+
+ck_cas: ck_cas.c
+ $(CC) $(CFLAGS) -o ck_cas ck_cas.c
+
+ck_mcs: ck_mcs.c
+ $(CC) $(CFLAGS) -o ck_mcs ck_mcs.c
+
+ck_dec: ck_dec.c
+ $(CC) $(CFLAGS) -o ck_dec ck_dec.c
+
+clean:
+ rm -rf ck_ticket ck_mcs ck_dec ck_cas ck_fas ck_clh linux_spinlock ck_ticket_pb \
+ ck_anderson ck_spinlock ck_hclh *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE -lm
diff --git a/regressions/ck_spinlock/validate/ck_anderson.c b/regressions/ck_spinlock/validate/ck_anderson.c
new file mode 100644
index 0000000..b10900c
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_anderson.c
@@ -0,0 +1,2 @@
+#include "../ck_anderson.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_cas.c b/regressions/ck_spinlock/validate/ck_cas.c
new file mode 100644
index 0000000..162490a
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_cas.c
@@ -0,0 +1,2 @@
+#include "../ck_cas.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_clh.c b/regressions/ck_spinlock/validate/ck_clh.c
new file mode 100644
index 0000000..19cb512
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_clh.c
@@ -0,0 +1,2 @@
+#include "../ck_clh.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_dec.c b/regressions/ck_spinlock/validate/ck_dec.c
new file mode 100644
index 0000000..fd351de
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_dec.c
@@ -0,0 +1,2 @@
+#include "../ck_dec.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_fas.c b/regressions/ck_spinlock/validate/ck_fas.c
new file mode 100644
index 0000000..5cf4071
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_fas.c
@@ -0,0 +1,2 @@
+#include "../ck_fas.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_hclh.c b/regressions/ck_spinlock/validate/ck_hclh.c
new file mode 100644
index 0000000..001f57b
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_hclh.c
@@ -0,0 +1,2 @@
+#include "../ck_hclh.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_mcs.c b/regressions/ck_spinlock/validate/ck_mcs.c
new file mode 100644
index 0000000..7adad43
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_mcs.c
@@ -0,0 +1,2 @@
+#include "../ck_mcs.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_spinlock.c b/regressions/ck_spinlock/validate/ck_spinlock.c
new file mode 100644
index 0000000..e682905
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_spinlock.c
@@ -0,0 +1,2 @@
+#include "../ck_spinlock.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_ticket.c b/regressions/ck_spinlock/validate/ck_ticket.c
new file mode 100644
index 0000000..be67254
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_ticket.c
@@ -0,0 +1,2 @@
+#include "../ck_ticket.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/ck_ticket_pb.c b/regressions/ck_spinlock/validate/ck_ticket_pb.c
new file mode 100644
index 0000000..e62ee0e
--- /dev/null
+++ b/regressions/ck_spinlock/validate/ck_ticket_pb.c
@@ -0,0 +1,2 @@
+#include "../ck_ticket_pb.h"
+#include "validate.h"
diff --git a/regressions/ck_spinlock/validate/linux_spinlock.c b/regressions/ck_spinlock/validate/linux_spinlock.c
new file mode 100644
index 0000000..781e419
--- /dev/null
+++ b/regressions/ck_spinlock/validate/linux_spinlock.c
@@ -0,0 +1,14 @@
+#ifdef __x86_64__
+#include "../linux_spinlock.h"
+#include "validate.h"
+#else
+#include <stdio.h>
+
+int
+main(void)
+{
+
+ fprintf(stderr, "Unsupported.\n");
+ return 0;
+}
+#endif
diff --git a/regressions/ck_spinlock/validate/validate.h b/regressions/ck_spinlock/validate/validate.h
new file mode 100644
index 0000000..df40584
--- /dev/null
+++ b/regressions/ck_spinlock/validate/validate.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <ck_spinlock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+struct block {
+ unsigned int tid;
+};
+
+static struct affinity a;
+static unsigned int locked = 0;
+static uint64_t nthr;
+
+LOCK_DEFINE;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+#ifdef LOCK_STATE
+ LOCK_STATE;
+#endif
+ unsigned int i = ITERATE;
+ unsigned int j;
+ unsigned int core;
+
+ if (aff_iterate_core(&a, &core)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
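+		/* Alternate between the blocking lock and the trylock path whenever a trylock variant is provided. */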
+#ifdef TRYLOCK
+ if (i & 1) {
+ LOCK;
+ } else {
+ while (TRYLOCK == false)
+ ck_pr_stall();
+ }
+#else
+ LOCK;
+#endif
+
+#ifdef LOCKED
+ if (LOCKED == false)
+			ck_error("ERROR: is_locked operation failed.\n");
+#endif
+
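+		/*
+		 * Ten plain read-modify-write increments followed by ten decrements;
+		 * without mutual exclusion another thread can interleave and the
+		 * intermediate load will not observe 10.
+		 */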
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+ ck_pr_store_uint(&locked, locked + 1);
+
+ j = ck_pr_load_uint(&locked);
+
+ if (j != 10) {
+ ck_error("ERROR (WR): Race condition (%u)\n", j);
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+ ck_pr_store_uint(&locked, locked - 1);
+
+ UNLOCK;
+ LOCK;
+
+ j = ck_pr_load_uint(&locked);
+ if (j != 0) {
+ ck_error("ERROR (RD): Race condition (%u)\n", j);
+ exit(EXIT_FAILURE);
+ }
+
+ UNLOCK;
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ uint64_t i;
+ pthread_t *threads;
+
+ if (argc != 3) {
+ ck_error("Usage: " LOCK_NAME " <number of threads> <affinity delta>\n");
+ exit(EXIT_FAILURE);
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ exit(EXIT_FAILURE);
+ }
+
+#ifdef LOCK_INIT
+ LOCK_INIT;
+#endif
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ exit(EXIT_FAILURE);
+ }
+
+ a.delta = atoi(argv[2]);
+ a.request = 0;
+
+ fprintf(stderr, "Creating threads (mutual exclusion)...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %" PRIu64 "\n", i);
+ exit(EXIT_FAILURE);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return (0);
+}
+
diff --git a/regressions/ck_stack/benchmark/Makefile b/regressions/ck_stack/benchmark/Makefile
new file mode 100644
index 0000000..6e2df2a
--- /dev/null
+++ b/regressions/ck_stack/benchmark/Makefile
@@ -0,0 +1,14 @@
+.PHONY: clean distribution
+
+OBJECTS=latency
+
+all: $(OBJECTS)
+
+latency: latency.c
+ $(CC) $(CFLAGS) -o latency latency.c
+
+clean:
+ rm -rf *~ *.o *.dSYM *.exe $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_stack/benchmark/latency.c b/regressions/ck_stack/benchmark/latency.c
new file mode 100644
index 0000000..867151c
--- /dev/null
+++ b/regressions/ck_stack/benchmark/latency.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_stack.h>
+#include <ck_spinlock.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#ifndef ENTRIES
+#define ENTRIES 4096
+#endif
+
+#ifndef STEPS
+#define STEPS 40000
+#endif
+
+/*
+ * Note the redundant post-increments of r below. They exist only to
+ * silence otherwise irrelevant GCC warnings about r being set but
+ * never used.
+ */
+
+static ck_stack_t stack CK_CC_CACHELINE;
+
+int
+main(void)
+{
+ ck_stack_entry_t entry[ENTRIES];
+ ck_spinlock_fas_t mutex = CK_SPINLOCK_FAS_INITIALIZER;
+ volatile ck_stack_entry_t * volatile r;
+ uint64_t s, e, a;
+ unsigned int i;
+ unsigned int j;
+
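+	/* Baseline: serial push and pop under a FAS spinlock, for comparison with the lock-free variants below. */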
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++) {
+ ck_spinlock_fas_lock(&mutex);
+ ck_stack_push_spnc(&stack, entry + j);
+ ck_spinlock_fas_unlock(&mutex);
+ }
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf(" spinlock_push: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ for (j = 0; j < ENTRIES; j++)
+ ck_stack_push_spnc(&stack, entry + j);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++) {
+ ck_spinlock_fas_lock(&mutex);
+ r = ck_stack_pop_npsc(&stack);
+ ck_spinlock_fas_unlock(&mutex);
+ }
+ e = rdtsc();
+ a += e - s;
+ }
+ printf(" spinlock_pop: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+ r++;
+
+#ifdef CK_F_STACK_PUSH_UPMC
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_stack_push_upmc(&stack, entry + j);
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf("ck_stack_push_upmc: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+#endif /* CK_F_STACK_PUSH_UPMC */
+
+#ifdef CK_F_STACK_PUSH_MPMC
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_stack_push_mpmc(&stack, entry + j);
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf("ck_stack_push_mpmc: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+#endif /* CK_F_STACK_PUSH_MPMC */
+
+#ifdef CK_F_STACK_PUSH_MPNC
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ ck_stack_push_mpnc(&stack, entry + j);
+ e = rdtsc();
+
+ a += e - s;
+ }
+ printf("ck_stack_push_mpnc: %16" PRIu64 "\n", a / STEPS / ENTRIES);
+#endif /* CK_F_STACK_PUSH_MPNC */
+
+#if defined(CK_F_STACK_PUSH_UPMC) && defined(CK_F_STACK_POP_UPMC)
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ for (j = 0; j < ENTRIES; j++)
+ ck_stack_push_upmc(&stack, entry + j);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ r = ck_stack_pop_upmc(&stack);
+ e = rdtsc();
+ a += e - s;
+ }
+ printf(" ck_stack_pop_upmc: %16" PRIu64 "\n", a / STEPS / (sizeof(entry) / sizeof(*entry)));
+#endif /* CK_F_STACK_PUSH_UPMC && CK_F_STACK_POP_UPMC */
+
+#if defined(CK_F_STACK_POP_MPMC) && defined(CK_F_STACK_PUSH_MPMC)
+ a = 0;
+ for (i = 0; i < STEPS; i++) {
+ ck_stack_init(&stack);
+
+ for (j = 0; j < ENTRIES; j++)
+ ck_stack_push_mpmc(&stack, entry + j);
+
+ s = rdtsc();
+ for (j = 0; j < ENTRIES; j++)
+ r = ck_stack_pop_mpmc(&stack);
+ e = rdtsc();
+ a += e - s;
+ }
+ printf(" ck_stack_pop_mpmc: %16" PRIu64 "\n", a / STEPS / (sizeof(entry) / sizeof(*entry)));
+ r++;
+#endif
+
+ return 0;
+}
diff --git a/regressions/ck_stack/validate/Makefile b/regressions/ck_stack/validate/Makefile
new file mode 100644
index 0000000..519dca1
--- /dev/null
+++ b/regressions/ck_stack/validate/Makefile
@@ -0,0 +1,56 @@
+.PHONY: check clean distribution
+
+OBJECTS=serial mpnc_push mpmc_push upmc_push spinlock_push spinlock_eb_push \
+ mpmc_pop upmc_pop spinlock_pop spinlock_eb_pop \
+ upmc_trypop mpmc_trypop mpmc_trypair \
+ mpmc_pair spinlock_pair spinlock_eb_pair pthreads_pair \
+ mpmc_trypush upmc_trypush
+
+all: $(OBJECTS)
+
+check: all
+ ./serial
+ ./mpmc_pair $(CORES) 1 0
+ ./upmc_trypop $(CORES) 1 0
+ ./mpmc_trypop $(CORES) 1 0
+ ./mpmc_trypair $(CORES) 1 0
+ ./mpmc_pop $(CORES) 1 0
+ ./upmc_pop $(CORES) 1 0
+ ./mpnc_push $(CORES) 1 0
+ ./mpmc_push $(CORES) 1 0
+ ./upmc_push $(CORES) 1 0
+ ./mpmc_trypush $(CORES) 1 0
+ ./upmc_trypush $(CORES) 1 0
+
+serial: serial.c
+ $(CC) $(CFLAGS) -o serial serial.c
+
+mpmc_trypush upmc_trypush mpnc_push mpmc_push upmc_push spinlock_push spinlock_eb_push: push.c
+ $(CC) -DTRYUPMC $(CFLAGS) -o upmc_trypush push.c
+ $(CC) -DTRYMPMC $(CFLAGS) -o mpmc_trypush push.c
+ $(CC) -DMPNC $(CFLAGS) -o mpnc_push push.c
+ $(CC) -DMPMC $(CFLAGS) -o mpmc_push push.c
+ $(CC) -DUPMC $(CFLAGS) -o upmc_push push.c
+ $(CC) -DSPINLOCK $(CFLAGS) -o spinlock_push push.c
+ $(CC) -DSPINLOCK -DEB $(CFLAGS) -o spinlock_eb_push push.c
+
+upmc_trypop mpmc_trypop mpmc_pop tryupmc_pop upmc_pop spinlock_pop spinlock_eb_pop: pop.c
+ $(CC) -DTRYMPMC $(CFLAGS) -o mpmc_trypop pop.c
+ $(CC) -DTRYUPMC $(CFLAGS) -o upmc_trypop pop.c
+ $(CC) -DMPMC $(CFLAGS) -o mpmc_pop pop.c
+ $(CC) -DUPMC $(CFLAGS) -o upmc_pop pop.c
+ $(CC) -DSPINLOCK $(CFLAGS) -o spinlock_pop pop.c
+ $(CC) -DEB -DSPINLOCK $(CFLAGS) -o spinlock_eb_pop pop.c
+
+mpmc_trypair mpmc_pair spinlock_pair spinlock_eb_pair pthreads_pair: pair.c
+ $(CC) -DTRYMPMC $(CFLAGS) -o mpmc_trypair pair.c
+ $(CC) -DMPMC $(CFLAGS) -o mpmc_pair pair.c
+ $(CC) -DSPINLOCK $(CFLAGS) -o spinlock_pair pair.c
+ $(CC) -DEB -DSPINLOCK $(CFLAGS) -o spinlock_eb_pair pair.c
+ $(CC) -DPTHREADS $(CFLAGS) -o pthreads_pair pair.c
+
+clean:
+ rm -rf *~ *.o *.dSYM *.exe $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_stack/validate/pair.c b/regressions/ck_stack/validate/pair.c
new file mode 100644
index 0000000..c0f1bb1
--- /dev/null
+++ b/regressions/ck_stack/validate/pair.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2009 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#ifdef SPINLOCK
+#include <ck_spinlock.h>
+#endif
+#include <ck_stack.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <pthread.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#ifndef ITEMS
+#define ITEMS (5765760)
+#endif
+
+#define TVTOD(tv) ((tv).tv_sec+((tv).tv_usec / (double)1000000))
+
+struct entry {
+ int value;
+#if defined(SPINLOCK) || defined(PTHREADS)
+ struct entry *next;
+#else
+ ck_stack_entry_t next;
+#endif
+} CK_CC_CACHELINE;
+
+#ifdef SPINLOCK
+static struct entry *stack CK_CC_CACHELINE;
+ck_spinlock_fas_t stack_spinlock = CK_SPINLOCK_FAS_INITIALIZER;
+#define UNLOCK ck_spinlock_fas_unlock
+#if defined(EB)
+#define LOCK ck_spinlock_fas_lock_eb
+#else
+#define LOCK ck_spinlock_fas_lock
+#endif
+#elif defined(PTHREADS)
+static struct entry *stack CK_CC_CACHELINE;
+pthread_mutex_t stack_spinlock = PTHREAD_MUTEX_INITIALIZER;
+#define LOCK pthread_mutex_lock
+#define UNLOCK pthread_mutex_unlock
+#else
+static ck_stack_t stack CK_CC_CACHELINE;
+CK_STACK_CONTAINER(struct entry, next, getvalue)
+#endif
+
+static struct affinity affinerator;
+static unsigned long long nthr;
+static volatile unsigned int barrier = 0;
+static unsigned int critical;
+
+static void *
+stack_thread(void *buffer)
+{
+#if (defined(MPMC) && defined(CK_F_STACK_POP_MPMC)) || (defined(UPMC) && defined(CK_F_STACK_POP_UPMC)) || (defined(TRYUPMC) && defined(CK_F_STACK_TRYPOP_UPMC)) || (defined(TRYMPMC) && defined(CK_F_STACK_TRYPOP_MPMC))
+ ck_stack_entry_t *ref;
+#endif
+ struct entry *entry = buffer;
+ unsigned long long i, n = ITEMS;
+ unsigned int seed;
+ int j;
+
+ if (aff_iterate(&affinerator)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
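+	/* Spin until the parent thread releases all workers through the start barrier. */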
+ while (barrier == 0);
+
+ for (i = 0; i < n; i++) {
+#if defined(MPMC)
+ ck_stack_push_mpmc(&stack, &entry->next);
+#elif defined(TRYMPMC)
+ while (ck_stack_trypush_mpmc(&stack, &entry->next) == false)
+ ck_pr_stall();
+#elif defined(UPMC)
+ ck_stack_push_upmc(&stack, &entry->next);
+#elif defined(TRYUPMC)
+ while (ck_stack_trypush_upmc(&stack, &entry->next) == false)
+ ck_pr_stall();
+#elif defined(SPINLOCK) || defined(PTHREADS)
+ LOCK(&stack_spinlock);
+ ck_pr_store_ptr(&entry->next, stack);
+ ck_pr_store_ptr(&stack, entry);
+ UNLOCK(&stack_spinlock);
+#else
+# error Undefined operation.
+#endif
+
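+		/* Optionally burn a random number of cycles to simulate work between the push and the pop. */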
+ if (critical) {
+ j = common_rand_r(&seed) % critical;
+ while (j--)
+ __asm__ __volatile__("" ::: "memory");
+ }
+
+#if defined(MPMC)
+#ifdef CK_F_STACK_POP_MPMC
+ ref = ck_stack_pop_mpmc(&stack);
+ entry = getvalue(ref);
+#endif
+#elif defined(TRYMPMC)
+#ifdef CK_F_STACK_TRYPOP_MPMC
+ while (ck_stack_trypop_mpmc(&stack, &ref) == false)
+ ck_pr_stall();
+ entry = getvalue(ref);
+#endif /* CK_F_STACK_TRYPOP_MPMC */
+#elif defined(UPMC)
+ ref = ck_stack_pop_upmc(&stack);
+ entry = getvalue(ref);
+#elif defined(SPINLOCK) || defined(PTHREADS)
+ LOCK(&stack_spinlock);
+ entry = stack;
+ stack = stack->next;
+ UNLOCK(&stack_spinlock);
+#else
+# error Undefined operation.
+#endif
+ }
+
+ return (NULL);
+}
+
+static void
+stack_assert(void)
+{
+
+#if defined(SPINLOCK) || defined(PTHREADS)
+ assert(stack == NULL);
+#else
+ assert(CK_STACK_ISEMPTY(&stack));
+#endif
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ struct entry *bucket;
+ unsigned long long i, d;
+ pthread_t *thread;
+ struct timeval stv, etv;
+
+#if (defined(TRYMPMC) || defined(MPMC)) && (!defined(CK_F_STACK_PUSH_MPMC) || !defined(CK_F_STACK_POP_MPMC))
+ fprintf(stderr, "Unsupported.\n");
+ return 0;
+#endif
+
+ if (argc != 4) {
+ ck_error("Usage: stack <threads> <delta> <critical>\n");
+ }
+
+ {
+ char *e;
+
+ nthr = strtol(argv[1], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: too many threads");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+
+ d = strtol(argv[2], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: delta is too large");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+
+ critical = strtoul(argv[3], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: critical section is too large");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+ }
+
+ srand(getpid());
+
+ affinerator.request = 0;
+ affinerator.delta = d;
+
+ bucket = malloc(sizeof(struct entry) * nthr);
+ assert(bucket != NULL);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread != NULL);
+
+ for (i = 0; i < nthr; i++)
+ pthread_create(&thread[i], NULL, stack_thread, bucket + i);
+
+ barrier = 1;
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ barrier = 0;
+
+ for (i = 0; i < nthr; i++)
+ pthread_create(&thread[i], NULL, stack_thread, bucket + i);
+
+ common_gettimeofday(&stv, NULL);
+ barrier = 1;
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+ common_gettimeofday(&etv, NULL);
+
+ stack_assert();
+#ifdef _WIN32
+ printf("%3llu %.6f\n", nthr, TVTOD(etv) - TVTOD(stv));
+#else
+ printf("%3llu %.6lf\n", nthr, TVTOD(etv) - TVTOD(stv));
+#endif
+ return 0;
+}
diff --git a/regressions/ck_stack/validate/pop.c b/regressions/ck_stack/validate/pop.c
new file mode 100644
index 0000000..0d69d42
--- /dev/null
+++ b/regressions/ck_stack/validate/pop.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <ck_cc.h>
+#include <ck_pr.h>
+#ifdef SPINLOCK
+#include <ck_spinlock.h>
+#endif
+#include <ck_stack.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <pthread.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#ifndef ITEMS
+#define ITEMS (5765760 * 2)
+#endif
+
+#define TVTOD(tv) ((tv).tv_sec+((tv).tv_usec / (double)1000000))
+
+struct entry {
+ int value;
+#ifdef SPINLOCK
+ struct entry *next;
+#else
+ ck_stack_entry_t next;
+#endif
+};
+
+#ifdef SPINLOCK
+static struct entry *stack CK_CC_CACHELINE;
+ck_spinlock_fas_t stack_spinlock = CK_SPINLOCK_FAS_INITIALIZER;
+#define UNLOCK ck_spinlock_fas_unlock
+#if defined(EB)
+#define LOCK ck_spinlock_fas_lock_eb
+#else
+#define LOCK ck_spinlock_fas_lock
+#endif
+#else
+static ck_stack_t stack CK_CC_CACHELINE;
+CK_STACK_CONTAINER(struct entry, next, getvalue)
+#endif
+
+static struct affinity affinerator = AFFINITY_INITIALIZER;
+static unsigned long long nthr;
+static volatile unsigned int barrier = 0;
+static unsigned int critical;
+
+static void *
+stack_thread(void *unused CK_CC_UNUSED)
+{
+#if (defined(MPMC) && defined(CK_F_STACK_POP_MPMC)) || (defined(UPMC) && defined(CK_F_STACK_POP_UPMC)) || (defined(TRYMPMC) && defined(CK_F_STACK_TRYPOP_MPMC)) || (defined(TRYUPMC) && defined(CK_F_STACK_TRYPOP_UPMC))
+ ck_stack_entry_t *ref;
+#endif
+ struct entry *entry = NULL;
+ unsigned long long i, n = ITEMS / nthr;
+ unsigned int seed;
+ int j, previous = INT_MAX;
+
+ if (aff_iterate(&affinerator)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (barrier == 0);
+
+ for (i = 0; i < n; i++) {
+#ifdef MPMC
+#ifdef CK_F_STACK_POP_MPMC
+ ref = ck_stack_pop_mpmc(&stack);
+ assert(ref);
+ entry = getvalue(ref);
+#endif /* CK_F_STACK_POP_MPMC */
+#elif defined(TRYMPMC)
+#ifdef CK_F_STACK_TRYPOP_MPMC
+ while (ck_stack_trypop_mpmc(&stack, &ref) == false)
+ ck_pr_stall();
+ assert(ref);
+ entry = getvalue(ref);
+#endif /* CK_F_STACK_TRYPOP_MPMC */
+#elif defined(UPMC)
+ ref = ck_stack_pop_upmc(&stack);
+ assert(ref);
+ entry = getvalue(ref);
+#elif defined(TRYUPMC)
+ while (ck_stack_trypop_upmc(&stack, &ref) == false)
+ ck_pr_stall();
+ assert(ref);
+ entry = getvalue(ref);
+#elif defined(SPINLOCK)
+ LOCK(&stack_spinlock);
+ entry = stack;
+ stack = stack->next;
+ UNLOCK(&stack_spinlock);
+#else
+# error Undefined operation.
+#endif
+
+ if (critical) {
+ j = common_rand_r(&seed) % critical;
+ while (j--)
+ __asm__ __volatile__("" ::: "memory");
+ }
+
+ assert (previous >= entry->value);
+ previous = entry->value;
+ }
+
+ return (NULL);
+}
+
+static void
+stack_assert(void)
+{
+
+#ifdef SPINLOCK
+ assert(stack == NULL);
+#else
+ assert(CK_STACK_ISEMPTY(&stack));
+#endif
+ return;
+}
+
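+/*
+ * Fill the stack with ITEMS entries carrying ascending values so that the
+ * consumer threads can assert LIFO ordering on every pop.
+ */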
+static void
+push_stack(struct entry *bucket)
+{
+ unsigned long long i;
+
+#ifdef SPINLOCK
+ stack = NULL;
+#else
+ ck_stack_init(&stack);
+#endif
+
+ for (i = 0; i < ITEMS; i++) {
+ bucket[i].value = i % INT_MAX;
+#ifdef SPINLOCK
+ bucket[i].next = stack;
+ stack = bucket + i;
+#else
+ ck_stack_push_spnc(&stack, &bucket[i].next);
+#endif
+ }
+
+#ifndef SPINLOCK
+ ck_stack_entry_t *entry;
+ i = 0;
+ CK_STACK_FOREACH(&stack, entry) {
+ i++;
+ }
+ assert(i == ITEMS);
+#endif
+
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ struct entry *bucket;
+ unsigned long long i, d;
+ pthread_t *thread;
+ struct timeval stv, etv;
+
+#if (defined(TRYMPMC) || defined(MPMC)) && (!defined(CK_F_STACK_PUSH_MPMC) || !defined(CK_F_STACK_POP_MPMC))
+ fprintf(stderr, "Unsupported.\n");
+ return 0;
+#endif
+
+ if (argc != 4) {
+ ck_error("Usage: stack <threads> <delta> <critical>\n");
+ }
+
+ {
+ char *e;
+
+ nthr = strtol(argv[1], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: too many threads");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+
+ d = strtol(argv[2], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: delta is too large");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+
+ critical = strtoul(argv[3], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: critical section is too large");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+ }
+
+ srand(getpid());
+
+ affinerator.delta = d;
+ bucket = malloc(sizeof(struct entry) * ITEMS);
+ assert(bucket != NULL);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread != NULL);
+
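+	/* The first pass warms up the threads; the second pass below is the timed run. */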
+ push_stack(bucket);
+ for (i = 0; i < nthr; i++)
+ pthread_create(&thread[i], NULL, stack_thread, NULL);
+
+ barrier = 1;
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ barrier = 0;
+
+ push_stack(bucket);
+ for (i = 0; i < nthr; i++)
+ pthread_create(&thread[i], NULL, stack_thread, NULL);
+
+ common_gettimeofday(&stv, NULL);
+ barrier = 1;
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+ common_gettimeofday(&etv, NULL);
+
+ stack_assert();
+#ifdef _WIN32
+ printf("%3llu %.6f\n", nthr, TVTOD(etv) - TVTOD(stv));
+#else
+ printf("%3llu %.6lf\n", nthr, TVTOD(etv) - TVTOD(stv));
+#endif
+ return 0;
+}
diff --git a/regressions/ck_stack/validate/push.c b/regressions/ck_stack/validate/push.c
new file mode 100644
index 0000000..2b3ea33
--- /dev/null
+++ b/regressions/ck_stack/validate/push.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <ck_pr.h>
+#ifdef SPINLOCK
+#include <ck_spinlock.h>
+#endif
+#include <ck_stack.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <pthread.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#ifndef ITEMS
+#define ITEMS (5765760 * 2)
+#endif
+
+#define TVTOD(tv) ((tv).tv_sec+((tv).tv_usec / (double)1000000))
+
+struct entry {
+ int value;
+#ifdef SPINLOCK
+ struct entry *next;
+#else
+ ck_stack_entry_t next;
+#endif
+};
+
+#ifdef SPINLOCK
+static struct entry *stack CK_CC_CACHELINE;
+#else
+static ck_stack_t stack CK_CC_CACHELINE;
+#endif
+
+CK_STACK_CONTAINER(struct entry, next, getvalue)
+
+static struct affinity affinerator = AFFINITY_INITIALIZER;
+static unsigned long long nthr;
+static volatile unsigned int barrier = 0;
+static unsigned int critical;
+
+#if defined(SPINLOCK)
+ck_spinlock_fas_t stack_spinlock = CK_SPINLOCK_FAS_INITIALIZER;
+#define UNLOCK ck_spinlock_fas_unlock
+#if defined(EB)
+#define LOCK ck_spinlock_fas_lock_eb
+#else
+#define LOCK ck_spinlock_fas_lock
+#endif
+#elif defined(PTHREADS)
+pthread_mutex_t stack_spinlock = PTHREAD_MUTEX_INITIALIZER;
+#define LOCK pthread_mutex_lock
+#define UNLOCK pthread_mutex_unlock
+#endif
+
+static void *
+stack_thread(void *buffer)
+{
+ struct entry *bucket = buffer;
+ unsigned long long i, n = ITEMS / nthr;
+ unsigned int seed;
+ int j;
+
+ if (aff_iterate(&affinerator)) {
+ perror("ERROR: failed to affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (barrier == 0);
+
+ for (i = 0; i < n; i++) {
+ bucket[i].value = (i + 1) * 2;
+
+#if defined(MPNC)
+ ck_stack_push_mpnc(&stack, &bucket[i].next);
+#elif defined(MPMC)
+ ck_stack_push_mpmc(&stack, &bucket[i].next);
+#elif defined(TRYMPMC)
+ while (ck_stack_trypush_mpmc(&stack, &bucket[i].next) == false)
+ ck_pr_stall();
+#elif defined(TRYUPMC)
+ while (ck_stack_trypush_upmc(&stack, &bucket[i].next) == false)
+ ck_pr_stall();
+#elif defined(UPMC)
+ ck_stack_push_upmc(&stack, &bucket[i].next);
+#elif defined(SPINLOCK) || defined(PTHREADS)
+ LOCK(&stack_spinlock);
+ bucket[i].next = stack;
+ stack = bucket + i;
+ UNLOCK(&stack_spinlock);
+#else
+# error Undefined operation.
+#endif
+
+ if (critical) {
+ j = common_rand_r(&seed) % critical;
+ while (j--)
+ __asm__ __volatile__("" ::: "memory");
+ }
+ }
+
+ return (NULL);
+}
+
+static void
+stack_assert(void)
+{
+#ifndef SPINLOCK
+ ck_stack_entry_t *n;
+#endif
+ struct entry *p;
+ unsigned long long c = 0;
+
+#ifdef SPINLOCK
+ for (p = stack; p; p = p->next)
+ c++;
+#else
+ CK_STACK_FOREACH(&stack, n) {
+ p = getvalue(n);
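+		/* Read each entry through a volatile pointer so the access is not optimized away. */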
+ (void)((volatile struct entry *)p)->value;
+ c++;
+ }
+#endif
+
+ assert(c == ITEMS);
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ struct entry *bucket;
+ unsigned long long i, d, n;
+ pthread_t *thread;
+ struct timeval stv, etv;
+
+ if (argc != 4) {
+ ck_error("Usage: stack <threads> <delta> <critical>\n");
+ }
+
+ {
+ char *e;
+
+ nthr = strtol(argv[1], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: too many threads");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+
+ d = strtol(argv[2], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: delta is too large");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+
+ critical = strtoul(argv[3], &e, 10);
+ if (errno == ERANGE) {
+ perror("ERROR: critical section is too large");
+ exit(EXIT_FAILURE);
+ } else if (*e != '\0') {
+ ck_error("ERROR: input format is incorrect\n");
+ }
+ }
+
+ srand(getpid());
+
+ affinerator.request = 0;
+ affinerator.delta = d;
+ n = ITEMS / nthr;
+
+#ifndef SPINLOCK
+ ck_stack_init(&stack);
+#else
+ stack = NULL;
+#endif
+
+ bucket = malloc(sizeof(struct entry) * ITEMS);
+ assert(bucket != NULL);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread != NULL);
+
+ for (i = 0; i < nthr; i++)
+ pthread_create(&thread[i], NULL, stack_thread, bucket + i * n);
+
+ barrier = 1;
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ barrier = 0;
+
+#ifndef SPINLOCK
+ ck_stack_init(&stack);
+#else
+ stack = NULL;
+#endif
+
+ for (i = 0; i < nthr; i++)
+ pthread_create(&thread[i], NULL, stack_thread, bucket + i * n);
+
+ common_gettimeofday(&stv, NULL);
+ barrier = 1;
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+ common_gettimeofday(&etv, NULL);
+
+ stack_assert();
+#ifdef _WIN32
+ printf("%3llu %.6f\n", nthr, TVTOD(etv) - TVTOD(stv));
+#else
+ printf("%3llu %.6lf\n", nthr, TVTOD(etv) - TVTOD(stv));
+#endif
+ return 0;
+}
diff --git a/regressions/ck_stack/validate/serial.c b/regressions/ck_stack/validate/serial.c
new file mode 100644
index 0000000..eb667ca
--- /dev/null
+++ b/regressions/ck_stack/validate/serial.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2009-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include <ck_stack.h>
+
+#ifndef SIZE
+#define SIZE 1024000
+#endif
+
+struct entry {
+ int value;
+ ck_stack_entry_t next;
+};
+
+CK_STACK_CONTAINER(struct entry, next, get_entry)
+
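+/* Push SIZE entries with PUSH, then pop them all with POP, asserting strict LIFO order. */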
+#define LOOP(PUSH, POP) \
+ for (i = 0; i < SIZE; i++) { \
+ entries[i].value = i; \
+ PUSH(stack, &entries[i].next); \
+ } \
+ for (i = SIZE - 1; i >= 0; i--) { \
+ entry = POP(stack); \
+ assert(entry); \
+ assert(get_entry(entry)->value == i); \
+ }
+
+static void
+serial(ck_stack_t *stack)
+{
+ struct entry *entries;
+ ck_stack_entry_t *entry;
+ int i;
+
+ ck_stack_init(stack);
+
+ entries = malloc(sizeof(struct entry) * SIZE);
+ assert(entries != NULL);
+
+ LOOP(ck_stack_push_upmc, ck_stack_pop_upmc);
+#ifdef CK_F_STACK_POP_MPMC
+ LOOP(ck_stack_push_mpmc, ck_stack_pop_mpmc);
+#endif
+ LOOP(ck_stack_push_mpnc, ck_stack_pop_upmc);
+ LOOP(ck_stack_push_spnc, ck_stack_pop_npsc);
+
+ return;
+}
+
+int
+main(void)
+{
+ ck_stack_t stack CK_CC_CACHELINE;
+
+ serial(&stack);
+ return (0);
+}
diff --git a/regressions/ck_swlock/benchmark/Makefile b/regressions/ck_swlock/benchmark/Makefile
new file mode 100644
index 0000000..4ec728c
--- /dev/null
+++ b/regressions/ck_swlock/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: clean distribution
+
+OBJECTS=latency throughput
+
+all: $(OBJECTS)
+
+latency: latency.c ../../../include/ck_swlock.h
+ $(CC) $(CFLAGS) -o latency latency.c
+
+throughput: throughput.c ../../../include/ck_swlock.h
+ $(CC) $(CFLAGS) -o throughput throughput.c
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_swlock/benchmark/latency.c b/regressions/ck_swlock/benchmark/latency.c
new file mode 100644
index 0000000..73a9482
--- /dev/null
+++ b/regressions/ck_swlock/benchmark/latency.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_swlock.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#define CK_F_PR_RTM
+
+#ifndef STEPS
+#define STEPS 2000000
+#endif
+
+int
+main(void)
+{
+ uint64_t s_b, e_b, i;
+ ck_swlock_t swlock = CK_SWLOCK_INITIALIZER;
+
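+	/* Each measured loop below is preceded by an untimed warm-up pass. */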
+ for (i = 0; i < STEPS; i++) {
+ ck_swlock_write_lock(&swlock);
+ ck_swlock_write_unlock(&swlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_swlock_write_lock(&swlock);
+ ck_swlock_write_unlock(&swlock);
+ }
+ e_b = rdtsc();
+ printf(" WRITE: swlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_swlock_read_lock(&swlock);
+ ck_swlock_read_unlock(&swlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_swlock_read_lock(&swlock);
+ ck_swlock_read_unlock(&swlock);
+ }
+ e_b = rdtsc();
+ printf(" READ: swlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_swlock_write_latch(&swlock);
+ ck_swlock_write_unlatch(&swlock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_swlock_write_latch(&swlock);
+ ck_swlock_write_unlatch(&swlock);
+ }
+ e_b = rdtsc();
+ printf(" LATCH: swlock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ return 0;
+}
+
diff --git a/regressions/ck_swlock/benchmark/throughput.c b/regressions/ck_swlock/benchmark/throughput.c
new file mode 100644
index 0000000..5b05365
--- /dev/null
+++ b/regressions/ck_swlock/benchmark/throughput.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_swlock.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+static int barrier;
+static int threads;
+static unsigned int flag CK_CC_CACHELINE;
+static struct {
+ ck_swlock_t lock;
+} rw CK_CC_CACHELINE = {
+ .lock = CK_SWLOCK_INITIALIZER
+};
+
+static struct affinity affinity;
+
+static void *
+thread_lock(void *pun)
+{
+ uint64_t s_b, e_b, a, i;
+ uint64_t *value = pun;
+
+ if (aff_iterate(&affinity) != 0) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads)
+ ck_pr_stall();
+
+ for (i = 1, a = 0;; i++) {
+ s_b = rdtsc();
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ ck_swlock_read_lock(&rw.lock);
+ ck_swlock_read_unlock(&rw.lock);
+ e_b = rdtsc();
+
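+		/* The loop body performs 16 lock/unlock pairs, so shift by 4 to average per pair. */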
+ a += (e_b - s_b) >> 4;
+
+ if (ck_pr_load_uint(&flag) == 1)
+ break;
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads * 2)
+ ck_pr_stall();
+
+ *value = (a / i);
+ return NULL;
+}
+
+static void
+swlock_test(pthread_t *p, int d, uint64_t *latency, void *(*f)(void *), const char *label)
+{
+ int t;
+
+ ck_pr_store_int(&barrier, 0);
+ ck_pr_store_uint(&flag, 0);
+
+ affinity.delta = d;
+ affinity.request = 0;
+
+ fprintf(stderr, "Creating threads (%s)...", label);
+ for (t = 0; t < threads; t++) {
+ if (pthread_create(&p[t], NULL, f, latency + t) != 0) {
+ ck_error("ERROR: Could not create thread %d\n", t);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ common_sleep(10);
+ ck_pr_store_uint(&flag, 1);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (t = 0; t < threads; t++)
+ pthread_join(p[t], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (t = 1; t <= threads; t++)
+ printf("%10u %20" PRIu64 "\n", t, latency[t - 1]);
+
+ fprintf(stderr, "\n");
+ return;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+ int d;
+ pthread_t *p;
+ uint64_t *latency;
+
+ if (argc != 3) {
+ ck_error("Usage: throughput <delta> <threads>\n");
+ }
+
+ threads = atoi(argv[2]);
+ if (threads <= 0) {
+ ck_error("ERROR: Threads must be a value > 0.\n");
+ }
+
+ p = malloc(sizeof(pthread_t) * threads);
+ if (p == NULL) {
+ ck_error("ERROR: Failed to initialize thread.\n");
+ }
+
+ latency = malloc(sizeof(uint64_t) * threads);
+ if (latency == NULL) {
+ ck_error("ERROR: Failed to create latency buffer.\n");
+ }
+
+ d = atoi(argv[1]);
+ swlock_test(p, d, latency, thread_lock, "swlock");
+
+ return 0;
+}
+
diff --git a/regressions/ck_swlock/validate/Makefile b/regressions/ck_swlock/validate/Makefile
new file mode 100644
index 0000000..54d62f2
--- /dev/null
+++ b/regressions/ck_swlock/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=validate
+
+all: $(OBJECTS)
+
+validate: validate.c ../../../include/ck_swlock.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate $(CORES) 1
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_swlock/validate/validate.c b/regressions/ck_swlock/validate/validate.c
new file mode 100644
index 0000000..11366ce
--- /dev/null
+++ b/regressions/ck_swlock/validate/validate.c
@@ -0,0 +1,455 @@
+/*
+ * Copyright 2014 Jaidev Sridhar.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_swlock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+static struct affinity a;
+static unsigned int locked;
+static int nthr;
+static ck_swlock_t lock = CK_SWLOCK_INITIALIZER;
+static ck_swlock_t copy;
+#ifdef CK_F_PR_RTM
+static void *
+thread_rtm_adaptive(void *arg)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+ int tid = ck_pr_load_int(arg);
+
+ struct ck_elide_config config = CK_ELIDE_CONFIG_DEFAULT_INITIALIZER;
+ struct ck_elide_stat st = CK_ELIDE_STAT_INITIALIZER;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ if (tid == 0) {
+ CK_ELIDE_LOCK_ADAPTIVE(ck_swlock_write, &st, &config, &lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+					ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_ELIDE_UNLOCK_ADAPTIVE(ck_swlock_write, &st, &lock);
+ }
+
+ CK_ELIDE_LOCK(ck_swlock_read, &lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_ELIDE_UNLOCK(ck_swlock_read, &lock);
+ }
+
+ return NULL;
+}
+
+static void *
+thread_rtm_mix(void *arg)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+ int tid = ck_pr_load_int(arg);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ if (tid == 0) {
+ if (i & 1) {
+ CK_ELIDE_LOCK(ck_swlock_write, &lock);
+ } else {
+ ck_swlock_write_lock(&lock);
+ }
+
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+					ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+
+ if (i & 1) {
+ CK_ELIDE_UNLOCK(ck_swlock_write, &lock);
+ } else {
+ ck_swlock_write_unlock(&lock);
+ }
+ }
+ if (i & 1) {
+ CK_ELIDE_LOCK(ck_swlock_read, &lock);
+ } else {
+ ck_swlock_read_lock(&lock);
+ }
+
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+
+ if (i & 1) {
+ CK_ELIDE_UNLOCK(ck_swlock_read, &lock);
+ } else {
+ ck_swlock_read_unlock(&lock);
+ }
+ }
+
+ return (NULL);
+}
+
+static void *
+thread_rtm(void *arg)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+ int tid = ck_pr_load_int(arg);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ if (tid == 0) {
+ CK_ELIDE_LOCK(ck_swlock_write, &lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+					ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_ELIDE_UNLOCK(ck_swlock_write, &lock);
+ }
+
+ CK_ELIDE_LOCK(ck_swlock_read, &lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_ELIDE_UNLOCK(ck_swlock_read, &lock);
+ }
+
+ return (NULL);
+}
+#endif /* CK_F_PR_RTM */
+
+static void *
+thread_latch(void *arg)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+ int tid = ck_pr_load_int(arg);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ if (tid == 0) {
+ /* Writer */
+ ck_swlock_write_latch(&lock);
+ {
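+				/* Exercise latch semantics: copy the lock while it is latched and restore it before unlatching. */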
+ memcpy(&copy, &lock, sizeof(ck_swlock_t));
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+					ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ memcpy(&lock, &copy, sizeof(ck_swlock_t));
+ }
+ ck_swlock_write_unlatch(&lock);
+ }
+
+ ck_swlock_read_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_swlock_read_unlock(&lock);
+ }
+
+ return (NULL);
+}
+
+static void *
+thread(void *arg)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+ int tid = ck_pr_load_int(arg);
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ if (tid == 0) {
+ /* Writer */
+ ck_swlock_write_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+					ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_swlock_write_unlock(&lock);
+ }
+
+ ck_swlock_read_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_swlock_read_unlock(&lock);
+ }
+
+ return (NULL);
+}
+
+static void
+swlock_test(pthread_t *threads, void *(*f)(void *), const char *test)
+{
+ int i, tid[nthr];
+
+ fprintf(stderr, "Creating threads (%s)...", test);
+ for (i = 0; i < nthr; i++) {
+ ck_pr_store_int(&tid[i], i);
+ if (pthread_create(&threads[i], NULL, f, &tid[i])) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, ".");
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+
+ if (argc != 3) {
+ ck_error("Usage: validate <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ swlock_test(threads, thread, "regular");
+ swlock_test(threads, thread_latch, "latch");
+#ifdef CK_F_PR_RTM
+ swlock_test(threads, thread_rtm, "rtm");
+ swlock_test(threads, thread_rtm_mix, "rtm-mix");
+ swlock_test(threads, thread_rtm_adaptive, "rtm-adaptive");
+#endif
+ return 0;
+}
+
diff --git a/regressions/ck_tflock/benchmark/Makefile b/regressions/ck_tflock/benchmark/Makefile
new file mode 100644
index 0000000..ed63504
--- /dev/null
+++ b/regressions/ck_tflock/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: clean distribution
+
+OBJECTS=latency throughput
+
+all: $(OBJECTS)
+
+latency: latency.c ../../../include/ck_rwlock.h ../../../include/ck_elide.h
+ $(CC) $(CFLAGS) -o latency latency.c
+
+throughput: throughput.c ../../../include/ck_rwlock.h ../../../include/ck_elide.h
+ $(CC) $(CFLAGS) -o throughput throughput.c
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_tflock/benchmark/latency.c b/regressions/ck_tflock/benchmark/latency.c
new file mode 100644
index 0000000..fd77d44
--- /dev/null
+++ b/regressions/ck_tflock/benchmark/latency.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_tflock.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+#define CK_F_PR_RTM
+
+#ifndef STEPS
+#define STEPS 2000000
+#endif
+
+int
+main(void)
+{
+ uint64_t s_b, e_b, i;
+ ck_tflock_ticket_t tflock = CK_TFLOCK_TICKET_INITIALIZER;
+
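+	/* Each measured loop below is preceded by an untimed warm-up pass. */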
+ for (i = 0; i < STEPS; i++) {
+ ck_tflock_ticket_write_lock(&tflock);
+ ck_tflock_ticket_write_unlock(&tflock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_tflock_ticket_write_lock(&tflock);
+ ck_tflock_ticket_write_unlock(&tflock);
+ }
+ e_b = rdtsc();
+ printf(" WRITE: tflock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ for (i = 0; i < STEPS; i++) {
+ ck_tflock_ticket_read_lock(&tflock);
+ ck_tflock_ticket_read_unlock(&tflock);
+ }
+
+ s_b = rdtsc();
+ for (i = 0; i < STEPS; i++) {
+ ck_tflock_ticket_read_lock(&tflock);
+ ck_tflock_ticket_read_unlock(&tflock);
+ }
+ e_b = rdtsc();
+ printf(" READ: tflock %15" PRIu64 "\n", (e_b - s_b) / STEPS);
+
+ return 0;
+}
+
diff --git a/regressions/ck_tflock/benchmark/throughput.c b/regressions/ck_tflock/benchmark/throughput.c
new file mode 100644
index 0000000..41d22bd
--- /dev/null
+++ b/regressions/ck_tflock/benchmark/throughput.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_tflock.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS 1000000
+#endif
+
+static int barrier;
+static int threads;
+static unsigned int flag CK_CC_CACHELINE;
+static struct {
+ ck_tflock_ticket_t lock;
+} rw CK_CC_CACHELINE = {
+ .lock = CK_TFLOCK_TICKET_INITIALIZER
+};
+
+static struct affinity affinity;
+
+static void *
+thread_lock(void *pun)
+{
+ uint64_t s_b, e_b, a, i;
+ uint64_t *value = pun;
+
+ if (aff_iterate(&affinity) != 0) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads)
+ ck_pr_stall();
+
+ for (i = 1, a = 0;; i++) {
+ s_b = rdtsc();
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ ck_tflock_ticket_read_lock(&rw.lock);
+ ck_tflock_ticket_read_unlock(&rw.lock);
+ e_b = rdtsc();
+
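+		/* The loop body performs 16 lock/unlock pairs, so shift by 4 to average per pair. */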
+ a += (e_b - s_b) >> 4;
+
+ if (ck_pr_load_uint(&flag) == 1)
+ break;
+ }
+
+ ck_pr_inc_int(&barrier);
+ while (ck_pr_load_int(&barrier) != threads * 2)
+ ck_pr_stall();
+
+ *value = (a / i);
+ return NULL;
+}
+
+static void
+tflock_test(pthread_t *p, int d, uint64_t *latency, void *(*f)(void *), const char *label)
+{
+ int t;
+
+ ck_pr_store_int(&barrier, 0);
+ ck_pr_store_uint(&flag, 0);
+
+ affinity.delta = d;
+ affinity.request = 0;
+
+ fprintf(stderr, "Creating threads (%s)...", label);
+ for (t = 0; t < threads; t++) {
+ if (pthread_create(&p[t], NULL, f, latency + t) != 0) {
+ ck_error("ERROR: Could not create thread %d\n", t);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ common_sleep(10);
+ ck_pr_store_uint(&flag, 1);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (t = 0; t < threads; t++)
+ pthread_join(p[t], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (t = 1; t <= threads; t++)
+ printf("%10u %20" PRIu64 "\n", t, latency[t - 1]);
+
+ fprintf(stderr, "\n");
+ return;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+ int d;
+ pthread_t *p;
+ uint64_t *latency;
+
+ if (argc != 3) {
+ ck_error("Usage: throughput <delta> <threads>\n");
+ }
+
+ threads = atoi(argv[2]);
+ if (threads <= 0) {
+ ck_error("ERROR: Threads must be a value > 0.\n");
+ }
+
+ p = malloc(sizeof(pthread_t) * threads);
+ if (p == NULL) {
+ ck_error("ERROR: Failed to initialize thread.\n");
+ }
+
+ latency = malloc(sizeof(uint64_t) * threads);
+ if (latency == NULL) {
+ ck_error("ERROR: Failed to create latency buffer.\n");
+ }
+
+ d = atoi(argv[1]);
+ tflock_test(p, d, latency, thread_lock, "tflock");
+ return 0;
+}
+
diff --git a/regressions/ck_tflock/validate/Makefile b/regressions/ck_tflock/validate/Makefile
new file mode 100644
index 0000000..6ae7c73
--- /dev/null
+++ b/regressions/ck_tflock/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=validate
+
+all: $(OBJECTS)
+
+validate: validate.c ../../../include/ck_tflock.h ../../../include/ck_elide.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate $(CORES) 1
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_tflock/validate/validate.c b/regressions/ck_tflock/validate/validate.c
new file mode 100644
index 0000000..22e9e65
--- /dev/null
+++ b/regressions/ck_tflock/validate/validate.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_tflock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+static struct affinity a;
+static unsigned int locked;
+static int nthr;
+static ck_tflock_ticket_t lock = CK_TFLOCK_TICKET_INITIALIZER;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ unsigned int i = ITERATE;
+ unsigned int l;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ while (i--) {
+ ck_tflock_ticket_write_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+				ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_tflock_ticket_write_unlock(&lock);
+
+ ck_tflock_ticket_read_lock(&lock);
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [RD:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ ck_tflock_ticket_read_unlock(&lock);
+ }
+
+ return (NULL);
+}
+
+static void
+tflock_ticket_test(pthread_t *threads, void *(*f)(void *), const char *test)
+{
+ int i;
+
+ fprintf(stderr, "Creating threads (%s)...", test);
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, f, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, ".");
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+ return;
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+
+ if (argc != 3) {
+ ck_error("Usage: validate <number of threads> <affinity delta>\n");
+ }
+
+ nthr = atoi(argv[1]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[2]);
+
+ tflock_ticket_test(threads, thread, "regular");
+ ck_tflock_ticket_init(&lock);
+ return 0;
+}
+
diff --git a/regressions/common.h b/regressions/common.h
new file mode 100644
index 0000000..f67c2af
--- /dev/null
+++ b/regressions/common.h
@@ -0,0 +1,471 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_COMMON_H
+#define CK_COMMON_H
+
+#include <ck_cc.h>
+#include <ck_pr.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/time.h>
+
+#ifdef __linux__
+#include <sched.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+#elif defined(__MACH__)
+#include <mach/mach.h>
+#include <mach/thread_policy.h>
+#elif defined(__FreeBSD__)
+#include <sys/param.h>
+#include <sys/cpuset.h>
+#endif
+
+#if defined(_WIN32)
+#include <assert.h>
+#define NOMINMAX
+#include <windows.h>
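+/*
+ * Microseconds between the FILETIME epoch (01/01/1601) and the Unix epoch
+ * (01/01/1970): 11644473600 seconds * 10^6.
+ */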
+#define DELTA_EPOCH 11644473600000000ULL
+#else
+#include <signal.h>
+#include <unistd.h>
+#endif
+
+#ifndef CORES
+#define CORES 8
+#endif
+
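+/*
+ * The common_* wrappers below paper over the differences between the POSIX
+ * random(3)/rand48(3) interfaces and the rand(3)-only environment on Windows.
+ */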
+CK_CC_INLINE static void
+common_srand(unsigned int i)
+{
+#ifdef _WIN32
+ srand(i);
+#else
+ srandom(i);
+#endif
+}
+
+CK_CC_INLINE static int
+common_rand(void)
+{
+#ifdef _WIN32
+ return rand();
+#else
+ return random();
+#endif
+}
+
+CK_CC_INLINE static int
+common_rand_r(unsigned int *i)
+{
+#ifdef _WIN32
+ (void)i;
+
+ /*
+ * When linked with -mthreads, rand() is thread-safe.
+ * rand_s is also an option.
+ */
+ return rand();
+#else
+ return rand_r(i);
+#endif
+}
+
+CK_CC_INLINE static void
+common_srand48(long int i)
+{
+#ifdef _WIN32
+ srand(i);
+#else
+ srand48(i);
+#endif
+}
+
+CK_CC_INLINE static long int
+common_lrand48(void)
+{
+#ifdef _WIN32
+ return rand();
+#else
+ return lrand48();
+#endif
+}
+
+CK_CC_INLINE static double
+common_drand48(void)
+{
+#ifdef _WIN32
+ return (double)rand()/RAND_MAX;
+#else
+ return drand48();
+#endif
+}
+
+CK_CC_INLINE static void
+common_sleep(unsigned int n)
+{
+#ifdef _WIN32
+ Sleep(n * 1000);
+#else
+ sleep(n);
+#endif
+}
+
+CK_CC_INLINE static int
+common_gettimeofday(struct timeval *tv, void *tz)
+{
+#ifdef _WIN32
+ FILETIME ft;
+ uint64_t tmp_time = 0;
+ static bool tzflag = false;
+ struct timezone *tzp = tz;
+
+ if (tv != NULL) {
+ GetSystemTimeAsFileTime(&ft);
+ tmp_time |= ft.dwHighDateTime;
+ tmp_time <<= 32;
+ tmp_time |= ft.dwLowDateTime;
+
+ /* GetSystemTimeAsFileTime returns 100 nanosecond intervals. */
+ tmp_time /= 10;
+
+		/* The Windows epoch starts on 01/01/1601, while the Unix epoch starts on 01/01/1970. */
+ tmp_time -= DELTA_EPOCH;
+
+ tv->tv_sec = (long)(tmp_time / 1000000UL);
+ tv->tv_usec = (long)(tmp_time % 1000000UL);
+ }
+
+ if (tz != NULL) {
+ if (tzflag == false) {
+ _tzset();
+ tzflag = true;
+ }
+
+ tzp->tz_minuteswest = _timezone / 60;
+ tzp->tz_dsttime = _daylight;
+ }
+
+ return 0;
+#else
+ return gettimeofday(tv, tz);
+#endif
+}
+
+CK_CC_UNUSED static unsigned int
+common_alarm(void (*sig_handler)(int), void *alarm_event, unsigned int duration)
+{
+#ifdef _WIN32
+ (void)sig_handler;
+ (void)duration;
+ bool success;
+ HANDLE *alarm_handle = alarm_event;
+ success = SetEvent(*alarm_handle);
+ assert(success != false);
+ return 0;
+#else
+ (void)alarm_event;
+ signal(SIGALRM, sig_handler);
+ return alarm(duration);
+#endif
+}
+
+#ifdef _WIN32
+#ifndef SECOND_TIMER
+#define SECOND_TIMER 10000000
+#endif
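+/*
+ * Windows has no SIGALRM, so the COMMON_ALARM_* macros emulate alarm(2) with
+ * a waitable timer: a helper thread waits on an event, arms the timer for the
+ * requested duration, and the timer's completion routine sets the caller's
+ * flag. SECOND_TIMER is one second expressed in the 100-nanosecond units
+ * expected by SetWaitableTimer; the negative due time computed in
+ * COMMON_ALARM_INIT requests a relative timeout.
+ */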
+#define COMMON_ALARM_DECLARE_GLOBAL(prefix, alarm_event_name, flag_name) \
+static HANDLE prefix##_common_win_alarm_timer; \
+static HANDLE alarm_event_name; \
+static LARGE_INTEGER prefix##_common_alarm_timer_length; \
+ \
+static void CALLBACK \
+prefix##_common_win_alarm_handler(LPVOID arg, DWORD timer_low_value, DWORD timer_high_value) \
+{ \
+ (void)arg; \
+ (void)timer_low_value; \
+ (void)timer_high_value; \
+ flag_name = true; \
+ return; \
+} \
+ \
+static void * \
+prefix##_common_win_alarm(void *unused) \
+{ \
+ (void)unused; \
+ bool timer_success = false; \
+ for (;;) { \
+ WaitForSingleObjectEx(alarm_event_name, INFINITE, true); \
+ timer_success = SetWaitableTimer(prefix##_common_win_alarm_timer, \
+ &prefix##_common_alarm_timer_length, \
+ 0, \
+ prefix##_common_win_alarm_handler, NULL, false); \
+ assert(timer_success != false); \
+ WaitForSingleObjectEx(prefix##_common_win_alarm_timer, INFINITE, true); \
+ } \
+ \
+ return NULL; \
+}
+
+#define COMMON_ALARM_DECLARE_LOCAL(prefix, alarm_event_name) \
+ int64_t prefix##_common_alarm_tl; \
+ pthread_t prefix##_common_win_alarm_thread;
+
+#define COMMON_ALARM_INIT(prefix, alarm_event_name, duration) \
+ prefix##_common_alarm_tl = -1 * (duration) * SECOND_TIMER; \
+ prefix##_common_alarm_timer_length.LowPart = \
+ (DWORD) (prefix##_common_alarm_tl & 0xFFFFFFFF); \
+ prefix##_common_alarm_timer_length.HighPart = \
+ (LONG) (prefix##_common_alarm_tl >> 32); \
+ alarm_event_name = CreateEvent(NULL, false, false, NULL); \
+ assert(alarm_event_name != NULL); \
+ prefix##_common_win_alarm_timer = CreateWaitableTimer(NULL, true, NULL); \
+ assert(prefix##_common_win_alarm_timer != NULL); \
+ if (pthread_create(&prefix##_common_win_alarm_thread, \
+ NULL, \
+ prefix##_common_win_alarm, \
+ NULL) != 0) \
+ ck_error("ERROR: Failed to create common_win_alarm thread.\n");
+#else
+#define COMMON_ALARM_DECLARE_GLOBAL(prefix, alarm_event_name, flag_name)
+#define COMMON_ALARM_DECLARE_LOCAL(prefix, alarm_event_name) \
+ int alarm_event_name = 0;
+#define COMMON_ALARM_INIT(prefix, alarm_event_name, duration)
+#endif
+
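+/*
+ * Each call to aff_iterate() fetch-and-adds `delta' to `request' and pins the
+ * calling thread to core (request % CORES), spreading threads across cores in
+ * a round-robin fashion. Tests typically declare
+ * `static struct affinity a = AFFINITY_INITIALIZER;', set a.delta from the
+ * command line and call aff_iterate(&a) at the start of each worker.
+ */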
+struct affinity {
+ unsigned int delta;
+ unsigned int request;
+};
+
+#define AFFINITY_INITIALIZER {0, 0}
+
+#ifdef __linux__
+#ifndef gettid
+static pid_t
+gettid(void)
+{
+ return syscall(__NR_gettid);
+}
+#endif /* gettid */
+
+CK_CC_UNUSED static int
+aff_iterate(struct affinity *acb)
+{
+ cpu_set_t s;
+ unsigned int c;
+
+ c = ck_pr_faa_uint(&acb->request, acb->delta);
+ CPU_ZERO(&s);
+ CPU_SET(c % CORES, &s);
+
+ return sched_setaffinity(gettid(), sizeof(s), &s);
+}
+
+CK_CC_UNUSED static int
+aff_iterate_core(struct affinity *acb, unsigned int *core)
+{
+ cpu_set_t s;
+
+ *core = ck_pr_faa_uint(&acb->request, acb->delta);
+ CPU_ZERO(&s);
+ CPU_SET((*core) % CORES, &s);
+
+ return sched_setaffinity(gettid(), sizeof(s), &s);
+}
+#elif defined(__MACH__)
+CK_CC_UNUSED static int
+aff_iterate(struct affinity *acb)
+{
+ thread_affinity_policy_data_t policy;
+ unsigned int c;
+
+ c = ck_pr_faa_uint(&acb->request, acb->delta) % CORES;
+ policy.affinity_tag = c;
+ return thread_policy_set(mach_thread_self(),
+ THREAD_AFFINITY_POLICY,
+ (thread_policy_t)&policy,
+ THREAD_AFFINITY_POLICY_COUNT);
+}
+
+CK_CC_UNUSED static int
+aff_iterate_core(struct affinity *acb, unsigned int *core)
+{
+ thread_affinity_policy_data_t policy;
+
+ *core = ck_pr_faa_uint(&acb->request, acb->delta) % CORES;
+ policy.affinity_tag = *core;
+ return thread_policy_set(mach_thread_self(),
+ THREAD_AFFINITY_POLICY,
+ (thread_policy_t)&policy,
+ THREAD_AFFINITY_POLICY_COUNT);
+}
+#elif defined(__FreeBSD__)
+CK_CC_UNUSED static int
+aff_iterate(struct affinity *acb CK_CC_UNUSED)
+{
+ unsigned int c;
+ cpuset_t mask;
+
+ c = ck_pr_faa_uint(&acb->request, acb->delta) % CORES;
+ CPU_ZERO(&mask);
+ CPU_SET(c, &mask);
+ return (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
+ sizeof(mask), &mask));
+}
+
+CK_CC_UNUSED static int
+aff_iterate_core(struct affinity *acb CK_CC_UNUSED, unsigned int *core)
+{
+ cpuset_t mask;
+
+ *core = ck_pr_faa_uint(&acb->request, acb->delta) % CORES;
+ CPU_ZERO(&mask);
+ CPU_SET(*core, &mask);
+ return (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
+ sizeof(mask), &mask));
+}
+#else
+CK_CC_UNUSED static int
+aff_iterate(struct affinity *acb CK_CC_UNUSED)
+{
+
+ return (0);
+}
+
+CK_CC_UNUSED static int
+aff_iterate_core(struct affinity *acb CK_CC_UNUSED, unsigned int *core)
+{
+ *core = 0;
+ return (0);
+}
+#endif
+
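+/*
+ * Returns a monotonic timestamp for benchmark measurements. On x86 the
+ * cpuid/rdtsc (or rdtscp) sequences serialize the pipeline so the counter
+ * read is not reordered with the code being measured; other architectures
+ * read their native time base (sparcv9 %tick, ppc64 timebase, aarch64
+ * cntvct_el0), and unsupported targets simply return 0.
+ */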
+CK_CC_INLINE static uint64_t
+rdtsc(void)
+{
+#if defined(__x86_64__)
+ uint32_t eax = 0, edx;
+#if defined(CK_MD_RDTSCP)
+ __asm__ __volatile__("rdtscp"
+ : "+a" (eax), "=d" (edx)
+ :
+ : "%ecx", "memory");
+
+ return (((uint64_t)edx << 32) | eax);
+#else
+ __asm__ __volatile__("cpuid;"
+ "rdtsc;"
+ : "+a" (eax), "=d" (edx)
+ :
+ : "%ebx", "%ecx", "memory");
+
+ __asm__ __volatile__("xorl %%eax, %%eax;"
+ "cpuid;"
+ :
+ :
+ : "%eax", "%ebx", "%ecx", "%edx", "memory");
+
+ return (((uint64_t)edx << 32) | eax);
+#endif /* !CK_MD_RDTSCP */
+#elif defined(__x86__)
+ uint32_t eax = 0, edx;
+#if defined(CK_MD_RDTSCP)
+ __asm__ __volatile__("rdtscp"
+ : "+a" (eax), "=d" (edx)
+ :
+ : "%ecx", "memory");
+
+ return (((uint64_t)edx << 32) | eax);
+#else
+ __asm__ __volatile__("pushl %%ebx;"
+ "cpuid;"
+ "rdtsc;"
+ : "+a" (eax), "=d" (edx)
+ :
+ : "%ecx", "memory");
+
+ __asm__ __volatile__("xorl %%eax, %%eax;"
+ "cpuid;"
+ "popl %%ebx;"
+ :
+ :
+ : "%eax", "%ecx", "%edx", "memory");
+
+ return (((uint64_t)edx << 32) | eax);
+#endif /* !CK_MD_RDTSCP */
+#elif defined(__sparcv9__)
+ uint64_t r;
+
+ __asm__ __volatile__("rd %%tick, %0"
+ : "=r" (r)
+ :
+ : "memory");
+ return r;
+#elif defined(__ppc64__)
+ uint32_t high, low, snapshot;
+
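+	/* Re-read the upper half until it is stable to avoid a torn 64-bit read. */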
+ do {
+ __asm__ __volatile__("isync;"
+ "mftbu %0;"
+ "mftb %1;"
+ "mftbu %2;"
+ : "=r" (high), "=r" (low), "=r" (snapshot)
+ :
+ : "memory");
+ } while (snapshot != high);
+
+ return (((uint64_t)high << 32) | low);
+#elif defined(__aarch64__)
+ uint64_t r;
+
+ __asm __volatile__ ("mrs %0, cntvct_el0" : "=r" (r) : : "memory");
+ return r;
+#else
+ return 0;
+#endif
+}
+
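+/*
+ * Prints a formatted message to stderr and terminates the test with a
+ * failure exit status.
+ */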
+CK_CC_USED static void
+ck_error(const char *message, ...)
+{
+ va_list ap;
+
+ va_start(ap, message);
+ vfprintf(stderr, message, ap);
+ va_end(ap);
+ exit(EXIT_FAILURE);
+}
+
+#define ck_test(A, B, ...) do { \
+ if (A) \
+ ck_error(B, ##__VA_ARGS__); \
+} while (0)
+
+#endif /* CK_COMMON_H */