author    Daniel Baumann <daniel.baumann@progress-linux.org>  2021-07-23 11:29:01 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2021-07-23 11:37:28 +0000
commit    a7283ab143d4e95e8f5f22b58c61cb4e2f604749 (patch)
tree      3ec5165ac7f1299f5c0dc3e41d7560a06e6267f5 /regressions
parent    Adding debian version 0.6.0-2. (diff)
download  ck-a7283ab143d4e95e8f5f22b58c61cb4e2f604749.tar.xz
          ck-a7283ab143d4e95e8f5f22b58c61cb4e2f604749.zip
Merging upstream version 0.7.1 (Closes: #991419).
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'regressions')
-rw-r--r--  regressions/Makefile                                      |   8
-rw-r--r--  regressions/ck_bitmap/validate/serial.c                   |   6
-rw-r--r--  regressions/ck_cc/validate/Makefile                       |  17
-rw-r--r--  regressions/ck_cc/validate/ck_cc.c                        |  37
-rw-r--r--  regressions/ck_ec/benchmark/Makefile                      |  18
-rw-r--r--  regressions/ck_ec/benchmark/ck_ec.c                       | 484
-rw-r--r--  regressions/ck_ec/validate/Makefile                       |  73
-rw-r--r--  regressions/ck_ec/validate/ck_ec_smoke_test.c             | 450
-rw-r--r--  regressions/ck_ec/validate/fuzz_harness.h                 |  95
-rw-r--r--  regressions/ck_ec/validate/prop_test_slow_wakeup.c        | 110
-rw-r--r--  regressions/ck_ec/validate/prop_test_timeutil_add.c       | 101
-rw-r--r--  regressions/ck_ec/validate/prop_test_timeutil_add_ns.c    |  88
-rw-r--r--  regressions/ck_ec/validate/prop_test_timeutil_cmp.c       |  99
-rw-r--r--  regressions/ck_ec/validate/prop_test_timeutil_scale.c     |  41
-rw-r--r--  regressions/ck_ec/validate/prop_test_value.c              | 150
-rw-r--r--  regressions/ck_ec/validate/prop_test_wakeup.c             | 193
-rw-r--r--  regressions/ck_epoch/validate/ck_epoch_call.c             |  16
-rw-r--r--  regressions/ck_epoch/validate/ck_epoch_poll.c             |  37
-rw-r--r--  regressions/ck_epoch/validate/ck_epoch_section.c          |   9
-rw-r--r--  regressions/ck_epoch/validate/ck_epoch_section_2.c        |  21
-rw-r--r--  regressions/ck_epoch/validate/ck_epoch_synchronize.c      |  40
-rw-r--r--  regressions/ck_epoch/validate/ck_stack.c                  |   4
-rw-r--r--  regressions/ck_epoch/validate/torture.c                   |  28
-rw-r--r--  regressions/ck_hp/validate/ck_hp_fifo.c                   |   4
-rw-r--r--  regressions/ck_hs/benchmark/apply.c                       |   4
-rw-r--r--  regressions/ck_hs/benchmark/parallel_bytestring.c         |  16
-rw-r--r--  regressions/ck_hs/benchmark/serial.c                      |   4
-rw-r--r--  regressions/ck_hs/validate/serial.c                       |  92
-rw-r--r--  regressions/ck_ht/benchmark/parallel_bytestring.c         |  12
-rw-r--r--  regressions/ck_ht/benchmark/parallel_direct.c             |  12
-rw-r--r--  regressions/ck_pr/benchmark/Makefile                      |   7
-rw-r--r--  regressions/ck_pr/validate/Makefile                       |  11
-rw-r--r--  regressions/ck_pr/validate/ck_pr_dec_zero.c               | 105
-rw-r--r--  regressions/ck_pr/validate/ck_pr_fence.c                  |  80
-rw-r--r--  regressions/ck_pr/validate/ck_pr_inc_zero.c               | 105
-rw-r--r--  regressions/ck_pr/validate/ck_pr_load.c                   |   6
-rw-r--r--  regressions/ck_pr/validate/ck_pr_store.c                  |   8
-rw-r--r--  regressions/ck_rhs/benchmark/parallel_bytestring.c        |  16
-rw-r--r--  regressions/ck_rhs/benchmark/serial.c                     |   4
-rw-r--r--  regressions/ck_rhs/validate/serial.c                      |   4
-rw-r--r--  regressions/ck_ring/validate/Makefile                     |   2
-rw-r--r--  regressions/ck_sequence/validate/ck_sequence.c            |   4
-rw-r--r--  regressions/ck_spinlock/ck_hclh.h                         |   9
-rw-r--r--  regressions/common.h                                      |  14
44 files changed, 2526 insertions(+), 118 deletions(-)
diff --git a/regressions/Makefile b/regressions/Makefile
index 3195e52..c74b4fa 100644
--- a/regressions/Makefile
+++ b/regressions/Makefile
@@ -4,7 +4,9 @@ DIR=array \
bitmap \
brlock \
bytelock \
+ cc \
cohort \
+ ec \
epoch \
fifo \
hp \
@@ -27,6 +29,7 @@ DIR=array \
all:
$(MAKE) -C ./ck_array/validate all
+ $(MAKE) -C ./ck_cc/validate all
$(MAKE) -C ./ck_cohort/validate all
$(MAKE) -C ./ck_cohort/benchmark all
$(MAKE) -C ./ck_bitmap/validate all
@@ -69,9 +72,12 @@ all:
$(MAKE) -C ./ck_pflock/benchmark all
$(MAKE) -C ./ck_hp/validate all
$(MAKE) -C ./ck_hp/benchmark all
+ $(MAKE) -C ./ck_ec/validate all
+ $(MAKE) -C ./ck_ec/benchmark all
clean:
$(MAKE) -C ./ck_array/validate clean
+ $(MAKE) -C ./ck_cc/validate clean
$(MAKE) -C ./ck_pflock/validate clean
$(MAKE) -C ./ck_pflock/benchmark clean
$(MAKE) -C ./ck_tflock/validate clean
@@ -116,6 +122,8 @@ clean:
$(MAKE) -C ./ck_pflock/benchmark clean
$(MAKE) -C ./ck_hp/validate clean
$(MAKE) -C ./ck_hp/benchmark clean
+ $(MAKE) -C ./ck_ec/validate clean
+ $(MAKE) -C ./ck_ec/benchmark clean
check: all
rc=0; \
diff --git a/regressions/ck_bitmap/validate/serial.c b/regressions/ck_bitmap/validate/serial.c
index ba52588..1cf6c53 100644
--- a/regressions/ck_bitmap/validate/serial.c
+++ b/regressions/ck_bitmap/validate/serial.c
@@ -159,7 +159,7 @@ test_init(bool init)
bytes = ck_bitmap_size(length);
bitmap = malloc(bytes);
- memset(bitmap, random(), bytes);
+ memset(bitmap, common_rand(), bytes);
ck_bitmap_init(bitmap, length, init);
@@ -188,7 +188,7 @@ random_init(void)
ck_bitmap_init(bitmap, length, false);
for (i = 0; i < length; i++) {
- if (random() & 1) {
+ if (common_rand() & 1) {
ck_bitmap_set(bitmap, i);
}
}
@@ -259,7 +259,7 @@ random_test(unsigned int seed)
ck_bitmap_t *x, *x_copy, *y;
unsigned int i;
- srandom(seed);
+ common_srand(seed);
test_init(false);
test_init(true);
diff --git a/regressions/ck_cc/validate/Makefile b/regressions/ck_cc/validate/Makefile
new file mode 100644
index 0000000..2da34d1
--- /dev/null
+++ b/regressions/ck_cc/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_cc
+
+all: $(OBJECTS)
+
+ck_cc: ck_cc.c ../../../include/ck_cc.h
+ $(CC) $(CFLAGS) -g2 -o ck_cc ck_cc.c
+
+check: all
+ ./ck_cc
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_cc/validate/ck_cc.c b/regressions/ck_cc/validate/ck_cc.c
new file mode 100644
index 0000000..a22030f
--- /dev/null
+++ b/regressions/ck_cc/validate/ck_cc.c
@@ -0,0 +1,37 @@
+#include <ck_pr.h>
+#include <limits.h>
+#include <stdio.h>
+
+#include "../../common.h"
+
+int
+main(void)
+{
+ unsigned int x;
+
+ ck_pr_store_uint(&x, 0x10110);
+
+ if (ck_cc_ffs(0) != 0)
+ ck_error("ffs(0) = %d\n", ck_cc_ffs(0));
+ if (ck_cc_ffs(4) != 3)
+ ck_error("ffs(4) = %d\n", ck_cc_ffs(4));
+ if (ck_cc_ffs(UINT_MAX) != 1)
+ ck_error("ffs(UINT_MAX) = %d\n", ck_cc_ffs(UINT_MAX));
+ if (ck_cc_ffs(x) != 5)
+ ck_error("ffs(%u) = %d\n", x, ck_cc_ffs(x));
+
+ if (ck_cc_ffs(x) != ck_cc_ffsl(x) ||
+ ck_cc_ffsl(x) != ck_cc_ffsll(x) ||
+ ck_cc_ffs(x) != ck_cc_ffsll(x)) {
+ ck_error(" ffs = %d, ffsl = %d, ffsll = %d\n",
+ ck_cc_ffs(x), ck_cc_ffsl(x), ck_cc_ffsll(x));
+ }
+
+ if (ck_cc_ctz(x) != 4)
+ ck_error("ctz = %d\n", ck_cc_ctz(x));
+
+ if (ck_cc_popcount(x) != 3)
+ ck_error("popcount = %d\n", ck_cc_popcount(x));
+
+ return 0;
+}
diff --git a/regressions/ck_ec/benchmark/Makefile b/regressions/ck_ec/benchmark/Makefile
new file mode 100644
index 0000000..c266023
--- /dev/null
+++ b/regressions/ck_ec/benchmark/Makefile
@@ -0,0 +1,18 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_ec
+
+all: $(OBJECTS)
+
+ck_ec: ck_ec.c ../../../include/ck_ec.h
+ $(CC) $(CFLAGS) ../../../src/ck_ec.c -o ck_ec ck_ec.c
+
+check: all
+ ./ck_ec $(CORES) 1
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
+
diff --git a/regressions/ck_ec/benchmark/ck_ec.c b/regressions/ck_ec/benchmark/ck_ec.c
new file mode 100644
index 0000000..655f9d8
--- /dev/null
+++ b/regressions/ck_ec/benchmark/ck_ec.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright 2018 Paul Khuong.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <ck_cc.h>
+#include <ck_ec.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+
+#include "../../common.h"
+
+#ifndef STEPS
+#define STEPS (65536 * 64)
+#endif
+
+static int gettime(const struct ck_ec_ops *, struct timespec *out);
+static void wake32(const struct ck_ec_ops *, const uint32_t *);
+static void wait32(const struct ck_ec_wait_state *,
+ const uint32_t *, uint32_t, const struct timespec *);
+static void wake64(const struct ck_ec_ops *, const uint64_t *);
+static void wait64(const struct ck_ec_wait_state *,
+ const uint64_t *, uint64_t, const struct timespec *);
+
+static const struct ck_ec_ops test_ops = {
+ .gettime = gettime,
+ .wait32 = wait32,
+ .wait64 = wait64,
+ .wake32 = wake32,
+ .wake64 = wake64
+};
+
+#ifndef __linux__
+static int gettime(const struct ck_ec_ops *ops, struct timespec *out)
+{
+ (void)out;
+
+ assert(ops == &test_ops);
+ return -1;
+}
+
+static void wait32(const struct ck_ec_wait_state *state,
+ const uint32_t *address, uint32_t expected,
+ const struct timespec *deadline)
+{
+ (void)address;
+ (void)expected;
+ (void)deadline;
+
+ assert(state->ops == &test_ops);
+ return;
+}
+
+static void wait64(const struct ck_ec_wait_state *state,
+ const uint64_t *address, uint64_t expected,
+ const struct timespec *deadline)
+{
+ (void)address;
+ (void)expected;
+ (void)deadline;
+
+ assert(state->ops == &test_ops);
+ return;
+}
+
+static void wake32(const struct ck_ec_ops *ops, const uint32_t *address)
+{
+ (void)address;
+
+ assert(ops == &test_ops);
+ return;
+}
+
+static void wake64(const struct ck_ec_ops *ops, const uint64_t *address)
+{
+ (void)address;
+
+ assert(ops == &test_ops);
+ return;
+}
+#else
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <time.h>
+#include <unistd.h>
+
+static int gettime(const struct ck_ec_ops *ops, struct timespec *out)
+{
+ assert(ops == &test_ops);
+ return clock_gettime(CLOCK_MONOTONIC, out);
+}
+
+static void wait32(const struct ck_ec_wait_state *state,
+ const uint32_t *address, uint32_t expected,
+ const struct timespec *deadline)
+{
+ assert(state->ops == &test_ops);
+ syscall(SYS_futex, address,
+ FUTEX_WAIT_BITSET, expected, deadline,
+ NULL, FUTEX_BITSET_MATCH_ANY, 0);
+ return;
+}
+
+static void wait64(const struct ck_ec_wait_state *state,
+ const uint64_t *address, uint64_t expected,
+ const struct timespec *deadline)
+{
+ const void *low_half;
+
+ assert(state->ops == &test_ops);
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ low_half = address;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ low_half = (uintptr_t)address + sizeof(uint32_t);
+#else
+# error "__BYTE_ORDER__ must be defined."
+#endif
+
+ syscall(SYS_futex, low_half,
+ FUTEX_WAIT_BITSET, (uint32_t)expected, deadline,
+ NULL, FUTEX_BITSET_MATCH_ANY, 0);
+ return;
+}
+
+static void wake32(const struct ck_ec_ops *ops, const uint32_t *address)
+{
+ assert(ops == &test_ops);
+ syscall(SYS_futex, address,
+ FUTEX_WAKE, INT_MAX,
+ /* ignored arguments */NULL, NULL, 0);
+ return;
+}
+
+static void wake64(const struct ck_ec_ops *ops, const uint64_t *address)
+{
+ const void *low_half;
+
+ assert(ops == &test_ops);
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ low_half = address;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ low_half = (uintptr_t)address + sizeof(uint32_t);
+#else
+# error "__BYTE_ORDER__ must be defined."
+#endif
+
+ syscall(SYS_futex, low_half,
+ FUTEX_WAKE, INT_MAX,
+ /* ignored arguments */NULL, NULL, 0);
+ return;
+}
+#endif /* __linux__ */
+
+static const struct ck_ec_mode sp = {
+ .ops = &test_ops,
+ .single_producer = true
+};
+
+static const struct ck_ec_mode mp = {
+ .ops = &test_ops,
+ .single_producer = false
+};
+
+static CK_CC_FORCE_INLINE void bench32(const struct ck_ec_mode mode)
+{
+ ck_ec32_t ec CK_CC_CACHELINE = CK_EC_INITIALIZER;
+ uint64_t a;
+ uint64_t baseline = 1000 * 1000;
+ uint32_t value;
+
+ for (size_t i = 0; i < STEPS; i++) {
+ uint64_t s = rdtsc();
+ uint64_t elapsed = rdtsc() - s;
+
+ if (elapsed < baseline) {
+ baseline = elapsed;
+ }
+ }
+
+ /* Read value. */
+ a = 0;
+ value = 0;
+ for (size_t i = 0; i < STEPS / 4; i++) {
+ uint64_t s = rdtsc();
+
+ value ^= ck_ec32_value(&ec);
+ value ^= ck_ec32_value(&ec);
+ value ^= ck_ec32_value(&ec);
+ value ^= ck_ec32_value(&ec);
+
+ __asm__ volatile("" :: "r"(value));
+ a += rdtsc() - s - baseline;
+ }
+
+ printf("%s ec32_value: %" PRIu64 "\n",
+ (mode.single_producer ? "SP" : "MP"), a / STEPS);
+
+ /* Wait (fast path). */
+ a = 0;
+ for (size_t i = 0; i < STEPS / 4; i++) {
+ uint64_t s = rdtsc();
+
+ ck_ec32_wait(&ec, &mode, 1, NULL);
+ ck_ec32_wait(&ec, &mode, 1, NULL);
+ ck_ec32_wait(&ec, &mode, 1, NULL);
+ ck_ec32_wait(&ec, &mode, 1, NULL);
+
+ a += rdtsc() - s - baseline;
+ }
+
+ printf("%s ec32_wait fast: %" PRIu64 "\n",
+ (mode.single_producer ? "SP" : "MP"), a / STEPS);
+
+ /* trywait. */
+ a = 0;
+ for (size_t i = 0; i < STEPS / 4; i++) {
+ struct timespec past = { .tv_sec = 0 };
+ uint64_t s = rdtsc();
+
+ ck_ec32_wait(&ec, &mode, 0, &past);
+ ck_ec32_wait(&ec, &mode, 0, &past);
+ ck_ec32_wait(&ec, &mode, 0, &past);
+ ck_ec32_wait(&ec, &mode, 0, &past);
+
+ a += rdtsc() - s - baseline;
+ }
+
+ printf("%s ec32_wait timeout: %" PRIu64 "\n",
+ (mode.single_producer ? "SP" : "MP"), a / STEPS);
+
+ /* Inc (no waiter). */
+ assert(!ck_ec32_has_waiters(&ec));
+ a = 0;
+ for (size_t i = 0; i < STEPS / 4; i++) {
+ uint64_t s = rdtsc();
+
+ ck_ec32_inc(&ec, &mode);
+ ck_ec32_inc(&ec, &mode);
+ ck_ec32_inc(&ec, &mode);
+ ck_ec32_inc(&ec, &mode);
+
+ a += rdtsc() - s - baseline;
+ }
+
+ printf("%s ec32_inc: %" PRIu64 "\n",
+ (mode.single_producer ? "SP" : "MP"), a / STEPS);
+
+ /* Inc (with waiter). */
+ assert(!ck_ec32_has_waiters(&ec));
+ a = 0;
+ for (size_t i = 0; i < STEPS; i++) {
+ struct timespec past = { .tv_sec = 1 };
+ uint64_t s;
+
+ ck_ec32_wait(&ec, &mode, ck_ec32_value(&ec), &past);
+ assert(ck_ec32_has_waiters(&ec));
+
+ s = rdtsc();
+ ck_ec32_inc(&ec, &mode);
+ a += rdtsc() - s - baseline;
+ }
+
+ printf("%s ec32_inc slow: %" PRIu64 "\n",
+ (mode.single_producer ? "SP" : "MP"), a / STEPS);
+
+ /* Add (no waiter). */
+ assert(!ck_ec32_has_waiters(&ec));
+ a = 0;
+ for (size_t i = 0; i < STEPS / 4; i++) {
+ uint64_t s = rdtsc();
+
+ ck_ec32_add(&ec, &mode, i + 1);
+ ck_ec32_add(&ec, &mode, i + 2);
+ ck_ec32_add(&ec, &mode, i + 3);
+ ck_ec32_add(&ec, &mode, i + 4);
+
+ a += rdtsc() - s - baseline;
+ }
+
+ printf("%s ec32_add: %" PRIu64 "\n",
+ (mode.single_producer ? "SP" : "MP"), a / STEPS);
+
+ assert(!ck_ec32_has_waiters(&ec));
+ a = 0;
+ for (size_t i = 0; i < STEPS; i++) {
+ struct timespec past = { .tv_sec = 1 };
+ uint64_t s;
+
+ ck_ec32_wait(&ec, &mode, ck_ec32_value(&ec), &past);
+ assert(ck_ec32_has_waiters(&ec));
+
+ s = rdtsc();
+ ck_ec32_add(&ec, &mode, i + 1);
+ a += rdtsc() - s - baseline;
+ }
+
+ printf("%s ec32_add slow: %" PRIu64 "\n",
+ (mode.single_producer ? "SP" : "MP"), a / STEPS);
+ return;
+}
+
+#ifdef CK_F_EC64
+static CK_CC_FORCE_INLINE void bench64(const struct ck_ec_mode mode)
+{
+ ck_ec64_t ec CK_CC_CACHELINE = CK_EC_INITIALIZER;
+ uint64_t a;
+ uint64_t baseline = 1000 * 1000;
+ uint64_t value;
+
+ for (size_t i = 0; i < STEPS; i++) {
+ uint64_t s = rdtsc();
+ uint64_t elapsed = rdtsc() - s;
+
+ if (elapsed < baseline) {
+ baseline = elapsed;
+ }
+ }
+
+ /* Read value. */
+ a = 0;
+ value = 0;
+ for (size_t i = 0; i < STEPS / 4; i++) {
+ uint64_t s = rdtsc();
+
+ value ^= ck_ec64_value(&ec);
+ value ^= ck_ec64_value(&ec);
+ value ^= ck_ec64_value(&ec);
+ value ^= ck_ec64_value(&ec);
+
+ __asm__ volatile("" :: "r"(value));
+ a += rdtsc() - s - baseline;
+ }
+
+ printf("%s ec64_value: %" PRIu64 "\n",
+ (mode.single_producer ? "SP" : "MP"), a / STEPS);
+
+ /* Wait (fast path). */
+ a = 0;
+ for (size_t i = 0; i < STEPS / 4; i++) {
+ uint64_t s = rdtsc();
+
+ ck_ec64_wait(&ec, &mode, 1, NULL);
+ ck_ec64_wait(&ec, &mode, 1, NULL);
+ ck_ec64_wait(&ec, &mode, 1, NULL);
+ ck_ec64_wait(&ec, &mode, 1, NULL);
+
+ a += rdtsc() - s - baseline;
+ }
+
+ printf("%s ec64_wait fast: %" PRIu64 "\n",
+ (mode.single_producer ? "SP" : "MP"), a / STEPS);
+
+ /* trywait. */
+ a = 0;
+ for (size_t i = 0; i < STEPS / 4; i++) {
+ struct timespec past = { .tv_sec = 0 };
+ uint64_t s = rdtsc();
+
+ ck_ec64_wait(&ec, &mode, 0, &past);
+ ck_ec64_wait(&ec, &mode, 0, &past);
+ ck_ec64_wait(&ec, &mode, 0, &past);
+ ck_ec64_wait(&ec, &mode, 0, &past);
+
+ a += rdtsc() - s - baseline;
+ }
+
+ printf("%s ec64_wait timeout: %" PRIu64 "\n",
+ (mode.single_producer ? "SP" : "MP"), a / STEPS);
+
+ /* Inc (no waiter). */
+ assert(!ck_ec64_has_waiters(&ec));
+ a = 0;
+ for (size_t i = 0; i < STEPS / 4; i++) {
+ uint64_t s = rdtsc();
+
+ ck_ec64_inc(&ec, &mode);
+ ck_ec64_inc(&ec, &mode);
+ ck_ec64_inc(&ec, &mode);
+ ck_ec64_inc(&ec, &mode);
+
+ a += rdtsc() - s - baseline;
+ }
+
+ printf("%s ec64_inc: %" PRIu64 "\n",
+ (mode.single_producer ? "SP" : "MP"), a / STEPS);
+
+ /* Inc (with waiter). */
+ assert(!ck_ec64_has_waiters(&ec));
+ a = 0;
+ for (size_t i = 0; i < STEPS; i++) {
+ struct timespec past = { .tv_sec = 1 };
+ uint64_t s;
+
+ ck_ec64_wait(&ec, &mode, ck_ec64_value(&ec), &past);
+ assert(ck_ec64_has_waiters(&ec));
+
+ s = rdtsc();
+ ck_ec64_inc(&ec, &mode);
+ a += rdtsc() - s - baseline;
+ }
+
+ printf("%s ec64_inc slow: %" PRIu64 "\n",
+ (mode.single_producer ? "SP" : "MP"), a / STEPS);
+
+ /* Add (no waiter). */
+ assert(!ck_ec64_has_waiters(&ec));
+ a = 0;
+ for (size_t i = 0; i < STEPS / 4; i++) {
+ uint64_t s = rdtsc();
+
+ ck_ec64_add(&ec, &mode, i + 1);
+ ck_ec64_add(&ec, &mode, i + 2);
+ ck_ec64_add(&ec, &mode, i + 3);
+ ck_ec64_add(&ec, &mode, i + 4);
+
+ a += rdtsc() - s - baseline;
+ }
+
+ printf("%s ec64_add: %" PRIu64 "\n",
+ (mode.single_producer ? "SP" : "MP"), a / STEPS);
+
+ assert(!ck_ec64_has_waiters(&ec));
+ a = 0;
+ for (size_t i = 0; i < STEPS; i++) {
+ struct timespec past = { .tv_sec = 1 };
+ uint64_t s;
+
+ ck_ec64_wait(&ec, &mode, ck_ec64_value(&ec), &past);
+ assert(ck_ec64_has_waiters(&ec));
+
+ s = rdtsc();
+ ck_ec64_add(&ec, &mode, i + 1);
+ a += rdtsc() - s - baseline;
+ }
+
+ printf("%s ec64_add slow: %" PRIu64 "\n",
+ (mode.single_producer ? "SP" : "MP"), a / STEPS);
+ return;
+}
+#endif /* CK_F_EC64 */
+
+int
+main(void)
+{
+ printf("SP ec32\n");
+ bench32(sp);
+ printf("\nMP ec32\n");
+ bench32(mp);
+
+#ifdef CK_F_EC64
+ printf("\nSP ec64\n");
+ bench64(sp);
+ printf("\nMP ec64\n");
+ bench64(mp);
+#endif /* CK_F_EC64 */
+
+ return 0;
+}
diff --git a/regressions/ck_ec/validate/Makefile b/regressions/ck_ec/validate/Makefile
new file mode 100644
index 0000000..f03f493
--- /dev/null
+++ b/regressions/ck_ec/validate/Makefile
@@ -0,0 +1,73 @@
+.PHONY: check clean distribution
+
+FUZZER ?= none
+
+FUZZ_CFLAGS ?=
+
+# See http://gallium.inria.fr/blog/portable-conditionals-in-makefiles/ for
+# the portable conditional technique below.
+none_fuzz_cflags =
+libfuzzer_fuzz_cflags = -DUSE_LIBFUZZER -fsanitize=fuzzer,memory,undefined
+
+FUZZ_CFLAGS += ${${FUZZER}_fuzz_cflags}
+
+OBJECTS = ck_ec_smoke_test \
+ prop_test_timeutil_add \
+ prop_test_timeutil_add_ns \
+ prop_test_timeutil_cmp \
+ prop_test_timeutil_scale \
+ prop_test_value \
+ prop_test_wakeup \
+ prop_test_slow_wakeup
+
+all: $(OBJECTS)
+
+check: all
+ ./ck_ec_smoke_test
+ # the command line arguments are only consumed by libfuzzer.
+ ./prop_test_slow_wakeup -max_total_time=60
+ ./prop_test_timeutil_add -max_total_time=60
+ ./prop_test_timeutil_add_ns -max_total_time=60
+ ./prop_test_timeutil_cmp -max_total_time=60
+ ./prop_test_timeutil_scale -max_total_time=60
+ ./prop_test_value -max_total_time=60
+ ./prop_test_wakeup -max_total_time=60
+
+quickfuzz: all
+ ./prop_test_slow_wakeup -max_total_time=5
+ ./prop_test_timeutil_add -max_total_time=5
+ ./prop_test_timeutil_add_ns -max_total_time=5
+ ./prop_test_timeutil_cmp -max_total_time=5
+ ./prop_test_timeutil_scale -max_total_time=5
+ ./prop_test_value -max_total_time=5
+ ./prop_test_wakeup -max_total_time=5
+
+ck_ec_smoke_test: ../../../src/ck_ec.c ck_ec_smoke_test.c ../../../src/ck_ec_timeutil.h ../../../include/ck_ec.h
+ $(CC) $(CFLAGS) -std=gnu11 ../../../src/ck_ec.c -o ck_ec_smoke_test ck_ec_smoke_test.c
+
+prop_test_slow_wakeup: ../../../src/ck_ec.c prop_test_slow_wakeup.c ../../../src/ck_ec_timeutil.h ../../../include/ck_ec.h fuzz_harness.h
+ $(CC) $(CFLAGS) $(FUZZ_CFLAGS) ../../../src/ck_ec.c -o prop_test_slow_wakeup prop_test_slow_wakeup.c
+
+prop_test_timeutil_add: ../../../src/ck_ec.c prop_test_timeutil_add.c ../../../src/ck_ec_timeutil.h ../../../include/ck_ec.h fuzz_harness.h
+ $(CC) $(CFLAGS) $(FUZZ_CFLAGS) ../../../src/ck_ec.c -o prop_test_timeutil_add prop_test_timeutil_add.c
+
+prop_test_timeutil_add_ns: ../../../src/ck_ec.c prop_test_timeutil_add_ns.c ../../../src/ck_ec_timeutil.h ../../../include/ck_ec.h fuzz_harness.h
+ $(CC) $(CFLAGS) $(FUZZ_CFLAGS) ../../../src/ck_ec.c -o prop_test_timeutil_add_ns prop_test_timeutil_add_ns.c
+
+prop_test_timeutil_cmp: ../../../src/ck_ec.c prop_test_timeutil_cmp.c ../../../src/ck_ec_timeutil.h ../../../include/ck_ec.h fuzz_harness.h
+ $(CC) $(CFLAGS) $(FUZZ_CFLAGS) ../../../src/ck_ec.c -o prop_test_timeutil_cmp prop_test_timeutil_cmp.c
+
+prop_test_timeutil_scale: ../../../src/ck_ec.c prop_test_timeutil_scale.c ../../../src/ck_ec_timeutil.h ../../../include/ck_ec.h fuzz_harness.h
+ $(CC) $(CFLAGS) $(FUZZ_CFLAGS) ../../../src/ck_ec.c -o prop_test_timeutil_scale prop_test_timeutil_scale.c
+
+prop_test_value: ../../../src/ck_ec.c prop_test_value.c ../../../src/ck_ec_timeutil.h ../../../include/ck_ec.h fuzz_harness.h
+ $(CC) $(CFLAGS) $(FUZZ_CFLAGS) ../../../src/ck_ec.c -o prop_test_value prop_test_value.c
+
+prop_test_wakeup: ../../../src/ck_ec.c prop_test_wakeup.c ../../../src/ck_ec_timeutil.h ../../../include/ck_ec.h fuzz_harness.h
+ $(CC) $(CFLAGS) $(FUZZ_CFLAGS) ../../../src/ck_ec.c -o prop_test_wakeup prop_test_wakeup.c
+
+clean:
+ rm -rf *~ *.o *.dSYM *.exe $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_ec/validate/ck_ec_smoke_test.c b/regressions/ck_ec/validate/ck_ec_smoke_test.c
new file mode 100644
index 0000000..3aca162
--- /dev/null
+++ b/regressions/ck_ec/validate/ck_ec_smoke_test.c
@@ -0,0 +1,450 @@
+#include <assert.h>
+#include <ck_ec.h>
+#include <ck_limits.h>
+#include <ck_stdbool.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#define TIME_MAX ((time_t)((1ULL << ((sizeof(time_t) * CHAR_BIT) - 1)) - 1))
+
+#ifndef __linux__
+/* Zero-initialize to mark the ops as unavailable. */
+static const struct ck_ec_ops test_ops;
+#else
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <time.h>
+
+static int gettime(const struct ck_ec_ops *, struct timespec *out);
+static void wake32(const struct ck_ec_ops *, const uint32_t *);
+static void wait32(const struct ck_ec_wait_state *, const uint32_t *,
+ uint32_t, const struct timespec *);
+static void wake64(const struct ck_ec_ops *, const uint64_t *);
+static void wait64(const struct ck_ec_wait_state *, const uint64_t *,
+ uint64_t, const struct timespec *);
+
+static const struct ck_ec_ops test_ops = {
+ .gettime = gettime,
+ .wait32 = wait32,
+ .wait64 = wait64,
+ .wake32 = wake32,
+ .wake64 = wake64
+};
+
+static int gettime(const struct ck_ec_ops *ops, struct timespec *out)
+{
+ assert(ops == &test_ops);
+ return clock_gettime(CLOCK_MONOTONIC, out);
+}
+
+static void wait32(const struct ck_ec_wait_state *state,
+ const uint32_t *address, uint32_t expected,
+ const struct timespec *deadline)
+{
+ assert(state->ops == &test_ops);
+ syscall(SYS_futex, address,
+ FUTEX_WAIT_BITSET, expected, deadline,
+ NULL, FUTEX_BITSET_MATCH_ANY, 0);
+ return;
+}
+
+static void wait64(const struct ck_ec_wait_state *state,
+ const uint64_t *address, uint64_t expected,
+ const struct timespec *deadline)
+{
+ const void *low_half;
+
+ assert(state->ops == &test_ops);
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ low_half = address;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ low_half = (uintptr_t)address + sizeof(uint32_t);
+#else
+# error "__BYTE_ORDER__ must be defined."
+#endif
+
+ syscall(SYS_futex, low_half,
+ FUTEX_WAIT_BITSET, (uint32_t)expected, deadline,
+ NULL, FUTEX_BITSET_MATCH_ANY, 0);
+ return;
+}
+
+static void wake32(const struct ck_ec_ops *ops, const uint32_t *address)
+{
+ assert(ops == &test_ops);
+ syscall(SYS_futex, address,
+ FUTEX_WAKE, INT_MAX,
+ /* ignored arguments */NULL, NULL, 0);
+ return;
+}
+
+static void wake64(const struct ck_ec_ops *ops, const uint64_t *address)
+{
+ const void *low_half;
+
+ assert(ops == &test_ops);
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ low_half = address;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ low_half = (uintptr_t)address + sizeof(uint32_t);
+#else
+# error "__BYTE_ORDER__ must be defined."
+#endif
+
+ syscall(SYS_futex, low_half,
+ FUTEX_WAKE, INT_MAX,
+ /* ignored arguments */NULL, NULL, 0);
+ return;
+}
+#endif /* __linux__ */
+
+static const struct ck_ec_mode sp = {
+ .ops = &test_ops,
+ .single_producer = true
+};
+
+static const struct ck_ec_mode mp = {
+ .ops = &test_ops,
+ .single_producer = false
+};
+
+static void test_update_counter_32(const struct ck_ec_mode *mode)
+{
+ struct ck_ec32 ec = CK_EC_INITIALIZER;
+
+ assert(ck_ec_value(&ec) == 0);
+
+ ck_ec_inc(&ec, mode);
+ assert(ck_ec_value(&ec) == 1);
+
+ uint32_t old = ck_ec_add(&ec, mode, 42);
+ assert(old == 1);
+ assert(ck_ec_value(&ec) == 43);
+ return;
+}
+
+#ifdef CK_F_EC64
+static void test_update_counter_64(const struct ck_ec_mode *mode)
+{
+ struct ck_ec64 ec = CK_EC_INITIALIZER;
+
+ assert(ck_ec_value(&ec) == 0);
+
+ ck_ec_inc(&ec, mode);
+ assert(ck_ec_value(&ec) == 1);
+
+ uint64_t old = ck_ec_add(&ec, mode, 42);
+ assert(old == 1);
+ assert(ck_ec_value(&ec) == 43);
+ return;
+}
+#endif
+
+static void test_deadline(void)
+{
+ struct timespec deadline;
+
+ assert(ck_ec_deadline(&deadline, &sp, NULL) == 0);
+ assert(deadline.tv_sec == TIME_MAX);
+
+ {
+ const struct timespec timeout = {
+ .tv_sec = 1,
+ .tv_nsec = 1000
+ };
+ const struct timespec no_timeout = {
+ .tv_sec = 0
+ };
+ struct timespec now;
+
+ assert(ck_ec_deadline(&deadline, &sp, &timeout) == 0);
+ assert(ck_ec_deadline(&now, &sp, &no_timeout) == 0);
+
+ double now_sec = now.tv_sec + 1e-9 * now.tv_nsec;
+ double deadline_sec = deadline.tv_sec + 1e-9 * deadline.tv_nsec;
+ assert(now_sec < deadline_sec);
+ assert(deadline_sec <= now_sec + 1 + 1000e-9);
+ }
+
+ {
+ const struct timespec timeout = {
+ .tv_sec = TIME_MAX - 1,
+ .tv_nsec = 1000
+ };
+
+ assert(ck_ec_deadline(&deadline, &sp, &timeout) == 0);
+ assert(deadline.tv_sec == TIME_MAX);
+ }
+
+ return;
+}
+
+static void test_wait_32(void)
+{
+ struct timespec deadline = { .tv_sec = 0 };
+ struct ck_ec32 ec;
+
+ ck_ec_init(&ec, 1);
+ assert(ck_ec_value(&ec) == 1);
+ assert(ck_ec_wait(&ec, &sp, 2, NULL) == 0);
+ assert(ck_ec_wait(&ec, &sp, 1, &deadline) == -1);
+
+ {
+ const struct timespec timeout = { .tv_nsec = 1 };
+
+ assert(ck_ec_deadline(&deadline, &sp, &timeout) == 0);
+ assert(ck_ec_wait(&ec, &sp, 1, &deadline) == -1);
+ assert(ck_ec_has_waiters(&ec));
+ }
+
+ return;
+}
+
+#ifdef CK_F_EC64
+static void test_wait_64(void)
+{
+ struct timespec deadline = { .tv_sec = 0 };
+ struct ck_ec64 ec;
+
+ ck_ec_init(&ec, 0);
+ assert(ck_ec_value(&ec) == 0);
+ assert(ck_ec_wait(&ec, &sp, 1, NULL) == 0);
+ assert(ck_ec_wait(&ec, &sp, 0, &deadline) == -1);
+
+ {
+ const struct timespec timeout = { .tv_nsec = 1 };
+
+ assert(ck_ec_deadline(&deadline, &sp, &timeout) == 0);
+ assert(ck_ec_wait(&ec, &sp, 0, &deadline) == -1);
+ assert(ck_ec_has_waiters(&ec));
+ }
+
+ return;
+}
+#endif
+
+static int pred(const struct ck_ec_wait_state *state,
+ struct timespec *deadline)
+{
+ double initial_ts = state->start.tv_sec +
+ 1e-9 * state->start.tv_nsec;
+ int *count = state->data;
+
+ printf("pred wait: %f\n",
+ deadline->tv_sec + 1e-9 * deadline->tv_nsec - initial_ts);
+
+ if ((*count)++ < 3) {
+ return 0;
+ }
+
+ return (*count)++;
+}
+
+/*
+ * Check that pred's return value is correctly bubbled up,
+ * and that the event count is marked as having waiters.
+ */
+static void test_wait_pred_32(void)
+{
+ struct ck_ec32 ec = CK_EC_INITIALIZER;
+ int count = 0;
+
+ assert(!ck_ec_has_waiters(&ec));
+ assert(ck_ec_wait_pred(&ec, &sp, 0, pred, &count, NULL) == 4);
+ assert(ck_ec_has_waiters(&ec));
+ assert(count == 5);
+ return;
+}
+
+#ifdef CK_F_EC64
+static int pred2(const struct ck_ec_wait_state *state,
+ struct timespec *deadline)
+{
+ double initial_ts = state->start.tv_sec +
+ 1e-9 * state->start.tv_nsec;
+ int *count = state->data;
+
+ printf("pred2 wait: %f\n",
+ deadline->tv_sec + 1e-9 * deadline->tv_nsec - initial_ts);
+
+ *deadline = state->now;
+ deadline->tv_sec++;
+
+ (*count)++;
+ return 0;
+}
+
+/*
+ * wait_pred_64 is nearly identical to _32. Now check that deadline
+ * overriding works.
+ */
+static void test_wait_pred_64(void)
+{
+ const struct timespec timeout = { .tv_sec = 5 };
+ struct timespec deadline;
+ struct ck_ec64 ec = CK_EC_INITIALIZER;
+ int count = 0;
+
+ assert(!ck_ec_has_waiters(&ec));
+ assert(ck_ec_deadline(&deadline, &sp, &timeout) == 0);
+ assert(ck_ec_wait_pred(&ec, &sp, 0, pred2, &count, &deadline) == -1);
+ assert(ck_ec_has_waiters(&ec));
+ assert(count == 5);
+ return;
+}
+#endif
+
+static int woken = 0;
+
+static void *test_threaded_32_waiter(void *data)
+{
+ struct ck_ec32 *ec = data;
+
+ ck_ec_wait(ec, &sp, 0, NULL);
+ ck_pr_store_int(&woken, 1);
+ return NULL;
+}
+
+static void test_threaded_inc_32(const struct ck_ec_mode *mode)
+{
+ struct ck_ec32 ec = CK_EC_INITIALIZER;
+ pthread_t waiter;
+
+ ck_pr_store_int(&woken, 0);
+
+ pthread_create(&waiter, NULL, test_threaded_32_waiter, &ec);
+ usleep(10000);
+
+ assert(ck_pr_load_int(&woken) == 0);
+ ck_ec_inc(&ec, mode);
+
+ pthread_join(waiter, NULL);
+ assert(ck_pr_load_int(&woken) == 1);
+ return;
+}
+
+static void test_threaded_add_32(const struct ck_ec_mode *mode)
+{
+ struct ck_ec32 ec = CK_EC_INITIALIZER;
+ pthread_t waiter;
+
+ ck_pr_store_int(&woken, 0);
+
+ pthread_create(&waiter, NULL, test_threaded_32_waiter, &ec);
+ usleep(10000);
+
+ assert(ck_pr_load_int(&woken) == 0);
+ ck_ec_add(&ec, mode, 4);
+
+ pthread_join(waiter, NULL);
+ assert(ck_pr_load_int(&woken) == 1);
+ return;
+}
+
+#ifdef CK_F_EC64
+static void *test_threaded_64_waiter(void *data)
+{
+ struct ck_ec64 *ec = data;
+
+ ck_ec_wait(ec, &sp, 0, NULL);
+ ck_pr_store_int(&woken, 1);
+ return NULL;
+}
+
+static void test_threaded_inc_64(const struct ck_ec_mode *mode)
+{
+ struct ck_ec64 ec = CK_EC_INITIALIZER;
+ pthread_t waiter;
+
+ ck_pr_store_int(&woken, 0);
+
+ pthread_create(&waiter, NULL, test_threaded_64_waiter, &ec);
+ usleep(10000);
+
+ assert(ck_pr_load_int(&woken) == 0);
+ ck_ec_inc(&ec, mode);
+
+ pthread_join(waiter, NULL);
+ assert(ck_pr_load_int(&woken) == 1);
+ return;
+}
+
+static void test_threaded_add_64(const struct ck_ec_mode *mode)
+{
+ struct ck_ec64 ec = CK_EC_INITIALIZER;
+ pthread_t waiter;
+
+ ck_pr_store_int(&woken, 0);
+
+ pthread_create(&waiter, NULL, test_threaded_64_waiter, &ec);
+ usleep(10000);
+
+ assert(ck_pr_load_int(&woken) == 0);
+ ck_ec_add(&ec, mode, 4);
+
+ pthread_join(waiter, NULL);
+ assert(ck_pr_load_int(&woken) == 1);
+ return;
+}
+#endif
+
+int main(int argc, char **argv)
+{
+ (void)argc;
+ (void)argv;
+
+ if (test_ops.gettime == NULL ||
+ test_ops.wake32 == NULL ||
+ test_ops.wait32 == NULL) {
+ printf("No ck_ec ops for this platform. Trivial success.\n");
+ return 0;
+ }
+
+ test_update_counter_32(&sp);
+#ifdef CK_F_EC64
+ test_update_counter_64(&sp);
+#endif
+ printf("test_update_counter SP passed.\n");
+
+ test_update_counter_32(&mp);
+#ifdef CK_F_EC64
+ test_update_counter_64(&mp);
+#endif
+ printf("test_update_counter MP passed.\n");
+
+ test_deadline();
+ printf("test_deadline passed.\n");
+
+ test_wait_32();
+#ifdef CK_F_EC64
+ test_wait_64();
+#endif
+ printf("test_wait passed.\n");
+
+ test_wait_pred_32();
+#ifdef CK_F_EC64
+ test_wait_pred_64();
+#endif
+ printf("test_wait_pred passed.\n");
+
+ test_threaded_inc_32(&sp);
+ test_threaded_add_32(&sp);
+#ifdef CK_F_EC64
+ test_threaded_inc_64(&sp);
+ test_threaded_add_64(&sp);
+#endif
+ printf("test_threaded SP passed.\n");
+
+ test_threaded_inc_32(&mp);
+ test_threaded_add_32(&mp);
+#ifdef CK_F_EC64
+ test_threaded_inc_64(&mp);
+ test_threaded_add_64(&mp);
+#endif
+ printf("test_threaded MP passed.\n");
+ return 0;
+}
diff --git a/regressions/ck_ec/validate/fuzz_harness.h b/regressions/ck_ec/validate/fuzz_harness.h
new file mode 100644
index 0000000..8ba6ebe
--- /dev/null
+++ b/regressions/ck_ec/validate/fuzz_harness.h
@@ -0,0 +1,95 @@
+#ifndef FUZZ_HARNESS_H
+#define FUZZ_HARNESS_H
+#include <assert.h>
+#include <ck_stddef.h>
+#include <ck_string.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#if defined(USE_LIBFUZZER)
+#define TEST(function, examples) \
+ void LLVMFuzzerInitialize(int *argcp, char ***argvp); \
+ int LLVMFuzzerTestOneInput(const void *data, size_t n); \
+ \
+ void LLVMFuzzerInitialize(int *argcp, char ***argvp) \
+ { \
+ static char size[128]; \
+ static char *argv[1024]; \
+ int argc = *argcp; \
+ \
+ assert(argc < 1023); \
+ \
+ int r = snprintf(size, sizeof(size), \
+ "-max_len=%zu", sizeof(examples[0])); \
+ assert((size_t)r < sizeof(size)); \
+ \
+ memcpy(argv, *argvp, argc * sizeof(argv[0])); \
+ argv[argc++] = size; \
+ \
+ *argcp = argc; \
+ *argvp = argv; \
+ \
+ for (size_t i = 0; \
+ i < sizeof(examples) / sizeof(examples[0]); \
+ i++) { \
+ assert(function(&examples[i]) == 0); \
+ } \
+ \
+ return; \
+ } \
+ \
+ int LLVMFuzzerTestOneInput(const void *data, size_t n) \
+ { \
+ char buf[sizeof(examples[0])]; \
+ \
+ memset(buf, 0, sizeof(buf)); \
+ if (n < sizeof(buf)) { \
+ memcpy(buf, data, n); \
+ } else { \
+ memcpy(buf, data, sizeof(buf)); \
+ } \
+ \
+ assert(function((const void *)buf) == 0); \
+ return 0; \
+ }
+#elif defined(USE_AFL)
+#define TEST(function, examples) \
+ int main(int argc, char **argv) \
+ { \
+ char buf[sizeof(examples[0])]; \
+ \
+ (void)argc; \
+ (void)argv; \
+ for (size_t i = 0; \
+ i < sizeof(examples) / sizeof(examples[0]); \
+ i++) { \
+ assert(function(&examples[i]) == 0); \
+ } \
+ \
+ \
+ while (__AFL_LOOP(10000)) { \
+ memset(buf, 0, sizeof(buf)); \
+ read(0, buf, sizeof(buf)); \
+ \
+ assert(function((const void *)buf) == 0); \
+ } \
+ \
+ return 0; \
+ }
+#else
+#define TEST(function, examples) \
+ int main(int argc, char **argv) \
+ { \
+ (void)argc; \
+ (void)argv; \
+ \
+ for (size_t i = 0; \
+ i < sizeof(examples) / sizeof(examples[0]); \
+ i++) { \
+ assert(function(&examples[i]) == 0); \
+ } \
+ \
+ return 0; \
+ }
+#endif
+#endif /* !FUZZ_HARNESS_H */
diff --git a/regressions/ck_ec/validate/prop_test_slow_wakeup.c b/regressions/ck_ec/validate/prop_test_slow_wakeup.c
new file mode 100644
index 0000000..d172676
--- /dev/null
+++ b/regressions/ck_ec/validate/prop_test_slow_wakeup.c
@@ -0,0 +1,110 @@
+#include <assert.h>
+#include <ck_ec.h>
+
+#include "fuzz_harness.h"
+
+static int gettime(const struct ck_ec_ops *, struct timespec *out);
+static void wake32(const struct ck_ec_ops *, const uint32_t *);
+static void wait32(const struct ck_ec_wait_state *, const uint32_t *,
+ uint32_t, const struct timespec *);
+static void wake64(const struct ck_ec_ops *, const uint64_t *);
+static void wait64(const struct ck_ec_wait_state *, const uint64_t *,
+ uint64_t, const struct timespec *);
+
+static const struct ck_ec_ops test_ops = {
+ .gettime = gettime,
+ .wait32 = wait32,
+ .wait64 = wait64,
+ .wake32 = wake32,
+ .wake64 = wake64
+};
+
+static int gettime(const struct ck_ec_ops *ops, struct timespec *out)
+{
+ (void)out;
+
+ assert(ops == &test_ops);
+ return -1;
+}
+
+static void wait32(const struct ck_ec_wait_state *wait_state,
+ const uint32_t *addr, uint32_t expected,
+ const struct timespec *deadline)
+{
+ (void)addr;
+ (void)expected;
+ (void)deadline;
+
+ assert(wait_state->ops == &test_ops);
+ return;
+}
+
+static void wait64(const struct ck_ec_wait_state *wait_state,
+ const uint64_t *addr, uint64_t expected,
+ const struct timespec *deadline)
+{
+ (void)addr;
+ (void)expected;
+ (void)deadline;
+
+ assert(wait_state->ops == &test_ops);
+ return;
+}
+
+static void wake32(const struct ck_ec_ops *ops, const uint32_t *addr)
+{
+ (void)addr;
+
+ assert(ops == &test_ops);
+ return;
+}
+
+static void wake64(const struct ck_ec_ops *ops, const uint64_t *addr)
+{
+ (void)addr;
+
+ assert(ops == &test_ops);
+ return;
+}
+
+/*
+ * Check that calling ck_ec{32,64}_wake always clears the waiting bit.
+ */
+
+struct example {
+ uint64_t value;
+};
+
+const struct example examples[] = {
+ { 0 },
+ { 1 },
+ { 1UL << 30 },
+ { 1UL << 31 },
+ { INT32_MAX },
+ { INT64_MAX },
+ { 1ULL << 62 },
+ { 1ULL << 63 },
+};
+
+static inline int test_slow_wakeup(const struct example *example)
+{
+ {
+ struct ck_ec32 ec = { .counter = example->value };
+
+ ck_ec32_wake(&ec, &test_ops);
+ assert(!ck_ec32_has_waiters(&ec));
+ }
+
+#ifdef CK_F_EC64
+ {
+ struct ck_ec64 ec = { .counter = example->value };
+
+ ck_ec64_wake(&ec, &test_ops);
+ assert(!ck_ec64_has_waiters(&ec));
+ }
+#endif /* CK_F_EC64 */
+
+ return 0;
+}
+
+TEST(test_slow_wakeup, examples)
diff --git a/regressions/ck_ec/validate/prop_test_timeutil_add.c b/regressions/ck_ec/validate/prop_test_timeutil_add.c
new file mode 100644
index 0000000..bd44607
--- /dev/null
+++ b/regressions/ck_ec/validate/prop_test_timeutil_add.c
@@ -0,0 +1,101 @@
+#include <assert.h>
+#include <ck_limits.h>
+#include <ck_stdint.h>
+
+#include "../../../src/ck_ec_timeutil.h"
+#include "fuzz_harness.h"
+
+#if ULONG_MAX > 4294967295
+typedef unsigned __int128 dword_t;
+#else
+typedef uint64_t dword_t;
+#endif
+
+struct example {
+ struct timespec ts;
+ struct timespec inc;
+};
+
+static const struct example examples[] = {
+ {
+ {
+ 42,
+ 100
+ },
+ {
+ 1,
+ 2
+ }
+ },
+ {
+ {
+ 42,
+ 100
+ },
+ {
+ 1,
+ NSEC_MAX
+ }
+ },
+ {
+ {
+ 42,
+ NSEC_MAX
+ },
+ {
+ 0,
+ NSEC_MAX
+ }
+ },
+ {
+ {
+ TIME_MAX - 1,
+ 1000
+ },
+ {
+ 2,
+ NSEC_MAX
+ }
+ }
+};
+
+static struct timespec normalize_ts(const struct timespec ts)
+{
+ struct timespec ret = ts;
+
+ if (ret.tv_sec < 0) {
+ ret.tv_sec = ~ret.tv_sec;
+ }
+
+ if (ret.tv_nsec < 0) {
+ ret.tv_nsec = ~ret.tv_nsec;
+ }
+
+ ret.tv_nsec %= NSEC_MAX + 1;
+ return ret;
+}
+
+static dword_t ts_to_nanos(const struct timespec ts)
+{
+ return (dword_t)ts.tv_sec * (NSEC_MAX + 1) + ts.tv_nsec;
+}
+
+static inline int test_timespec_add(const struct example *example)
+{
+ const struct timespec ts = normalize_ts(example->ts);
+ const struct timespec inc = normalize_ts(example->inc);
+ const struct timespec actual = timespec_add(ts, inc);
+ const dword_t nanos = ts_to_nanos(ts) + ts_to_nanos(inc);
+
+ if (nanos / (NSEC_MAX + 1) > TIME_MAX) {
+ assert(actual.tv_sec == TIME_MAX);
+ assert(actual.tv_nsec == NSEC_MAX);
+ } else {
+ assert(actual.tv_sec == (time_t)(nanos / (NSEC_MAX + 1)));
+ assert(actual.tv_nsec == (long)(nanos % (NSEC_MAX + 1)));
+ }
+
+ return 0;
+}
+
+TEST(test_timespec_add, examples)
diff --git a/regressions/ck_ec/validate/prop_test_timeutil_add_ns.c b/regressions/ck_ec/validate/prop_test_timeutil_add_ns.c
new file mode 100644
index 0000000..b62e1c7
--- /dev/null
+++ b/regressions/ck_ec/validate/prop_test_timeutil_add_ns.c
@@ -0,0 +1,88 @@
+#include <assert.h>
+
+#include "../../../src/ck_ec_timeutil.h"
+#include "fuzz_harness.h"
+
+#if ULONG_MAX > 4294967295
+typedef unsigned __int128 dword_t;
+#else
+typedef uint64_t dword_t;
+#endif
+
+struct example {
+ struct timespec ts;
+ uint32_t ns;
+};
+
+static const struct example examples[] = {
+ {
+ {
+ 42,
+ 100
+ },
+ 1
+ },
+ {
+ {
+ 42,
+ 100
+ },
+ 2 * NSEC_MAX
+ },
+ {
+ {
+ 42,
+ NSEC_MAX
+ },
+ NSEC_MAX
+ },
+ {
+ {
+ TIME_MAX - 1,
+ 1000
+ },
+ 2 * NSEC_MAX
+ }
+};
+
+static inline int test_timespec_add_ns(const struct example *example)
+{
+ struct timespec ts = {
+ .tv_sec = example->ts.tv_sec,
+ .tv_nsec = example->ts.tv_nsec
+ };
+ const uint32_t ns = example->ns;
+
+ if (ts.tv_sec < 0) {
+ ts.tv_sec = ~ts.tv_sec;
+ }
+
+ if (ts.tv_nsec < 0) {
+ ts.tv_nsec = ~ts.tv_nsec;
+ }
+
+ ts.tv_nsec %= NSEC_MAX + 1;
+
+ const struct timespec actual = timespec_add_ns(ts, ns);
+
+ dword_t nanos =
+ (dword_t)ts.tv_sec * (NSEC_MAX + 1) + ts.tv_nsec;
+
+ if (ns > NSEC_MAX) {
+ nanos += NSEC_MAX + 1;
+ } else {
+ nanos += ns;
+ }
+
+ if (nanos / (NSEC_MAX + 1) > TIME_MAX) {
+ assert(actual.tv_sec == TIME_MAX);
+ assert(actual.tv_nsec == NSEC_MAX);
+ } else {
+ assert(actual.tv_sec == (time_t)(nanos / (NSEC_MAX + 1)));
+ assert(actual.tv_nsec == (long)(nanos % (NSEC_MAX + 1)));
+ }
+
+ return 0;
+}
+
+TEST(test_timespec_add_ns, examples)
diff --git a/regressions/ck_ec/validate/prop_test_timeutil_cmp.c b/regressions/ck_ec/validate/prop_test_timeutil_cmp.c
new file mode 100644
index 0000000..00e7b2e
--- /dev/null
+++ b/regressions/ck_ec/validate/prop_test_timeutil_cmp.c
@@ -0,0 +1,99 @@
+#include <assert.h>
+
+#include "../../../src/ck_ec_timeutil.h"
+#include "fuzz_harness.h"
+
+#if ULONG_MAX > 4294967295
+typedef __int128 dsword_t;
+#else
+typedef int64_t dsword_t;
+#endif
+
+struct example {
+ struct timespec x;
+ struct timespec y;
+};
+
+static const struct example examples[] = {
+ {
+ {
+ 42,
+ 100
+ },
+ {
+ 1,
+ 2
+ }
+ },
+ {
+ {
+ 42,
+ 100
+ },
+ {
+ 1,
+ NSEC_MAX
+ }
+ },
+ {
+ {
+ 42,
+ NSEC_MAX
+ },
+ {
+ 0,
+ NSEC_MAX
+ }
+ },
+ {
+ {
+ TIME_MAX - 1,
+ 1000
+ },
+ {
+ 2,
+ NSEC_MAX
+ }
+ }
+};
+
+static struct timespec normalize_ts(const struct timespec ts)
+{
+ struct timespec ret = ts;
+
+ if (ret.tv_nsec < 0) {
+ ret.tv_nsec = ~ret.tv_nsec;
+ }
+
+ ret.tv_nsec %= NSEC_MAX + 1;
+ return ret;
+}
+
+static dsword_t ts_to_nanos(const struct timespec ts)
+{
+ return (dsword_t)ts.tv_sec * (NSEC_MAX + 1) + ts.tv_nsec;
+}
+
+static inline int test_timespec_cmp(const struct example *example)
+{
+ const struct timespec x = normalize_ts(example->y);
+ const struct timespec y = normalize_ts(example->x);
+ const dsword_t x_nanos = ts_to_nanos(x);
+ const dsword_t y_nanos = ts_to_nanos(y);
+
+ assert(timespec_cmp(x, x) == 0);
+ assert(timespec_cmp(y, y) == 0);
+ assert(timespec_cmp(x, y) == -timespec_cmp(y, x));
+
+ if (x_nanos == y_nanos) {
+ assert(timespec_cmp(x, y) == 0);
+ } else if (x_nanos < y_nanos) {
+ assert(timespec_cmp(x, y) == -1);
+ } else {
+ assert(timespec_cmp(x, y) == 1);
+ }
+
+ return 0;
+}
+
+TEST(test_timespec_cmp, examples)
diff --git a/regressions/ck_ec/validate/prop_test_timeutil_scale.c b/regressions/ck_ec/validate/prop_test_timeutil_scale.c
new file mode 100644
index 0000000..eb3040f
--- /dev/null
+++ b/regressions/ck_ec/validate/prop_test_timeutil_scale.c
@@ -0,0 +1,41 @@
+#include <assert.h>
+
+#include "../../../src/ck_ec_timeutil.h"
+#include "fuzz_harness.h"
+
+struct example {
+ uint32_t nsec;
+ uint32_t multiplier;
+ unsigned int shift;
+};
+
+static const struct example examples[] = {
+ {
+ UINT32_MAX,
+ UINT32_MAX,
+ 1
+ },
+ {
+ 10,
+ 20,
+ 0
+ }
+};
+
+static inline int test_wait_time_scale(const struct example *example)
+{
+ const uint32_t nsec = example->nsec;
+ const uint32_t multiplier = example->multiplier;
+ const unsigned int shift = example->shift % 32;
+ uint32_t actual = wait_time_scale(nsec, multiplier, shift);
+ uint64_t expected = ((uint64_t)nsec * multiplier) >> shift;
+
+ if (expected > UINT32_MAX) {
+ expected = UINT32_MAX;
+ }
+
+ assert(actual == expected);
+ return 0;
+}
+
+TEST(test_wait_time_scale, examples)
diff --git a/regressions/ck_ec/validate/prop_test_value.c b/regressions/ck_ec/validate/prop_test_value.c
new file mode 100644
index 0000000..8f9eab8
--- /dev/null
+++ b/regressions/ck_ec/validate/prop_test_value.c
@@ -0,0 +1,150 @@
+#include <assert.h>
+#include <ck_ec.h>
+
+#include "fuzz_harness.h"
+
+static int gettime(const struct ck_ec_ops *, struct timespec *out);
+static void wake32(const struct ck_ec_ops *, const uint32_t *);
+static void wait32(const struct ck_ec_wait_state *, const uint32_t *,
+ uint32_t, const struct timespec *);
+static void wake64(const struct ck_ec_ops *, const uint64_t *);
+static void wait64(const struct ck_ec_wait_state *, const uint64_t *,
+ uint64_t, const struct timespec *);
+
+static const struct ck_ec_ops test_ops = {
+ .gettime = gettime,
+ .wait32 = wait32,
+ .wait64 = wait64,
+ .wake32 = wake32,
+ .wake64 = wake64
+};
+
+static const struct ck_ec_mode modes[] = {
+ {
+ .single_producer = true,
+ .ops = &test_ops
+ },
+ {
+ .single_producer = false,
+ .ops = &test_ops
+ },
+};
+
+static int gettime(const struct ck_ec_ops *ops, struct timespec *out)
+{
+ (void)out;
+
+ assert(ops == &test_ops);
+ return -1;
+}
+
+static void wait32(const struct ck_ec_wait_state *wait_state,
+ const uint32_t *addr, uint32_t expected,
+ const struct timespec *deadline)
+{
+ (void)addr;
+ (void)expected;
+ (void)deadline;
+
+ assert(wait_state->ops == &test_ops);
+ return;
+}
+
+static void wait64(const struct ck_ec_wait_state *wait_state,
+ const uint64_t *addr, uint64_t expected,
+ const struct timespec *deadline)
+{
+ (void)addr;
+ (void)expected;
+ (void)deadline;
+
+ assert(wait_state->ops == &test_ops);
+ return;
+}
+
+static void wake32(const struct ck_ec_ops *ops, const uint32_t *addr)
+{
+ (void)addr;
+
+ assert(ops == &test_ops);
+ return;
+}
+
+static void wake64(const struct ck_ec_ops *ops, const uint64_t *addr)
+{
+ (void)addr;
+
+ assert(ops == &test_ops);
+ return;
+}
+
+/*
+ * Check that adding a value correctly updates the counter, and that
+ * incrementing after that also works.
+ */
+struct example {
+ uint64_t value[2];
+};
+
+static const struct example examples[] = {
+ { { 0, 0 } },
+ { { 1, 2 } },
+ { { 0, INT32_MAX - 2 } },
+ { { 0, INT32_MAX - 1 } },
+ { { 0, INT32_MAX } },
+ { { 0, INT64_MAX - 2 } },
+ { { 0, INT64_MAX - 1 } },
+ { { 0, INT64_MAX } },
+};
+
+static inline int test_value(const struct example *example)
+{
+ for (size_t i = 0; i < 2; i++) {
+ const struct ck_ec_mode *mode = &modes[i];
+ const uint32_t value0 = example->value[0] & INT32_MAX;
+ const uint32_t value1 = example->value[1] & INT32_MAX;
+ struct ck_ec32 ec;
+
+ ck_ec32_init(&ec, 0);
+ assert(ck_ec32_value(&ec) == 0);
+
+ ck_ec32_add(&ec, mode, value0);
+ assert(ck_ec32_value(&ec) == value0);
+
+ ck_ec32_add(&ec, mode, value1);
+ assert(ck_ec32_value(&ec) ==
+ ((value0 + value1) & INT32_MAX));
+
+
+ ck_ec32_inc(&ec, mode);
+ assert(ck_ec32_value(&ec) ==
+ ((value0 + value1 + 1) & INT32_MAX));
+ }
+
+#ifdef CK_F_EC64
+ for (size_t i = 0; i < 2; i++) {
+ const struct ck_ec_mode *mode = &modes[i];
+ const uint64_t value0 = example->value[0] & INT64_MAX;
+ const uint64_t value1 = example->value[1] & INT64_MAX;
+ struct ck_ec64 ec;
+
+ ck_ec64_init(&ec, 0);
+ assert(ck_ec64_value(&ec) == 0);
+
+ ck_ec64_add(&ec, mode, value0);
+ assert(ck_ec64_value(&ec) == value0);
+
+ ck_ec64_add(&ec, mode, value1);
+ assert(ck_ec64_value(&ec) ==
+ ((value0 + value1) & INT64_MAX));
+
+ ck_ec64_inc(&ec, mode);
+ assert(ck_ec64_value(&ec) ==
+ ((value0 + value1 + 1) & INT64_MAX));
+ }
+#endif /* CK_F_EC64 */
+
+ return 0;
+}
+
+TEST(test_value, examples)
diff --git a/regressions/ck_ec/validate/prop_test_wakeup.c b/regressions/ck_ec/validate/prop_test_wakeup.c
new file mode 100644
index 0000000..a858e2b
--- /dev/null
+++ b/regressions/ck_ec/validate/prop_test_wakeup.c
@@ -0,0 +1,193 @@
+#include <assert.h>
+#include <ck_ec.h>
+#include <ck_stdbool.h>
+
+#include "fuzz_harness.h"
+
+static int gettime(const struct ck_ec_ops *, struct timespec *out);
+static void wake32(const struct ck_ec_ops *, const uint32_t *);
+static void wait32(const struct ck_ec_wait_state *, const uint32_t *,
+ uint32_t, const struct timespec *);
+static void wake64(const struct ck_ec_ops *, const uint64_t *);
+static void wait64(const struct ck_ec_wait_state *, const uint64_t *,
+ uint64_t, const struct timespec *);
+
+static const struct ck_ec_ops test_ops = {
+ .gettime = gettime,
+ .wait32 = wait32,
+ .wait64 = wait64,
+ .wake32 = wake32,
+ .wake64 = wake64
+};
+
+static const struct ck_ec_mode modes[] = {
+ {
+ .single_producer = true,
+ .ops = &test_ops
+ },
+ {
+ .single_producer = false,
+ .ops = &test_ops
+ },
+};
+
+static bool woken = false;
+
+static int gettime(const struct ck_ec_ops *ops, struct timespec *out)
+{
+ (void)out;
+
+ assert(ops == &test_ops);
+ return -1;
+}
+
+static void wait32(const struct ck_ec_wait_state *state, const uint32_t *addr,
+ uint32_t expected, const struct timespec *deadline)
+{
+ (void)addr;
+ (void)expected;
+ (void)deadline;
+
+ assert(state->ops == &test_ops);
+ return;
+}
+
+static void wait64(const struct ck_ec_wait_state *state, const uint64_t *addr,
+ uint64_t expected, const struct timespec *deadline)
+{
+ (void)addr;
+ (void)expected;
+ (void)deadline;
+
+ assert(state->ops == &test_ops);
+ return;
+}
+
+static void wake32(const struct ck_ec_ops *ops, const uint32_t *addr)
+{
+ (void)addr;
+
+ assert(ops == &test_ops);
+ woken = true;
+ return;
+}
+
+static void wake64(const struct ck_ec_ops *ops, const uint64_t *addr)
+{
+ (void)addr;
+
+ assert(ops == &test_ops);
+ woken = true;
+ return;
+}
+
+/*
+ * Check that adding a value calls the wake function when the sign bit
+ * is set, and does not call it when the sign bit is unset (modulo
+ * wrap-around).
+ */
+struct example {
+ uint64_t initial;
+ uint64_t increment;
+};
+
+const struct example examples[] = {
+ { INT32_MAX, 0 },
+ { INT32_MAX, 1 },
+ { 0 + (0U << 31), 0 },
+ { 1 + (0U << 31), 0 },
+ { 0 + (1U << 31), 0 },
+ { 1 + (1U << 31), 0 },
+
+ { 0 + (0U << 31), 1 },
+ { 1 + (0U << 31), 1 },
+ { 0 + (1U << 31), 1 },
+ { 1 + (1U << 31), 1 },
+
+ { 0 + (0U << 31), INT32_MAX },
+ { 1 + (0U << 31), INT32_MAX },
+ { 0 + (1U << 31), INT32_MAX },
+ { 1 + (1U << 31), INT32_MAX },
+
+ { INT64_MAX, 0 },
+ { INT64_MAX, 1 },
+ { 0 + (0ULL << 63), 0 },
+ { 1 + (0ULL << 63), 0 },
+ { 0 + (1ULL << 63), 0 },
+ { 1 + (1ULL << 63), 0 },
+
+ { 0 + (0ULL << 63), 1 },
+ { 1 + (0ULL << 63), 1 },
+ { 0 + (1ULL << 63), 1 },
+ { 1 + (1ULL << 63), 1 },
+
+ { 0 + (0ULL << 63), INT64_MAX },
+ { 1 + (0ULL << 63), INT64_MAX },
+ { 0 + (1ULL << 63), INT64_MAX },
+ { 1 + (1ULL << 63), INT64_MAX },
+};
+
+static inline int test_wakeup(const struct example *example)
+{
+ for (size_t i = 0; i < 2; i++) {
+ const struct ck_ec_mode *mode = &modes[i];
+ const uint32_t increment = example->increment & INT32_MAX;
+ struct ck_ec32 ec;
+ bool should_wake;
+ bool may_wake;
+
+ ec.counter = example->initial;
+ should_wake = increment != 0 && (ec.counter & (1U << 31));
+ may_wake = should_wake || (ec.counter & (1U << 31));
+
+ woken = false;
+ ck_ec32_add(&ec, mode, increment);
+ assert(!should_wake || woken);
+ assert(may_wake || !woken);
+ assert(!woken || ck_ec32_has_waiters(&ec) == false);
+
+ /* Test inc now. */
+ ec.counter = example->initial + increment;
+ should_wake = ec.counter & (1U << 31);
+ may_wake = should_wake || ((ec.counter + 1) & (1U << 31));
+
+ woken = false;
+ ck_ec32_inc(&ec, mode);
+ assert(!should_wake || woken);
+ assert(may_wake || !woken);
+ assert(!woken || ck_ec32_has_waiters(&ec) == false);
+ }
+
+#ifdef CK_F_EC64
+ for (size_t i = 0; i < 2; i++) {
+ const struct ck_ec_mode *mode = &modes[i];
+ const uint64_t increment = example->increment & INT64_MAX;
+ struct ck_ec64 ec;
+ bool should_wake;
+ bool may_wake;
+
+ ec.counter = example->initial;
+ should_wake = increment != 0 && (ec.counter & 1);
+ may_wake = should_wake || (ec.counter & 1);
+
+ woken = false;
+ ck_ec64_add(&ec, mode, increment);
+ assert(!should_wake || woken);
+ assert(may_wake || !woken);
+ assert(!woken || ck_ec64_has_waiters(&ec) == false);
+
+ /* Test inc now. */
+ ec.counter = example->initial + increment;
+ should_wake = ec.counter & 1;
+
+ woken = false;
+ ck_ec64_inc(&ec, mode);
+ assert(should_wake == woken);
+ assert(!woken || ck_ec64_has_waiters(&ec) == false);
+ }
+#endif /* CK_F_EC64 */
+
+ return 0;
+}
+
+TEST(test_wakeup, examples)
diff --git a/regressions/ck_epoch/validate/ck_epoch_call.c b/regressions/ck_epoch/validate/ck_epoch_call.c
index 29e0df8..1c274e0 100644
--- a/regressions/ck_epoch/validate/ck_epoch_call.c
+++ b/regressions/ck_epoch/validate/ck_epoch_call.c
@@ -37,6 +37,7 @@ static void
cb(ck_epoch_entry_t *p)
{
+ /* Test that we can reregister the callback. */
if (counter == 0)
ck_epoch_call(&record[1], p, cb);
@@ -50,15 +51,22 @@ int
main(void)
{
ck_epoch_entry_t entry;
+ ck_epoch_entry_t another;
- ck_epoch_register(&epoch, &record[0]);
- ck_epoch_register(&epoch, &record[1]);
+ ck_epoch_register(&epoch, &record[0], NULL);
+ ck_epoch_register(&epoch, &record[1], NULL);
ck_epoch_call(&record[1], &entry, cb);
ck_epoch_barrier(&record[1]);
ck_epoch_barrier(&record[1]);
- if (counter != 2)
- ck_error("Expected counter value 2, read %u.\n", counter);
+
+ /* Make sure that strict works. */
+ ck_epoch_call_strict(&record[1], &entry, cb);
+ ck_epoch_call_strict(&record[1], &another, cb);
+ ck_epoch_barrier(&record[1]);
+
+ if (counter != 4)
+ ck_error("Expected counter value 4, read %u.\n", counter);
return 0;
}
diff --git a/regressions/ck_epoch/validate/ck_epoch_poll.c b/regressions/ck_epoch/validate/ck_epoch_poll.c
index aec6dd0..6f782ee 100644
--- a/regressions/ck_epoch/validate/ck_epoch_poll.c
+++ b/regressions/ck_epoch/validate/ck_epoch_poll.c
@@ -86,10 +86,14 @@ static void *
read_thread(void *unused CK_CC_UNUSED)
{
unsigned int j;
- ck_epoch_record_t record CK_CC_CACHELINE;
+ ck_epoch_record_t *record CK_CC_CACHELINE;
ck_stack_entry_t *cursor, *n;
- ck_epoch_register(&stack_epoch, &record);
+ record = malloc(sizeof *record);
+ if (record == NULL)
+ ck_error("record allocation failure");
+
+ ck_epoch_register(&stack_epoch, record, NULL);
if (aff_iterate(&a)) {
perror("ERROR: failed to affine thread");
@@ -108,7 +112,7 @@ read_thread(void *unused CK_CC_UNUSED)
j = 0;
for (;;) {
- ck_epoch_begin(&record, NULL);
+ ck_epoch_begin(record, NULL);
CK_STACK_FOREACH(&stack, cursor) {
if (cursor == NULL)
continue;
@@ -116,7 +120,7 @@ read_thread(void *unused CK_CC_UNUSED)
n = CK_STACK_NEXT(cursor);
j += ck_pr_load_ptr(&n) != NULL;
}
- ck_epoch_end(&record, NULL);
+ ck_epoch_end(record, NULL);
if (j != 0 && ck_pr_load_uint(&readers) == 0)
ck_pr_store_uint(&readers, 1);
@@ -138,10 +142,13 @@ write_thread(void *unused CK_CC_UNUSED)
{
struct node **entry, *e;
unsigned int i, j, tid;
- ck_epoch_record_t record;
+ ck_epoch_record_t *record;
ck_stack_entry_t *s;
- ck_epoch_register(&stack_epoch, &record);
+ record = malloc(sizeof *record);
+ if (record == NULL)
+ ck_error("record allocation failure");
+ ck_epoch_register(&stack_epoch, record, NULL);
if (aff_iterate(&a)) {
perror("ERROR: failed to affine thread");
@@ -178,23 +185,23 @@ write_thread(void *unused CK_CC_UNUSED)
}
for (i = 0; i < PAIRS_S; i++) {
- ck_epoch_begin(&record, NULL);
+ ck_epoch_begin(record, NULL);
s = ck_stack_pop_upmc(&stack);
e = stack_container(s);
- ck_epoch_end(&record, NULL);
+ ck_epoch_end(record, NULL);
- ck_epoch_call(&record, &e->epoch_entry, destructor);
- ck_epoch_poll(&record);
+ ck_epoch_call(record, &e->epoch_entry, destructor);
+ ck_epoch_poll(record);
}
}
- ck_epoch_barrier(&record);
+ ck_epoch_barrier(record);
if (tid == 0) {
- fprintf(stderr, "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b[W] Peak: %u (%2.2f%%)\n Reclamations: %lu\n\n",
- record.n_peak,
- (double)record.n_peak / ((double)PAIRS_S * ITERATE_S) * 100,
- record.n_dispatch);
+ fprintf(stderr, "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b[W] Peak: %u (%2.2f%%)\n Reclamations: %u\n\n",
+ record->n_peak,
+ (double)record->n_peak / ((double)PAIRS_S * ITERATE_S) * 100,
+ record->n_dispatch);
}
ck_pr_inc_uint(&e_barrier);
diff --git a/regressions/ck_epoch/validate/ck_epoch_section.c b/regressions/ck_epoch/validate/ck_epoch_section.c
index 12bcca1..7b76d1c 100644
--- a/regressions/ck_epoch/validate/ck_epoch_section.c
+++ b/regressions/ck_epoch/validate/ck_epoch_section.c
@@ -46,8 +46,8 @@ setup_test(void)
{
ck_epoch_init(&epc);
- ck_epoch_register(&epc, &record);
- ck_epoch_register(&epc, &record2);
+ ck_epoch_register(&epc, &record, NULL);
+ ck_epoch_register(&epc, &record2, NULL);
cleanup_calls = 0;
return;
@@ -88,7 +88,8 @@ test_simple_read_section(void)
ck_epoch_begin(&record, &section);
ck_epoch_call(&record, &entry, cleanup);
assert(cleanup_calls == 0);
- ck_epoch_end(&record, &section);
+ if (ck_epoch_end(&record, &section) == false)
+ ck_error("expected no more sections");
ck_epoch_barrier(&record);
assert(cleanup_calls == 1);
@@ -157,7 +158,7 @@ reader_work(void *arg)
ck_epoch_section_t section;
struct obj *o;
- ck_epoch_register(&epc, &local_record);
+ ck_epoch_register(&epc, &local_record, NULL);
o = (struct obj *)arg;
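
The simple-section test now checks ck_epoch_end()'s return value instead of asserting. A small sketch of that check, assuming the call returns true once the record has left its last active section, which is what the error message above implies:

    #include <ck_epoch.h>

    static void
    leave(ck_epoch_record_t *record, ck_epoch_section_t *section)
    {

            if (ck_epoch_end(record, section) == false) {
                    /* A nested section is still active on this record. */
            }
    }
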
diff --git a/regressions/ck_epoch/validate/ck_epoch_section_2.c b/regressions/ck_epoch/validate/ck_epoch_section_2.c
index aed3661..dcb3fd0 100644
--- a/regressions/ck_epoch/validate/ck_epoch_section_2.c
+++ b/regressions/ck_epoch/validate/ck_epoch_section_2.c
@@ -64,7 +64,7 @@ read_thread(void *unused CK_CC_UNUSED)
record = malloc(sizeof *record);
assert(record != NULL);
- ck_epoch_register(&epoch, record);
+ ck_epoch_register(&epoch, record, NULL);
if (aff_iterate(&a)) {
perror("ERROR: failed to affine thread");
@@ -110,11 +110,14 @@ read_thread(void *unused CK_CC_UNUSED)
}
ck_epoch_begin(record, &section[1]);
-
- assert(section[0].bucket != section[1].bucket);
+ if (section[0].bucket == section[1].bucket) {
+ ck_error("%u == %u\n",
+ section[0].bucket, section[1].bucket);
+ }
ck_epoch_end(record, &section[0]);
- assert(ck_pr_load_uint(&record->active) > 0);
+ if (ck_pr_load_uint(&record->active) == 0)
+ ck_error("active: %u\n", record->active);
if (ck_pr_load_uint(&leave) == 1) {
ck_epoch_end(record, &section[1]);
@@ -130,10 +133,14 @@ read_thread(void *unused CK_CC_UNUSED)
static void *
write_thread(void *unused CK_CC_UNUSED)
{
- ck_epoch_record_t record;
+ ck_epoch_record_t *record;
unsigned long iterations = 0;
- ck_epoch_register(&epoch, &record);
+ record = malloc(sizeof *record);
+ if (record == NULL)
+ ck_error("record allocation failure");
+
+ ck_epoch_register(&epoch, record, NULL);
if (aff_iterate(&a)) {
perror("ERROR: failed to affine thread");
@@ -147,7 +154,7 @@ write_thread(void *unused CK_CC_UNUSED)
if (!(iterations % 1048575))
fprintf(stderr, ".");
- ck_epoch_synchronize(&record);
+ ck_epoch_synchronize(record);
iterations++;
if (ck_pr_load_uint(&leave) == 1)
diff --git a/regressions/ck_epoch/validate/ck_epoch_synchronize.c b/regressions/ck_epoch/validate/ck_epoch_synchronize.c
index a03a4f7..67e23a3 100644
--- a/regressions/ck_epoch/validate/ck_epoch_synchronize.c
+++ b/regressions/ck_epoch/validate/ck_epoch_synchronize.c
@@ -86,12 +86,15 @@ static void *
read_thread(void *unused CK_CC_UNUSED)
{
unsigned int j;
- ck_epoch_record_t record CK_CC_CACHELINE;
+ ck_epoch_record_t *record CK_CC_CACHELINE;
ck_stack_entry_t *cursor;
ck_stack_entry_t *n;
unsigned int i;
- ck_epoch_register(&stack_epoch, &record);
+ record = malloc(sizeof *record);
+ if (record == NULL)
+ ck_error("record allocation failure");
+ ck_epoch_register(&stack_epoch, record, NULL);
if (aff_iterate(&a)) {
perror("ERROR: failed to affine thread");
@@ -112,7 +115,7 @@ read_thread(void *unused CK_CC_UNUSED)
for (;;) {
i = 0;
- ck_epoch_begin(&record, NULL);
+ ck_epoch_begin(record, NULL);
CK_STACK_FOREACH(&stack, cursor) {
if (cursor == NULL)
continue;
@@ -123,7 +126,7 @@ read_thread(void *unused CK_CC_UNUSED)
if (i++ > 4098)
break;
}
- ck_epoch_end(&record, NULL);
+ ck_epoch_end(record, NULL);
if (j != 0 && ck_pr_load_uint(&readers) == 0)
ck_pr_store_uint(&readers, 1);
@@ -145,10 +148,13 @@ write_thread(void *unused CK_CC_UNUSED)
{
struct node **entry, *e;
unsigned int i, j, tid;
- ck_epoch_record_t record;
+ ck_epoch_record_t *record;
ck_stack_entry_t *s;
- ck_epoch_register(&stack_epoch, &record);
+ record = malloc(sizeof *record);
+ if (record == NULL)
+ ck_error("record allocation failure");
+ ck_epoch_register(&stack_epoch, record, NULL);
if (aff_iterate(&a)) {
perror("ERROR: failed to affine thread");
@@ -180,17 +186,17 @@ write_thread(void *unused CK_CC_UNUSED)
ck_pr_stall();
for (i = 0; i < PAIRS_S; i++) {
- ck_epoch_begin(&record, NULL);
+ ck_epoch_begin(record, NULL);
s = ck_stack_pop_upmc(&stack);
e = stack_container(s);
- ck_epoch_end(&record, NULL);
+ ck_epoch_end(record, NULL);
if (i & 1) {
- ck_epoch_synchronize(&record);
- ck_epoch_reclaim(&record);
- ck_epoch_call(&record, &e->epoch_entry, destructor);
+ ck_epoch_synchronize(record);
+ ck_epoch_reclaim(record);
+ ck_epoch_call(record, &e->epoch_entry, destructor);
} else {
- ck_epoch_barrier(&record);
+ ck_epoch_barrier(record);
destructor(&e->epoch_entry);
}
@@ -201,13 +207,13 @@ write_thread(void *unused CK_CC_UNUSED)
}
}
- ck_epoch_synchronize(&record);
+ ck_epoch_synchronize(record);
if (tid == 0) {
- fprintf(stderr, "[W] Peak: %u (%2.2f%%)\n Reclamations: %lu\n\n",
- record.n_peak,
- (double)record.n_peak / ((double)PAIRS_S * ITERATE_S) * 100,
- record.n_dispatch);
+ fprintf(stderr, "[W] Peak: %u (%2.2f%%)\n Reclamations: %u\n\n",
+ record->n_peak,
+ (double)record->n_peak / ((double)PAIRS_S * ITERATE_S) * 100,
+ record->n_dispatch);
}
ck_pr_inc_uint(&e_barrier);
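
The writer above alternates between two reclamation strategies on a heap-allocated record. A condensed sketch of both paths, with a hypothetical node type carrying an embedded ck_epoch_entry_t as its first member:

    #include <stdbool.h>
    #include <stdlib.h>
    #include <ck_epoch.h>

    struct node {
            ck_epoch_entry_t epoch_entry;
            /* payload */
    };

    static void
    node_destroy(ck_epoch_entry_t *e)
    {

            free(e);        /* entry is the first member, so this frees the node */
    }

    static void
    retire(ck_epoch_record_t *record, struct node *n, bool deferred)
    {

            if (deferred == true) {
                    /* Wait for a grace period, run pending callbacks, defer this one. */
                    ck_epoch_synchronize(record);
                    ck_epoch_reclaim(record);
                    ck_epoch_call(record, &n->epoch_entry, node_destroy);
            } else {
                    /* The barrier both waits and dispatches; free immediately after. */
                    ck_epoch_barrier(record);
                    node_destroy(&n->epoch_entry);
            }
    }
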
diff --git a/regressions/ck_epoch/validate/ck_stack.c b/regressions/ck_epoch/validate/ck_stack.c
index fc50228..6d493e1 100644
--- a/regressions/ck_epoch/validate/ck_stack.c
+++ b/regressions/ck_epoch/validate/ck_stack.c
@@ -81,7 +81,7 @@ thread(void *unused CK_CC_UNUSED)
unsigned long smr = 0;
unsigned int i;
- ck_epoch_register(&stack_epoch, &record);
+ ck_epoch_register(&stack_epoch, &record, NULL);
if (aff_iterate(&a)) {
perror("ERROR: failed to affine thread");
@@ -118,7 +118,7 @@ thread(void *unused CK_CC_UNUSED)
while (ck_pr_load_uint(&e_barrier) < n_threads);
fprintf(stderr, "Deferrals: %lu (%2.2f)\n", smr, (double)smr / PAIRS);
- fprintf(stderr, "Peak: %u (%2.2f%%), %u pending\nReclamations: %lu\n\n",
+ fprintf(stderr, "Peak: %u (%2.2f%%), %u pending\nReclamations: %u\n\n",
record.n_peak,
(double)record.n_peak / PAIRS * 100,
record.n_pending,
diff --git a/regressions/ck_epoch/validate/torture.c b/regressions/ck_epoch/validate/torture.c
index ce3c049..f49d412 100644
--- a/regressions/ck_epoch/validate/torture.c
+++ b/regressions/ck_epoch/validate/torture.c
@@ -31,8 +31,8 @@
#include <unistd.h>
#include <ck_cc.h>
#include <ck_pr.h>
+#include <inttypes.h>
#include <stdbool.h>
-#include <stddef.h>
#include <string.h>
#include <ck_epoch.h>
#include <ck_stack.h>
@@ -119,7 +119,7 @@ read_thread(void *unused CK_CC_UNUSED)
record = malloc(sizeof *record);
assert(record != NULL);
- ck_epoch_register(&epoch, record);
+ ck_epoch_register(&epoch, record, NULL);
if (aff_iterate(&a)) {
perror("ERROR: failed to affine thread");
@@ -147,10 +147,11 @@ write_thread(void *unused CK_CC_UNUSED)
ck_epoch_record_t *record;
unsigned long iterations = 0;
bool c = ck_pr_faa_uint(&first, 1);
+ uint64_t ac = 0;
record = malloc(sizeof *record);
assert(record != NULL);
- ck_epoch_register(&epoch, record);
+ ck_epoch_register(&epoch, record, NULL);
if (aff_iterate(&a)) {
perror("ERROR: failed to affine thread");
@@ -160,6 +161,12 @@ write_thread(void *unused CK_CC_UNUSED)
ck_pr_inc_uint(&barrier);
while (ck_pr_load_uint(&barrier) < n_threads);
+#define CK_EPOCH_S do { \
+ uint64_t _s = rdtsc(); \
+ ck_epoch_synchronize(record); \
+ ac += rdtsc() - _s; \
+} while (0)
+
do {
/*
* A thread should never observe invalid.value > valid.value.
@@ -167,33 +174,34 @@ write_thread(void *unused CK_CC_UNUSED)
* invalid.value <= valid.value is valid.
*/
if (!c) ck_pr_store_uint(&valid.value, 1);
- ck_epoch_synchronize(record);
+ CK_EPOCH_S;
if (!c) ck_pr_store_uint(&invalid.value, 1);
ck_pr_fence_store();
if (!c) ck_pr_store_uint(&valid.value, 2);
- ck_epoch_synchronize(record);
+ CK_EPOCH_S;
if (!c) ck_pr_store_uint(&invalid.value, 2);
ck_pr_fence_store();
if (!c) ck_pr_store_uint(&valid.value, 3);
- ck_epoch_synchronize(record);
+ CK_EPOCH_S;
if (!c) ck_pr_store_uint(&invalid.value, 3);
ck_pr_fence_store();
if (!c) ck_pr_store_uint(&valid.value, 4);
- ck_epoch_synchronize(record);
+ CK_EPOCH_S;
if (!c) ck_pr_store_uint(&invalid.value, 4);
- ck_epoch_synchronize(record);
+ CK_EPOCH_S;
if (!c) ck_pr_store_uint(&invalid.value, 0);
- ck_epoch_synchronize(record);
+ CK_EPOCH_S;
- iterations += 4;
+ iterations += 6;
} while (ck_pr_load_uint(&leave) == 0 &&
ck_pr_load_uint(&n_rd) > 0);
fprintf(stderr, "%lu iterations\n", iterations);
+ fprintf(stderr, "%" PRIu64 " average latency\n", ac / iterations);
return NULL;
}
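
The CK_EPOCH_S macro introduced above accumulates the cost of each ck_epoch_synchronize() call. A standalone sketch of the same measurement, assuming the rdtsc() helper from this tree's common.h and reporting raw TSC ticks rather than calibrated time:

    #include <inttypes.h>
    #include <stdio.h>
    #include <ck_epoch.h>

    #include "../../common.h"       /* provides rdtsc() in this tree */

    static void
    timed_synchronize(ck_epoch_record_t *record, uint64_t *acc, uint64_t *calls)
    {
            uint64_t s = rdtsc();

            ck_epoch_synchronize(record);
            *acc += rdtsc() - s;
            (*calls)++;
    }

    /* Later: fprintf(stderr, "%" PRIu64 " ticks/call\n", *acc / *calls); */
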
diff --git a/regressions/ck_hp/validate/ck_hp_fifo.c b/regressions/ck_hp/validate/ck_hp_fifo.c
index 4454283..5820f1a 100644
--- a/regressions/ck_hp/validate/ck_hp_fifo.c
+++ b/regressions/ck_hp/validate/ck_hp_fifo.c
@@ -55,6 +55,7 @@ static struct affinity a;
static int size;
static unsigned int barrier;
static unsigned int e_barrier;
+static unsigned int s_barrier;
static void *
test(void *c)
@@ -98,6 +99,9 @@ test(void *c)
}
}
+ ck_pr_inc_uint(&s_barrier);
+ while (ck_pr_load_uint(&s_barrier) < (unsigned int)nthr);
+
for (i = 0; i < ITERATIONS; i++) {
for (j = 0; j < size; j++) {
fifo_entry = malloc(sizeof(ck_hp_fifo_entry_t));
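
The new s_barrier above keeps every thread out of the enqueue/dequeue phase until all of them have finished the setup phase. The counting-barrier idiom it relies on is simply (sketch; nthr is the thread count, and ck_pr_stall() is an added politeness hint, the test spins bare):

    #include <ck_pr.h>

    static unsigned int s_barrier;

    static void
    start_barrier(unsigned int nthr)
    {

            /* Announce arrival, then spin until every thread has arrived. */
            ck_pr_inc_uint(&s_barrier);
            while (ck_pr_load_uint(&s_barrier) < nthr)
                    ck_pr_stall();
    }
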
diff --git a/regressions/ck_hs/benchmark/apply.c b/regressions/ck_hs/benchmark/apply.c
index ca4a3da..e8b2294 100644
--- a/regressions/ck_hs/benchmark/apply.c
+++ b/regressions/ck_hs/benchmark/apply.c
@@ -6,9 +6,9 @@
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- * 1. Redistributions of source code must retain the above copyrighs
+ * 1. Redistributions of source code must retain the above copyrights
* notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyrighs
+ * 2. Redistributions in binary form must reproduce the above copyrights
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
diff --git a/regressions/ck_hs/benchmark/parallel_bytestring.c b/regressions/ck_hs/benchmark/parallel_bytestring.c
index 6d38379..3275b05 100644
--- a/regressions/ck_hs/benchmark/parallel_bytestring.c
+++ b/regressions/ck_hs/benchmark/parallel_bytestring.c
@@ -5,9 +5,9 @@
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- * 1. Redistributions of source code must retain the above copyrighs
+ * 1. Redistributions of source code must retain the above copyrights
* notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyrighs
+ * 2. Redistributions in binary form must reproduce the above copyrights
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
@@ -147,7 +147,7 @@ set_init(void)
#endif
ck_epoch_init(&epoch_hs);
- ck_epoch_register(&epoch_hs, &epoch_wr);
+ ck_epoch_register(&epoch_hs, &epoch_wr, NULL);
common_srand48((long int)time(NULL));
if (ck_hs_init(&hs, mode, hs_hash, hs_compare, &my_allocator, 65536, common_lrand48()) == false) {
perror("ck_hs_init");
@@ -234,7 +234,7 @@ reader(void *unused)
perror("WARNING: Failed to affine thread");
s = j = a = 0;
- ck_epoch_register(&epoch_hs, &epoch_record);
+ ck_epoch_register(&epoch_hs, &epoch_record, NULL);
for (;;) {
j++;
ck_epoch_begin(&epoch_record, NULL);
@@ -454,8 +454,8 @@ main(int argc, char *argv[])
ck_epoch_record_t epoch_temporary = epoch_wr;
ck_epoch_synchronize(&epoch_wr);
- fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
- "%u pending, %u peak, %lu reclamations\n\n",
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %u reclamations -> "
+ "%u pending, %u peak, %u reclamations\n\n",
epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
@@ -593,8 +593,8 @@ main(int argc, char *argv[])
epoch_temporary = epoch_wr;
ck_epoch_synchronize(&epoch_wr);
- fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
- "%u pending, %u peak, %lu reclamations\n\n",
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %u reclamations -> "
+ "%u pending, %u peak, %u reclamations\n\n",
epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
return 0;
diff --git a/regressions/ck_hs/benchmark/serial.c b/regressions/ck_hs/benchmark/serial.c
index ac4caff..5b4cd50 100644
--- a/regressions/ck_hs/benchmark/serial.c
+++ b/regressions/ck_hs/benchmark/serial.c
@@ -5,9 +5,9 @@
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- * 1. Redistributions of source code must retain the above copyrighs
+ * 1. Redistributions of source code must retain the above copyrights
* notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyrighs
+ * 2. Redistributions in binary form must reproduce the above copyrights
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
diff --git a/regressions/ck_hs/validate/serial.c b/regressions/ck_hs/validate/serial.c
index a16fc82..634924a 100644
--- a/regressions/ck_hs/validate/serial.c
+++ b/regressions/ck_hs/validate/serial.c
@@ -5,9 +5,9 @@
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- * 1. Redistributions of source code must retain the above copyrighs
+ * 1. Redistributions of source code must retain the above copyrights
* notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyrighs
+ * 2. Redistributions in binary form must reproduce the above copyrights
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
@@ -57,12 +57,28 @@ static struct ck_malloc my_allocator = {
.free = hs_free
};
+static void
+stub_free(void *p, size_t b, bool r)
+{
+
+ (void)b;
+ (void)r;
+
+ fprintf(stderr, "Ignoring reclamation of %p\n", p);
+ return;
+}
+
+static struct ck_malloc stub_allocator = {
+ .malloc = hs_malloc,
+ .free = stub_free
+};
+
const char *test[] = { "Samy", "Al", "Bahra", "dances", "in", "the", "wind.", "Once",
- "upon", "a", "time", "his", "gypsy", "ate", "one", "itsy",
- "bitsy", "spider.", "What", "goes", "up", "must",
- "come", "down.", "What", "is", "down", "stays",
- "down.", "A", "B", "C", "D", "E", "F", "G", "H",
- "I", "J", "K", "L", "M", "N", "O", "P", "Q" };
+ "upon", "a", "time", "his", "gypsy", "ate", "one", "itsy",
+ "bitsy", "spider.", "What", "goes", "up", "must",
+ "come", "down.", "What", "is", "down", "stays",
+ "down.", "A", "B", "C", "D", "E", "F", "G", "H",
+ "I", "J", "K", "L", "M", "N", "O", "P", "Q" };
const char *negative = "negative";
@@ -136,13 +152,21 @@ run_test(unsigned int is, unsigned int ad)
size_t i, j;
const char *blob = "#blobs";
unsigned long h;
+ ck_hs_iterator_t it;
if (ck_hs_init(&hs[0], CK_HS_MODE_SPMC | CK_HS_MODE_OBJECT | ad, hs_hash, hs_compare, &my_allocator, is, 6602834) == false)
ck_error("ck_hs_init\n");
for (j = 0; j < size; j++) {
for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
- h = test[i][0];
+ unsigned long h_1;
+
+ h = CK_HS_HASH(&hs[j], hs_hash, test[i]);
+ h_1 = ck_hs_hash(&hs[j], test[i]);
+
+ if (h != h_1)
+ ck_error("h != h_1 (%lu != %lu)\n", h, h_1);
+
if (ck_hs_get(&hs[j], h, test[i]) != NULL) {
continue;
}
@@ -181,6 +205,58 @@ run_test(unsigned int is, unsigned int ad)
}
}
+ /* Test iteration */
+ if (j == 0) {
+ /* Avoid the blob stuff as it's not in the test array. */
+ ck_hs_iterator_init(&it);
+ void *k = NULL;
+ int matches = 0;
+ int entries = 0;
+ while (ck_hs_next(&hs[j], &it, &k) == true) {
+ entries++;
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ int x = strcmp(test[i], (char *)k);
+ if (x == 0) {
+ matches++;
+ break;
+ }
+ }
+ }
+
+ if (entries != matches) {
+ ck_error("Iteration must match all elements, has: %d, matched: %d [%d]", entries, matches, is);
+ }
+
+ /*
+		 * Now test iteration in the face of grow operations (spmc).
+ * In order to test usage after reclamation, we
+ * stub the allocator.
+ */
+ ck_hs_iterator_init(&it);
+ k = NULL;
+ matches = 0;
+ entries = 0;
+ hs[j].m = &stub_allocator;
+ while (ck_hs_next_spmc(&hs[j], &it, &k) == true) {
+ entries++;
+ for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
+ int x = strcmp(test[i], (char *)k);
+ if (x == 0) {
+ matches++;
+ break;
+ }
+ }
+ if (entries == 20) {
+ ck_hs_grow(&hs[j], 128);
+ }
+ }
+ hs[j].m = &my_allocator;
+
+ if (entries != matches) {
+ ck_error("After growth, iteration must match all elements, has: %d, matched: %d [%d]", entries, matches, is);
+ }
+ }
+
/* Test grow semantics. */
ck_hs_grow(&hs[j], 128);
for (i = 0; i < sizeof(test) / sizeof(*test); i++) {
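
The iteration checks added above use ck_hs_iterator_init() with ck_hs_next(), switching to ck_hs_next_spmc() when the set may grow mid-walk, and cross-check the CK_HS_HASH() macro against ck_hs_hash(). A minimal walk over a set, assuming NUL-terminated string keys as in this test:

    #include <stdio.h>
    #include <ck_hs.h>

    static void
    dump_keys(struct ck_hs *hs)
    {
            ck_hs_iterator_t it;
            void *k = NULL;

            ck_hs_iterator_init(&it);
            while (ck_hs_next(hs, &it, &k) == true)
                    printf("%s\n", (const char *)k);
    }
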
diff --git a/regressions/ck_ht/benchmark/parallel_bytestring.c b/regressions/ck_ht/benchmark/parallel_bytestring.c
index f3d3854..bb8f462 100644
--- a/regressions/ck_ht/benchmark/parallel_bytestring.c
+++ b/regressions/ck_ht/benchmark/parallel_bytestring.c
@@ -132,7 +132,7 @@ table_init(void)
#endif
ck_epoch_init(&epoch_ht);
- ck_epoch_register(&epoch_ht, &epoch_wr);
+ ck_epoch_register(&epoch_ht, &epoch_wr, NULL);
common_srand48((long int)time(NULL));
if (ck_ht_init(&ht, mode, NULL, &my_allocator, 8, common_lrand48()) == false) {
perror("ck_ht_init");
@@ -221,7 +221,7 @@ reader(void *unused)
perror("WARNING: Failed to affine thread");
s = j = a = 0;
- ck_epoch_register(&epoch_ht, &epoch_record);
+ ck_epoch_register(&epoch_ht, &epoch_record, NULL);
for (;;) {
j++;
ck_epoch_begin(&epoch_record, NULL);
@@ -426,8 +426,8 @@ main(int argc, char *argv[])
ck_epoch_record_t epoch_temporary = epoch_wr;
ck_epoch_synchronize(&epoch_wr);
- fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
- "%u pending, %u peak, %lu reclamations\n\n",
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %u reclamations -> "
+ "%u pending, %u peak, %u reclamations\n\n",
epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
@@ -551,8 +551,8 @@ main(int argc, char *argv[])
epoch_temporary = epoch_wr;
ck_epoch_synchronize(&epoch_wr);
- fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
- "%u pending, %u peak, %lu reclamations\n\n",
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %u reclamations -> "
+ "%u pending, %u peak, %u reclamations\n\n",
epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
return 0;
diff --git a/regressions/ck_ht/benchmark/parallel_direct.c b/regressions/ck_ht/benchmark/parallel_direct.c
index 195bb25..de1d12e 100644
--- a/regressions/ck_ht/benchmark/parallel_direct.c
+++ b/regressions/ck_ht/benchmark/parallel_direct.c
@@ -136,7 +136,7 @@ table_init(void)
{
ck_epoch_init(&epoch_ht);
- ck_epoch_register(&epoch_ht, &epoch_wr);
+ ck_epoch_register(&epoch_ht, &epoch_wr, NULL);
common_srand48((long int)time(NULL));
if (ck_ht_init(&ht, CK_HT_MODE_DIRECT, hash_function, &my_allocator, 8, common_lrand48()) == false) {
perror("ck_ht_init");
@@ -221,7 +221,7 @@ ht_reader(void *unused)
perror("WARNING: Failed to affine thread");
s = j = a = 0;
- ck_epoch_register(&epoch_ht, &epoch_record);
+ ck_epoch_register(&epoch_ht, &epoch_record, NULL);
for (;;) {
j++;
ck_epoch_begin(&epoch_record, NULL);
@@ -412,8 +412,8 @@ main(int argc, char *argv[])
ck_epoch_record_t epoch_temporary = epoch_wr;
ck_epoch_synchronize(&epoch_wr);
- fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
- "%u pending, %u peak, %lu reclamations\n\n",
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %u reclamations -> "
+ "%u pending, %u peak, %u reclamations\n\n",
epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
@@ -537,8 +537,8 @@ main(int argc, char *argv[])
epoch_temporary = epoch_wr;
ck_epoch_synchronize(&epoch_wr);
- fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
- "%u pending, %u peak, %lu reclamations\n\n",
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %u reclamations -> "
+ "%u pending, %u peak, %u reclamations\n\n",
epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
return 0;
diff --git a/regressions/ck_pr/benchmark/Makefile b/regressions/ck_pr/benchmark/Makefile
index 55183d8..3d2654d 100644
--- a/regressions/ck_pr/benchmark/Makefile
+++ b/regressions/ck_pr/benchmark/Makefile
@@ -1,6 +1,8 @@
.PHONY: clean
-all: ck_pr_cas_64 ck_pr_fas_64 ck_pr_cas_64_2 ck_pr_add_64 ck_pr_faa_64 ck_pr_neg_64 fp
+OBJECTS=ck_pr_cas_64 ck_pr_fas_64 ck_pr_cas_64_2 ck_pr_add_64 ck_pr_faa_64 ck_pr_neg_64 fp
+
+all: $(OBJECTS)
fp: fp.c
$(CC) $(CFLAGS) -o fp fp.c
@@ -24,8 +26,7 @@ ck_pr_neg_64: ck_pr_neg_64.c
$(CC) $(CFLAGS) -o ck_pr_neg_64 ck_pr_neg_64.c -lm
clean:
- rm -rf ck_pr_cas_64 ck_pr_fas_64 ck_pr_cas_64_2 ck_pr_add_64 \
- ck_pr_faa_64 ck_pr_neg_64 *.dSYM *.exe
+ rm -rf *.dSYM *.exe *.o $(OBJECTS)
include ../../../build/regressions.build
CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_pr/validate/Makefile b/regressions/ck_pr/validate/Makefile
index 9e4a82d..11f1b8d 100644
--- a/regressions/ck_pr/validate/Makefile
+++ b/regressions/ck_pr/validate/Makefile
@@ -4,7 +4,7 @@ OBJECTS=ck_pr_cas ck_pr_faa ck_pr_inc ck_pr_dec ck_pr_bts \
ck_pr_btr ck_pr_btc ck_pr_load ck_pr_store \
ck_pr_and ck_pr_or ck_pr_xor ck_pr_add ck_pr_sub \
ck_pr_fas ck_pr_bin ck_pr_btx ck_pr_fax ck_pr_n \
- ck_pr_unary
+ ck_pr_unary ck_pr_fence ck_pr_dec_zero ck_pr_inc_zero
all: $(OBJECTS)
@@ -20,12 +20,21 @@ ck_pr_cas: ck_pr_cas.c
ck_pr_inc: ck_pr_inc.c
$(CC) $(CFLAGS) -o ck_pr_inc ck_pr_inc.c
+ck_pr_inc_zero: ck_pr_inc_zero.c
+ $(CC) $(CFLAGS) -o ck_pr_inc_zero ck_pr_inc_zero.c
+
ck_pr_dec: ck_pr_dec.c
$(CC) $(CFLAGS) -o ck_pr_dec ck_pr_dec.c
+ck_pr_dec_zero: ck_pr_dec_zero.c
+ $(CC) $(CFLAGS) -o ck_pr_dec_zero ck_pr_dec_zero.c
+
ck_pr_faa: ck_pr_faa.c
$(CC) $(CFLAGS) -o ck_pr_faa ck_pr_faa.c
+ck_pr_fence: ck_pr_fence.c
+ $(CC) $(CFLAGS) -o ck_pr_fence ck_pr_fence.c
+
ck_pr_btc: ck_pr_btc.c
$(CC) $(CFLAGS) -o ck_pr_btc ck_pr_btc.c
diff --git a/regressions/ck_pr/validate/ck_pr_dec_zero.c b/regressions/ck_pr/validate/ck_pr_dec_zero.c
new file mode 100644
index 0000000..0f3e85f
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_dec_zero.c
@@ -0,0 +1,105 @@
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include <ck_pr.h>
+
+#define EXPECT(ACTUAL, IS_ZERO, TYPE, INITIAL) do { \
+ TYPE expected = (TYPE)((TYPE)INITIAL - (TYPE)1); \
+ if ((ACTUAL) != expected) { \
+ printf("FAIL [ %" PRIx64" != %" PRIx64" ]\n", \
+ (uint64_t)(ACTUAL), \
+ (uint64_t)expected); \
+ exit(EXIT_FAILURE); \
+ } \
+ \
+ if ((IS_ZERO) != ((ACTUAL) == 0)) { \
+ printf("FAIL [ %s != %s ]\n", \
+ ((IS_ZERO) ? "true" : "false"), \
+ (((ACTUAL) == 0) ? "true" : "false")); \
+ exit(EXIT_FAILURE); \
+ } \
+ } while (0)
+
+#define TEST_ZERO(TYPE, SUFFIX) do { \
+ TYPE datum; \
+ bool is_zero; \
+ \
+ datum = 0; \
+ ck_pr_dec_##SUFFIX##_zero(&datum, &is_zero); \
+ EXPECT(datum, is_zero, TYPE, 0); \
+ \
+ datum = (TYPE)-1; \
+ ck_pr_dec_##SUFFIX##_zero(&datum, &is_zero); \
+ EXPECT(datum, is_zero, TYPE, -1); \
+ \
+ datum = (TYPE)1; \
+ ck_pr_dec_##SUFFIX##_zero(&datum, &is_zero); \
+ EXPECT(datum, is_zero, TYPE, 1); \
+ \
+ datum = (TYPE)2; \
+ ck_pr_dec_##SUFFIX##_zero(&datum, &is_zero); \
+ EXPECT(datum, is_zero, TYPE, 2); \
+ } while (0)
+
+#define TEST_IS_ZERO(TYPE, SUFFIX) do { \
+ TYPE datum; \
+ bool is_zero; \
+ \
+ datum = 0; \
+ is_zero = ck_pr_dec_##SUFFIX##_is_zero(&datum); \
+ EXPECT(datum, is_zero, TYPE, 0); \
+ \
+ datum = (TYPE)-1; \
+ is_zero = ck_pr_dec_##SUFFIX##_is_zero(&datum); \
+ EXPECT(datum, is_zero, TYPE, -1); \
+ \
+ datum = (TYPE)1; \
+ is_zero = ck_pr_dec_##SUFFIX##_is_zero(&datum); \
+ EXPECT(datum, is_zero, TYPE, 1); \
+ \
+ datum = (TYPE)2; \
+ is_zero = ck_pr_dec_##SUFFIX##_is_zero(&datum); \
+ EXPECT(datum, is_zero, TYPE, 2); \
+ } while (0)
+
+#define TEST(TYPE, SUFFIX) do { \
+ TEST_ZERO(TYPE, SUFFIX); \
+ TEST_IS_ZERO(TYPE, SUFFIX); \
+} while (0)
+
+int
+main(void)
+{
+
+#ifdef CK_F_PR_DEC_64_ZERO
+ TEST(uint64_t, 64);
+#endif
+
+#ifdef CK_F_PR_DEC_32_ZERO
+ TEST(uint32_t, 32);
+#endif
+
+#ifdef CK_F_PR_DEC_16_ZERO
+ TEST(uint16_t, 16);
+#endif
+
+#ifdef CK_F_PR_DEC_8_ZERO
+ TEST(uint8_t, 8);
+#endif
+
+#ifdef CK_F_PR_DEC_UINT_ZERO
+ TEST(unsigned int, uint);
+#endif
+
+#ifdef CK_F_PR_DEC_INT_ZERO
+ TEST(int, int);
+#endif
+
+#ifdef CK_F_PR_DEC_CHAR_ZERO
+ TEST(char, char);
+#endif
+
+ return (0);
+}
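
A short usage sketch for the interface this new test exercises, assuming the target defines CK_F_PR_DEC_UINT_ZERO; the _is_zero form and the ck_pr_inc_*_zero counterparts below follow the same shape:

    #include <stdbool.h>
    #include <stdio.h>

    #include <ck_pr.h>

    int
    main(void)
    {
            unsigned int refs = 1;
            bool zero;

    #ifdef CK_F_PR_DEC_UINT_ZERO
            /* Atomically decrement and learn whether the result reached zero. */
            ck_pr_dec_uint_zero(&refs, &zero);
            if (zero == true)
                    puts("last reference dropped");
    #endif
            return 0;
    }
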
diff --git a/regressions/ck_pr/validate/ck_pr_fence.c b/regressions/ck_pr/validate/ck_pr_fence.c
new file mode 100644
index 0000000..976a184
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_fence.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2009-2018 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <ck_pr.h>
+#include "../../common.h"
+
+int
+main(void)
+{
+ int r = 0;
+
+ /* Below serves as a marker. */
+ ck_pr_sub_int(&r, 31337);
+
+ /*
+ * This is a simple test to help ensure all fences compile or crash
+ * on target. Below are generated according to the underlying memory
+ * model's ordering.
+ */
+ ck_pr_fence_atomic();
+ ck_pr_fence_atomic_store();
+ ck_pr_fence_atomic_load();
+ ck_pr_fence_store_atomic();
+ ck_pr_fence_load_atomic();
+ ck_pr_fence_load();
+ ck_pr_fence_load_store();
+ ck_pr_fence_store();
+ ck_pr_fence_store_load();
+ ck_pr_fence_memory();
+ ck_pr_fence_release();
+ ck_pr_fence_acquire();
+ ck_pr_fence_acqrel();
+ ck_pr_fence_lock();
+ ck_pr_fence_unlock();
+
+ /* Below serves as a marker. */
+ ck_pr_sub_int(&r, 31337);
+
+	/* The following are generated assuming RMO. */
+ ck_pr_fence_strict_atomic();
+ ck_pr_fence_strict_atomic_store();
+ ck_pr_fence_strict_atomic_load();
+ ck_pr_fence_strict_store_atomic();
+ ck_pr_fence_strict_load_atomic();
+ ck_pr_fence_strict_load();
+ ck_pr_fence_strict_load_store();
+ ck_pr_fence_strict_store();
+ ck_pr_fence_strict_store_load();
+ ck_pr_fence_strict_memory();
+ ck_pr_fence_strict_release();
+ ck_pr_fence_strict_acquire();
+ ck_pr_fence_strict_acqrel();
+ ck_pr_fence_strict_lock();
+ ck_pr_fence_strict_unlock();
+ return 0;
+}
+
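
The new test only checks that every fence compiles and runs; in actual use the common pairing is a release fence on the producer side matched with an acquire fence on the consumer side. A minimal message-passing sketch with hypothetical flag/payload variables:

    #include <ck_pr.h>

    static unsigned int payload;
    static unsigned int flag;

    static void
    producer(void)
    {

            ck_pr_store_uint(&payload, 42);
            ck_pr_fence_release();          /* order payload before flag */
            ck_pr_store_uint(&flag, 1);
    }

    static unsigned int
    consumer(void)
    {

            while (ck_pr_load_uint(&flag) == 0)
                    ck_pr_stall();
            ck_pr_fence_acquire();          /* order flag before payload */

            return ck_pr_load_uint(&payload);
    }
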
diff --git a/regressions/ck_pr/validate/ck_pr_inc_zero.c b/regressions/ck_pr/validate/ck_pr_inc_zero.c
new file mode 100644
index 0000000..e74ffba
--- /dev/null
+++ b/regressions/ck_pr/validate/ck_pr_inc_zero.c
@@ -0,0 +1,105 @@
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include <ck_pr.h>
+
+#define EXPECT(ACTUAL, IS_ZERO, TYPE, INITIAL) do { \
+ TYPE expected = (TYPE)((TYPE)INITIAL + (TYPE)1); \
+ if ((ACTUAL) != expected) { \
+ printf("FAIL [ %" PRIx64" != %" PRIx64" ]\n", \
+ (uint64_t)(ACTUAL), \
+ (uint64_t)expected); \
+ exit(EXIT_FAILURE); \
+ } \
+ \
+ if ((IS_ZERO) != ((ACTUAL) == 0)) { \
+ printf("FAIL [ %s != %s ]\n", \
+ ((IS_ZERO) ? "true" : "false"), \
+ (((ACTUAL) == 0) ? "true" : "false")); \
+ exit(EXIT_FAILURE); \
+ } \
+ } while (0)
+
+#define TEST_ZERO(TYPE, SUFFIX) do { \
+ TYPE datum; \
+ bool is_zero; \
+ \
+ datum = 0; \
+ ck_pr_inc_##SUFFIX##_zero(&datum, &is_zero); \
+ EXPECT(datum, is_zero, TYPE, 0); \
+ \
+ datum = (TYPE)-1; \
+ ck_pr_inc_##SUFFIX##_zero(&datum, &is_zero); \
+ EXPECT(datum, is_zero, TYPE, -1); \
+ \
+ datum = (TYPE)1; \
+ ck_pr_inc_##SUFFIX##_zero(&datum, &is_zero); \
+ EXPECT(datum, is_zero, TYPE, 1); \
+ \
+ datum = (TYPE)2; \
+ ck_pr_inc_##SUFFIX##_zero(&datum, &is_zero); \
+ EXPECT(datum, is_zero, TYPE, 2); \
+ } while (0)
+
+#define TEST_IS_ZERO(TYPE, SUFFIX) do { \
+ TYPE datum; \
+ bool is_zero; \
+ \
+ datum = 0; \
+ is_zero = ck_pr_inc_##SUFFIX##_is_zero(&datum); \
+ EXPECT(datum, is_zero, TYPE, 0); \
+ \
+ datum = (TYPE)-1; \
+ is_zero = ck_pr_inc_##SUFFIX##_is_zero(&datum); \
+ EXPECT(datum, is_zero, TYPE, -1); \
+ \
+ datum = (TYPE)1; \
+ is_zero = ck_pr_inc_##SUFFIX##_is_zero(&datum); \
+ EXPECT(datum, is_zero, TYPE, 1); \
+ \
+ datum = (TYPE)2; \
+ is_zero = ck_pr_inc_##SUFFIX##_is_zero(&datum); \
+ EXPECT(datum, is_zero, TYPE, 2); \
+ } while (0)
+
+#define TEST(TYPE, SUFFIX) do { \
+ TEST_ZERO(TYPE, SUFFIX); \
+ TEST_IS_ZERO(TYPE, SUFFIX); \
+} while (0)
+
+int
+main(void)
+{
+
+#ifdef CK_F_PR_INC_64_ZERO
+ TEST(uint64_t, 64);
+#endif
+
+#ifdef CK_F_PR_INC_32_ZERO
+ TEST(uint32_t, 32);
+#endif
+
+#ifdef CK_F_PR_INC_16_ZERO
+ TEST(uint16_t, 16);
+#endif
+
+#ifdef CK_F_PR_INC_8_ZERO
+ TEST(uint8_t, 8);
+#endif
+
+#ifdef CK_F_PR_INC_UINT_ZERO
+ TEST(unsigned int, uint);
+#endif
+
+#ifdef CK_F_PR_INC_INT_ZERO
+ TEST(int, int);
+#endif
+
+#ifdef CK_F_PR_INC_CHAR_ZERO
+ TEST(char, char);
+#endif
+
+ return (0);
+}
diff --git a/regressions/ck_pr/validate/ck_pr_load.c b/regressions/ck_pr/validate/ck_pr_load.c
index a15acd0..d3b8520 100644
--- a/regressions/ck_pr/validate/ck_pr_load.c
+++ b/regressions/ck_pr/validate/ck_pr_load.c
@@ -118,6 +118,7 @@ rg_width(int m)
int
main(void)
{
+ void *ptr = (void *)(intptr_t)-1;
common_srand((unsigned int)getpid());
@@ -143,6 +144,11 @@ main(void)
ck_pr_load_64_2(&b, &a);
printf("%" PRIx64 ":%" PRIx64 "\n", a[0], a[1]);
#endif
+ printf("ck_pr_load_ptr: ");
+ if (ck_pr_load_ptr(&ptr) != (void *)(intptr_t)(-1))
+ printf("Failed : %p != %p\n", ck_pr_load_ptr(&ptr), (void *)(intptr_t)(-1));
+ else
+ printf("SUCCESS\n");
return (0);
}
diff --git a/regressions/ck_pr/validate/ck_pr_store.c b/regressions/ck_pr/validate/ck_pr_store.c
index e4b852b..e012b22 100644
--- a/regressions/ck_pr/validate/ck_pr_store.c
+++ b/regressions/ck_pr/validate/ck_pr_store.c
@@ -119,6 +119,8 @@ rg_width(int m)
int
main(void)
{
+ void *ptr;
+
#if defined(CK_F_PR_STORE_DOUBLE) && defined(CK_F_PR_LOAD_DOUBLE)
double d;
@@ -145,6 +147,12 @@ main(void)
#ifdef CK_F_PR_STORE_8
CK_PR_STORE_B(8);
#endif
+ printf("ck_pr_store_ptr: ");
+ ck_pr_store_ptr(&ptr, (void *)(intptr_t)-1);
+ if (ptr != (void *)(intptr_t)(-1))
+ printf("Failed : %p != %p\n", ptr, (void *)(intptr_t)-1);
+ else
+ printf("SUCCESS\n");
return (0);
}
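
The additions above round-trip a pointer through the atomic store and load primitives. Condensed, the check they perform is:

    #include <stdbool.h>
    #include <stdint.h>

    #include <ck_pr.h>

    static bool
    ptr_round_trip(void)
    {
            void *slot;

            ck_pr_store_ptr(&slot, (void *)(intptr_t)-1);
            return ck_pr_load_ptr(&slot) == (void *)(intptr_t)-1;
    }
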
diff --git a/regressions/ck_rhs/benchmark/parallel_bytestring.c b/regressions/ck_rhs/benchmark/parallel_bytestring.c
index a95d940..1c2d244 100644
--- a/regressions/ck_rhs/benchmark/parallel_bytestring.c
+++ b/regressions/ck_rhs/benchmark/parallel_bytestring.c
@@ -5,9 +5,9 @@
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- * 1. Redistributions of source code must retain the above copyrighs
+ * 1. Redistributions of source code must retain the above copyrights
* notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyrighs
+ * 2. Redistributions in binary form must reproduce the above copyrights
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
@@ -144,7 +144,7 @@ set_init(void)
ck_epoch_init(&epoch_hs);
- ck_epoch_register(&epoch_hs, &epoch_wr);
+ ck_epoch_register(&epoch_hs, &epoch_wr, NULL);
common_srand48((long int)time(NULL));
if (ck_rhs_init(&hs, mode, hs_hash, hs_compare, &my_allocator, 65536, common_lrand48()) == false) {
perror("ck_rhs_init");
@@ -231,7 +231,7 @@ reader(void *unused)
perror("WARNING: Failed to affine thread");
s = j = a = 0;
- ck_epoch_register(&epoch_hs, &epoch_record);
+ ck_epoch_register(&epoch_hs, &epoch_record, NULL);
for (;;) {
j++;
ck_epoch_begin(&epoch_record, NULL);
@@ -451,8 +451,8 @@ main(int argc, char *argv[])
ck_epoch_record_t epoch_temporary = epoch_wr;
ck_epoch_synchronize(&epoch_wr);
- fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
- "%u pending, %u peak, %lu reclamations\n\n",
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %u reclamations -> "
+ "%u pending, %u peak, %u reclamations\n\n",
epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
@@ -590,8 +590,8 @@ main(int argc, char *argv[])
epoch_temporary = epoch_wr;
ck_epoch_synchronize(&epoch_wr);
- fprintf(stderr, " '- Summary: %u pending, %u peak, %lu reclamations -> "
- "%u pending, %u peak, %lu reclamations\n\n",
+ fprintf(stderr, " '- Summary: %u pending, %u peak, %u reclamations -> "
+ "%u pending, %u peak, %u reclamations\n\n",
epoch_temporary.n_pending, epoch_temporary.n_peak, epoch_temporary.n_dispatch,
epoch_wr.n_pending, epoch_wr.n_peak, epoch_wr.n_dispatch);
return 0;
diff --git a/regressions/ck_rhs/benchmark/serial.c b/regressions/ck_rhs/benchmark/serial.c
index 18fa892..9689d2c 100644
--- a/regressions/ck_rhs/benchmark/serial.c
+++ b/regressions/ck_rhs/benchmark/serial.c
@@ -5,9 +5,9 @@
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- * 1. Redistributions of source code must retain the above copyrighs
+ * 1. Redistributions of source code must retain the above copyrights
* notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyrighs
+ * 2. Redistributions in binary form must reproduce the above copyrights
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
diff --git a/regressions/ck_rhs/validate/serial.c b/regressions/ck_rhs/validate/serial.c
index ef9365f..92caf18 100644
--- a/regressions/ck_rhs/validate/serial.c
+++ b/regressions/ck_rhs/validate/serial.c
@@ -5,9 +5,9 @@
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
- * 1. Redistributions of source code must retain the above copyrighs
+ * 1. Redistributions of source code must retain the above copyrights
* notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyrighs
+ * 2. Redistributions in binary form must reproduce the above copyrights
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
diff --git a/regressions/ck_ring/validate/Makefile b/regressions/ck_ring/validate/Makefile
index 0b68fad..f849a56 100644
--- a/regressions/ck_ring/validate/Makefile
+++ b/regressions/ck_ring/validate/Makefile
@@ -2,7 +2,7 @@
OBJECTS=ck_ring_spsc ck_ring_spmc ck_ring_spmc_template ck_ring_mpmc \
ck_ring_mpmc_template
-SIZE=16384
+SIZE=2048
all: $(OBJECTS)
diff --git a/regressions/ck_sequence/validate/ck_sequence.c b/regressions/ck_sequence/validate/ck_sequence.c
index e0bc700..47de852 100644
--- a/regressions/ck_sequence/validate/ck_sequence.c
+++ b/regressions/ck_sequence/validate/ck_sequence.c
@@ -122,7 +122,7 @@ main(int argc, char *argv[])
ck_error("Usage: ck_sequence <number of threads> <affinity delta>\n");
}
- n_threads = atoi(argv[1]);
+ n_threads = atoi(argv[1]) - 1;
if (n_threads <= 0) {
ck_error("ERROR: Number of threads must be greater than 0\n");
}
@@ -163,6 +163,8 @@ main(int argc, char *argv[])
counter++;
if (ck_pr_load_uint(&barrier) == 0)
break;
+
+ ck_pr_stall();
}
printf("%u updates made.\n", counter);
diff --git a/regressions/ck_spinlock/ck_hclh.h b/regressions/ck_spinlock/ck_hclh.h
index eb2e6eb..cdc0474 100644
--- a/regressions/ck_spinlock/ck_hclh.h
+++ b/regressions/ck_spinlock/ck_hclh.h
@@ -1,9 +1,16 @@
#define MAX(a,b) ((a) > (b) ? (a) : (b))
+
+#if CORES < 2
+#undef CORES
+#define CORES 2
+#endif
+
#define LOCK_NAME "ck_clh"
#define LOCK_DEFINE static ck_spinlock_hclh_t CK_CC_CACHELINE *glob_lock; \
static ck_spinlock_hclh_t CK_CC_CACHELINE *local_lock[CORES / 2]
+
#define LOCK_STATE ck_spinlock_hclh_t *na = malloc(MAX(sizeof(ck_spinlock_hclh_t), 64))
-#define LOCK ck_spinlock_hclh_lock(&glob_lock, &local_lock[(core % CORES) / 2], na)
+#define LOCK ck_spinlock_hclh_lock(&glob_lock, &local_lock[core % (CORES / 2)], na)
#define UNLOCK ck_spinlock_hclh_unlock(&na)
#define LOCK_INIT do { \
int _i; \
diff --git a/regressions/common.h b/regressions/common.h
index f67c2af..9cdc690 100644
--- a/regressions/common.h
+++ b/regressions/common.h
@@ -267,13 +267,11 @@ struct affinity {
#define AFFINITY_INITIALIZER {0, 0}
#ifdef __linux__
-#ifndef gettid
static pid_t
-gettid(void)
+common_gettid(void)
{
return syscall(__NR_gettid);
}
-#endif /* gettid */
CK_CC_UNUSED static int
aff_iterate(struct affinity *acb)
@@ -285,7 +283,10 @@ aff_iterate(struct affinity *acb)
CPU_ZERO(&s);
CPU_SET(c % CORES, &s);
- return sched_setaffinity(gettid(), sizeof(s), &s);
+ if (sched_setaffinity(common_gettid(), sizeof(s), &s) != 0)
+ perror("WARNING: Could not affine thread");
+
+ return 0;
}
CK_CC_UNUSED static int
@@ -297,7 +298,10 @@ aff_iterate_core(struct affinity *acb, unsigned int *core)
CPU_ZERO(&s);
CPU_SET((*core) % CORES, &s);
- return sched_setaffinity(gettid(), sizeof(s), &s);
+ if (sched_setaffinity(common_gettid(), sizeof(s), &s) != 0)
+ perror("WARNING: Could not affine thread");
+
+ return 0;
}
#elif defined(__MACH__)
CK_CC_UNUSED static int