author     Daniel Baumann <daniel.baumann@progress-linux.org>  2021-07-23 11:24:09 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2021-07-23 11:24:09 +0000
commit     e36b37583bebd229102f46c4ed7d2f6fad8697d4
tree       73937b6f051fcaaa1ccbdfbaa9f3a1f36bbedb9e
parent     Initial commit.
Adding upstream version 0.6.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'regressions/ck_ring')
-rw-r--r--  regressions/ck_ring/benchmark/Makefile                 14
-rw-r--r--  regressions/ck_ring/benchmark/latency.c               142
-rw-r--r--  regressions/ck_ring/validate/Makefile                  40
-rw-r--r--  regressions/ck_ring/validate/ck_ring_mpmc.c           448
-rw-r--r--  regressions/ck_ring/validate/ck_ring_mpmc_template.c  349
-rw-r--r--  regressions/ck_ring/validate/ck_ring_spmc.c           340
-rw-r--r--  regressions/ck_ring/validate/ck_ring_spmc_template.c  350
-rw-r--r--  regressions/ck_ring/validate/ck_ring_spsc.c           213
8 files changed, 1896 insertions, 0 deletions
diff --git a/regressions/ck_ring/benchmark/Makefile b/regressions/ck_ring/benchmark/Makefile
new file mode 100644
index 0000000..4087ed1
--- /dev/null
+++ b/regressions/ck_ring/benchmark/Makefile
@@ -0,0 +1,14 @@
+.PHONY: clean distribution
+
+OBJECTS=latency
+
+all: $(OBJECTS)
+
+latency: latency.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o latency latency.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=-D_GNU_SOURCE
diff --git a/regressions/ck_ring/benchmark/latency.c b/regressions/ck_ring/benchmark/latency.c
new file mode 100644
index 0000000..657be4d
--- /dev/null
+++ b/regressions/ck_ring/benchmark/latency.c
@@ -0,0 +1,142 @@
+#include <ck_ring.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS (128000)
+#endif
+
+struct entry {
+ int tid;
+ int value;
+};
+
+int
+main(int argc, char *argv[])
+{
+ int i, r, size;
+ uint64_t s, e, e_a, d_a;
+ struct entry entry = {0, 0};
+ ck_ring_buffer_t *buf;
+ ck_ring_t ring;
+
+ if (argc != 2) {
+ ck_error("Usage: latency <size>\n");
+ }
+
+ size = atoi(argv[1]);
+ if (size <= 4 || (size & (size - 1))) {
+ ck_error("ERROR: Size must be a power of 2 greater than 4.\n");
+ }
+
+ buf = malloc(sizeof(ck_ring_buffer_t) * size);
+ if (buf == NULL) {
+ ck_error("ERROR: Failed to allocate buffer\n");
+ }
+
+ ck_ring_init(&ring, size);
+
+ e_a = d_a = s = e = 0;
+ for (r = 0; r < ITERATIONS; r++) {
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_enqueue_spsc(&ring, buf, &entry);
+ ck_ring_enqueue_spsc(&ring, buf, &entry);
+ ck_ring_enqueue_spsc(&ring, buf, &entry);
+ ck_ring_enqueue_spsc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ e_a += (e - s) / 4;
+
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_dequeue_spsc(&ring, buf, &entry);
+ ck_ring_dequeue_spsc(&ring, buf, &entry);
+ ck_ring_dequeue_spsc(&ring, buf, &entry);
+ ck_ring_dequeue_spsc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ d_a += (e - s) / 4;
+ }
+
+ printf("spsc %10d %16" PRIu64 " %16" PRIu64 "\n", size, e_a / ITERATIONS, d_a / ITERATIONS);
+
+ e_a = d_a = s = e = 0;
+ for (r = 0; r < ITERATIONS; r++) {
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_enqueue_spmc(&ring, buf, &entry);
+ ck_ring_enqueue_spmc(&ring, buf, &entry);
+ ck_ring_enqueue_spmc(&ring, buf, &entry);
+ ck_ring_enqueue_spmc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ e_a += (e - s) / 4;
+
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_dequeue_spmc(&ring, buf, &entry);
+ ck_ring_dequeue_spmc(&ring, buf, &entry);
+ ck_ring_dequeue_spmc(&ring, buf, &entry);
+ ck_ring_dequeue_spmc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ d_a += (e - s) / 4;
+ }
+
+ printf("spmc %10d %16" PRIu64 " %16" PRIu64 "\n", size, e_a / ITERATIONS, d_a / ITERATIONS);
+
+ ck_ring_init(&ring, size);
+ e_a = d_a = s = e = 0;
+ for (r = 0; r < ITERATIONS; r++) {
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_enqueue_mpsc(&ring, buf, &entry);
+ ck_ring_enqueue_mpsc(&ring, buf, &entry);
+ ck_ring_enqueue_mpsc(&ring, buf, &entry);
+ ck_ring_enqueue_mpsc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ e_a += (e - s) / 4;
+
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_dequeue_mpsc(&ring, buf, &entry);
+ ck_ring_dequeue_mpsc(&ring, buf, &entry);
+ ck_ring_dequeue_mpsc(&ring, buf, &entry);
+ ck_ring_dequeue_mpsc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ d_a += (e - s) / 4;
+ }
+ printf("mpsc %10d %16" PRIu64 " %16" PRIu64 "\n", size, e_a / ITERATIONS, d_a / ITERATIONS);
+ ck_ring_init(&ring, size);
+ e_a = d_a = s = e = 0;
+ for (r = 0; r < ITERATIONS; r++) {
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_enqueue_mpmc(&ring, buf, &entry);
+ ck_ring_enqueue_mpmc(&ring, buf, &entry);
+ ck_ring_enqueue_mpmc(&ring, buf, &entry);
+ ck_ring_enqueue_mpmc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ e_a += (e - s) / 4;
+
+ for (i = 0; i < size / 4; i += 4) {
+ s = rdtsc();
+ ck_ring_dequeue_mpmc(&ring, buf, &entry);
+ ck_ring_dequeue_mpmc(&ring, buf, &entry);
+ ck_ring_dequeue_mpmc(&ring, buf, &entry);
+ ck_ring_dequeue_mpmc(&ring, buf, &entry);
+ e = rdtsc();
+ }
+ d_a += (e - s) / 4;
+ }
+ printf("mpmc %10d %16" PRIu64 " %16" PRIu64 "\n", size, e_a / ITERATIONS, d_a / ITERATIONS);
+ return (0);
+}
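
For reference, a minimal sketch of the SPSC interface that latency.c times; this example is not part of the commit, and the 1024-slot ring below is an arbitrary assumption. As the size check in latency.c implies, the slot count must be a power of two greater than 4, and the usable capacity is one slot less than that count. The benchmark itself is invoked as ./latency <size>, e.g. ./latency 4096.

#include <ck_ring.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	ck_ring_t ring;
	ck_ring_buffer_t *buf;
	int value = 42;
	int *out;
	unsigned int size = 1024;	/* assumed power-of-two slot count */

	buf = malloc(sizeof(ck_ring_buffer_t) * size);
	if (buf == NULL)
		return EXIT_FAILURE;

	ck_ring_init(&ring, size);

	/* Single producer: returns false once the ring is full. */
	if (ck_ring_enqueue_spsc(&ring, buf, &value) == false)
		return EXIT_FAILURE;

	/* Single consumer: the stored pointer is written through the last argument. */
	if (ck_ring_dequeue_spsc(&ring, buf, &out) == true)
		printf("dequeued %d\n", *out);

	free(buf);
	return 0;
}
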
diff --git a/regressions/ck_ring/validate/Makefile b/regressions/ck_ring/validate/Makefile
new file mode 100644
index 0000000..0b68fad
--- /dev/null
+++ b/regressions/ck_ring/validate/Makefile
@@ -0,0 +1,40 @@
+.PHONY: check clean distribution
+
+OBJECTS=ck_ring_spsc ck_ring_spmc ck_ring_spmc_template ck_ring_mpmc \
+ ck_ring_mpmc_template
+SIZE=16384
+
+all: $(OBJECTS)
+
+check: all
+ ./ck_ring_spsc $(CORES) 1 $(SIZE)
+ ./ck_ring_spmc $(CORES) 1 $(SIZE)
+ ./ck_ring_spmc_template $(CORES) 1 $(SIZE)
+ ./ck_ring_mpmc $(CORES) 1 $(SIZE)
+ ./ck_ring_mpmc_template $(CORES) 1 $(SIZE)
+
+ck_ring_spsc: ck_ring_spsc.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o ck_ring_spsc ck_ring_spsc.c \
+ ../../../src/ck_barrier_centralized.c
+
+ck_ring_spmc: ck_ring_spmc.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o ck_ring_spmc ck_ring_spmc.c \
+ ../../../src/ck_barrier_centralized.c
+
+ck_ring_mpmc: ck_ring_mpmc.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o ck_ring_mpmc ck_ring_mpmc.c \
+ ../../../src/ck_barrier_centralized.c
+
+ck_ring_mpmc_template: ck_ring_mpmc_template.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o ck_ring_mpmc_template ck_ring_mpmc_template.c \
+ ../../../src/ck_barrier_centralized.c
+
+ck_ring_spmc_template: ck_ring_spmc_template.c ../../../include/ck_ring.h
+ $(CC) $(CFLAGS) -o ck_ring_spmc_template ck_ring_spmc_template.c \
+ ../../../src/ck_barrier_centralized.c
+
+clean:
+ rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_ring/validate/ck_ring_mpmc.c b/regressions/ck_ring/validate/ck_ring_mpmc.c
new file mode 100644
index 0000000..66d7f39
--- /dev/null
+++ b/regressions/ck_ring/validate/ck_ring_mpmc.c
@@ -0,0 +1,448 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <ck_barrier.h>
+#include <ck_ring.h>
+#include <ck_spinlock.h>
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+ ck_ring_buffer_t *buffer;
+};
+
+struct entry {
+ unsigned long value_long;
+ unsigned int magic;
+ unsigned int ref;
+ int tid;
+ int value;
+};
+
+static int nthr;
+static ck_ring_t *ring;
+static ck_ring_t ring_mpmc CK_CC_CACHELINE;
+static ck_ring_t ring_mw CK_CC_CACHELINE;
+static struct affinity a;
+static int size;
+static int eb;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static struct context *_context;
+
+static unsigned int global_counter;
+
+static void *
+test_mpmc(void *c)
+{
+ unsigned int observed = 0;
+ unsigned int enqueue = 0;
+	unsigned int seed = 0;	/* initialize PRNG state read by common_rand_r below */
+ int i, k, j, tid;
+ struct context *context = c;
+ ck_ring_buffer_t *buffer;
+ unsigned int *csp;
+
+ csp = malloc(sizeof(*csp) * nthr);
+ assert(csp != NULL);
+
+ memset(csp, 0, sizeof(*csp) * nthr);
+
+ buffer = context->buffer;
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_int(&eb, 1);
+ ck_pr_fence_memory();
+ while (ck_pr_load_int(&eb) != nthr - 1);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ struct entry *o = NULL;
+ int spin;
+
+			/* Try to dequeue; if the ring is empty, produce a new node instead. */
+ if (j & 1) {
+ if (ck_ring_dequeue_mpmc(&ring_mw, buffer, &o) == false)
+ o = NULL;
+ } else {
+ if (ck_ring_trydequeue_mpmc(&ring_mw, buffer, &o) == false)
+ o = NULL;
+ }
+
+ if (o == NULL) {
+ o = malloc(sizeof(*o));
+ if (o == NULL)
+ continue;
+
+ o->value_long = (unsigned long)ck_pr_faa_uint(&global_counter, 1) + 1;
+
+ o->magic = 0xdead;
+ o->ref = 0;
+ o->tid = tid;
+
+ if (ck_ring_enqueue_mpmc(&ring_mw, buffer, o) == false) {
+ free(o);
+ } else {
+ enqueue++;
+ }
+
+ continue;
+ }
+
+ observed++;
+
+ if (o->magic != 0xdead) {
+ ck_error("[%p] (%x)\n",
+ (void *)o, o->magic);
+ }
+
+ o->magic = 0xbeef;
+
+ if (csp[o->tid] >= o->value_long)
+ ck_error("queue semantics violated: %lu <= %lu\n", o->value_long, csp[o->tid]);
+
+ csp[o->tid] = o->value_long;
+
+ if (ck_pr_faa_uint(&o->ref, 1) != 0) {
+ ck_error("[%p] We dequeued twice.\n", (void *)o);
+ }
+
+ if ((i % 4) == 0) {
+ spin = common_rand_r(&seed) % 16384;
+ for (k = 0; k < spin; k++) {
+ ck_pr_stall();
+ }
+ }
+
+ free(o);
+ }
+ }
+
+ fprintf(stderr, "[%d] dequeue=%u enqueue=%u\n", tid, observed, enqueue);
+ return NULL;
+}
+
+static void *
+test_spmc(void *c)
+{
+ unsigned int observed = 0;
+ unsigned long previous = 0;
+	unsigned int seed = 0;	/* initialize PRNG state read by common_rand_r below */
+ int i, k, j, tid;
+ struct context *context = c;
+ ck_ring_buffer_t *buffer;
+
+ buffer = context->buffer;
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_int(&eb, 1);
+ ck_pr_fence_memory();
+ while (ck_pr_load_int(&eb) != nthr - 1);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ struct entry *o;
+ int spin;
+
+ /* Keep trying until we encounter at least one node. */
+ if (j & 1) {
+ while (ck_ring_dequeue_mpmc(&ring_mpmc, buffer,
+ &o) == false);
+ } else {
+ while (ck_ring_trydequeue_mpmc(&ring_mpmc, buffer,
+ &o) == false);
+ }
+
+ observed++;
+ if (o->value < 0
+ || o->value != o->tid
+ || o->magic != 0xdead
+ || (previous != 0 && previous >= o->value_long)) {
+ ck_error("[0x%p] (%x) (%d, %d) >< (0, %d)\n",
+ (void *)o, o->magic, o->tid, o->value, size);
+ }
+
+ o->magic = 0xbeef;
+ o->value = -31337;
+ o->tid = -31338;
+ previous = o->value_long;
+
+ if (ck_pr_faa_uint(&o->ref, 1) != 0) {
+ ck_error("[%p] We dequeued twice.\n", (void *)o);
+ }
+
+ if ((i % 4) == 0) {
+ spin = common_rand_r(&seed) % 16384;
+ for (k = 0; k < spin; k++) {
+ ck_pr_stall();
+ }
+ }
+
+ free(o);
+ }
+ }
+
+ fprintf(stderr, "[%d] Observed %u\n", tid, observed);
+ return NULL;
+}
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ unsigned int s;
+ int i, j;
+ bool r;
+ ck_ring_buffer_t *buffer = context->buffer;
+ ck_barrier_centralized_state_t sense =
+ CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ if (context->tid == 0) {
+ struct entry *entries;
+
+ entries = malloc(sizeof(struct entry) * size);
+ assert(entries != NULL);
+
+ if (ck_ring_size(ring) != 0) {
+ ck_error("More entries than expected: %u > 0\n",
+ ck_ring_size(ring));
+ }
+
+ for (i = 0; i < size; i++) {
+ entries[i].value = i;
+ entries[i].tid = 0;
+
+ if (true) {
+ r = ck_ring_enqueue_mpmc(ring, buffer,
+ entries + i);
+ } else {
+ r = ck_ring_enqueue_mpmc_size(ring, buffer,
+ entries + i, &s);
+
+ if ((int)s != i) {
+ ck_error("Size is %u, expected %d.\n",
+					    s, i);
+ }
+ }
+
+ assert(r != false);
+ }
+
+ if (ck_ring_size(ring) != (unsigned int)size) {
+			ck_error("Fewer entries than expected: %u < %d\n",
+ ck_ring_size(ring), size);
+ }
+
+ if (ck_ring_capacity(ring) != ck_ring_size(ring) + 1) {
+			ck_error("Capacity mismatch: size %u, capacity %u\n",
+ ck_ring_size(ring), ck_ring_capacity(ring));
+ }
+ }
+
+ /*
+ * Wait for all threads. The idea here is to maximize the contention.
+ */
+ ck_barrier_centralized(&barrier, &sense, nthr);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ buffer = _context[context->previous].buffer;
+ while (ck_ring_dequeue_mpmc(ring + context->previous,
+ buffer, &entry) == false);
+
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("[%u:%p] %u != %u\n",
+ context->tid, (void *)entry, entry->tid, context->previous);
+ }
+
+ if (entry->value < 0 || entry->value >= size) {
+				ck_error("[%u:%p] %d </> %d\n",
+				    context->tid, (void *)entry, entry->value, size);
+ }
+
+ entry->tid = context->tid;
+ buffer = context->buffer;
+
+ if (true) {
+ r = ck_ring_enqueue_mpmc(ring + context->tid,
+ buffer, entry);
+ } else {
+ r = ck_ring_enqueue_mpmc_size(ring + context->tid,
+ buffer, entry, &s);
+
+ if ((int)s >= size) {
+ ck_error("Size %u out of range of %d\n",
+ s, size);
+ }
+ }
+ assert(r == true);
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ unsigned long l;
+ pthread_t *thread;
+ ck_ring_buffer_t *buffer;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size >= 4 && (size & size - 1) == 0);
+ size -= 1;
+
+ ring = malloc(sizeof(ck_ring_t) * nthr);
+ assert(ring);
+
+ _context = malloc(sizeof(*_context) * nthr);
+ assert(_context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+ fprintf(stderr, "SPSC test:");
+ for (i = 0; i < nthr; i++) {
+ _context[i].tid = i;
+ if (i == 0) {
+ _context[i].previous = nthr - 1;
+ _context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ _context[i].next = 0;
+ _context[i].previous = i - 1;
+ } else {
+ _context[i].next = i + 1;
+ _context[i].previous = i - 1;
+ }
+
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(ck_ring_buffer_t) * (size + 1));
+ _context[i].buffer = buffer;
+ ck_ring_init(ring + i, size + 1);
+ r = pthread_create(thread + i, NULL, test, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, " done\n");
+
+ fprintf(stderr, "SPMC test:\n");
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(void *) * (size + 1));
+ ck_ring_init(&ring_mpmc, size + 1);
+ for (i = 0; i < nthr - 1; i++) {
+ _context[i].buffer = buffer;
+ r = pthread_create(thread + i, NULL, test_spmc, _context + i);
+ assert(r == 0);
+ }
+
+ for (l = 0; l < (unsigned long)size * ITERATIONS * (nthr - 1) ; l++) {
+ struct entry *entry = malloc(sizeof *entry);
+
+ assert(entry != NULL);
+ entry->value_long = l;
+ entry->value = (int)l;
+ entry->tid = (int)l;
+ entry->magic = 0xdead;
+ entry->ref = 0;
+
+ /* Wait until queue is not full. */
+ if (l & 1) {
+ while (ck_ring_enqueue_mpmc(&ring_mpmc,
+ buffer,
+ entry) == false)
+ ck_pr_stall();
+ } else {
+ unsigned int s;
+
+ while (ck_ring_enqueue_mpmc_size(&ring_mpmc,
+ buffer, entry, &s) == false) {
+ ck_pr_stall();
+ }
+
+ if ((int)s >= (size * ITERATIONS * (nthr - 1))) {
+ ck_error("MPMC: Unexpected size of %u\n", s);
+ }
+ }
+ }
+
+ for (i = 0; i < nthr - 1; i++)
+ pthread_join(thread[i], NULL);
+ ck_pr_store_int(&eb, 0);
+ fprintf(stderr, "MPMC test:\n");
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(void *) * (size + 1));
+ ck_ring_init(&ring_mw, size + 1);
+ for (i = 0; i < nthr - 1; i++) {
+ _context[i].buffer = buffer;
+ r = pthread_create(thread + i, NULL, test_mpmc, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr - 1; i++)
+ pthread_join(thread[i], NULL);
+
+ return (0);
+}
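
A condensed sketch of the multi-producer/multi-consumer calls that test_mpmc stresses; it is not part of the commit, and the fixed SLOTS value is an assumption. Any thread may call either side. The trydequeue variant may return false under contention as well as when the ring is empty, which is why test_mpmc treats a false return as "nothing obtained this pass" and produces a node instead.

#include <ck_ring.h>
#include <stdbool.h>

#define SLOTS 1024	/* assumed power-of-two slot count */

struct entry {
	int tid;
	int value;
};

static ck_ring_t ring;
static ck_ring_buffer_t buffer[SLOTS];

/* Any thread may produce; false means the ring is currently full. */
static bool
produce(struct entry *e)
{

	return ck_ring_enqueue_mpmc(&ring, buffer, e);
}

/* Any thread may consume; NULL means empty or contended. */
static struct entry *
consume(void)
{
	struct entry *e;

	if (ck_ring_trydequeue_mpmc(&ring, buffer, &e) == false)
		return NULL;

	return e;
}

int
main(void)
{
	static struct entry e = { .tid = 0, .value = 1 };

	ck_ring_init(&ring, SLOTS);

	if (produce(&e) == false)
		return 1;

	return consume() == &e ? 0 : 1;
}
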
diff --git a/regressions/ck_ring/validate/ck_ring_mpmc_template.c b/regressions/ck_ring/validate/ck_ring_mpmc_template.c
new file mode 100644
index 0000000..f076e9a
--- /dev/null
+++ b/regressions/ck_ring/validate/ck_ring_mpmc_template.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <ck_barrier.h>
+#include <ck_ring.h>
+#include <ck_spinlock.h>
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+ struct entry **buffer;
+};
+
+struct entry {
+ unsigned long value_long;
+ unsigned int magic;
+ unsigned int ref;
+ int tid;
+ int value;
+};
+
+CK_RING_PROTOTYPE(entry, entry *)
+
+static int nthr;
+static ck_ring_t *ring;
+static ck_ring_t ring_spmc CK_CC_CACHELINE;
+static struct affinity a;
+static int size;
+static int eb;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static struct context *_context;
+
+static void *
+test_spmc(void *c)
+{
+ unsigned int observed = 0;
+ unsigned long previous = 0;
+	unsigned int seed = 0;	/* initialize PRNG state read by common_rand_r below */
+ int i, k, j, tid;
+ struct context *context = c;
+ struct entry **buffer;
+
+ buffer = context->buffer;
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_int(&eb, 1);
+ ck_pr_fence_memory();
+ while (ck_pr_load_int(&eb) != nthr - 1);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ struct entry *o;
+ int spin;
+
+ /* Keep trying until we encounter at least one node. */
+ if (j & 1) {
+ while (CK_RING_DEQUEUE_MPMC(entry,
+ &ring_spmc, buffer, &o) == false);
+ } else {
+ while (CK_RING_TRYDEQUEUE_MPMC(entry,
+ &ring_spmc, buffer, &o) == false);
+ }
+
+ observed++;
+ if (o->value < 0
+ || o->value != o->tid
+ || o->magic != 0xdead
+ || (previous != 0 && previous >= o->value_long)) {
+ ck_error("[0x%p] (%x) (%d, %d) >< (0, %d)\n",
+ (void *)o, o->magic, o->tid, o->value, size);
+ }
+
+ o->magic = 0xbeef;
+ o->value = -31337;
+ o->tid = -31338;
+ previous = o->value_long;
+
+ if (ck_pr_faa_uint(&o->ref, 1) != 0) {
+ ck_error("[%p] We dequeued twice.\n", (void *)o);
+ }
+
+ if ((i % 4) == 0) {
+ spin = common_rand_r(&seed) % 16384;
+ for (k = 0; k < spin; k++) {
+ ck_pr_stall();
+ }
+ }
+
+ free(o);
+ }
+ }
+
+ fprintf(stderr, "[%d] Observed %u\n", tid, observed);
+ return NULL;
+}
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ unsigned int s;
+ int i, j;
+ bool r;
+ struct entry **buffer = context->buffer;
+ ck_barrier_centralized_state_t sense =
+ CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ if (context->tid == 0) {
+ struct entry **entries;
+
+ entries = malloc(sizeof(struct entry *) * size);
+ assert(entries != NULL);
+
+ if (ck_ring_size(ring) != 0) {
+ ck_error("More entries than expected: %u > 0\n",
+ ck_ring_size(ring));
+ }
+
+ for (i = 0; i < size; i++) {
+ entries[i] = malloc(sizeof(struct entry));
+ assert(entries[i] != NULL);
+
+ entries[i]->value = i;
+ entries[i]->tid = 0;
+
+ if (i & 1) {
+ r = CK_RING_ENQUEUE_MPMC(entry, ring, buffer,
+ &entries[i]);
+ } else {
+ r = CK_RING_ENQUEUE_MPMC_SIZE(entry, ring,
+ buffer, &entries[i], &s);
+
+ if ((int)s != i) {
+ ck_error("Size is %u, expected %d.\n",
+					    s, i);
+ }
+ }
+
+ assert(r != false);
+ }
+
+ if (ck_ring_size(ring) != (unsigned int)size) {
+			ck_error("Fewer entries than expected: %u < %d\n",
+ ck_ring_size(ring), size);
+ }
+
+ if (ck_ring_capacity(ring) != ck_ring_size(ring) + 1) {
+			ck_error("Capacity mismatch: size %u, capacity %u\n",
+ ck_ring_size(ring), ck_ring_capacity(ring));
+ }
+ }
+
+ /*
+ * Wait for all threads. The idea here is to maximize the contention.
+ */
+ ck_barrier_centralized(&barrier, &sense, nthr);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ buffer = _context[context->previous].buffer;
+ while (CK_RING_DEQUEUE_MPMC(entry,
+ ring + context->previous,
+ buffer, &entry) == false);
+
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("[%u:%p] %u != %u\n",
+ context->tid, (void *)entry,
+ entry->tid, context->previous);
+ }
+
+ if (entry->value < 0 || entry->value >= size) {
+				ck_error("[%u:%p] %d </> %d\n",
+				    context->tid, (void *)entry,
+				    entry->value, size);
+ }
+
+ entry->tid = context->tid;
+ buffer = context->buffer;
+
+ if (i & 1) {
+ r = CK_RING_ENQUEUE_MPMC(entry,
+ ring + context->tid,
+ buffer, &entry);
+ } else {
+ r = CK_RING_ENQUEUE_MPMC_SIZE(entry,
+ ring + context->tid,
+ buffer, &entry, &s);
+
+ if ((int)s >= size) {
+ ck_error("Size %u out of range of %d\n",
+ s, size);
+ }
+ }
+ assert(r == true);
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ unsigned long l;
+ pthread_t *thread;
+ struct entry **buffer;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size >= 4 && (size & size - 1) == 0);
+ size -= 1;
+
+ ring = malloc(sizeof(ck_ring_t) * nthr);
+ assert(ring);
+
+ _context = malloc(sizeof(*_context) * nthr);
+ assert(_context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
+ fprintf(stderr, "SPSC test:");
+ for (i = 0; i < nthr; i++) {
+ _context[i].tid = i;
+ if (i == 0) {
+ _context[i].previous = nthr - 1;
+ _context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ _context[i].next = 0;
+ _context[i].previous = i - 1;
+ } else {
+ _context[i].next = i + 1;
+ _context[i].previous = i - 1;
+ }
+
+ buffer = malloc(sizeof(struct entry *) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(struct entry *) * (size + 1));
+ _context[i].buffer = buffer;
+ ck_ring_init(ring + i, size + 1);
+ r = pthread_create(thread + i, NULL, test, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, " done\n");
+
+ fprintf(stderr, "MPMC test:\n");
+ buffer = malloc(sizeof(struct entry *) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(struct entry *) * (size + 1));
+ ck_ring_init(&ring_spmc, size + 1);
+ for (i = 0; i < nthr - 1; i++) {
+ _context[i].buffer = buffer;
+ r = pthread_create(thread + i, NULL, test_spmc, _context + i);
+ assert(r == 0);
+ }
+
+ for (l = 0; l < (unsigned long)size * ITERATIONS * (nthr - 1) ; l++) {
+ struct entry *entry = malloc(sizeof *entry);
+
+ assert(entry != NULL);
+ entry->value_long = l;
+ entry->value = (int)l;
+ entry->tid = (int)l;
+ entry->magic = 0xdead;
+ entry->ref = 0;
+
+ /* Wait until queue is not full. */
+ if (l & 1) {
+ while (CK_RING_ENQUEUE_MPMC(entry, &ring_spmc,
+ buffer, &entry) == false) {
+ ck_pr_stall();
+ }
+ } else {
+ unsigned int s;
+
+ while (CK_RING_ENQUEUE_MPMC_SIZE(entry, &ring_spmc,
+ buffer, &entry, &s) == false) {
+ ck_pr_stall();
+ }
+
+ if ((int)s >= (size * ITERATIONS * (nthr - 1))) {
+ ck_error("MPMC: Unexpected size of %u\n", s);
+ }
+ }
+ }
+
+ for (i = 0; i < nthr - 1; i++)
+ pthread_join(thread[i], NULL);
+
+ return 0;
+}
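
The template variant above relies on CK_RING_PROTOTYPE to generate type-safe wrappers, with the CK_RING_*_MPMC macros dispatching to them; the ring buffer then becomes an array of the declared slot type (struct entry * here) rather than ck_ring_buffer_t. A minimal sketch of that pattern, not part of the commit, with an assumed 16-slot ring:

#include <ck_ring.h>

struct entry {
	int value;
};

/* Generates typed enqueue/dequeue wrappers for slots of type struct entry *. */
CK_RING_PROTOTYPE(entry, entry *)

#define SLOTS 16	/* assumed power-of-two slot count */

static ck_ring_t ring;
static struct entry *slots[SLOTS];

int
main(void)
{
	struct entry in = { .value = 1 };
	struct entry *p = &in;
	struct entry *out;

	ck_ring_init(&ring, SLOTS);

	/* The macros take the address of the slot value, mirroring &entries[i] above. */
	if (CK_RING_ENQUEUE_MPMC(entry, &ring, slots, &p) == false)
		return 1;

	if (CK_RING_DEQUEUE_MPMC(entry, &ring, slots, &out) == false)
		return 1;

	return out == &in ? 0 : 1;
}
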
diff --git a/regressions/ck_ring/validate/ck_ring_spmc.c b/regressions/ck_ring/validate/ck_ring_spmc.c
new file mode 100644
index 0000000..161c0d8
--- /dev/null
+++ b/regressions/ck_ring/validate/ck_ring_spmc.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <ck_barrier.h>
+#include <ck_ring.h>
+#include <ck_spinlock.h>
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+ ck_ring_buffer_t *buffer;
+};
+
+struct entry {
+ unsigned long value_long;
+ unsigned int magic;
+ unsigned int ref;
+ int tid;
+ int value;
+};
+
+static int nthr;
+static ck_ring_t *ring;
+static ck_ring_t ring_spmc CK_CC_CACHELINE;
+static struct affinity a;
+static int size;
+static int eb;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static struct context *_context;
+
+static void *
+test_spmc(void *c)
+{
+ unsigned int observed = 0;
+ unsigned long previous = 0;
+	unsigned int seed = 0;	/* initialize PRNG state read by common_rand_r below */
+ int i, k, j, tid;
+ struct context *context = c;
+ ck_ring_buffer_t *buffer;
+
+ buffer = context->buffer;
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_int(&eb, 1);
+ ck_pr_fence_memory();
+ while (ck_pr_load_int(&eb) != nthr - 1);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ struct entry *o;
+ int spin;
+
+ /* Keep trying until we encounter at least one node. */
+ if (j & 1) {
+ while (ck_ring_dequeue_spmc(&ring_spmc, buffer,
+ &o) == false);
+ } else {
+ while (ck_ring_trydequeue_spmc(&ring_spmc, buffer,
+ &o) == false);
+ }
+
+ observed++;
+ if (o->value < 0
+ || o->value != o->tid
+ || o->magic != 0xdead
+ || (previous != 0 && previous >= o->value_long)) {
+ ck_error("[0x%p] (%x) (%d, %d) >< (0, %d)\n",
+ (void *)o, o->magic, o->tid, o->value, size);
+ }
+
+ o->magic = 0xbeef;
+ o->value = -31337;
+ o->tid = -31338;
+ previous = o->value_long;
+
+ if (ck_pr_faa_uint(&o->ref, 1) != 0) {
+ ck_error("[%p] We dequeued twice.\n", (void *)o);
+ }
+
+ if ((i % 4) == 0) {
+ spin = common_rand_r(&seed) % 16384;
+ for (k = 0; k < spin; k++) {
+ ck_pr_stall();
+ }
+ }
+
+ free(o);
+ }
+ }
+
+ fprintf(stderr, "[%d] Observed %u\n", tid, observed);
+ return NULL;
+}
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ unsigned int s;
+ int i, j;
+ bool r;
+ ck_ring_buffer_t *buffer = context->buffer;
+ ck_barrier_centralized_state_t sense =
+ CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ if (context->tid == 0) {
+ struct entry *entries;
+
+ entries = malloc(sizeof(struct entry) * size);
+ assert(entries != NULL);
+
+ if (ck_ring_size(ring) != 0) {
+ ck_error("More entries than expected: %u > 0\n",
+ ck_ring_size(ring));
+ }
+
+ for (i = 0; i < size; i++) {
+ entries[i].value = i;
+ entries[i].tid = 0;
+
+ if (i & 1) {
+ r = ck_ring_enqueue_spmc(ring, buffer,
+ entries + i);
+ } else {
+ r = ck_ring_enqueue_spmc_size(ring, buffer,
+ entries + i, &s);
+
+ if ((int)s != i) {
+ ck_error("Size is %u, expected %d.\n",
+					    s, i);
+ }
+ }
+
+ assert(r != false);
+ }
+
+ if (ck_ring_size(ring) != (unsigned int)size) {
+			ck_error("Fewer entries than expected: %u < %d\n",
+ ck_ring_size(ring), size);
+ }
+
+ if (ck_ring_capacity(ring) != ck_ring_size(ring) + 1) {
+			ck_error("Capacity mismatch: size %u, capacity %u\n",
+ ck_ring_size(ring), ck_ring_capacity(ring));
+ }
+ }
+
+ /*
+ * Wait for all threads. The idea here is to maximize the contention.
+ */
+ ck_barrier_centralized(&barrier, &sense, nthr);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ buffer = _context[context->previous].buffer;
+ while (ck_ring_dequeue_spmc(ring + context->previous,
+ buffer, &entry) == false);
+
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("[%u:%p] %u != %u\n",
+ context->tid, (void *)entry, entry->tid, context->previous);
+ }
+
+ if (entry->value < 0 || entry->value >= size) {
+				ck_error("[%u:%p] %d </> %d\n",
+				    context->tid, (void *)entry, entry->value, size);
+ }
+
+ entry->tid = context->tid;
+ buffer = context->buffer;
+
+ if (i & 1) {
+ r = ck_ring_enqueue_spmc(ring + context->tid,
+ buffer, entry);
+ } else {
+ r = ck_ring_enqueue_spmc_size(ring + context->tid,
+ buffer, entry, &s);
+
+ if ((int)s >= size) {
+ ck_error("Size %u out of range of %d\n",
+ s, size);
+ }
+ }
+ assert(r == true);
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ unsigned long l;
+ pthread_t *thread;
+ ck_ring_buffer_t *buffer;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size >= 4 && (size & size - 1) == 0);
+ size -= 1;
+
+ ring = malloc(sizeof(ck_ring_t) * nthr);
+ assert(ring);
+
+ _context = malloc(sizeof(*_context) * nthr);
+ assert(_context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
+ fprintf(stderr, "SPSC test:");
+ for (i = 0; i < nthr; i++) {
+ _context[i].tid = i;
+ if (i == 0) {
+ _context[i].previous = nthr - 1;
+ _context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ _context[i].next = 0;
+ _context[i].previous = i - 1;
+ } else {
+ _context[i].next = i + 1;
+ _context[i].previous = i - 1;
+ }
+
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(ck_ring_buffer_t) * (size + 1));
+ _context[i].buffer = buffer;
+ ck_ring_init(ring + i, size + 1);
+ r = pthread_create(thread + i, NULL, test, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, " done\n");
+
+ fprintf(stderr, "SPMC test:\n");
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(void *) * (size + 1));
+ ck_ring_init(&ring_spmc, size + 1);
+ for (i = 0; i < nthr - 1; i++) {
+ _context[i].buffer = buffer;
+ r = pthread_create(thread + i, NULL, test_spmc, _context + i);
+ assert(r == 0);
+ }
+
+ for (l = 0; l < (unsigned long)size * ITERATIONS * (nthr - 1) ; l++) {
+ struct entry *entry = malloc(sizeof *entry);
+
+ assert(entry != NULL);
+ entry->value_long = l;
+ entry->value = (int)l;
+ entry->tid = (int)l;
+ entry->magic = 0xdead;
+ entry->ref = 0;
+
+ /* Wait until queue is not full. */
+ if (l & 1) {
+ while (ck_ring_enqueue_spmc(&ring_spmc,
+ buffer,
+ entry) == false)
+ ck_pr_stall();
+ } else {
+ unsigned int s;
+
+ while (ck_ring_enqueue_spmc_size(&ring_spmc,
+ buffer, entry, &s) == false) {
+ ck_pr_stall();
+ }
+
+ if ((int)s >= (size * ITERATIONS * (nthr - 1))) {
+				ck_error("SPMC: Unexpected size of %u\n", s);
+ }
+ }
+ }
+
+ for (i = 0; i < nthr - 1; i++)
+ pthread_join(thread[i], NULL);
+
+ return (0);
+}
+
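
The SPMC discipline validated above allows exactly one producer thread with any number of concurrent consumers; the enqueue side is therefore not safe to call from more than one thread at a time. A minimal sketch, not part of the commit, with an assumed 16-slot ring:

#include <ck_ring.h>
#include <stdbool.h>

#define SLOTS 16	/* assumed power-of-two slot count */

static ck_ring_t ring;
static ck_ring_buffer_t buffer[SLOTS];

/* Only the single designated producer thread may call this. */
static bool
producer_put(void *object)
{

	return ck_ring_enqueue_spmc(&ring, buffer, object);
}

/* Any number of consumer threads may call this concurrently. */
static bool
consumer_get(void **object)
{

	return ck_ring_dequeue_spmc(&ring, buffer, object);
}

int
main(void)
{
	int token = 1;
	void *out;

	ck_ring_init(&ring, SLOTS);

	if (producer_put(&token) == false)
		return 1;

	return (consumer_get(&out) == true && out == &token) ? 0 : 1;
}
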
diff --git a/regressions/ck_ring/validate/ck_ring_spmc_template.c b/regressions/ck_ring/validate/ck_ring_spmc_template.c
new file mode 100644
index 0000000..bbc75c1
--- /dev/null
+++ b/regressions/ck_ring/validate/ck_ring_spmc_template.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <ck_barrier.h>
+#include <ck_ring.h>
+#include <ck_spinlock.h>
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+ struct entry **buffer;
+};
+
+struct entry {
+ unsigned long value_long;
+ unsigned int magic;
+ unsigned int ref;
+ int tid;
+ int value;
+};
+
+CK_RING_PROTOTYPE(entry, entry *)
+
+static int nthr;
+static ck_ring_t *ring;
+static ck_ring_t ring_spmc CK_CC_CACHELINE;
+static struct affinity a;
+static int size;
+static int eb;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static struct context *_context;
+
+static void *
+test_spmc(void *c)
+{
+ unsigned int observed = 0;
+ unsigned long previous = 0;
+	unsigned int seed = 0;	/* initialize PRNG state read by common_rand_r below */
+ int i, k, j, tid;
+ struct context *context = c;
+ struct entry **buffer;
+
+ buffer = context->buffer;
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ tid = ck_pr_faa_int(&eb, 1);
+ ck_pr_fence_memory();
+ while (ck_pr_load_int(&eb) != nthr - 1);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ struct entry *o;
+ int spin;
+
+ /* Keep trying until we encounter at least one node. */
+ if (j & 1) {
+ while (CK_RING_DEQUEUE_SPMC(entry,
+ &ring_spmc, buffer, &o) == false);
+ } else {
+ while (CK_RING_TRYDEQUEUE_SPMC(entry,
+ &ring_spmc, buffer, &o) == false);
+ }
+
+ observed++;
+ if (o->value < 0
+ || o->value != o->tid
+ || o->magic != 0xdead
+ || (previous != 0 && previous >= o->value_long)) {
+ ck_error("[0x%p] (%x) (%d, %d) >< (0, %d)\n",
+ (void *)o, o->magic, o->tid, o->value, size);
+ }
+
+ o->magic = 0xbeef;
+ o->value = -31337;
+ o->tid = -31338;
+ previous = o->value_long;
+
+ if (ck_pr_faa_uint(&o->ref, 1) != 0) {
+ ck_error("[%p] We dequeued twice.\n", (void *)o);
+ }
+
+ if ((i % 4) == 0) {
+ spin = common_rand_r(&seed) % 16384;
+ for (k = 0; k < spin; k++) {
+ ck_pr_stall();
+ }
+ }
+
+ free(o);
+ }
+ }
+
+ fprintf(stderr, "[%d] Observed %u\n", tid, observed);
+ return NULL;
+}
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ unsigned int s;
+ int i, j;
+ bool r;
+ struct entry **buffer = context->buffer;
+ ck_barrier_centralized_state_t sense =
+ CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ if (context->tid == 0) {
+ struct entry **entries;
+
+ entries = malloc(sizeof(struct entry *) * size);
+ assert(entries != NULL);
+
+ if (ck_ring_size(ring) != 0) {
+ ck_error("More entries than expected: %u > 0\n",
+ ck_ring_size(ring));
+ }
+
+ for (i = 0; i < size; i++) {
+ entries[i] = malloc(sizeof(struct entry));
+ assert(entries[i] != NULL);
+
+ entries[i]->value = i;
+ entries[i]->tid = 0;
+
+ if (i & 1) {
+ r = CK_RING_ENQUEUE_SPMC(entry, ring, buffer,
+ &entries[i]);
+ } else {
+ r = CK_RING_ENQUEUE_SPMC_SIZE(entry, ring,
+ buffer, &entries[i], &s);
+
+ if ((int)s != i) {
+ ck_error("Size is %u, expected %d.\n",
+					    s, i);
+ }
+ }
+
+ assert(r != false);
+ }
+
+			ck_error("Fewer entries than expected: %u < %d\n",
+ ck_error("Less entries than expected: %u < %d\n",
+ ck_ring_size(ring), size);
+ }
+
+ if (ck_ring_capacity(ring) != ck_ring_size(ring) + 1) {
+			ck_error("Capacity mismatch: size %u, capacity %u\n",
+ ck_ring_size(ring), ck_ring_capacity(ring));
+ }
+ }
+
+ /*
+ * Wait for all threads. The idea here is to maximize the contention.
+ */
+ ck_barrier_centralized(&barrier, &sense, nthr);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ buffer = _context[context->previous].buffer;
+ while (CK_RING_DEQUEUE_SPMC(entry,
+ ring + context->previous,
+ buffer, &entry) == false);
+
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("[%u:%p] %u != %u\n",
+ context->tid, (void *)entry,
+ entry->tid, context->previous);
+ }
+
+ if (entry->value < 0 || entry->value >= size) {
+				ck_error("[%u:%p] %d </> %d\n",
+				    context->tid, (void *)entry,
+				    entry->value, size);
+ }
+
+ entry->tid = context->tid;
+ buffer = context->buffer;
+
+ if (i & 1) {
+ r = CK_RING_ENQUEUE_SPMC(entry,
+ ring + context->tid,
+ buffer, &entry);
+ } else {
+ r = CK_RING_ENQUEUE_SPMC_SIZE(entry,
+ ring + context->tid,
+ buffer, &entry, &s);
+
+ if ((int)s >= size) {
+ ck_error("Size %u out of range of %d\n",
+ s, size);
+ }
+ }
+ assert(r == true);
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ unsigned long l;
+ pthread_t *thread;
+ struct entry **buffer;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size >= 4 && (size & size - 1) == 0);
+ size -= 1;
+
+ ring = malloc(sizeof(ck_ring_t) * nthr);
+ assert(ring);
+
+ _context = malloc(sizeof(*_context) * nthr);
+ assert(_context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
+ fprintf(stderr, "SPSC test:");
+ for (i = 0; i < nthr; i++) {
+ _context[i].tid = i;
+ if (i == 0) {
+ _context[i].previous = nthr - 1;
+ _context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ _context[i].next = 0;
+ _context[i].previous = i - 1;
+ } else {
+ _context[i].next = i + 1;
+ _context[i].previous = i - 1;
+ }
+
+ buffer = malloc(sizeof(struct entry *) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(struct entry *) * (size + 1));
+ _context[i].buffer = buffer;
+ ck_ring_init(ring + i, size + 1);
+ r = pthread_create(thread + i, NULL, test, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ fprintf(stderr, " done\n");
+
+ fprintf(stderr, "SPMC test:\n");
+ buffer = malloc(sizeof(struct entry *) * (size + 1));
+ assert(buffer);
+ memset(buffer, 0, sizeof(struct entry *) * (size + 1));
+ ck_ring_init(&ring_spmc, size + 1);
+ for (i = 0; i < nthr - 1; i++) {
+ _context[i].buffer = buffer;
+ r = pthread_create(thread + i, NULL, test_spmc, _context + i);
+ assert(r == 0);
+ }
+
+ for (l = 0; l < (unsigned long)size * ITERATIONS * (nthr - 1) ; l++) {
+ struct entry *entry = malloc(sizeof *entry);
+
+ assert(entry != NULL);
+ entry->value_long = l;
+ entry->value = (int)l;
+ entry->tid = (int)l;
+ entry->magic = 0xdead;
+ entry->ref = 0;
+
+ /* Wait until queue is not full. */
+ if (l & 1) {
+ while (CK_RING_ENQUEUE_SPMC(entry, &ring_spmc,
+ buffer, &entry) == false) {
+ ck_pr_stall();
+ }
+ } else {
+ unsigned int s;
+
+ while (CK_RING_ENQUEUE_SPMC_SIZE(entry, &ring_spmc,
+ buffer, &entry, &s) == false) {
+ ck_pr_stall();
+ }
+
+ if ((int)s >= (size * ITERATIONS * (nthr - 1))) {
+				ck_error("SPMC: Unexpected size of %u\n", s);
+ }
+ }
+ }
+
+ for (i = 0; i < nthr - 1; i++)
+ pthread_join(thread[i], NULL);
+
+ return 0;
+}
+
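
Both template validators also exercise the _SIZE enqueue variants, which additionally report the ring's occupancy as observed at enqueue time (the comparison against the insertion index i above depends on this). A self-contained sketch of that call, not part of the commit, again with an assumed 16-slot ring:

#include <ck_ring.h>

struct entry {
	int value;
};

CK_RING_PROTOTYPE(entry, entry *)

#define SLOTS 16	/* assumed power-of-two slot count */

static ck_ring_t ring;
static struct entry *slots[SLOTS];

int
main(void)
{
	struct entry in = { .value = 1 };
	struct entry *p = &in;
	unsigned int s;

	ck_ring_init(&ring, SLOTS);

	if (CK_RING_ENQUEUE_SPMC_SIZE(entry, &ring, slots, &p, &s) == false)
		return 1;

	/* s holds the occupancy the enqueue observed; it is always below capacity. */
	return s < ck_ring_capacity(&ring) ? 0 : 1;
}
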
diff --git a/regressions/ck_ring/validate/ck_ring_spsc.c b/regressions/ck_ring/validate/ck_ring_spsc.c
new file mode 100644
index 0000000..910f7e6
--- /dev/null
+++ b/regressions/ck_ring/validate/ck_ring_spsc.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2011-2015 Samy Al Bahra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+
+#include <ck_barrier.h>
+#include <ck_ring.h>
+#include "../../common.h"
+
+#ifndef ITERATIONS
+#define ITERATIONS 128
+#endif
+
+struct context {
+ unsigned int tid;
+ unsigned int previous;
+ unsigned int next;
+ void *buffer;
+};
+
+struct entry {
+ int tid;
+ int value;
+};
+
+static int nthr;
+static ck_ring_t *ring;
+static struct affinity a;
+static int size;
+static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;
+static struct context *_context;
+
+static void *
+test(void *c)
+{
+ struct context *context = c;
+ struct entry *entry;
+ unsigned int s;
+ int i, j;
+ bool r;
+ ck_barrier_centralized_state_t sense =
+ CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
+ ck_ring_buffer_t *buffer;
+
+ if (aff_iterate(&a)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ buffer = context->buffer;
+ if (context->tid == 0) {
+ struct entry *entries;
+
+ entries = malloc(sizeof(struct entry) * size);
+ assert(entries != NULL);
+
+ if (ck_ring_size(ring) != 0) {
+ ck_error("More entries than expected: %u > 0\n",
+ ck_ring_size(ring));
+ }
+
+ for (i = 0; i < size; i++) {
+ entries[i].value = i;
+ entries[i].tid = 0;
+
+ if (i & 1) {
+ r = ck_ring_enqueue_spsc(ring, buffer,
+ entries + i);
+ } else {
+ r = ck_ring_enqueue_spsc_size(ring,
+ buffer, entries + i, &s);
+
+ if ((int)s != i) {
+ ck_error("Size is %u, expected %d\n",
+					    s, i);
+ }
+ }
+
+ assert(r != false);
+ }
+
+ if (ck_ring_size(ring) != (unsigned int)size) {
+			ck_error("Fewer entries than expected: %u < %d\n",
+ ck_ring_size(ring), size);
+ }
+
+ if (ck_ring_capacity(ring) != ck_ring_size(ring) + 1) {
+			ck_error("Capacity mismatch: size %u, capacity %u\n",
+ ck_ring_size(ring), ck_ring_capacity(ring));
+ }
+ }
+
+ ck_barrier_centralized(&barrier, &sense, nthr);
+
+ for (i = 0; i < ITERATIONS; i++) {
+ for (j = 0; j < size; j++) {
+ buffer = _context[context->previous].buffer;
+ while (ck_ring_dequeue_spsc(ring + context->previous,
+ buffer, &entry) == false);
+
+ if (context->previous != (unsigned int)entry->tid) {
+ ck_error("[%u:%p] %u != %u\n",
+ context->tid, (void *)entry, entry->tid, context->previous);
+ }
+
+ if (entry->value != j) {
+				ck_error("[%u:%p] %d != %d\n",
+				    context->tid, (void *)entry, entry->value, j);
+ }
+
+ entry->tid = context->tid;
+ buffer = context->buffer;
+ if (i & 1) {
+ r = ck_ring_enqueue_spsc(ring + context->tid,
+ buffer, entry);
+ } else {
+ r = ck_ring_enqueue_spsc_size(ring +
+ context->tid, buffer, entry, &s);
+
+ if ((int)s >= size) {
+ ck_error("Size %u is out of range %d\n",
+ s, size);
+ }
+ }
+ assert(r == true);
+ }
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int i, r;
+ ck_ring_buffer_t *buffer;
+ pthread_t *thread;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <threads> <affinity delta> <size>\n");
+ }
+
+ a.request = 0;
+ a.delta = atoi(argv[2]);
+
+ nthr = atoi(argv[1]);
+ assert(nthr >= 1);
+
+ size = atoi(argv[3]);
+ assert(size >= 4 && (size & size - 1) == 0);
+ size -= 1;
+
+ ring = malloc(sizeof(ck_ring_t) * nthr);
+ assert(ring);
+
+ _context = malloc(sizeof(*_context) * nthr);
+ assert(_context);
+
+ thread = malloc(sizeof(pthread_t) * nthr);
+ assert(thread);
+
+ for (i = 0; i < nthr; i++) {
+ _context[i].tid = i;
+ if (i == 0) {
+ _context[i].previous = nthr - 1;
+ _context[i].next = i + 1;
+ } else if (i == nthr - 1) {
+ _context[i].next = 0;
+ _context[i].previous = i - 1;
+ } else {
+ _context[i].next = i + 1;
+ _context[i].previous = i - 1;
+ }
+
+ buffer = malloc(sizeof(ck_ring_buffer_t) * (size + 1));
+ assert(buffer);
+ _context[i].buffer = buffer;
+ ck_ring_init(ring + i, size + 1);
+ r = pthread_create(thread + i, NULL, test, _context + i);
+ assert(r == 0);
+ }
+
+ for (i = 0; i < nthr; i++)
+ pthread_join(thread[i], NULL);
+
+ return (0);
+}
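
Every validator asserts the same size/capacity invariant: a ring initialized with n slots reports ck_ring_capacity() == n but holds at most n - 1 entries, one slot staying free to distinguish full from empty; hence the capacity == size + 1 checks after enqueuing size entries. A short sketch of the invariant, not part of the commit, with an assumed 16-slot ring:

#include <ck_ring.h>
#include <stdlib.h>

int
main(void)
{
	ck_ring_t ring;
	ck_ring_buffer_t *buffer;
	unsigned int n = 16;	/* assumed power-of-two slot count */
	unsigned int i;
	int token;

	buffer = malloc(sizeof(ck_ring_buffer_t) * n);
	if (buffer == NULL)
		return 1;

	ck_ring_init(&ring, n);

	/* n - 1 enqueues succeed... */
	for (i = 0; i < n - 1; i++) {
		if (ck_ring_enqueue_spsc(&ring, buffer, &token) == false)
			return 1;
	}

	/* ...and the n-th fails, because one slot always stays free. */
	if (ck_ring_enqueue_spsc(&ring, buffer, &token) == true)
		return 1;

	/* The relation every validator checks once the ring is full. */
	if (ck_ring_capacity(&ring) != ck_ring_size(&ring) + 1)
		return 1;

	free(buffer);
	return 0;
}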