summaryrefslogtreecommitdiffstats
path: root/regressions/ck_cohort
diff options
context:
space:
mode:
Diffstat (limited to 'regressions/ck_cohort')
-rw-r--r--regressions/ck_cohort/benchmark/Makefile17
-rw-r--r--regressions/ck_cohort/benchmark/ck_cohort.c8
-rw-r--r--regressions/ck_cohort/benchmark/throughput.c239
-rw-r--r--regressions/ck_cohort/ck_cohort.h35
-rw-r--r--regressions/ck_cohort/validate/Makefile17
-rw-r--r--regressions/ck_cohort/validate/validate.c205
6 files changed, 521 insertions, 0 deletions
diff --git a/regressions/ck_cohort/benchmark/Makefile b/regressions/ck_cohort/benchmark/Makefile
new file mode 100644
index 0000000..6af18b9
--- /dev/null
+++ b/regressions/ck_cohort/benchmark/Makefile
@@ -0,0 +1,17 @@
+.PHONY: all clean
+
+OBJECTS=ck_cohort.THROUGHPUT ck_cohort.LATENCY
+
+all: $(OBJECTS)
+
+ck_cohort.THROUGHPUT: throughput.c
+ $(CC) $(CFLAGS) -o ck_cohort.THROUGHPUT throughput.c -lm
+
+ck_cohort.LATENCY: ck_cohort.c
+ $(CC) -DLATENCY $(CFLAGS) -o ck_cohort.LATENCY ck_cohort.c
+
+clean:
+ rm -rf *.dSYM *.exe $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE -lm
diff --git a/regressions/ck_cohort/benchmark/ck_cohort.c b/regressions/ck_cohort/benchmark/ck_cohort.c
new file mode 100644
index 0000000..954b616
--- /dev/null
+++ b/regressions/ck_cohort/benchmark/ck_cohort.c
@@ -0,0 +1,8 @@
+#include "../ck_cohort.h"
+
+#include <ck_cohort.h>
+#ifdef THROUGHPUT
+#include "../../ck_spinlock/benchmark/throughput.h"
+#elif defined(LATENCY)
+#include "../../ck_spinlock/benchmark/latency.h"
+#endif
diff --git a/regressions/ck_cohort/benchmark/throughput.c b/regressions/ck_cohort/benchmark/throughput.c
new file mode 100644
index 0000000..7c4776d
--- /dev/null
+++ b/regressions/ck_cohort/benchmark/throughput.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * Copyright 2013 Brendon Scheinman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <ck_pr.h>
+#include <ck_cohort.h>
+#include <ck_md.h>
+#include <ck_spinlock.h>
+
+#include "../../common.h"
+
+#define max(x, y) (((x) > (y)) ? (x) : (y))
+
+static struct affinity a;
+static unsigned int ready;
+
+struct counters {
+ uint64_t value;
+} CK_CC_CACHELINE;
+
+static struct counters *count;
+static uint64_t nthr;
+static unsigned int n_cohorts;
+static unsigned int barrier;
+static int critical CK_CC_CACHELINE;
+
+static void
+ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+
+ (void)context;
+ ck_spinlock_fas_lock(lock);
+ return;
+}
+
+static void
+ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+
+ (void)context;
+ ck_spinlock_fas_unlock(lock);
+ return;
+}
+
+static bool
+ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+
+ (void)context;
+ return ck_spinlock_fas_locked(lock);
+}
+
+CK_COHORT_PROTOTYPE(basic,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context)
+
+struct cohort_record {
+ CK_COHORT_INSTANCE(basic) cohort;
+} CK_CC_CACHELINE;
+static struct cohort_record *cohorts;
+
+static ck_spinlock_t global_lock = CK_SPINLOCK_INITIALIZER;
+
+struct block {
+ unsigned int tid;
+};
+
+static void *
+fairness(void *null)
+{
+ struct block *context = null;
+ unsigned int i = context->tid;
+ volatile int j;
+ long int base;
+ unsigned int core;
+ CK_COHORT_INSTANCE(basic) *cohort;
+
+
+ if (aff_iterate_core(&a, &core)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ cohort = &((cohorts + (core / (int)(a.delta)) % n_cohorts)->cohort);
+
+ while (ck_pr_load_uint(&ready) == 0);
+
+ ck_pr_inc_uint(&barrier);
+ while (ck_pr_load_uint(&barrier) != nthr);
+
+ while (ck_pr_load_uint(&ready)) {
+ CK_COHORT_LOCK(basic, cohort, NULL, NULL);
+
+ count[i].value++;
+ if (critical) {
+ base = common_lrand48() % critical;
+ for (j = 0; j < base; j++);
+ }
+
+ CK_COHORT_UNLOCK(basic, cohort, NULL, NULL);
+ }
+
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ uint64_t v, d;
+ unsigned int i;
+ pthread_t *threads;
+ struct block *context;
+ ck_spinlock_t *local_lock;
+
+ if (argc != 5) {
+ ck_error("Usage: ck_cohort <number of cohorts> <threads per cohort> "
+ "<affinity delta> <critical section>\n");
+ }
+
+ n_cohorts = atoi(argv[1]);
+ if (n_cohorts <= 0) {
+ ck_error("ERROR: Number of cohorts must be greater than 0\n");
+ }
+
+ nthr = n_cohorts * atoi(argv[2]);
+ if (nthr <= 0) {
+ ck_error("ERROR: Number of threads must be greater than 0\n");
+ }
+
+ critical = atoi(argv[4]);
+ if (critical < 0) {
+ ck_error("ERROR: critical section cannot be negative\n");
+ }
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ cohorts = malloc(sizeof(struct cohort_record) * n_cohorts);
+ if (cohorts == NULL) {
+ ck_error("ERROR: Could not allocate cohort structures\n");
+ }
+
+ context = malloc(sizeof(struct block) * nthr);
+ if (context == NULL) {
+ ck_error("ERROR: Could not allocate thread contexts\n");
+ }
+
+ a.delta = atoi(argv[2]);
+ a.request = 0;
+
+ count = malloc(sizeof(*count) * nthr);
+ if (count == NULL) {
+ ck_error("ERROR: Could not create acquisition buffer\n");
+ }
+ memset(count, 0, sizeof(*count) * nthr);
+
+ fprintf(stderr, "Creating cohorts...");
+ for (i = 0 ; i < n_cohorts ; i++) {
+ local_lock = malloc(max(CK_MD_CACHELINE, sizeof(ck_spinlock_t)));
+ if (local_lock == NULL) {
+ ck_error("ERROR: Could not allocate local lock\n");
+ }
+ CK_COHORT_INIT(basic, &((cohorts + i)->cohort), &global_lock, local_lock,
+ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
+ local_lock = NULL;
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Creating threads (fairness)...");
+ for (i = 0; i < nthr; i++) {
+ context[i].tid = i;
+ if (pthread_create(&threads[i], NULL, fairness, context + i)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ ck_pr_store_uint(&ready, 1);
+ common_sleep(10);
+ ck_pr_store_uint(&ready, 0);
+
+ fprintf(stderr, "Waiting for threads to finish acquisition regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done\n\n");
+
+ for (i = 0, v = 0; i < nthr; i++) {
+ printf("%d %15" PRIu64 "\n", i, count[i].value);
+ v += count[i].value;
+ }
+
+ printf("\n# total : %15" PRIu64 "\n", v);
+ printf("# throughput : %15" PRIu64 " a/s\n", (v /= nthr) / 10);
+
+ for (i = 0, d = 0; i < nthr; i++)
+ d += (count[i].value - v) * (count[i].value - v);
+
+ printf("# average : %15" PRIu64 "\n", v);
+ printf("# deviation : %.2f (%.2f%%)\n\n", sqrt(d / nthr), (sqrt(d / nthr) / v) * 100.00);
+
+ return 0;
+}
diff --git a/regressions/ck_cohort/ck_cohort.h b/regressions/ck_cohort/ck_cohort.h
new file mode 100644
index 0000000..b0d7f0a
--- /dev/null
+++ b/regressions/ck_cohort/ck_cohort.h
@@ -0,0 +1,35 @@
+#define LOCK_NAME "ck_cohort"
+#define LOCK_DEFINE \
+ static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER; \
+ static ck_spinlock_fas_t local_fas_lock = CK_SPINLOCK_FAS_INITIALIZER; \
+ static void \
+ ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context) \
+ { \
+ (void)context; \
+ ck_spinlock_fas_lock(lock); \
+ } \
+ \
+ static void \
+ ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context) \
+ { \
+ (void)context; \
+ ck_spinlock_fas_unlock(lock); \
+ } \
+ \
+ static bool \
+ ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context) \
+ { \
+ (void)context; \
+ return ck_spinlock_fas_locked(lock); \
+ } \
+ CK_COHORT_PROTOTYPE(fas_fas, \
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context, \
+ ck_spinlock_fas_locked_with_context, ck_spinlock_fas_lock_with_context, \
+ ck_spinlock_fas_unlock_with_context, ck_spinlock_fas_locked_with_context) \
+ static CK_COHORT_INSTANCE(fas_fas) CK_CC_CACHELINE cohort = CK_COHORT_INITIALIZER
+
+
+#define LOCK_INIT CK_COHORT_INIT(fas_fas, &cohort, &global_fas_lock, &local_fas_lock, \
+ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT)
+#define LOCK CK_COHORT_LOCK(fas_fas, &cohort, NULL, NULL)
+#define UNLOCK CK_COHORT_UNLOCK(fas_fas, &cohort, NULL, NULL)
diff --git a/regressions/ck_cohort/validate/Makefile b/regressions/ck_cohort/validate/Makefile
new file mode 100644
index 0000000..145af3a
--- /dev/null
+++ b/regressions/ck_cohort/validate/Makefile
@@ -0,0 +1,17 @@
+.PHONY: check clean distribution
+
+OBJECTS=validate
+
+all: $(OBJECTS)
+
+validate: validate.c ../../../include/ck_cohort.h
+ $(CC) $(CFLAGS) -o validate validate.c
+
+check: all
+ ./validate `expr $(CORES) / 2` 2 1
+
+clean:
+ rm -rf *.dSYM *.exe *~ *.o $(OBJECTS)
+
+include ../../../build/regressions.build
+CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE
diff --git a/regressions/ck_cohort/validate/validate.c b/regressions/ck_cohort/validate/validate.c
new file mode 100644
index 0000000..cffbf77
--- /dev/null
+++ b/regressions/ck_cohort/validate/validate.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2013-2015 Samy Al Bahra.
+ * Copyright 2013 Brendon Scheinman.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <ck_pr.h>
+#include <ck_cohort.h>
+#include <ck_spinlock.h>
+
+#include "../../common.h"
+
+#ifndef ITERATE
+#define ITERATE 1000000
+#endif
+
+static struct affinity a;
+static unsigned int locked;
+static int nthr;
+static ck_spinlock_fas_t global_fas_lock = CK_SPINLOCK_FAS_INITIALIZER;
+
+static void
+ck_spinlock_fas_lock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_fas_lock(lock);
+}
+
+static void
+ck_spinlock_fas_unlock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ ck_spinlock_fas_unlock(lock);
+}
+
+static bool
+ck_spinlock_fas_locked_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ return ck_spinlock_fas_locked(lock);
+}
+
+static bool
+ck_spinlock_fas_trylock_with_context(ck_spinlock_fas_t *lock, void *context)
+{
+ (void)context;
+ return ck_spinlock_fas_trylock(lock);
+}
+
+CK_COHORT_TRYLOCK_PROTOTYPE(fas_fas,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context,
+ ck_spinlock_fas_locked_with_context, ck_spinlock_fas_trylock_with_context,
+ ck_spinlock_fas_lock_with_context, ck_spinlock_fas_unlock_with_context,
+ ck_spinlock_fas_locked_with_context, ck_spinlock_fas_trylock_with_context)
+static CK_COHORT_INSTANCE(fas_fas) *cohorts;
+static int n_cohorts;
+
+static void *
+thread(void *null CK_CC_UNUSED)
+{
+ int i = ITERATE;
+ unsigned int l;
+ unsigned int core;
+ CK_COHORT_INSTANCE(fas_fas) *cohort;
+
+ if (aff_iterate_core(&a, &core)) {
+ perror("ERROR: Could not affine thread");
+ exit(EXIT_FAILURE);
+ }
+
+ cohort = cohorts + (core / (int)(a.delta)) % n_cohorts;
+
+ while (i--) {
+
+ if (i & 1) {
+ CK_COHORT_LOCK(fas_fas, cohort, NULL, NULL);
+ } else {
+ while (CK_COHORT_TRYLOCK(fas_fas, cohort, NULL, NULL, NULL) == false) {
+ ck_pr_stall();
+ }
+ }
+
+ {
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+ ck_pr_inc_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 8) {
+			ck_error("ERROR [WR:%d]: %u != 8\n", __LINE__, l);
+ }
+
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+ ck_pr_dec_uint(&locked);
+
+ l = ck_pr_load_uint(&locked);
+ if (l != 0) {
+ ck_error("ERROR [WR:%d]: %u != 0\n", __LINE__, l);
+ }
+ }
+ CK_COHORT_UNLOCK(fas_fas, cohort, NULL, NULL);
+ }
+
+ return (NULL);
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t *threads;
+ int threads_per_cohort;
+ ck_spinlock_fas_t *local_lock;
+ int i;
+
+ if (argc != 4) {
+ ck_error("Usage: validate <number of cohorts> <threads per cohort> <affinity delta>\n");
+ }
+
+ n_cohorts = atoi(argv[1]);
+ if (n_cohorts <= 0) {
+ fprintf(stderr, "setting number of cohorts per thread to 1\n");
+ n_cohorts = 1;
+ }
+
+ threads_per_cohort = atoi(argv[2]);
+ if (threads_per_cohort <= 0) {
+ ck_error("ERROR: Threads per cohort must be greater than 0\n");
+ }
+
+ nthr = n_cohorts * threads_per_cohort;
+
+ threads = malloc(sizeof(pthread_t) * nthr);
+ if (threads == NULL) {
+ ck_error("ERROR: Could not allocate thread structures\n");
+ }
+
+ a.delta = atoi(argv[3]);
+
+ fprintf(stderr, "Creating cohorts...");
+ cohorts = malloc(sizeof(CK_COHORT_INSTANCE(fas_fas)) * n_cohorts);
+ for (i = 0 ; i < n_cohorts ; i++) {
+ local_lock = malloc(sizeof(ck_spinlock_fas_t));
+ CK_COHORT_INIT(fas_fas, cohorts + i, &global_fas_lock, local_lock,
+ CK_COHORT_DEFAULT_LOCAL_PASS_LIMIT);
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Creating threads...");
+ for (i = 0; i < nthr; i++) {
+ if (pthread_create(&threads[i], NULL, thread, NULL)) {
+ ck_error("ERROR: Could not create thread %d\n", i);
+ }
+ }
+ fprintf(stderr, "done\n");
+
+ fprintf(stderr, "Waiting for threads to finish correctness regression...");
+ for (i = 0; i < nthr; i++)
+ pthread_join(threads[i], NULL);
+ fprintf(stderr, "done (passed)\n");
+
+ return (0);
+}
+