diff options
Diffstat (limited to 'regressions/ck_epoch/validate')
-rw-r--r-- | regressions/ck_epoch/validate/ck_epoch_call.c | 16 | ||||
-rw-r--r-- | regressions/ck_epoch/validate/ck_epoch_poll.c | 37 | ||||
-rw-r--r-- | regressions/ck_epoch/validate/ck_epoch_section.c | 9 | ||||
-rw-r--r-- | regressions/ck_epoch/validate/ck_epoch_section_2.c | 21 | ||||
-rw-r--r-- | regressions/ck_epoch/validate/ck_epoch_synchronize.c | 40 | ||||
-rw-r--r-- | regressions/ck_epoch/validate/ck_stack.c | 4 | ||||
-rw-r--r-- | regressions/ck_epoch/validate/torture.c | 28 |
7 files changed, 96 insertions, 59 deletions
diff --git a/regressions/ck_epoch/validate/ck_epoch_call.c b/regressions/ck_epoch/validate/ck_epoch_call.c index 29e0df8..1c274e0 100644 --- a/regressions/ck_epoch/validate/ck_epoch_call.c +++ b/regressions/ck_epoch/validate/ck_epoch_call.c @@ -37,6 +37,7 @@ static void cb(ck_epoch_entry_t *p) { + /* Test that we can reregister the callback. */ if (counter == 0) ck_epoch_call(&record[1], p, cb); @@ -50,15 +51,22 @@ int main(void) { ck_epoch_entry_t entry; + ck_epoch_entry_t another; - ck_epoch_register(&epoch, &record[0]); - ck_epoch_register(&epoch, &record[1]); + ck_epoch_register(&epoch, &record[0], NULL); + ck_epoch_register(&epoch, &record[1], NULL); ck_epoch_call(&record[1], &entry, cb); ck_epoch_barrier(&record[1]); ck_epoch_barrier(&record[1]); - if (counter != 2) - ck_error("Expected counter value 2, read %u.\n", counter); + + /* Make sure that strict works. */ + ck_epoch_call_strict(&record[1], &entry, cb); + ck_epoch_call_strict(&record[1], &another, cb); + ck_epoch_barrier(&record[1]); + + if (counter != 4) + ck_error("Expected counter value 4, read %u.\n", counter); return 0; } diff --git a/regressions/ck_epoch/validate/ck_epoch_poll.c b/regressions/ck_epoch/validate/ck_epoch_poll.c index aec6dd0..6f782ee 100644 --- a/regressions/ck_epoch/validate/ck_epoch_poll.c +++ b/regressions/ck_epoch/validate/ck_epoch_poll.c @@ -86,10 +86,14 @@ static void * read_thread(void *unused CK_CC_UNUSED) { unsigned int j; - ck_epoch_record_t record CK_CC_CACHELINE; + ck_epoch_record_t *record CK_CC_CACHELINE; ck_stack_entry_t *cursor, *n; - ck_epoch_register(&stack_epoch, &record); + record = malloc(sizeof *record); + if (record == NULL) + ck_error("record allocation failure"); + + ck_epoch_register(&stack_epoch, record, NULL); if (aff_iterate(&a)) { perror("ERROR: failed to affine thread"); @@ -108,7 +112,7 @@ read_thread(void *unused CK_CC_UNUSED) j = 0; for (;;) { - ck_epoch_begin(&record, NULL); + ck_epoch_begin(record, NULL); CK_STACK_FOREACH(&stack, cursor) { if 
(cursor == NULL) continue; @@ -116,7 +120,7 @@ read_thread(void *unused CK_CC_UNUSED) n = CK_STACK_NEXT(cursor); j += ck_pr_load_ptr(&n) != NULL; } - ck_epoch_end(&record, NULL); + ck_epoch_end(record, NULL); if (j != 0 && ck_pr_load_uint(&readers) == 0) ck_pr_store_uint(&readers, 1); @@ -138,10 +142,13 @@ write_thread(void *unused CK_CC_UNUSED) { struct node **entry, *e; unsigned int i, j, tid; - ck_epoch_record_t record; + ck_epoch_record_t *record; ck_stack_entry_t *s; - ck_epoch_register(&stack_epoch, &record); + record = malloc(sizeof *record); + if (record == NULL) + ck_error("record allocation failure"); + ck_epoch_register(&stack_epoch, record, NULL); if (aff_iterate(&a)) { perror("ERROR: failed to affine thread"); @@ -178,23 +185,23 @@ write_thread(void *unused CK_CC_UNUSED) } for (i = 0; i < PAIRS_S; i++) { - ck_epoch_begin(&record, NULL); + ck_epoch_begin(record, NULL); s = ck_stack_pop_upmc(&stack); e = stack_container(s); - ck_epoch_end(&record, NULL); + ck_epoch_end(record, NULL); - ck_epoch_call(&record, &e->epoch_entry, destructor); - ck_epoch_poll(&record); + ck_epoch_call(record, &e->epoch_entry, destructor); + ck_epoch_poll(record); } } - ck_epoch_barrier(&record); + ck_epoch_barrier(record); if (tid == 0) { - fprintf(stderr, "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b[W] Peak: %u (%2.2f%%)\n Reclamations: %lu\n\n", - record.n_peak, - (double)record.n_peak / ((double)PAIRS_S * ITERATE_S) * 100, - record.n_dispatch); + fprintf(stderr, "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b[W] Peak: %u (%2.2f%%)\n Reclamations: %u\n\n", + record->n_peak, + (double)record->n_peak / ((double)PAIRS_S * ITERATE_S) * 100, + record->n_dispatch); } ck_pr_inc_uint(&e_barrier); diff --git a/regressions/ck_epoch/validate/ck_epoch_section.c b/regressions/ck_epoch/validate/ck_epoch_section.c index 12bcca1..7b76d1c 100644 --- a/regressions/ck_epoch/validate/ck_epoch_section.c +++ b/regressions/ck_epoch/validate/ck_epoch_section.c @@ -46,8 +46,8 @@ setup_test(void) { 
ck_epoch_init(&epc); - ck_epoch_register(&epc, &record); - ck_epoch_register(&epc, &record2); + ck_epoch_register(&epc, &record, NULL); + ck_epoch_register(&epc, &record2, NULL); cleanup_calls = 0; return; @@ -88,7 +88,8 @@ test_simple_read_section(void) ck_epoch_begin(&record, &section); ck_epoch_call(&record, &entry, cleanup); assert(cleanup_calls == 0); - ck_epoch_end(&record, &section); + if (ck_epoch_end(&record, &section) == false) + ck_error("expected no more sections"); ck_epoch_barrier(&record); assert(cleanup_calls == 1); @@ -157,7 +158,7 @@ reader_work(void *arg) ck_epoch_section_t section; struct obj *o; - ck_epoch_register(&epc, &local_record); + ck_epoch_register(&epc, &local_record, NULL); o = (struct obj *)arg; diff --git a/regressions/ck_epoch/validate/ck_epoch_section_2.c b/regressions/ck_epoch/validate/ck_epoch_section_2.c index aed3661..dcb3fd0 100644 --- a/regressions/ck_epoch/validate/ck_epoch_section_2.c +++ b/regressions/ck_epoch/validate/ck_epoch_section_2.c @@ -64,7 +64,7 @@ read_thread(void *unused CK_CC_UNUSED) record = malloc(sizeof *record); assert(record != NULL); - ck_epoch_register(&epoch, record); + ck_epoch_register(&epoch, record, NULL); if (aff_iterate(&a)) { perror("ERROR: failed to affine thread"); @@ -110,11 +110,14 @@ read_thread(void *unused CK_CC_UNUSED) } ck_epoch_begin(record, &section[1]); - - assert(section[0].bucket != section[1].bucket); + if (section[0].bucket == section[1].bucket) { + ck_error("%u == %u\n", + section[0].bucket, section[1].bucket); + } ck_epoch_end(record, &section[0]); - assert(ck_pr_load_uint(&record->active) > 0); + if (ck_pr_load_uint(&record->active) == 0) + ck_error("active: %u\n", record->active); if (ck_pr_load_uint(&leave) == 1) { ck_epoch_end(record, &section[1]); @@ -130,10 +133,14 @@ read_thread(void *unused CK_CC_UNUSED) static void * write_thread(void *unused CK_CC_UNUSED) { - ck_epoch_record_t record; + ck_epoch_record_t *record; unsigned long iterations = 0; - ck_epoch_register(&epoch, &record); + record = 
malloc(sizeof *record); + if (record == NULL) + ck_error("record allocation failure"); + + ck_epoch_register(&epoch, record, NULL); if (aff_iterate(&a)) { perror("ERROR: failed to affine thread"); @@ -147,7 +154,7 @@ write_thread(void *unused CK_CC_UNUSED) if (!(iterations % 1048575)) fprintf(stderr, "."); - ck_epoch_synchronize(&record); + ck_epoch_synchronize(record); iterations++; if (ck_pr_load_uint(&leave) == 1) diff --git a/regressions/ck_epoch/validate/ck_epoch_synchronize.c b/regressions/ck_epoch/validate/ck_epoch_synchronize.c index a03a4f7..67e23a3 100644 --- a/regressions/ck_epoch/validate/ck_epoch_synchronize.c +++ b/regressions/ck_epoch/validate/ck_epoch_synchronize.c @@ -86,12 +86,15 @@ static void * read_thread(void *unused CK_CC_UNUSED) { unsigned int j; - ck_epoch_record_t record CK_CC_CACHELINE; + ck_epoch_record_t *record CK_CC_CACHELINE; ck_stack_entry_t *cursor; ck_stack_entry_t *n; unsigned int i; - ck_epoch_register(&stack_epoch, &record); + record = malloc(sizeof *record); + if (record == NULL) + ck_error("record allocation failure"); + ck_epoch_register(&stack_epoch, record, NULL); if (aff_iterate(&a)) { perror("ERROR: failed to affine thread"); @@ -112,7 +115,7 @@ read_thread(void *unused CK_CC_UNUSED) for (;;) { i = 0; - ck_epoch_begin(&record, NULL); + ck_epoch_begin(record, NULL); CK_STACK_FOREACH(&stack, cursor) { if (cursor == NULL) continue; @@ -123,7 +126,7 @@ read_thread(void *unused CK_CC_UNUSED) if (i++ > 4098) break; } - ck_epoch_end(&record, NULL); + ck_epoch_end(record, NULL); if (j != 0 && ck_pr_load_uint(&readers) == 0) ck_pr_store_uint(&readers, 1); @@ -145,10 +148,13 @@ write_thread(void *unused CK_CC_UNUSED) { struct node **entry, *e; unsigned int i, j, tid; - ck_epoch_record_t record; + ck_epoch_record_t *record; ck_stack_entry_t *s; - ck_epoch_register(&stack_epoch, &record); + record = malloc(sizeof *record); + if (record == NULL) + ck_error("record allocation failure"); + ck_epoch_register(&stack_epoch, record, NULL); 
if (aff_iterate(&a)) { perror("ERROR: failed to affine thread"); @@ -180,17 +186,17 @@ write_thread(void *unused CK_CC_UNUSED) ck_pr_stall(); for (i = 0; i < PAIRS_S; i++) { - ck_epoch_begin(&record, NULL); + ck_epoch_begin(record, NULL); s = ck_stack_pop_upmc(&stack); e = stack_container(s); - ck_epoch_end(&record, NULL); + ck_epoch_end(record, NULL); if (i & 1) { - ck_epoch_synchronize(&record); - ck_epoch_reclaim(&record); - ck_epoch_call(&record, &e->epoch_entry, destructor); + ck_epoch_synchronize(record); + ck_epoch_reclaim(record); + ck_epoch_call(record, &e->epoch_entry, destructor); } else { - ck_epoch_barrier(&record); + ck_epoch_barrier(record); destructor(&e->epoch_entry); } @@ -201,13 +207,13 @@ write_thread(void *unused CK_CC_UNUSED) } } - ck_epoch_synchronize(&record); + ck_epoch_synchronize(record); if (tid == 0) { - fprintf(stderr, "[W] Peak: %u (%2.2f%%)\n Reclamations: %lu\n\n", - record.n_peak, - (double)record.n_peak / ((double)PAIRS_S * ITERATE_S) * 100, - record.n_dispatch); + fprintf(stderr, "[W] Peak: %u (%2.2f%%)\n Reclamations: %u\n\n", + record->n_peak, + (double)record->n_peak / ((double)PAIRS_S * ITERATE_S) * 100, + record->n_dispatch); } ck_pr_inc_uint(&e_barrier); diff --git a/regressions/ck_epoch/validate/ck_stack.c b/regressions/ck_epoch/validate/ck_stack.c index fc50228..6d493e1 100644 --- a/regressions/ck_epoch/validate/ck_stack.c +++ b/regressions/ck_epoch/validate/ck_stack.c @@ -81,7 +81,7 @@ thread(void *unused CK_CC_UNUSED) unsigned long smr = 0; unsigned int i; - ck_epoch_register(&stack_epoch, &record); + ck_epoch_register(&stack_epoch, &record, NULL); if (aff_iterate(&a)) { perror("ERROR: failed to affine thread"); @@ -118,7 +118,7 @@ thread(void *unused CK_CC_UNUSED) while (ck_pr_load_uint(&e_barrier) < n_threads); fprintf(stderr, "Deferrals: %lu (%2.2f)\n", smr, (double)smr / PAIRS); - fprintf(stderr, "Peak: %u (%2.2f%%), %u pending\nReclamations: %lu\n\n", + fprintf(stderr, "Peak: %u (%2.2f%%), %u pending\nReclamations: 
%u\n\n", record.n_peak, (double)record.n_peak / PAIRS * 100, record.n_pending, diff --git a/regressions/ck_epoch/validate/torture.c b/regressions/ck_epoch/validate/torture.c index ce3c049..f49d412 100644 --- a/regressions/ck_epoch/validate/torture.c +++ b/regressions/ck_epoch/validate/torture.c @@ -31,8 +31,8 @@ #include <unistd.h> #include <ck_cc.h> #include <ck_pr.h> +#include <inttypes.h> #include <stdbool.h> -#include <stddef.h> #include <string.h> #include <ck_epoch.h> #include <ck_stack.h> @@ -119,7 +119,7 @@ read_thread(void *unused CK_CC_UNUSED) record = malloc(sizeof *record); assert(record != NULL); - ck_epoch_register(&epoch, record); + ck_epoch_register(&epoch, record, NULL); if (aff_iterate(&a)) { perror("ERROR: failed to affine thread"); @@ -147,10 +147,11 @@ write_thread(void *unused CK_CC_UNUSED) ck_epoch_record_t *record; unsigned long iterations = 0; bool c = ck_pr_faa_uint(&first, 1); + uint64_t ac = 0; record = malloc(sizeof *record); assert(record != NULL); - ck_epoch_register(&epoch, record); + ck_epoch_register(&epoch, record, NULL); if (aff_iterate(&a)) { perror("ERROR: failed to affine thread"); @@ -160,6 +161,12 @@ write_thread(void *unused CK_CC_UNUSED) ck_pr_inc_uint(&barrier); while (ck_pr_load_uint(&barrier) < n_threads); +#define CK_EPOCH_S do { \ + uint64_t _s = rdtsc(); \ + ck_epoch_synchronize(record); \ + ac += rdtsc() - _s; \ +} while (0) + do { /* * A thread should never observe invalid.value > valid.value. @@ -167,33 +174,34 @@ write_thread(void *unused CK_CC_UNUSED) * invalid.value <= valid.value is valid. 
*/ if (!c) ck_pr_store_uint(&valid.value, 1); - ck_epoch_synchronize(record); + CK_EPOCH_S; if (!c) ck_pr_store_uint(&invalid.value, 1); ck_pr_fence_store(); if (!c) ck_pr_store_uint(&valid.value, 2); - ck_epoch_synchronize(record); + CK_EPOCH_S; if (!c) ck_pr_store_uint(&invalid.value, 2); ck_pr_fence_store(); if (!c) ck_pr_store_uint(&valid.value, 3); - ck_epoch_synchronize(record); + CK_EPOCH_S; if (!c) ck_pr_store_uint(&invalid.value, 3); ck_pr_fence_store(); if (!c) ck_pr_store_uint(&valid.value, 4); - ck_epoch_synchronize(record); + CK_EPOCH_S; if (!c) ck_pr_store_uint(&invalid.value, 4); - ck_epoch_synchronize(record); + CK_EPOCH_S; if (!c) ck_pr_store_uint(&invalid.value, 0); - ck_epoch_synchronize(record); + CK_EPOCH_S; - iterations += 4; + iterations += 6; } while (ck_pr_load_uint(&leave) == 0 && ck_pr_load_uint(&n_rd) > 0); fprintf(stderr, "%lu iterations\n", iterations); + fprintf(stderr, "%" PRIu64 " average latency\n", ac / iterations); return NULL; } |