/*
 * Copyright 2018 Paul Khuong, Google LLC.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Overview
 * ========
 *
 * ck_ec implements 32- and 64-bit event counts. Event counts let us
 * easily integrate OS-level blocking (e.g., futexes) in lock-free
 * protocols. Waiters block conditionally, if the event count's value
 * is still equal to some old value.
 *
 * Event counts come in four variants: 32 and 64 bit (with one bit
 * stolen for internal signaling, so 31 and 63 bit counters), and
 * single or multiple producers (wakers). Waiters are always multiple
 * consumers. The 32 bit variants are smaller, and more efficient,
 * especially in single producer mode. The 64 bit variants are larger,
 * but practically invulnerable to ABA.
 *
 * The 32 bit variant is always available. The 64 bit variant is only
 * available if CK supports 64-bit atomic operations. Currently,
 * specialization for single producer is only implemented for x86 and
 * x86-64, on compilers that support GCC extended inline assembly;
 * other platforms fall back to the multiple producer code path.
 *
 * A typical usage pattern is:
 *
 *  1. On the producer side:
 *
 *    - Make changes to some shared data structure, without involving
 *	the event count at all.
 *    - After each change, call ck_ec_inc on the event count. The call
 *	acts as a write-write barrier, and wakes up any consumer blocked
 *	on the event count (waiting for new changes).
 *
 *  2. On the consumer side:
 *
 *    - Snapshot ck_ec_value of the event count. The call acts as a
 *	read barrier.
 *    - Read and process the shared data structure.
 *    - Wait for new changes by calling ck_ec_wait with the snapshot value.
 *
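 *  A minimal sketch of this pattern, assuming a shared `struct ck_ec32
 *  ec` and a `struct ck_ec_mode mode` set up as described below (the
 *  update_data() and process_data() helpers are illustrative only):
 *
 *    Producer:
 *
 *        update_data();
 *        ck_ec_inc(&ec, &mode);
 *
 *    Consumer:
 *
 *        for (;;) {
 *                uint32_t snapshot = ck_ec_value(&ec);
 *
 *                process_data();
 *                ck_ec_wait(&ec, &mode, snapshot, NULL);
 *        }
 *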
 * Some data structures may opt for tighter integration with their
 * event count. For example, an SPMC ring buffer or disruptor might
 * use the event count's value as the write pointer. If the buffer is
 * regularly full, it might also make sense to store the read pointer
 * in an MP event count.
 *
 * This event count implementation supports tighter integration in two
 * ways.
 *
 * Producers may opt to increment by an arbitrary value (less than
 * INT32_MAX / INT64_MAX), in order to encode, e.g., byte
 * offsets. Larger increment values make wraparound more likely, so
 * the increments should still be relatively small.
 *
 * Consumers may pass a predicate to ck_ec_wait_pred. This predicate
 * can make `ck_ec_wait_pred` return early, before the event count's
 * value changes, and can override the deadline passed to futex_wait.
 * This lets a consumer block on one event count while optimistically
 * looking at other waking conditions.
 *
 * API Reference
 * =============
 *
 * When compiled as C11 or later, this header defines type-generic
 * macros for ck_ec32 and ck_ec64; the reference describes this
 * type-generic API.
 *
 * ck_ec needs additional OS primitives to determine the current time,
 * to wait on an address, and to wake all threads waiting on a given
 * address. These are defined with fields in a struct ck_ec_ops.  Each
 * ck_ec_ops may additionally define the number of spin loop
 * iterations in the slow path, as well as the initial wait time in
 * the internal exponential backoff, the exponential scale factor, and
 * the right shift count (< 32).
 *
 * The ops, in addition to the single/multiple producer flag, are
 * encapsulated in a struct ck_ec_mode, passed to most ck_ec
 * operations.
 *
 * ec is a struct ck_ec32 *, or a struct ck_ec64 *.
 *
 * value is a uint32_t for ck_ec32, and a uint64_t for ck_ec64. It
 * never exceeds INT32_MAX or INT64_MAX, respectively.
 *
 * mode is a struct ck_ec_mode *.
 *
 * deadline is either NULL, or a `const struct timespec *` that will
 * be treated as an absolute deadline.
 *
 * `void ck_ec_init(ec, value)`: initializes the event count to value.
 *
 * `value ck_ec_value(ec)`: returns the current value of the event
 *  counter.  This read acts as a read (acquire) barrier.
 *
 * `bool ck_ec_has_waiters(ec)`: returns whether some thread has
 *  marked the event count as requiring an OS wakeup.
 *
 * `void ck_ec_inc(ec, mode)`: increments the value of the event
 *  counter by one. This write acts as a write barrier. Wakes up
 *  any waiting thread.
 *
 * `value ck_ec_add(ec, mode, value)`: increments the event counter by
 *  `value`, and returns the event counter's previous value. This
 *  write acts as a write barrier. Wakes up any waiting thread.
 *
 * `int ck_ec_deadline(struct timespec *new_deadline,
 *		       mode,
 *		       const struct timespec *timeout)`:
 *  computes a deadline `timeout` away from the current time. If
 *  timeout is NULL, computes a deadline in the infinite future. The
 *  resulting deadline is written to `new_deadline`. Returns 0 on
 *  success, and -1 if ops->gettime failed (without touching errno).
 *
 * `int ck_ec_wait(ec, mode, value, deadline)`: waits until the event
 *  counter's value differs from `value`, or, if `deadline` is
 *  provided and non-NULL, until the current time is after that
 *  deadline. Use a deadline with tv_sec = 0 for a non-blocking
 *  execution. Returns 0 if the event counter has changed, and -1 on
 *  timeout. This function acts as a read (acquire) barrier.
 *
 * `int ck_ec_wait_pred(ec, mode, value, pred, data, deadline)`: waits
 * until the event counter's value differs from `value`, or until
 * `pred` returns non-zero, or, if `deadline` is provided and
 * non-NULL, until the current time is after that deadline. Use a
 * deadline with tv_sec = 0 for a non-blocking execution. Returns 0 if
 * the event counter has changed, `pred`'s return value if non-zero,
 * and -1 on timeout. This function acts as a read (acquire) barrier.
 *
 * `pred` is always called as `pred(data, iteration_deadline, now)`,
 * where `iteration_deadline` is a timespec of the deadline for this
 * exponential backoff iteration, and `now` is the current time. If
 * `pred` returns a non-zero value, that value is immediately returned
 * to the waiter. Otherwise, `pred` is free to modify
 * `iteration_deadline` (moving it further in the future is a bad
 * idea).
 *
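 * A minimal sketch of such a predicate; the `struct my_flag` cookie
 * and its `cancelled` field are hypothetical. Concretely, `data` and
 * `now` are delivered through the `const struct ck_ec_wait_state *`
 * argument (defined below), and `iteration_deadline` is the second
 * argument:
 *
 *    struct my_flag {
 *            int cancelled;
 *    };
 *
 *    static int my_pred(const struct ck_ec_wait_state *state,
 *                       struct timespec *iteration_deadline)
 *    {
 *            const struct my_flag *flag = state->data;
 *
 *            (void)iteration_deadline;
 *            // Returning non-zero makes ck_ec_wait_pred return early.
 *            return flag->cancelled ? 1 : 0;
 *    }
 *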
 * Implementation notes
 * ====================
 *
 * The multiple producer implementation is a regular locked event
 * count, with a single flag bit to denote the need to wake up waiting
 * threads.
 *
 * The single producer specialization is heavily tied to
 * [x86-TSO](https://www.cl.cam.ac.uk/~pes20/weakmemory/cacm.pdf), and
 * to non-atomic read-modify-write instructions (e.g., `inc mem`);
 * these non-atomic RMW let us write to the same memory locations with
 * atomic and non-atomic instructions, without suffering from process
 * scheduling stalls.
 *
 * The reason we can mix atomic and non-atomic writes to the `counter`
 * word is that every non-atomic write obviates the need for the
 * atomically flipped flag bit: we only use non-atomic writes to
 * update the event count, and the atomic flag only informs the
 * producer that we would like a futex_wake, because of the update.
 * We only require the non-atomic RMW counter update to prevent
 * preemption from introducing arbitrarily long worst case delays.
 *
 * Correctness does not rely on the usual ordering argument: in the
 * absence of fences, there is no strict ordering between atomic and
 * non-atomic writes. The key is instead x86-TSO's guarantee that a
 * read is satisfied from the most recent buffered write in the local
 * store queue if there is one, or from memory if there is no write to
 * that address in the store queue.
 *
 * x86-TSO's constraint on reads suffices to guarantee that the
 * producer will never forget about a counter update. If the last
 * update is still queued, the new update will be based on the queued
 * value. Otherwise, the new update will be based on the value in
 * memory, which may or may not have had its flag flipped. In either
 * case, the value of the counter (modulo flag) is correct.
 *
 * When the producer forwards the counter's value from its store
 * queue, the new update might not preserve a flag flip. Any waiter
 * thus has to check periodically whether it missed a wakeup because
 * the flag bit was silently cleared.
 *
 * In reality, the store queue in x86-TSO stands for in-flight
 * instructions in the chip's out-of-order backend. In the vast
 * majority of cases, instructions will only remain in flight for a
 * few hundred or thousand cycles. That's why ck_ec_wait spins on
 * the `counter` word for ~100 iterations after flipping its flag bit:
 * if the counter hasn't changed after that many iterations, it is
 * very likely that the producer's next counter update will observe
 * the flag flip.
 *
 * That's still not a hard guarantee of correctness. Conservatively,
 * we can expect that no instruction will remain in flight for more
 * than 1 second... if only because some interrupt will have forced
 * the chip to store its architectural state in memory, at which point
 * an instruction is either fully retired or rolled back. Interrupts,
 * particularly the pre-emption timer, are why single-producer updates
 * must happen in a single non-atomic read-modify-write instruction.
 * Having a single instruction as the critical section means we only
 * have to consider the worst-case execution time for that
 * instruction. That's easier than doing the same for a pair of
 * instructions, which an unlucky pre-emption could delay for
 * arbitrarily long.
 *
 * Thus, after a short spin loop, ck_ec_wait enters an exponential
 * backoff loop, where each "sleep" is instead a futex_wait.  The
 * backoff is only necessary to handle rare cases where the flag flip
 * was overwritten after the spin loop. Eventually, more than one
 * second will have elapsed since the flag flip, and the sleep timeout
 * becomes infinite: since the flag bit has been set for much longer
 * than the time for which an instruction may remain in flight, the
 * flag will definitely be observed at the next counter update.
 *
 * The 64 bit ck_ec_wait pulls another trick: futexes only handle 32
 * bit ints, so we must treat the 64 bit counter's low 32 bits as an
 * int in futex_wait. That's a bit dodgy, but fine in practice, given
 * that the OS's futex code will always read whatever value is
 * currently in memory: even if the producer thread were to wait on
 * its own event count, the syscall and ring transition would empty
 * the store queue (the out-of-order execution backend).
 *
 * Finally, what happens when the producer is migrated to another core
 * or otherwise pre-empted? Migration must already incur a barrier, so
 * the thread always sees its own writes; that case is safe. As for
 * pre-emption, that requires storing the architectural state, which
 * means every instruction must either be executed fully or not at
 * all when pre-emption happens.
 */

#ifndef CK_EC_H
#define CK_EC_H
#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stdint.h>
#include <ck_stddef.h>
#include <sys/time.h>

/*
 * If we have ck_pr_faa_64 (and, presumably, ck_pr_load_64), we
 * support 63 bit counters.
 */
#ifdef CK_F_PR_FAA_64
#define CK_F_EC64
#endif /* CK_F_PR_FAA_64 */

/*
 * GCC inline assembly lets us exploit non-atomic read-modify-write
 * instructions on x86/x86_64 for a fast single-producer mode.
 *
 * If CK_F_EC_SP is not defined, ck_ec always uses the slower
 * multiple producer code.
 */
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define CK_F_EC_SP
#endif /* GNUC && (__i386__ || __x86_64__) */

struct ck_ec_ops;

struct ck_ec_wait_state {
	struct timespec start;	/* Time when we entered ck_ec_wait. */
	struct timespec now;	/* Time now. */
	const struct ck_ec_ops *ops;
	void *data;  /* Opaque pointer for the predicate's internal state. */
};

/*
 * ck_ec_ops define system-specific functions to get the current time,
 * atomically wait on an address if it still has some expected value,
 * and to wake all threads waiting on an address.
 *
 * Each platform is expected to define a handful (usually one) of
 * const ops structs, and to reuse them for all ck_ec_mode structs.
 */
struct ck_ec_ops {
	/* Populates out with the current time. Returns non-zero on failure. */
	int (*gettime)(const struct ck_ec_ops *, struct timespec *out);

	/*
	 * Waits on address if its value is still `expected`.  If
	 * deadline is non-NULL, stops waiting once that deadline is
	 * reached. May return early for any reason.
	 */
	void (*wait32)(const struct ck_ec_wait_state *, const uint32_t *,
		       uint32_t expected, const struct timespec *deadline);

	/*
	 * Same as wait32, but for a 64 bit counter. Only used if
	 * CK_F_EC64 is defined.
	 *
	 * If the underlying blocking primitive only supports 32 bit
	 * control words, it should be safe to block on the least
	 * significant half of the 64 bit address.
	 */
	void (*wait64)(const struct ck_ec_wait_state *, const uint64_t *,
		       uint64_t expected, const struct timespec *deadline);

	/* Wakes up all threads waiting on address. */
	void (*wake32)(const struct ck_ec_ops *, const uint32_t *address);

	/*
	 * Same as wake32, but for a 64 bit counter. Only used if
	 * CK_F_EC64 is defined.
	 *
	 * When wait64 truncates the control word at address to only
	 * consider its least significant half, wake64 should perform
	 * any necessary fixup (e.g., on big endian platforms).
	 */
	void (*wake64)(const struct ck_ec_ops *, const uint64_t *address);

	/*
	 * Number of iterations for the initial busy wait. 0 defaults
	 * to 100 (not ABI stable).
	 */
	uint32_t busy_loop_iter;

	/*
	 * Delay in nanoseconds for the first iteration of the
	 * exponential backoff. 0 defaults to 2 ms (not ABI stable).
	 */
	uint32_t initial_wait_ns;

	/*
	 * Scale factor for the exponential backoff. 0 defaults to 8x
	 * (not ABI stable).
	 */
	uint32_t wait_scale_factor;

	/*
	 * Right shift count for the exponential backoff. The update
	 * after each iteration is
	 *     wait_ns = (wait_ns * wait_scale_factor) >> wait_shift_count,
	 * until one second has elapsed. After that, the deadline goes
	 * to infinity.
	 */
	uint32_t wait_shift_count;
};
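
/*
 * A sketch of a Linux futex-backed ops table, for illustration only:
 * it is not shipped with ck, the function and variable names are made
 * up, and it assumes <linux/futex.h>, <sys/syscall.h>, <unistd.h>,
 * <limits.h>, and CLOCK_MONOTONIC deadlines. FUTEX_WAIT_BITSET is
 * used because it accepts an absolute CLOCK_MONOTONIC timeout, which
 * matches ck_ec's deadlines. wait64/wake64 are omitted, so only the
 * 32 bit event count could use this table.
 *
 *    static int my_gettime(const struct ck_ec_ops *ops, struct timespec *out)
 *    {
 *            (void)ops;
 *            return clock_gettime(CLOCK_MONOTONIC, out);
 *    }
 *
 *    static void my_wait32(const struct ck_ec_wait_state *state,
 *                          const uint32_t *addr, uint32_t expected,
 *                          const struct timespec *deadline)
 *    {
 *            (void)state;
 *            // Blocks while *addr == expected; a NULL deadline waits forever.
 *            syscall(SYS_futex, addr, FUTEX_WAIT_BITSET, expected,
 *                    deadline, NULL, FUTEX_BITSET_MATCH_ANY);
 *            return;
 *    }
 *
 *    static void my_wake32(const struct ck_ec_ops *ops, const uint32_t *addr)
 *    {
 *            (void)ops;
 *            syscall(SYS_futex, addr, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
 *            return;
 *    }
 *
 *    static const struct ck_ec_ops my_ec_ops = {
 *            .gettime = my_gettime,
 *            .wait32 = my_wait32,
 *            .wake32 = my_wake32
 *    };
 */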

/*
 * ck_ec_mode wraps the ops table, and informs the fast path whether
 * it should attempt to specialize for single producer mode.
 *
 * mode structs are expected to be exposed by value, e.g.,
 *
 *    extern const struct ck_ec_ops system_ec_ops;
 *
 *    static const struct ck_ec_mode ec_sp = {
 *	  .ops = &system_ec_ops,
 *	  .single_producer = true
 *    };
 *
 *    static const struct ck_ec_mode ec_mp = {
 *	  .ops = &system_ec_ops,
 *	  .single_producer = false
 *    };
 *
 * ck_ec_mode structs are only passed to inline functions defined in
 * this header, and never escape to their slow paths, so they should
 * not result in any object file size increase.
 */
struct ck_ec_mode {
	const struct ck_ec_ops *ops;
	/*
	 * If single_producer is true, the event count has a unique
	 * incrementer. The implementation will specialize ck_ec_inc
	 * and ck_ec_add if possible (if CK_F_EC_SP is defined).
	 */
	bool single_producer;
};

struct ck_ec32 {
	/* Flag is "sign" bit, value in bits 0:30. */
	uint32_t counter;
};

typedef struct ck_ec32 ck_ec32_t;

#ifdef CK_F_EC64
struct ck_ec64 {
	/*
	 * Flag is bottom bit, value in bits 1:63. Eventcount only
	 * works on x86-64 (i.e., little endian), so the futex int
	 * lies in the first 4 (bottom) bytes.
	 */
	uint64_t counter;
};

typedef struct ck_ec64 ck_ec64_t;
#endif /* CK_F_EC64 */

#define CK_EC_INITIALIZER { .counter = 0 }
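
/*
 * For example (a sketch; the variable name is illustrative):
 *
 *    static struct ck_ec32 ec = CK_EC_INITIALIZER;
 *
 * or, for a non-zero or runtime-chosen initial value:
 *
 *    ck_ec_init(&ec, initial_value);
 */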

/*
 * Initializes the event count to `value`. The value must not
 * exceed INT32_MAX.
 */
static void ck_ec32_init(struct ck_ec32 *ec, uint32_t value);

#ifndef CK_F_EC64
#define ck_ec_init ck_ec32_init
#else
/*
 * Initializes the event count to `value`. The value must not
 * exceed INT64_MAX.
 */
static void ck_ec64_init(struct ck_ec64 *ec, uint64_t value);

#if __STDC_VERSION__ >= 201112L
#define ck_ec_init(EC, VALUE)				\
	(_Generic(*(EC),				\
		  struct ck_ec32 : ck_ec32_init,	\
		  struct ck_ec64 : ck_ec64_init)((EC), (VALUE)))
#endif /* __STDC_VERSION__ */
#endif /* CK_F_EC64 */

/*
 * Returns the counter value in the event count. The value is at most
 * INT32_MAX.
 */
static uint32_t ck_ec32_value(const struct ck_ec32* ec);

#ifndef CK_F_EC64
#define ck_ec_value ck_ec32_value
#else
/*
 * Returns the counter value in the event count. The value is at most
 * INT64_MAX.
 */
static uint64_t ck_ec64_value(const struct ck_ec64* ec);

#if __STDC_VERSION__ >= 201112L
#define ck_ec_value(EC)					\
	(_Generic(*(EC),				\
		  struct ck_ec32 : ck_ec32_value,	\
		  struct ck_ec64 : ck_ec64_value)((EC)))
#endif /* __STDC_VERSION__ */
#endif /* CK_F_EC64 */

/*
 * Returns whether there may be slow-pathed waiters that need an
 * explicit OS wakeup for this event count.
 */
static bool ck_ec32_has_waiters(const struct ck_ec32 *ec);

#ifndef CK_F_EC64
#define ck_ec_has_waiters ck_ec32_has_waiters
#else
static bool ck_ec64_has_waiters(const struct ck_ec64 *ec);

#if __STDC_VERSION__ >= 201112L
#define ck_ec_has_waiters(EC)				      \
	(_Generic(*(EC),				      \
		  struct ck_ec32 : ck_ec32_has_waiters,	      \
		  struct ck_ec64 : ck_ec64_has_waiters)((EC)))
#endif /* __STDC_VERSION__ */
#endif /* CK_F_EC64 */

/*
 * Increments the counter value in the event count by one, and wakes
 * up any waiter.
 */
static void ck_ec32_inc(struct ck_ec32 *ec, const struct ck_ec_mode *mode);

#ifndef CK_F_EC64
#define ck_ec_inc ck_ec32_inc
#else
static void ck_ec64_inc(struct ck_ec64 *ec, const struct ck_ec_mode *mode);

#if __STDC_VERSION__ >= 201112L
#define ck_ec_inc(EC, MODE)					\
	(_Generic(*(EC),					\
		  struct ck_ec32 : ck_ec32_inc,			\
		  struct ck_ec64 : ck_ec64_inc)((EC), (MODE)))
#endif /* __STDC_VERSION__ */
#endif /* CK_F_EC64 */

/*
 * Increments the counter value in the event count by delta, wakes
 * up any waiter, and returns the previous counter value.
 */
static uint32_t ck_ec32_add(struct ck_ec32 *ec,
			    const struct ck_ec_mode *mode,
			    uint32_t delta);

#ifndef CK_F_EC64
#define ck_ec_add ck_ec32_add
#else
static uint64_t ck_ec64_add(struct ck_ec64 *ec,
			    const struct ck_ec_mode *mode,
			    uint64_t delta);

#if __STDC_VERSION__ >= 201112L
#define ck_ec_add(EC, MODE, DELTA)					\
	(_Generic(*(EC),						\
		  struct ck_ec32 : ck_ec32_add,				\
		  struct ck_ec64 : ck_ec64_add)((EC), (MODE), (DELTA)))
#endif /* __STDC_VERSION__ */
#endif /* CK_F_EC64 */
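
/*
 * For example, a producer that publishes `n` freshly written bytes in
 * one go (a sketch; the 64 bit `ec`, `mode`, and `n` are illustrative):
 *
 *    uint64_t old = ck_ec_add(&ec, &mode, n);
 *
 * Any thread blocked on the previous counter value is woken up, and
 * `old` is the counter value before the addition.
 */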

/*
 * Populates `new_deadline` with a deadline `timeout` in the future.
 * Returns 0 on success, and -1 if ops->gettime failed, in which
 * case errno is left as is.
 */
static int ck_ec_deadline(struct timespec *new_deadline,
			  const struct ck_ec_mode *mode,
			  const struct timespec *timeout);
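
/*
 * For example, to wait for roughly one millisecond (a sketch; `ec`,
 * `mode`, and `snapshot` are assumed to exist as in the overview):
 *
 *    struct timespec timeout = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };
 *    struct timespec deadline;
 *
 *    if (ck_ec_deadline(&deadline, &mode, &timeout) == 0) {
 *            (void)ck_ec_wait(&ec, &mode, snapshot, &deadline);
 *    }
 */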

/*
 * Waits until the counter value in the event count differs from
 * old_value, or, if deadline is non-NULL, until CLOCK_MONOTONIC is
 * past the deadline.
 *
 * Returns 0 on success, and -1 on timeout.
 */
static int ck_ec32_wait(struct ck_ec32 *ec,
			const struct ck_ec_mode *mode,
			uint32_t old_value,
			const struct timespec *deadline);

#ifndef CK_F_EC64
#define ck_ec_wait ck_ec32_wait
#else
static int ck_ec64_wait(struct ck_ec64 *ec,
			const struct ck_ec_mode *mode,
			uint64_t old_value,
			const struct timespec *deadline);

#if __STDC_VERSION__ >= 201112L
#define ck_ec_wait(EC, MODE, OLD_VALUE, DEADLINE)			\
	(_Generic(*(EC),						\
		  struct ck_ec32 : ck_ec32_wait,			\
		  struct ck_ec64 : ck_ec64_wait)((EC), (MODE),		\
						 (OLD_VALUE), (DEADLINE)))

#endif /* __STDC_VERSION__ */
#endif /* CK_F_EC64 */

/*
 * Waits until the counter value in the event count differs from
 * old_value, pred returns non-zero, or, if deadline is non-NULL,
 * until CLOCK_MONOTONIC is past the deadline.
 *
 * Returns 0 on success, -1 on timeout, and the return value of pred
 * if it returns non-zero.
 *
 * A NULL pred represents a function that always returns 0.
 */
static int ck_ec32_wait_pred(struct ck_ec32 *ec,
			     const struct ck_ec_mode *mode,
			     uint32_t old_value,
			     int (*pred)(const struct ck_ec_wait_state *,
					 struct timespec *deadline),
			     void *data,
			     const struct timespec *deadline);

#ifndef CK_F_EC64
#define ck_ec_wait_pred ck_ec32_wait_pred
#else
static int ck_ec64_wait_pred(struct ck_ec64 *ec,
			     const struct ck_ec_mode *mode,
			     uint64_t old_value,
			     int (*pred)(const struct ck_ec_wait_state *,
					 struct timespec *deadline),
			     void *data,
			     const struct timespec *deadline);

#if __STDC_VERSION__ >= 201112L
#define ck_ec_wait_pred(EC, MODE, OLD_VALUE, PRED, DATA, DEADLINE)	\
	(_Generic(*(EC),						\
		  struct ck_ec32 : ck_ec32_wait_pred,			\
		  struct ck_ec64 : ck_ec64_wait_pred)			\
	 ((EC), (MODE), (OLD_VALUE), (PRED), (DATA), (DEADLINE)))
#endif /* __STDC_VERSION__ */
#endif /* CK_F_EC64 */
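
/*
 * A hypothetical end-to-end call that combines the pieces above
 * (`ec`, `mode`, `snapshot`, `deadline`, `my_pred`, `my_cookie`, and
 * the two handlers are illustrative names, not part of this API):
 *
 *    int r = ck_ec_wait_pred(&ec, &mode, snapshot,
 *                            my_pred, &my_cookie, &deadline);
 *
 *    if (r == -1) {
 *            handle_timeout();
 *    } else if (r != 0) {
 *            handle_predicate_value(r);  // my_pred returned r.
 *    }
 *    // r == 0: the counter's value now differs from `snapshot`.
 */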

/*
 * Inline implementation details. 32 bit first, then 64 bit
 * conditionally.
 */
CK_CC_FORCE_INLINE void ck_ec32_init(struct ck_ec32 *ec, uint32_t value)
{
	ec->counter = value & ~(1UL << 31);
	return;
}

CK_CC_FORCE_INLINE uint32_t ck_ec32_value(const struct ck_ec32 *ec)
{
	uint32_t ret = ck_pr_load_32(&ec->counter) & ~(1UL << 31);

	ck_pr_fence_acquire();
	return ret;
}

CK_CC_FORCE_INLINE bool ck_ec32_has_waiters(const struct ck_ec32 *ec)
{
	return ck_pr_load_32(&ec->counter) & (1UL << 31);
}

/* Slow path for ck_ec{32,64}_{inc,add} */
void ck_ec32_wake(struct ck_ec32 *ec, const struct ck_ec_ops *ops);

CK_CC_FORCE_INLINE void ck_ec32_inc(struct ck_ec32 *ec,
				    const struct ck_ec_mode *mode)
{
#if !defined(CK_F_EC_SP)
	/* Nothing to specialize if we don't have EC_SP. */
	ck_ec32_add(ec, mode, 1);
	return;
#else
	char flagged;

#if __GNUC__ >= 6
	/*
	 * We don't want to wake if the sign bit is 0. We do want to
	 * wake if the sign bit just flipped from 1 to 0. We don't
	 * care what happens when our increment caused the sign bit to
	 * flip from 0 to 1 (that happens once per 2^31 increments).
	 *
	 * This leaves us with four cases:
	 *
	 *  old sign bit | new sign bit | SF | OF | ZF
	 *  -------------------------------------------
	 *	       0 |	      0 |  0 |	0 | ?
	 *	       0 |	      1 |  1 |	0 | ?
	 *	       1 |	      1 |  1 |	0 | ?
	 *	       1 |	      0 |  0 |	0 | 1
	 *
	 * In the first case, we don't want to hit ck_ec32_wake. In
	 * the last two cases, we do want to call ck_ec32_wake. In the
	 * second case, we don't care, so we arbitrarily choose to
	 * call ck_ec32_wake.
	 *
	 * The "le" condition checks if SF != OF, or ZF == 1, which
	 * meets our requirements.
	 */
#define CK_EC32_INC_ASM(PREFIX)					\
	__asm__ volatile(PREFIX " incl %0"			\
			 : "+m"(ec->counter), "=@ccle"(flagged)	\
			 :: "cc", "memory")
#else
#define CK_EC32_INC_ASM(PREFIX)						\
	__asm__ volatile(PREFIX " incl %0; setle %1"			\
			 : "+m"(ec->counter), "=r"(flagged)		\
			 :: "cc", "memory")
#endif /* __GNUC__ */

	if (mode->single_producer == true) {
		ck_pr_fence_store();
		CK_EC32_INC_ASM("");
	} else {
		ck_pr_fence_store_atomic();
		CK_EC32_INC_ASM("lock");
	}
#undef CK_EC32_INC_ASM

	if (CK_CC_UNLIKELY(flagged)) {
		ck_ec32_wake(ec, mode->ops);
	}

	return;
#endif /* CK_F_EC_SP */
}

CK_CC_FORCE_INLINE uint32_t ck_ec32_add_epilogue(struct ck_ec32 *ec,
						 const struct ck_ec_mode *mode,
						 uint32_t old)
{
	const uint32_t flag_mask = 1U << 31;
	uint32_t ret;

	ret = old & ~flag_mask;
	/* These two only differ if the flag bit is set. */
	if (CK_CC_UNLIKELY(old != ret)) {
		ck_ec32_wake(ec, mode->ops);
	}

	return ret;
}

static CK_CC_INLINE uint32_t ck_ec32_add_mp(struct ck_ec32 *ec,
					    const struct ck_ec_mode *mode,
					    uint32_t delta)
{
	uint32_t old;

	ck_pr_fence_store_atomic();
	old = ck_pr_faa_32(&ec->counter, delta);
	return ck_ec32_add_epilogue(ec, mode, old);
}

#ifdef CK_F_EC_SP
static CK_CC_INLINE uint32_t ck_ec32_add_sp(struct ck_ec32 *ec,
					    const struct ck_ec_mode *mode,
					    uint32_t delta)
{
	uint32_t old;

	/*
	 * Correctness of this racy write depends on actually
	 * having an update to write. Exit here if the update
	 * is a no-op.
	 */
	if (CK_CC_UNLIKELY(delta == 0)) {
		return ck_ec32_value(ec);
	}

	ck_pr_fence_store();
	old = delta;
	__asm__ volatile("xaddl %1, %0"
			 : "+m"(ec->counter), "+r"(old)
			 :: "cc", "memory");
	return ck_ec32_add_epilogue(ec, mode, old);
}
#endif /* CK_F_EC_SP */

CK_CC_FORCE_INLINE uint32_t ck_ec32_add(struct ck_ec32 *ec,
					const struct ck_ec_mode *mode,
					uint32_t delta)
{
#ifdef CK_F_EC_SP
	if (mode->single_producer == true) {
		return ck_ec32_add_sp(ec, mode, delta);
	}
#endif

	return ck_ec32_add_mp(ec, mode, delta);
}

int ck_ec_deadline_impl(struct timespec *new_deadline,
			const struct ck_ec_ops *ops,
			const struct timespec *timeout);

CK_CC_FORCE_INLINE int ck_ec_deadline(struct timespec *new_deadline,
				      const struct ck_ec_mode *mode,
				      const struct timespec *timeout)
{
	return ck_ec_deadline_impl(new_deadline, mode->ops, timeout);
}


int ck_ec32_wait_slow(struct ck_ec32 *ec,
		      const struct ck_ec_ops *ops,
		      uint32_t old_value,
		      const struct timespec *deadline);

CK_CC_FORCE_INLINE int ck_ec32_wait(struct ck_ec32 *ec,
				    const struct ck_ec_mode *mode,
				    uint32_t old_value,
				    const struct timespec *deadline)
{
	if (ck_ec32_value(ec) != old_value) {
		return 0;
	}

	return ck_ec32_wait_slow(ec, mode->ops, old_value, deadline);
}

int ck_ec32_wait_pred_slow(struct ck_ec32 *ec,
			   const struct ck_ec_ops *ops,
			   uint32_t old_value,
			   int (*pred)(const struct ck_ec_wait_state *state,
				       struct timespec *deadline),
			   void *data,
			   const struct timespec *deadline);

CK_CC_FORCE_INLINE int
ck_ec32_wait_pred(struct ck_ec32 *ec,
		  const struct ck_ec_mode *mode,
		  uint32_t old_value,
		  int (*pred)(const struct ck_ec_wait_state *state,
			      struct timespec *deadline),
		  void *data,
		  const struct timespec *deadline)
{
	if (ck_ec32_value(ec) != old_value) {
		return 0;
	}

	return ck_ec32_wait_pred_slow(ec, mode->ops, old_value,
				      pred, data, deadline);
}

#ifdef CK_F_EC64
CK_CC_FORCE_INLINE void ck_ec64_init(struct ck_ec64 *ec, uint64_t value)
{
	ec->counter = value << 1;
	return;
}

CK_CC_FORCE_INLINE uint64_t ck_ec64_value(const struct ck_ec64 *ec)
{
	uint64_t ret = ck_pr_load_64(&ec->counter) >> 1;

	ck_pr_fence_acquire();
	return ret;
}

CK_CC_FORCE_INLINE bool ck_ec64_has_waiters(const struct ck_ec64 *ec)
{
	return ck_pr_load_64(&ec->counter) & 1;
}

void ck_ec64_wake(struct ck_ec64 *ec, const struct ck_ec_ops *ops);

CK_CC_FORCE_INLINE void ck_ec64_inc(struct ck_ec64 *ec,
				    const struct ck_ec_mode *mode)
{
	/* We always xadd, so there's no special optimization here. */
	(void)ck_ec64_add(ec, mode, 1);
	return;
}

CK_CC_FORCE_INLINE uint64_t ck_ec_add64_epilogue(struct ck_ec64 *ec,
					       const struct ck_ec_mode *mode,
					       uint64_t old)
{
	uint64_t ret = old >> 1;

	if (CK_CC_UNLIKELY(old & 1)) {
		ck_ec64_wake(ec, mode->ops);
	}

	return ret;
}

static CK_CC_INLINE uint64_t ck_ec64_add_mp(struct ck_ec64 *ec,
					    const struct ck_ec_mode *mode,
					    uint64_t delta)
{
	uint64_t inc = 2 * delta;  /* The low bit is the flag bit. */

	ck_pr_fence_store_atomic();
	return ck_ec_add64_epilogue(ec, mode, ck_pr_faa_64(&ec->counter, inc));
}

#ifdef CK_F_EC_SP
/* Single-producer specialization. */
static CK_CC_INLINE uint64_t ck_ec64_add_sp(struct ck_ec64 *ec,
					    const struct ck_ec_mode *mode,
					    uint64_t delta)
{
	uint64_t old;

	/*
	 * Correctness of this racy write depends on actually
	 * having an update to write. Exit here if the update
	 * is a no-op.
	 */
	if (CK_CC_UNLIKELY(delta == 0)) {
		return ck_ec64_value(ec);
	}

	ck_pr_fence_store();
	old = 2 * delta;  /* The low bit is the flag bit. */
	__asm__ volatile("xaddq %1, %0"
			 : "+m"(ec->counter), "+r"(old)
			 :: "cc", "memory");
	return ck_ec_add64_epilogue(ec, mode, old);
}
#endif /* CK_F_EC_SP */

/*
 * Dispatch on mode->single_producer in this FORCE_INLINE function:
 * the end result is always small, but not all compilers have enough
 * foresight to inline and get the reduction.
 */
CK_CC_FORCE_INLINE uint64_t ck_ec64_add(struct ck_ec64 *ec,
					const struct ck_ec_mode *mode,
					uint64_t delta)
{
#ifdef CK_F_EC_SP
	if (mode->single_producer == true) {
		return ck_ec64_add_sp(ec, mode, delta);
	}
#endif

	return ck_ec64_add_mp(ec, mode, delta);
}

int ck_ec64_wait_slow(struct ck_ec64 *ec,
		      const struct ck_ec_ops *ops,
		      uint64_t old_value,
		      const struct timespec *deadline);

CK_CC_FORCE_INLINE int ck_ec64_wait(struct ck_ec64 *ec,
				    const struct ck_ec_mode *mode,
				    uint64_t old_value,
				    const struct timespec *deadline)
{
	if (ck_ec64_value(ec) != old_value) {
		return 0;
	}

	return ck_ec64_wait_slow(ec, mode->ops, old_value, deadline);
}

int ck_ec64_wait_pred_slow(struct ck_ec64 *ec,
			   const struct ck_ec_ops *ops,
			   uint64_t old_value,
			   int (*pred)(const struct ck_ec_wait_state *state,
				       struct timespec *deadline),
			   void *data,
			   const struct timespec *deadline);


CK_CC_FORCE_INLINE int
ck_ec64_wait_pred(struct ck_ec64 *ec,
		  const struct ck_ec_mode *mode,
		  uint64_t old_value,
		  int (*pred)(const struct ck_ec_wait_state *state,
			      struct timespec *deadline),
		  void *data,
		  const struct timespec *deadline)
{
	if (ck_ec64_value(ec) != old_value) {
		return 0;
	}

	return ck_ec64_wait_pred_slow(ec, mode->ops, old_value,
				      pred, data, deadline);
}
#endif /* CK_F_EC64 */
#endif /* !CK_EC_H */