/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _IQ_CHUNK_H_
#define _IQ_CHUNK_H_

#include <stdint.h>
#include <stdbool.h>
#include <rte_eventdev.h>
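
/* Beyond the standard and RTE includes above, this header also relies on
 * struct sw_evdev, struct sw_iq and SW_EVS_PER_Q_CHUNK, which are defined
 * in the driver's sw_evdev.h; it is intended to be included after that
 * header.
 */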

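/* Scratch-buffer size used when naming each IQ's reorder buffer (ROB)
 * ring; the constant is consumed outside this header, in sw_evdev.c.
 */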
#define IQ_ROB_NAMESIZE 12

struct sw_queue_chunk {
	struct rte_event events[SW_EVS_PER_Q_CHUNK];
	struct sw_queue_chunk *next;
} __rte_cache_aligned;
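
/* Each internal queue (IQ) stores its events in a linked list of these
 * fixed-size chunks. Chunks that are not in use by any IQ sit on a
 * device-wide free list headed by sw->chunk_list_head, so the alloc/free
 * helpers below are just pointer pushes and pops: no rte_malloc() happens
 * on the fast path.
 */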

static __rte_always_inline bool
iq_empty(const struct sw_iq *iq)
{
	return (iq->count == 0);
}

static __rte_always_inline uint16_t
iq_count(const struct sw_iq *iq)
{
	return iq->count;
}

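/* Pop one chunk off the device-wide free list. Note there is no NULL
 * check: the free list is provisioned so that this cannot underflow (see
 * the comment in iq_enqueue()).
 */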
static __rte_always_inline struct sw_queue_chunk *
iq_alloc_chunk(struct sw_evdev *sw)
{
	struct sw_queue_chunk *chunk = sw->chunk_list_head;
	sw->chunk_list_head = chunk->next;
	chunk->next = NULL;
	return chunk;
}

static __rte_always_inline void
iq_free_chunk(struct sw_evdev *sw, struct sw_queue_chunk *chunk)
{
	chunk->next = sw->chunk_list_head;
	sw->chunk_list_head = chunk;
}

static __rte_always_inline void
iq_free_chunk_list(struct sw_evdev *sw, struct sw_queue_chunk *head)
{
	while (head) {
		struct sw_queue_chunk *next;
		next = head->next;
		iq_free_chunk(sw, head);
		head = next;
	}
}

static __rte_always_inline void
iq_init(struct sw_evdev *sw, struct sw_iq *iq)
{
	iq->head = iq_alloc_chunk(sw);
	iq->tail = iq->head;
	iq->head_idx = 0;
	iq->tail_idx = 0;
	iq->count = 0;
}

static __rte_always_inline void
iq_enqueue(struct sw_evdev *sw, struct sw_iq *iq, const struct rte_event *ev)
{
	iq->tail->events[iq->tail_idx++] = *ev;
	iq->count++;

	if (unlikely(iq->tail_idx == SW_EVS_PER_Q_CHUNK)) {
		/* The number of chunks is defined in relation to the total
	 * number of inflight events and number of IQs such that
		 * allocation will always succeed.
		 */
		struct sw_queue_chunk *chunk = iq_alloc_chunk(sw);
		iq->tail->next = chunk;
		iq->tail = chunk;
		iq->tail_idx = 0;
	}
}
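
/* A sketch of why the allocation above cannot fail, using illustrative
 * quantities rather than the driver's actual provisioning constants: with
 * at most N events inflight spread across Q IQs, the filled chunks of all
 * IQs together hold at most N events (about N / SW_EVS_PER_Q_CHUNK
 * chunks), and each IQ can additionally pin one partially drained head
 * chunk and one partially filled tail chunk. Provisioning on the order of
 * N / SW_EVS_PER_Q_CHUNK + 2 * Q chunks at device setup is therefore
 * enough.
 */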

static __rte_always_inline void
iq_pop(struct sw_evdev *sw, struct sw_iq *iq)
{
	iq->head_idx++;
	iq->count--;

	if (unlikely(iq->head_idx == SW_EVS_PER_Q_CHUNK)) {
		struct sw_queue_chunk *next = iq->head->next;
		iq_free_chunk(sw, iq->head);
		iq->head = next;
		iq->head_idx = 0;
	}
}

static __rte_always_inline const struct rte_event *
iq_peek(const struct sw_iq *iq)
{
	return &iq->head->events[iq->head_idx];
}
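
/* Illustrative use of iq_peek()/iq_pop(), not taken from the original
 * file: a scheduler can inspect the head event and consume it only once it
 * has been placed downstream. can_place() and do_place() are hypothetical
 * helpers.
 *
 *	while (!iq_empty(iq)) {
 *		const struct rte_event *ev = iq_peek(iq);
 *		if (!can_place(sw, ev))
 *			break;
 *		do_place(sw, ev);
 *		iq_pop(sw, iq);
 *	}
 *
 * Two caveats follow from the code above: iq_peek() must not be called on
 * an empty IQ, and the peeked pointer must not be used after iq_pop(),
 * which may recycle the chunk it points into.
 */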

/* Note: count is clamped to iq_count(iq), so the caller may ask for more
 * events than are available; a request for zero events returns
 * immediately.
 */
static __rte_always_inline uint16_t
iq_dequeue_burst(struct sw_evdev *sw,
		 struct sw_iq *iq,
		 struct rte_event *ev,
		 uint16_t count)
{
	struct sw_queue_chunk *current;
	uint16_t total, index;

	count = RTE_MIN(count, iq_count(iq));
	if (unlikely(count == 0))
		return 0;

	current = iq->head;
	index = iq->head_idx;
	total = 0;

	/* Loop over the chunks */
	while (1) {
		struct sw_queue_chunk *next;
		while (index < SW_EVS_PER_Q_CHUNK) {
			ev[total++] = current->events[index++];

			if (unlikely(total == count))
				goto done;
		}

		/* Move to the next chunk */
		next = current->next;
		iq_free_chunk(sw, current);
		current = next;
		index = 0;
	}

done:
	if (unlikely(index == SW_EVS_PER_Q_CHUNK)) {
		struct sw_queue_chunk *next = current->next;
		iq_free_chunk(sw, current);
		iq->head = next;
		iq->head_idx = 0;
	} else {
		iq->head = current;
		iq->head_idx = index;
	}

	iq->count -= total;

	return total;
}

static __rte_always_inline void
iq_put_back(struct sw_evdev *sw,
	    struct sw_iq *iq,
	    const struct rte_event *ev,
	    unsigned int count)
{
	/* Put back events that fit in the current head chunk. If necessary,
	 * put back events in a new head chunk. The caller must ensure that
	 * count <= SW_EVS_PER_Q_CHUNK, to ensure that at most one new head is
	 * needed.
	 */
	uint16_t avail_space = iq->head_idx;

	if (avail_space >= count) {
		const uint16_t idx = avail_space - count;
		uint16_t i;

		for (i = 0; i < count; i++)
			iq->head->events[idx + i] = ev[i];

		iq->head_idx = idx;
	} else {
		const uint16_t remaining = count - avail_space;
		struct sw_queue_chunk *new_head;
		uint16_t i;

		for (i = 0; i < avail_space; i++)
			iq->head->events[i] = ev[remaining + i];

		new_head = iq_alloc_chunk(sw);
		new_head->next = iq->head;
		iq->head = new_head;
		iq->head_idx = SW_EVS_PER_Q_CHUNK - remaining;

		for (i = 0; i < remaining; i++)
			iq->head->events[iq->head_idx + i] = ev[i];
	}

	iq->count += count;
}
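
/* Illustrative pairing of iq_dequeue_burst() and iq_put_back(), not taken
 * from the original file; deliver() is a hypothetical consumer that may
 * accept fewer events than offered. Because put-back prepends at the head,
 * undelivered events keep their original order.
 *
 *	struct rte_event evs[SW_EVS_PER_Q_CHUNK];
 *	uint16_t n = iq_dequeue_burst(sw, iq, evs, RTE_DIM(evs));
 *	uint16_t done = deliver(evs, n);
 *	if (done < n)
 *		iq_put_back(sw, iq, &evs[done], n - done);
 *
 * Capping the burst at SW_EVS_PER_Q_CHUNK keeps the put-back contract
 * (count <= SW_EVS_PER_Q_CHUNK) satisfied even if nothing is delivered.
 */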

#endif /* _IQ_CHUNK_H_ */