Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright (c) 2018-2020 Arm Limited
3 : : */
4 : :
5 : : #ifndef _RTE_RCU_QSBR_H_
6 : : #define _RTE_RCU_QSBR_H_
7 : :
8 : : /**
9 : : * @file
10 : : *
11 : : * RTE Quiescent State Based Reclamation (QSBR).
12 : : *
13 : : * Quiescent State (QS) is any point in the thread execution
14 : : * where the thread does not hold a reference to a data structure
15 : : * in shared memory. While using lock-less data structures, the writer
16 : : * can safely free memory once all the reader threads have entered
17 : : * quiescent state.
18 : : *
19 : : * This library provides the ability for the readers to report quiescent
20 : : * state and for the writers to identify when all the readers have
21 : : * entered quiescent state.
22 : : */
23 : :
24 : : #ifdef __cplusplus
25 : : extern "C" {
26 : : #endif
27 : :
28 : : #include <inttypes.h>
29 : : #include <stdbool.h>
30 : : #include <stdio.h>
31 : : #include <stdint.h>
32 : :
33 : : #include <rte_common.h>
34 : : #include <rte_debug.h>
35 : : #include <rte_atomic.h>
36 : : #include <rte_ring.h>
37 : :
38 : : extern int rte_rcu_log_type;
39 : : #define RTE_LOGTYPE_RCU rte_rcu_log_type
40 : :
41 : : #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
42 : : #define __RTE_RCU_DP_LOG(level, fmt, args...) \
43 : : RTE_LOG_LINE(level, RCU, "%s(): " fmt, __func__, ## args)
44 : : #else
45 : : #define __RTE_RCU_DP_LOG(level, fmt, args...)
46 : : #endif
47 : :
48 : : #if defined(RTE_LIBRTE_RCU_DEBUG)
49 : : #define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, fmt, args...) do { \
50 : : if (v->qsbr_cnt[thread_id].lock_cnt) \
51 : : RTE_LOG_LINE(level, RCU, "%s(): " fmt, __func__, ## args); \
52 : : } while (0)
53 : : #else
54 : : #define __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, level, fmt, args...)
55 : : #endif
56 : :
57 : : /* Registered thread IDs are stored as a bitmap of 64b element array.
58 : : * Given thread id needs to be converted to index into the array and
59 : : * the id within the array element.
60 : : */
61 : : #define __RTE_QSBR_THRID_ARRAY_ELM_SIZE (sizeof(RTE_ATOMIC(uint64_t)) * 8)
62 : : #define __RTE_QSBR_THRID_ARRAY_SIZE(max_threads) \
63 : : RTE_ALIGN(RTE_ALIGN_MUL_CEIL(max_threads, \
64 : : __RTE_QSBR_THRID_ARRAY_ELM_SIZE) >> 3, RTE_CACHE_LINE_SIZE)
65 : : #define __RTE_QSBR_THRID_ARRAY_ELM(v, i) ((uint64_t __rte_atomic *) \
66 : : ((struct rte_rcu_qsbr_cnt *)(v + 1) + v->max_threads) + i)
67 : : #define __RTE_QSBR_THRID_INDEX_SHIFT 6
68 : : #define __RTE_QSBR_THRID_MASK 0x3f
69 : : #define RTE_QSBR_THRID_INVALID 0xffffffff
70 : :
/* Per-reader-thread counter.
 * Each reader occupies its own cache line (__rte_cache_aligned) so that
 * frequent quiescent-state updates by one reader do not cause false
 * sharing with the counters of other readers.
 */
struct rte_rcu_qsbr_cnt {
	RTE_ATOMIC(uint64_t) cnt;
	/**< Quiescent state counter. Value 0 indicates the thread is offline.
	 * 64b counter is used to avoid adding more code to address
	 * counter overflow. Changing this to 32b would require additional
	 * changes to various APIs.
	 */
	RTE_ATOMIC(uint32_t) lock_cnt;
	/**< Lock counter. Used when RTE_LIBRTE_RCU_DEBUG is enabled */
} __rte_cache_aligned;
82 : :
83 : : #define __RTE_QSBR_CNT_THR_OFFLINE 0
84 : : #define __RTE_QSBR_CNT_INIT 1
85 : : #define __RTE_QSBR_CNT_MAX ((uint64_t)~0)
86 : : #define __RTE_QSBR_TOKEN_SIZE sizeof(uint64_t)
87 : :
/* RTE Quiescent State variable structure.
 * This structure has two elements that vary in size based on the
 * 'max_threads' parameter.
 * 1) Quiescent state counter array
 * 2) Register thread ID array
 * Both trailing arrays are laid out immediately after this structure in
 * memory; see __RTE_QSBR_THRID_ARRAY_ELM for how the bitmap is located.
 */
struct rte_rcu_qsbr {
	RTE_ATOMIC(uint64_t) token __rte_cache_aligned;
	/**< Counter to allow for multiple concurrent quiescent state queries */
	RTE_ATOMIC(uint64_t) acked_token;
	/**< Least token acked by all the threads in the last call to
	 * rte_rcu_qsbr_check API. Acts as a fast-path cache: tokens at or
	 * below this value are already known to be acknowledged.
	 */

	uint32_t num_elems __rte_cache_aligned;
	/**< Number of elements in the thread ID array */
	RTE_ATOMIC(uint32_t) num_threads;
	/**< Number of threads currently using this QS variable */
	uint32_t max_threads;
	/**< Maximum number of threads using this QS variable */

	struct rte_rcu_qsbr_cnt qsbr_cnt[0] __rte_cache_aligned;
	/**< Quiescent state counter array of 'max_threads' elements */

	/**< Registered thread IDs are stored in a bitmap array,
	 * after the quiescent state counter array.
	 */
} __rte_cache_aligned;
116 : :
117 : : /**
118 : : * Call back function called to free the resources.
119 : : *
120 : : * @param p
121 : : * Pointer provided while creating the defer queue
122 : : * @param e
123 : : * Pointer to the resource data stored on the defer queue
124 : : * @param n
125 : : * Number of resources to free. Currently, this is set to 1.
126 : : *
127 : : * @return
128 : : * None
129 : : */
130 : : typedef void (*rte_rcu_qsbr_free_resource_t)(void *p, void *e, unsigned int n);
131 : :
132 : : #define RTE_RCU_QSBR_DQ_NAMESIZE RTE_RING_NAMESIZE
133 : :
134 : : /**
135 : : * Various flags supported.
136 : : */
137 : : /**< Enqueue and reclaim operations are multi-thread safe by default.
138 : : * The call back functions registered to free the resources are
139 : : * assumed to be multi-thread safe.
140 : : * Set this flag if multi-thread safety is not required.
141 : : */
142 : : #define RTE_RCU_QSBR_DQ_MT_UNSAFE 1
143 : :
144 : : /**
145 : : * Parameters used when creating the defer queue.
146 : : */
147 : : struct rte_rcu_qsbr_dq_parameters {
148 : : const char *name;
149 : : /**< Name of the queue. */
150 : : uint32_t flags;
151 : : /**< Flags to control API behaviors */
152 : : uint32_t size;
153 : : /**< Number of entries in queue. Typically, this will be
154 : : * the same as the maximum number of entries supported in the
155 : : * lock free data structure.
156 : : * Data structures with unbounded number of entries is not
157 : : * supported currently.
158 : : */
159 : : uint32_t esize;
160 : : /**< Size (in bytes) of each element in the defer queue.
161 : : * This has to be multiple of 4B.
162 : : */
163 : : uint32_t trigger_reclaim_limit;
164 : : /**< Trigger automatic reclamation after the defer queue
165 : : * has at least these many resources waiting. This auto
166 : : * reclamation is triggered in rte_rcu_qsbr_dq_enqueue API
167 : : * call.
168 : : * If this is greater than 'size', auto reclamation is
169 : : * not triggered.
170 : : * If this is set to 0, auto reclamation is triggered
171 : : * in every call to rte_rcu_qsbr_dq_enqueue API.
172 : : */
173 : : uint32_t max_reclaim_size;
174 : : /**< When automatic reclamation is enabled, reclaim at the max
175 : : * these many resources. This should contain a valid value, if
176 : : * auto reclamation is on. Setting this to 'size' or greater will
177 : : * reclaim all possible resources currently on the defer queue.
178 : : */
179 : : rte_rcu_qsbr_free_resource_t free_fn;
180 : : /**< Function to call to free the resource. */
181 : : void *p;
182 : : /**< Pointer passed to the free function. Typically, this is the
183 : : * pointer to the data structure to which the resource to free
184 : : * belongs. This can be NULL.
185 : : */
186 : : struct rte_rcu_qsbr *v;
187 : : /**< RCU QSBR variable to use for this defer queue */
188 : : };
189 : :
190 : : /* RTE defer queue structure.
191 : : * This structure holds the defer queue. The defer queue is used to
192 : : * hold the deleted entries from the data structure that are not
193 : : * yet freed.
194 : : */
195 : : struct rte_rcu_qsbr_dq;
196 : :
197 : : /**
198 : : * Return the size of the memory occupied by a Quiescent State variable.
199 : : *
200 : : * @param max_threads
201 : : * Maximum number of threads reporting quiescent state on this variable.
202 : : * @return
203 : : * On success - size of memory in bytes required for this QS variable.
204 : : * On error - 1 with error code set in rte_errno.
205 : : * Possible rte_errno codes are:
206 : : * - EINVAL - max_threads is 0
207 : : */
208 : : size_t
209 : : rte_rcu_qsbr_get_memsize(uint32_t max_threads);
210 : :
211 : : /**
212 : : * Initialize a Quiescent State (QS) variable.
213 : : *
214 : : * @param v
215 : : * QS variable
216 : : * @param max_threads
217 : : * Maximum number of threads reporting quiescent state on this variable.
218 : : * This should be the same value as passed to rte_rcu_qsbr_get_memsize.
219 : : * @return
220 : : * On success - 0
221 : : * On error - 1 with error code set in rte_errno.
222 : : * Possible rte_errno codes are:
223 : : * - EINVAL - max_threads is 0 or 'v' is NULL.
224 : : */
225 : : int
226 : : rte_rcu_qsbr_init(struct rte_rcu_qsbr *v, uint32_t max_threads);
227 : :
228 : : /**
229 : : * Register a reader thread to report its quiescent state
230 : : * on a QS variable.
231 : : *
232 : : * This is implemented as a lock-free function. It is multi-thread
233 : : * safe.
234 : : * Any reader thread that wants to report its quiescent state must
235 : : * call this API. This can be called during initialization or as part
236 : : * of the packet processing loop.
237 : : *
238 : : * Note that rte_rcu_qsbr_thread_online must be called before the
239 : : * thread updates its quiescent state using rte_rcu_qsbr_quiescent.
240 : : *
241 : : * @param v
242 : : * QS variable
243 : : * @param thread_id
244 : : * Reader thread with this thread ID will report its quiescent state on
245 : : * the QS variable. thread_id is a value between 0 and (max_threads - 1).
246 : : * 'max_threads' is the parameter passed in 'rte_rcu_qsbr_init' API.
247 : : */
248 : : int
249 : : rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id);
250 : :
251 : : /**
252 : : * Remove a reader thread, from the list of threads reporting their
253 : : * quiescent state on a QS variable.
254 : : *
255 : : * This is implemented as a lock-free function. It is multi-thread safe.
256 : : * This API can be called from the reader threads during shutdown.
257 : : * Ongoing quiescent state queries will stop waiting for the status from this
258 : : * unregistered reader thread.
259 : : *
260 : : * @param v
261 : : * QS variable
262 : : * @param thread_id
263 : : * Reader thread with this thread ID will stop reporting its quiescent
264 : : * state on the QS variable.
265 : : */
266 : : int
267 : : rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id);
268 : :
/**
 * Add a registered reader thread, to the list of threads reporting their
 * quiescent state on a QS variable.
 *
 * This is implemented as a lock-free function. It is multi-thread
 * safe.
 *
 * Any registered reader thread that wants to report its quiescent state must
 * call this API before calling rte_rcu_qsbr_quiescent. This can be called
 * during initialization or as part of the packet processing loop.
 *
 * The reader thread must call rte_rcu_qsbr_thread_offline API, before
 * calling any functions that block, to ensure that rte_rcu_qsbr_check
 * API does not wait indefinitely for the reader thread to update its QS.
 *
 * The reader thread must call rte_rcu_qsbr_thread_online API, after the
 * blocking function call returns, to ensure that rte_rcu_qsbr_check API
 * waits for the reader thread to update its quiescent state.
 *
 * @param v
 *   QS variable
 * @param thread_id
 *   Reader thread with this thread ID will report its quiescent state on
 *   the QS variable.
 */
static __rte_always_inline void
rte_rcu_qsbr_thread_online(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	uint64_t t;

	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	/* Debug build only: going online while holding a reader lock
	 * indicates misuse of the lock/unlock debug API.
	 */
	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* Copy the current value of token.
	 * The fence at the end of the function will ensure that
	 * the following will not move down after the load of any shared
	 * data structure.
	 */
	t = rte_atomic_load_explicit(&v->token, rte_memory_order_relaxed);

	/* rte_atomic_store_explicit(cnt, rte_memory_order_relaxed) is used to ensure
	 * 'cnt' (64b) is accessed atomically. Storing a non-zero token value
	 * is what marks this reader as online.
	 */
	rte_atomic_store_explicit(&v->qsbr_cnt[thread_id].cnt,
		t, rte_memory_order_relaxed);

	/* The subsequent load of the data structure should not
	 * move above the store. Hence a store-load barrier
	 * is required.
	 * If the load of the data structure moves above the store,
	 * writer might not see that the reader is online, even though
	 * the reader is referencing the shared data structure.
	 */
	rte_atomic_thread_fence(rte_memory_order_seq_cst);
}
326 : :
/**
 * Remove a registered reader thread from the list of threads reporting their
 * quiescent state on a QS variable.
 *
 * This is implemented as a lock-free function. It is multi-thread
 * safe.
 *
 * This can be called during initialization or as part of the packet
 * processing loop.
 *
 * The reader thread must call rte_rcu_qsbr_thread_offline API, before
 * calling any functions that block, to ensure that rte_rcu_qsbr_check
 * API does not wait indefinitely for the reader thread to update its QS.
 *
 * @param v
 *   QS variable
 * @param thread_id
 *   rte_rcu_qsbr_check API will not wait for the reader thread with
 *   this thread ID to report its quiescent state on the QS variable.
 */
static __rte_always_inline void
rte_rcu_qsbr_thread_offline(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

	/* Debug build only: going offline while holding a reader lock
	 * indicates misuse of the lock/unlock debug API.
	 */
	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* The reader can go offline only after the load of the
	 * data structure is completed. i.e. any load of the
	 * data structure can not move after this store.
	 * Storing the OFFLINE (zero) value with release ordering makes
	 * rte_rcu_qsbr_check skip this reader from then on.
	 */

	rte_atomic_store_explicit(&v->qsbr_cnt[thread_id].cnt,
		__RTE_QSBR_CNT_THR_OFFLINE, rte_memory_order_release);
}
363 : :
/**
 * Acquire a lock for accessing a shared data structure.
 *
 * This is implemented as a lock-free function. It is multi-thread
 * safe.
 *
 * This API is provided to aid debugging. This should be called before
 * accessing a shared data structure.
 *
 * When RTE_LIBRTE_RCU_DEBUG is enabled a lock counter is incremented.
 * Similarly rte_rcu_qsbr_unlock will decrement the counter. The
 * rte_rcu_qsbr_check API will verify that this counter is 0.
 *
 * When RTE_LIBRTE_RCU_DEBUG is disabled, this API will do nothing.
 *
 * @param v
 *   QS variable
 * @param thread_id
 *   Reader thread id
 */
static __rte_always_inline void
rte_rcu_qsbr_lock(__rte_unused struct rte_rcu_qsbr *v,
			__rte_unused unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

#if defined(RTE_LIBRTE_RCU_DEBUG)
	/* Increment the lock counter */
	rte_atomic_fetch_add_explicit(&v->qsbr_cnt[thread_id].lock_cnt,
				1, rte_memory_order_acquire);
#endif
}
396 : :
/**
 * Release a lock after accessing a shared data structure.
 *
 * This is implemented as a lock-free function. It is multi-thread
 * safe.
 *
 * This API is provided to aid debugging. This should be called after
 * accessing a shared data structure.
 *
 * When RTE_LIBRTE_RCU_DEBUG is enabled, rte_rcu_qsbr_unlock will
 * decrement a lock counter. rte_rcu_qsbr_check API will verify that this
 * counter is 0.
 *
 * When RTE_LIBRTE_RCU_DEBUG is disabled, this API will do nothing.
 *
 * @param v
 *   QS variable
 * @param thread_id
 *   Reader thread id
 */
static __rte_always_inline void
rte_rcu_qsbr_unlock(__rte_unused struct rte_rcu_qsbr *v,
			__rte_unused unsigned int thread_id)
{
	RTE_ASSERT(v != NULL && thread_id < v->max_threads);

#if defined(RTE_LIBRTE_RCU_DEBUG)
	/* Decrement the lock counter */
	rte_atomic_fetch_sub_explicit(&v->qsbr_cnt[thread_id].lock_cnt,
				1, rte_memory_order_release);

	/* A non-zero counter here means lock/unlock calls are unbalanced,
	 * e.g. due to unmatched nested locking.
	 */
	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, WARNING,
				"Lock counter %u. Nested locks?",
				v->qsbr_cnt[thread_id].lock_cnt);
#endif
}
433 : :
434 : : /**
435 : : * Ask the reader threads to report the quiescent state
436 : : * status.
437 : : *
438 : : * This is implemented as a lock-free function. It is multi-thread
439 : : * safe and can be called from worker threads.
440 : : *
441 : : * @param v
442 : : * QS variable
443 : : * @return
444 : : * - This is the token for this call of the API. This should be
445 : : * passed to rte_rcu_qsbr_check API.
446 : : */
447 : : static __rte_always_inline uint64_t
448 : : rte_rcu_qsbr_start(struct rte_rcu_qsbr *v)
449 : : {
450 : : uint64_t t;
451 : :
452 : : RTE_ASSERT(v != NULL);
453 : :
454 : : /* Release the changes to the shared data structure.
455 : : * This store release will ensure that changes to any data
456 : : * structure are visible to the workers before the token
457 : : * update is visible.
458 : : */
459 [ + + + + : 2661 : t = rte_atomic_fetch_add_explicit(&v->token, 1, rte_memory_order_release) + 1;
+ - + - +
- - + - +
- + - + -
+ ]
460 : :
461 : : return t;
462 : : }
463 : :
464 : : /**
465 : : * Update quiescent state for a reader thread.
466 : : *
467 : : * This is implemented as a lock-free function. It is multi-thread safe.
468 : : * All the reader threads registered to report their quiescent state
469 : : * on the QS variable must call this API.
470 : : *
471 : : * @param v
472 : : * QS variable
473 : : * @param thread_id
474 : : * Update the quiescent state for the reader with this thread ID.
475 : : */
476 : : static __rte_always_inline void
477 : : rte_rcu_qsbr_quiescent(struct rte_rcu_qsbr *v, unsigned int thread_id)
478 : : {
479 : : uint64_t t;
480 : :
481 : : RTE_ASSERT(v != NULL && thread_id < v->max_threads);
482 : :
483 : : __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u",
484 : : v->qsbr_cnt[thread_id].lock_cnt);
485 : :
486 : : /* Acquire the changes to the shared data structure released
487 : : * by rte_rcu_qsbr_start.
488 : : * Later loads of the shared data structure should not move
489 : : * above this load. Hence, use load-acquire.
490 : : */
491 : 213781 : t = rte_atomic_load_explicit(&v->token, rte_memory_order_acquire);
492 : :
493 : : /* Check if there are updates available from the writer.
494 : : * Inform the writer that updates are visible to this reader.
495 : : * Prior loads of the shared data structure should not move
496 : : * beyond this store. Hence use store-release.
497 : : */
498 [ + + + - : 213781 : if (t != rte_atomic_load_explicit(&v->qsbr_cnt[thread_id].cnt, rte_memory_order_relaxed))
+ - + - +
- + - + -
+ - + + +
- + - +
- ]
499 : 1560 : rte_atomic_store_explicit(&v->qsbr_cnt[thread_id].cnt,
500 : : t, rte_memory_order_release);
501 : :
502 : : __RTE_RCU_DP_LOG(DEBUG, "%s: update: token = %" PRIu64 ", Thread ID = %d",
503 : : __func__, t, thread_id);
504 : : }
505 : :
506 : : /* Check the quiescent state counter for registered threads only, assuming
507 : : * that not all threads have registered.
508 : : */
509 : : static __rte_always_inline int
510 : : __rte_rcu_qsbr_check_selective(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
511 : : {
512 : : uint32_t i, j, id;
513 : : uint64_t bmap;
514 : : uint64_t c;
515 : : RTE_ATOMIC(uint64_t) *reg_thread_id;
516 : : uint64_t acked_token = __RTE_QSBR_CNT_MAX;
517 : :
518 : 2055 : for (i = 0, reg_thread_id = __RTE_QSBR_THRID_ARRAY_ELM(v, 0);
519 [ + + + + : 4225 : i < v->num_elems;
- - - - +
+ + + - -
- - - - -
- + + + +
- - - - +
+ + + + +
+ + + + -
- + + -
- ]
520 : 2170 : i++, reg_thread_id++) {
521 : : /* Load the current registered thread bit map before
522 : : * loading the reader thread quiescent state counters.
523 : : */
524 : 3140 : bmap = rte_atomic_load_explicit(reg_thread_id, rte_memory_order_acquire);
525 : 3140 : id = i << __RTE_QSBR_THRID_INDEX_SHIFT;
526 : :
527 [ + + + + : 10503256 : while (bmap) {
- - - - +
+ + + - -
- - - - -
- + + + +
- - - - -
+ + + + +
- + + + -
- + + -
- ]
528 : 8 : j = rte_ctz64(bmap);
529 : : __RTE_RCU_DP_LOG(DEBUG,
530 : : "%s: check: token = %" PRIu64 ", wait = %d, Bit Map = 0x%" PRIx64 ", Thread ID = %d",
531 : : __func__, t, wait, bmap, id + j);
532 : 10501086 : c = rte_atomic_load_explicit(
533 : : &v->qsbr_cnt[id + j].cnt,
534 : : rte_memory_order_acquire);
535 : : __RTE_RCU_DP_LOG(DEBUG,
536 : : "%s: status: token = %" PRIu64 ", wait = %d, Thread QS cnt = %" PRIu64 ", Thread ID = %d",
537 : : __func__, t, wait, c, id+j);
538 : :
539 : : /* Counter is not checked for wrap-around condition
540 : : * as it is a 64b counter.
541 : : */
542 [ + + + + : 10501086 : if (unlikely(c !=
- - - - -
+ - + - -
- - - - -
- - + - +
- - - - -
- + - + -
- - - + -
- + - -
- ]
543 : : __RTE_QSBR_CNT_THR_OFFLINE && c < t)) {
544 : : /* This thread is not in quiescent state */
545 [ # # ]: 0 : if (!wait)
546 : : return 0;
547 : :
548 : : rte_pause();
549 : : /* This thread might have unregistered.
550 : : * Re-read the bitmap.
551 : : */
552 : 10499572 : bmap = rte_atomic_load_explicit(reg_thread_id,
553 : : rte_memory_order_acquire);
554 : :
555 : 10499572 : continue;
556 : : }
557 : :
558 : : /* This thread is in quiescent state. Use the counter
559 : : * to find the least acknowledged token among all the
560 : : * readers.
561 : : */
562 [ + + + - : 544 : if (c != __RTE_QSBR_CNT_THR_OFFLINE && acked_token > c)
- - - - -
+ + - - -
- - - - -
- + + + +
- - - - -
- - + - +
- - + - -
- + - -
- ]
563 : : acked_token = c;
564 : :
565 : 544 : bmap &= ~(1UL << j);
566 : : }
567 : : }
568 : :
569 : : /* All readers are checked, update least acknowledged token.
570 : : * There might be multiple writers trying to update this. There is
571 : : * no need to update this very accurately using compare-and-swap.
572 : : */
573 [ + + + + : 1085 : if (acked_token != __RTE_QSBR_CNT_MAX)
- - - - -
+ + - - -
- - - - -
- + - + -
- - - - +
- - + - +
- + + - -
- + - -
- ]
574 : 538 : rte_atomic_store_explicit(&v->acked_token, acked_token,
575 : : rte_memory_order_relaxed);
576 : :
577 : : return 1;
578 : : }
579 : :
/* Check the quiescent state counter for all threads, assuming that
 * all the threads have registered.
 *
 * Iterates over every per-thread counter; for each thread, spins (or
 * bails out when 'wait' is false) until the thread is offline or has
 * acknowledged token 't'. Also tracks the least acknowledged token and
 * publishes it to v->acked_token as a fast-path cache for
 * rte_rcu_qsbr_check.
 *
 * Returns 1 when all readers have passed token 't', 0 when 'wait' is
 * false and some reader has not.
 */
static __rte_always_inline int
__rte_rcu_qsbr_check_all(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
{
	uint32_t i;
	struct rte_rcu_qsbr_cnt *cnt;
	uint64_t c;
	uint64_t acked_token = __RTE_QSBR_CNT_MAX;

	for (i = 0, cnt = v->qsbr_cnt; i < v->max_threads; i++, cnt++) {
		__RTE_RCU_DP_LOG(DEBUG,
			"%s: check: token = %" PRIu64 ", wait = %d, Thread ID = %d",
			__func__, t, wait, i);
		while (1) {
			c = rte_atomic_load_explicit(&cnt->cnt, rte_memory_order_acquire);
			__RTE_RCU_DP_LOG(DEBUG,
				"%s: status: token = %" PRIu64 ", wait = %d, Thread QS cnt = %" PRIu64 ", Thread ID = %d",
				__func__, t, wait, c, i);

			/* Counter is not checked for wrap-around condition
			 * as it is a 64b counter.
			 */
			if (likely(c == __RTE_QSBR_CNT_THR_OFFLINE || c >= t))
				break;

			/* This thread is not in quiescent state */
			if (!wait)
				return 0;

			rte_pause();
		}

		/* This thread is in quiescent state. Use the counter to find
		 * the least acknowledged token among all the readers.
		 */
		if (likely(c != __RTE_QSBR_CNT_THR_OFFLINE && acked_token > c))
			acked_token = c;
	}

	/* All readers are checked, update least acknowledged token.
	 * There might be multiple writers trying to update this. There is
	 * no need to update this very accurately using compare-and-swap.
	 */
	if (acked_token != __RTE_QSBR_CNT_MAX)
		rte_atomic_store_explicit(&v->acked_token, acked_token,
					rte_memory_order_relaxed);

	return 1;
}
631 : :
632 : : /**
633 : : * Checks if all the reader threads have entered the quiescent state
634 : : * referenced by token.
635 : : *
636 : : * This is implemented as a lock-free function. It is multi-thread
637 : : * safe and can be called from the worker threads as well.
638 : : *
639 : : * If this API is called with 'wait' set to true, the following
640 : : * factors must be considered:
641 : : *
642 : : * 1) If the calling thread is also reporting the status on the
643 : : * same QS variable, it must update the quiescent state status, before
644 : : * calling this API.
645 : : *
646 : : * 2) In addition, while calling from multiple threads, only
647 : : * one of those threads can be reporting the quiescent state status
648 : : * on a given QS variable.
649 : : *
650 : : * @param v
651 : : * QS variable
652 : : * @param t
653 : : * Token returned by rte_rcu_qsbr_start API
654 : : * @param wait
655 : : * If true, block till all the reader threads have completed entering
656 : : * the quiescent state referenced by token 't'.
657 : : * @return
658 : : * - 0 if all reader threads have NOT passed through specified number
659 : : * of quiescent states.
660 : : * - 1 if all reader threads have passed through specified number
661 : : * of quiescent states.
662 : : */
663 : : static __rte_always_inline int
664 : : rte_rcu_qsbr_check(struct rte_rcu_qsbr *v, uint64_t t, bool wait)
665 : : {
666 : : RTE_ASSERT(v != NULL);
667 : :
668 : : /* Check if all the readers have already acknowledged this token */
669 [ + + + - : 3629 : if (likely(t <= v->acked_token)) {
- + - - +
- + - + -
+ - + - +
- + - + -
+ - + - +
- + - + -
+ - + - +
- + + -
+ ]
670 : : __RTE_RCU_DP_LOG(DEBUG,
671 : : "%s: check: token = %" PRIu64 ", wait = %d",
672 : : __func__, t, wait);
673 : : __RTE_RCU_DP_LOG(DEBUG,
674 : : "%s: status: least acked token = %" PRIu64,
675 : : __func__, v->acked_token);
676 : : return 1;
677 : : }
678 : :
679 [ + + + + : 2579 : if (likely(v->num_threads == v->max_threads))
- - - - -
+ - + + -
+ - + - +
- - + - +
+ - + - -
+ - + - +
- + - + +
- - + -
- ]
680 : : return __rte_rcu_qsbr_check_all(v, t, wait);
681 : : else
682 : : return __rte_rcu_qsbr_check_selective(v, t, wait);
683 : : }
684 : :
685 : : /**
686 : : * Wait till the reader threads have entered quiescent state.
687 : : *
688 : : * This is implemented as a lock-free function. It is multi-thread safe.
689 : : * This API can be thought of as a wrapper around rte_rcu_qsbr_start and
690 : : * rte_rcu_qsbr_check APIs.
691 : : *
692 : : * If this API is called from multiple threads, only one of
693 : : * those threads can be reporting the quiescent state status on a
694 : : * given QS variable.
695 : : *
696 : : * @param v
697 : : * QS variable
698 : : * @param thread_id
699 : : * Thread ID of the caller if it is registered to report quiescent state
700 : : * on this QS variable (i.e. the calling thread is also part of the
701 : : * readside critical section). If not, pass RTE_QSBR_THRID_INVALID.
702 : : */
703 : : void
704 : : rte_rcu_qsbr_synchronize(struct rte_rcu_qsbr *v, unsigned int thread_id);
705 : :
706 : : /**
707 : : * Dump the details of a single QS variables to a file.
708 : : *
709 : : * It is NOT multi-thread safe.
710 : : *
711 : : * @param f
712 : : * A pointer to a file for output
713 : : * @param v
714 : : * QS variable
715 : : * @return
716 : : * On success - 0
717 : : * On error - 1 with error code set in rte_errno.
718 : : * Possible rte_errno codes are:
719 : : * - EINVAL - NULL parameters are passed
720 : : */
721 : : int
722 : : rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v);
723 : :
724 : : /**
725 : : * Create a queue used to store the data structure elements that can
726 : : * be freed later. This queue is referred to as 'defer queue'.
727 : : *
728 : : * @param params
729 : : * Parameters to create a defer queue.
730 : : * @return
731 : : * On success - Valid pointer to defer queue
732 : : * On error - NULL
733 : : * Possible rte_errno codes are:
734 : : * - EINVAL - NULL parameters are passed
735 : : * - ENOMEM - Not enough memory
736 : : */
737 : : struct rte_rcu_qsbr_dq *
738 : : rte_rcu_qsbr_dq_create(const struct rte_rcu_qsbr_dq_parameters *params);
739 : :
740 : : /**
741 : : * Enqueue one resource to the defer queue and start the grace period.
742 : : * The resource will be freed later after at least one grace period
743 : : * is over.
744 : : *
745 : : * If the defer queue is full, it will attempt to reclaim resources.
746 : : * It will also reclaim resources at regular intervals to avoid
747 : : * the defer queue from growing too big.
748 : : *
749 : : * Multi-thread safety is provided as the defer queue configuration.
750 : : * When multi-thread safety is requested, it is possible that the
751 : : * resources are not stored in their order of deletion. This results
752 : : * in resources being held in the defer queue longer than they should.
753 : : *
754 : : * @param dq
755 : : * Defer queue to allocate an entry from.
756 : : * @param e
757 : : * Pointer to resource data to copy to the defer queue. The size of
758 : : * the data to copy is equal to the element size provided when the
759 : : * defer queue was created.
760 : : * @return
761 : : * On success - 0
762 : : * On error - 1 with rte_errno set to
763 : : * - EINVAL - NULL parameters are passed
764 : : * - ENOSPC - Defer queue is full. This condition can not happen
765 : : * if the defer queue size is equal (or larger) than the
766 : : * number of elements in the data structure.
767 : : */
768 : : int
769 : : rte_rcu_qsbr_dq_enqueue(struct rte_rcu_qsbr_dq *dq, void *e);
770 : :
771 : : /**
772 : : * Free resources from the defer queue.
773 : : *
774 : : * This API is multi-thread safe.
775 : : *
776 : : * @param dq
777 : : * Defer queue to free an entry from.
778 : : * @param n
779 : : * Maximum number of resources to free.
780 : : * @param freed
781 : : * Number of resources that were freed.
782 : : * @param pending
783 : : * Number of resources pending on the defer queue. This number might not
784 : : * be accurate if multi-thread safety is configured.
785 : : * @param available
786 : : * Number of resources that can be added to the defer queue.
787 : : * This number might not be accurate if multi-thread safety is configured.
788 : : * @return
789 : : * On successful reclamation of at least 1 resource - 0
790 : : * On error - 1 with rte_errno set to
791 : : * - EINVAL - NULL parameters are passed
792 : : */
793 : : int
794 : : rte_rcu_qsbr_dq_reclaim(struct rte_rcu_qsbr_dq *dq, unsigned int n,
795 : : unsigned int *freed, unsigned int *pending, unsigned int *available);
796 : :
797 : : /**
798 : : * Delete a defer queue.
799 : : *
800 : : * It tries to reclaim all the resources on the defer queue.
801 : : * If any of the resources have not completed the grace period
802 : : * the reclamation stops and returns immediately. The rest of
803 : : * the resources are not reclaimed and the defer queue is not
804 : : * freed.
805 : : *
806 : : * @param dq
807 : : * Defer queue to delete.
808 : : * @return
809 : : * On success - 0
810 : : * On error - 1
811 : : * Possible rte_errno codes are:
812 : : * - EAGAIN - Some of the resources have not completed at least 1 grace
813 : : * period, try again.
814 : : */
815 : : int
816 : : rte_rcu_qsbr_dq_delete(struct rte_rcu_qsbr_dq *dq);
817 : :
818 : : #ifdef __cplusplus
819 : : }
820 : : #endif
821 : :
822 : : #endif /* _RTE_RCU_QSBR_H_ */
|