DPDK 20.11.5
rte_ring_elem.h

/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2019 Arm Limited
 * Copyright (c) 2010-2017 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_ELEM_H_
#define _RTE_RING_ELEM_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_ring_core.h>

ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);

struct rte_ring *rte_ring_create_elem(const char *name, unsigned int esize,
		unsigned int count, int socket_id, unsigned int flags);

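/*
 * Usage sketch (illustrative addition, not part of the upstream header):
 * create a ring whose elements are 16-byte structs. The struct and ring
 * names are hypothetical; esize must be a multiple of 4, so passing
 * sizeof() of the element type is the usual pattern.
 *
 *	struct my_event {	// 16 bytes on 64-bit targets
 *		uint64_t id;
 *		void *data;
 *	};
 *
 *	struct rte_ring *r = rte_ring_create_elem("my_event_ring",
 *			sizeof(struct my_event), 1024, rte_socket_id(),
 *			RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	if (r == NULL)
 *		rte_panic("cannot create event ring\n");
 */
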
/* Copy 'n' uint32_t slots from obj_table into the ring starting at 'idx',
 * wrapping around at 'size'. The common (non-wrapping) path is unrolled
 * eight slots per iteration; the switch copies the remainder.
 */
static __rte_always_inline void
__rte_ring_enqueue_elems_32(struct rte_ring *r, const uint32_t size,
		uint32_t idx, const void *obj_table, uint32_t n)
{
	unsigned int i;
	uint32_t *ring = (uint32_t *)&r[1];
	const uint32_t *obj = (const uint32_t *)obj_table;
	if (likely(idx + n <= size)) {
		for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
			ring[idx] = obj[i];
			ring[idx + 1] = obj[i + 1];
			ring[idx + 2] = obj[i + 2];
			ring[idx + 3] = obj[i + 3];
			ring[idx + 4] = obj[i + 4];
			ring[idx + 5] = obj[i + 5];
			ring[idx + 6] = obj[i + 6];
			ring[idx + 7] = obj[i + 7];
		}
		switch (n & 0x7) {
		case 7:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 6:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 5:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 4:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 3:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 2:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 1:
			ring[idx++] = obj[i++];
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			ring[idx] = obj[i];
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			ring[idx] = obj[i];
	}
}

static __rte_always_inline void
__rte_ring_enqueue_elems_64(struct rte_ring *r, uint32_t prod_head,
		const void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = prod_head & r->mask;
	uint64_t *ring = (uint64_t *)&r[1];
	const unaligned_uint64_t *obj = (const unaligned_uint64_t *)obj_table;
	if (likely(idx + n <= size)) {
		for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
			ring[idx] = obj[i];
			ring[idx + 1] = obj[i + 1];
			ring[idx + 2] = obj[i + 2];
			ring[idx + 3] = obj[i + 3];
		}
		switch (n & 0x3) {
		case 3:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 2:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 1:
			ring[idx++] = obj[i++];
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			ring[idx] = obj[i];
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			ring[idx] = obj[i];
	}
}

static __rte_always_inline void
__rte_ring_enqueue_elems_128(struct rte_ring *r, uint32_t prod_head,
		const void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = prod_head & r->mask;
	rte_int128_t *ring = (rte_int128_t *)&r[1];
	const rte_int128_t *obj = (const rte_int128_t *)obj_table;
	if (likely(idx + n <= size)) {
		for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 32);
		switch (n & 0x1) {
		case 1:
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 16);
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 16);
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 16);
	}
}

/* The actual enqueue of elements on the ring.
 * Placed here since identical code is needed in both
 * single and multi producer enqueue functions.
 */
static __rte_always_inline void
__rte_ring_enqueue_elems(struct rte_ring *r, uint32_t prod_head,
		const void *obj_table, uint32_t esize, uint32_t num)
{
	/* 8B and 16B copies implemented individually to retain
	 * the current performance.
	 */
	if (esize == 8)
		__rte_ring_enqueue_elems_64(r, prod_head, obj_table, num);
	else if (esize == 16)
		__rte_ring_enqueue_elems_128(r, prod_head, obj_table, num);
	else {
		uint32_t idx, scale, nr_idx, nr_num, nr_size;

		/* Normalize to uint32_t */
		scale = esize / sizeof(uint32_t);
		nr_num = num * scale;
		idx = prod_head & r->mask;
		nr_idx = idx * scale;
		nr_size = r->size * scale;
		__rte_ring_enqueue_elems_32(r, nr_size, nr_idx,
			obj_table, nr_num);
	}
}

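/*
 * Worked example (illustrative addition): with esize = 12, each element is
 * treated as three consecutive uint32_t slots, so scale = 12 / 4 = 3.
 * Enqueuing num = 10 elements into a ring of size 1024 copies
 * nr_num = 30 slots into a normalized ring of nr_size = 3072 slots,
 * starting at nr_idx = 3 * (prod_head & r->mask).
 */
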
/* Copy 'n' uint32_t slots from the ring starting at 'idx' into obj_table,
 * wrapping around at 'size'. Mirror image of __rte_ring_enqueue_elems_32.
 */
static __rte_always_inline void
__rte_ring_dequeue_elems_32(struct rte_ring *r, const uint32_t size,
		uint32_t idx, void *obj_table, uint32_t n)
{
	unsigned int i;
	uint32_t *ring = (uint32_t *)&r[1];
	uint32_t *obj = (uint32_t *)obj_table;
	if (likely(idx + n <= size)) {
		for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
			obj[i] = ring[idx];
			obj[i + 1] = ring[idx + 1];
			obj[i + 2] = ring[idx + 2];
			obj[i + 3] = ring[idx + 3];
			obj[i + 4] = ring[idx + 4];
			obj[i + 5] = ring[idx + 5];
			obj[i + 6] = ring[idx + 6];
			obj[i + 7] = ring[idx + 7];
		}
		switch (n & 0x7) {
		case 7:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 6:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 5:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 4:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 3:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 2:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 1:
			obj[i++] = ring[idx++];
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			obj[i] = ring[idx];
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			obj[i] = ring[idx];
	}
}

static __rte_always_inline void
__rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t cons_head,
		void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = cons_head & r->mask;
	uint64_t *ring = (uint64_t *)&r[1];
	unaligned_uint64_t *obj = (unaligned_uint64_t *)obj_table;
	if (likely(idx + n <= size)) {
		for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
			obj[i] = ring[idx];
			obj[i + 1] = ring[idx + 1];
			obj[i + 2] = ring[idx + 2];
			obj[i + 3] = ring[idx + 3];
		}
		switch (n & 0x3) {
		case 3:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 2:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 1:
			obj[i++] = ring[idx++];
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			obj[i] = ring[idx];
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			obj[i] = ring[idx];
	}
}

static __rte_always_inline void
__rte_ring_dequeue_elems_128(struct rte_ring *r, uint32_t cons_head,
		void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = cons_head & r->mask;
	rte_int128_t *ring = (rte_int128_t *)&r[1];
	rte_int128_t *obj = (rte_int128_t *)obj_table;
	if (likely(idx + n <= size)) {
		for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
			memcpy((void *)(obj + i), (void *)(ring + idx), 32);
		switch (n & 0x1) {
		case 1:
			memcpy((void *)(obj + i), (void *)(ring + idx), 16);
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			memcpy((void *)(obj + i), (void *)(ring + idx), 16);
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			memcpy((void *)(obj + i), (void *)(ring + idx), 16);
	}
}

/* The actual dequeue of elements from the ring.
 * Placed here since identical code is needed in both
 * single and multi consumer dequeue functions.
 */
static __rte_always_inline void
__rte_ring_dequeue_elems(struct rte_ring *r, uint32_t cons_head,
		void *obj_table, uint32_t esize, uint32_t num)
{
	/* 8B and 16B copies implemented individually to retain
	 * the current performance.
	 */
	if (esize == 8)
		__rte_ring_dequeue_elems_64(r, cons_head, obj_table, num);
	else if (esize == 16)
		__rte_ring_dequeue_elems_128(r, cons_head, obj_table, num);
	else {
		uint32_t idx, scale, nr_idx, nr_num, nr_size;

		/* Normalize to uint32_t */
		scale = esize / sizeof(uint32_t);
		nr_num = num * scale;
		idx = cons_head & r->mask;
		nr_idx = idx * scale;
		nr_size = r->size * scale;
		__rte_ring_dequeue_elems_32(r, nr_size, nr_idx,
			obj_table, nr_num);
	}
}

/* Between two loads, the CPU may reorder memory accesses on weakly
 * ordered models (PowerPC/Arm).
 * There are two choices for users:
 * 1. use an rmb() memory barrier
 * 2. use one-direction load_acquire/store_release barriers
 * Which is faster depends on performance test results.
 * By default, the common functions live in rte_ring_generic.h.
 */
#ifdef RTE_USE_C11_MEM_MODEL
#include "rte_ring_c11_mem.h"
#else
#include "rte_ring_generic.h"
#endif

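/*
 * Simplified sketch (illustrative addition, not code copied from either
 * header) of the two synchronization styles described above, as seen when
 * a consumer reads the head/tail indexes:
 *
 *	// rte_ring_generic.h style: plain loads separated by a read barrier
 *	cons_head = r->cons.head;
 *	rte_smp_rmb();
 *	prod_tail = r->prod.tail;
 *
 *	// rte_ring_c11_mem.h style: one-direction acquire load
 *	cons_head = __atomic_load_n(&r->cons.head, __ATOMIC_RELAXED);
 *	prod_tail = __atomic_load_n(&r->prod.tail, __ATOMIC_ACQUIRE);
 */
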
/* Driver for all enqueue variants: reserve space by moving the producer
 * head, copy the elements in, then make them visible by updating the tail.
 */
static __rte_always_inline unsigned int
__rte_ring_do_enqueue_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n,
		enum rte_ring_queue_behavior behavior, unsigned int is_sp,
		unsigned int *free_space)
{
	uint32_t prod_head, prod_next;
	uint32_t free_entries;

	n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
			&prod_head, &prod_next, &free_entries);
	if (n == 0)
		goto end;

	__rte_ring_enqueue_elems(r, prod_head, obj_table, esize, n);

	update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
end:
	if (free_space != NULL)
		*free_space = free_entries - n;
	return n;
}

/* Driver for all dequeue variants: claim entries by moving the consumer
 * head, copy the elements out, then release them by updating the tail.
 */
static __rte_always_inline unsigned int
__rte_ring_do_dequeue_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n,
		enum rte_ring_queue_behavior behavior, unsigned int is_sc,
		unsigned int *available)
{
	uint32_t cons_head, cons_next;
	uint32_t entries;

	n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
			&cons_head, &cons_next, &entries);
	if (n == 0)
		goto end;

	__rte_ring_dequeue_elems(r, cons_head, obj_table, esize, n);

	update_tail(&r->cons, cons_head, cons_next, is_sc, 0);

end:
	if (available != NULL)
		*available = entries - n;
	return n;
}

static __rte_always_inline unsigned int
rte_ring_mp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, free_space);
}

static __rte_always_inline unsigned int
rte_ring_sp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, free_space);
}

#ifdef ALLOW_EXPERIMENTAL_API
#include <rte_ring_hts.h>
#include <rte_ring_rts.h>
#endif

static __rte_always_inline unsigned int
rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_ST:
		return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (free_space != NULL)
		*free_space = 0;
	return 0;
}

static __rte_always_inline int
rte_ring_mp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
	return rte_ring_mp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
		-ENOBUFS;
}

static __rte_always_inline int
rte_ring_sp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
	return rte_ring_sp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
		-ENOBUFS;
}

static __rte_always_inline int
rte_ring_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
	return rte_ring_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
		-ENOBUFS;
}

static __rte_always_inline unsigned int
rte_ring_mc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, available);
}

static __rte_always_inline unsigned int
rte_ring_sc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, available);
}

static __rte_always_inline unsigned int
rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n,
			available);
	case RTE_RING_SYNC_ST:
		return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n,
			available);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize,
			n, available);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize,
			n, available);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (available != NULL)
		*available = 0;
	return 0;
}

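/*
 * Bulk round-trip sketch (illustrative addition, hypothetical names): bulk
 * calls are all-or-nothing, so each call below either moves all 8 elements
 * or none (the return value is the number actually transferred).
 *
 *	struct my_event burst[8];
 *	// ... fill burst[] ...
 *	if (rte_ring_enqueue_bulk_elem(r, burst, sizeof(burst[0]), 8,
 *			NULL) == 0)
 *		; // ring full: nothing was enqueued
 *
 *	struct my_event out[8];
 *	if (rte_ring_dequeue_bulk_elem(r, out, sizeof(out[0]), 8, NULL) == 8)
 *		; // all 8 elements were dequeued
 */
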
static __rte_always_inline int
rte_ring_mc_dequeue_elem(struct rte_ring *r, void *obj_p,
		unsigned int esize)
{
	return rte_ring_mc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
		-ENOENT;
}

static __rte_always_inline int
rte_ring_sc_dequeue_elem(struct rte_ring *r, void *obj_p,
		unsigned int esize)
{
	return rte_ring_sc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
		-ENOENT;
}

static __rte_always_inline int
rte_ring_dequeue_elem(struct rte_ring *r, void *obj_p, unsigned int esize)
{
	return rte_ring_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
		-ENOENT;
}

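/*
 * Single-element round trip (illustrative addition, hypothetical names):
 * the element is copied into and out of the ring by value, so no pointer
 * to the caller's object is retained.
 *
 *	struct my_event ev = { .id = 1, .data = NULL };
 *	if (rte_ring_enqueue_elem(r, &ev, sizeof(ev)) != 0)
 *		; // -ENOBUFS: ring is full
 *
 *	struct my_event out;
 *	if (rte_ring_dequeue_elem(r, &out, sizeof(out)) != 0)
 *		; // -ENOENT: ring is empty
 */
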
static __rte_always_inline unsigned int
rte_ring_mp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, free_space);
}

static __rte_always_inline unsigned int
rte_ring_sp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, free_space);
}

static __rte_always_inline unsigned int
rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_ST:
		return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n,
			free_space);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize,
			n, free_space);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize,
			n, free_space);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (free_space != NULL)
		*free_space = 0;
	return 0;
}

static __rte_always_inline unsigned int
rte_ring_mc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, available);
}

static __rte_always_inline unsigned int
rte_ring_sc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, available);
}

static __rte_always_inline unsigned int
rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n,
			available);
	case RTE_RING_SYNC_ST:
		return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n,
			available);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize,
			n, available);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize,
			n, available);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (available != NULL)
		*available = 0;
	return 0;
}

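/*
 * Burst drain sketch (illustrative addition, hypothetical names): unlike
 * bulk, burst calls transfer as many elements as possible, so the loop
 * runs until the ring is empty.
 *
 *	struct my_event out[32];
 *	unsigned int nb;
 *
 *	do {
 *		nb = rte_ring_dequeue_burst_elem(r, out, sizeof(out[0]),
 *				RTE_DIM(out), NULL);
 *		// ... process nb elements ...
 *	} while (nb != 0);
 */
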
#ifdef ALLOW_EXPERIMENTAL_API
#include <rte_ring_peek.h>
#include <rte_ring_peek_zc.h>
#endif

#include <rte_ring.h>

#ifdef __cplusplus
}
#endif

#endif /* _RTE_RING_ELEM_H_ */