DPDK 22.11.1
rte_mempool.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright(c) 2016 6WIND S.A.
 * Copyright(c) 2022 SmartShare Systems
 */

#ifndef _RTE_MEMPOOL_H_
#define _RTE_MEMPOOL_H_

/**
 * @file
 * RTE Mempool.
 *
 * A memory pool is an allocator of fixed-size objects. It is identified
 * by its name and uses a mempool handler to store free objects. The
 * default handler is a ring, with per-lcore caches in front of it to
 * limit contention.
 */

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_compat.h>
#include <rte_config.h>
#include <rte_spinlock.h>
#include <rte_debug.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_memcpy.h>
#include <rte_common.h>

#include "rte_mempool_trace_fp.h"

#ifdef __cplusplus
extern "C" {
#endif

#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */
#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie. */

#ifdef RTE_LIBRTE_MEMPOOL_STATS
/**
 * A structure that stores the mempool statistics (per-lcore).
 */
struct rte_mempool_debug_stats {
	uint64_t put_bulk;             /**< Number of puts. */
	uint64_t put_objs;             /**< Number of objects successfully put. */
	uint64_t put_common_pool_bulk; /**< Number of bulks enqueued in common pool. */
	uint64_t put_common_pool_objs; /**< Number of objects enqueued in common pool. */
	uint64_t get_common_pool_bulk; /**< Number of bulks dequeued from common pool. */
	uint64_t get_common_pool_objs; /**< Number of objects dequeued from common pool. */
	uint64_t get_success_bulk;     /**< Successful allocation number. */
	uint64_t get_success_objs;     /**< Objects successfully allocated. */
	uint64_t get_fail_bulk;        /**< Failed allocation number. */
	uint64_t get_fail_objs;        /**< Objects that failed to be allocated. */
	uint64_t get_success_blks;     /**< Successful allocation number of contiguous blocks. */
	uint64_t get_fail_blks;        /**< Failed allocation number of contiguous blocks. */
} __rte_cache_aligned;
#endif

/**
 * A structure that stores a per-lcore object cache.
 */
struct rte_mempool_cache {
	uint32_t size;        /**< Size of the cache */
	uint32_t flushthresh; /**< Threshold before we flush excess elements */
	uint32_t len;         /**< Current cache count */
#ifdef RTE_LIBRTE_MEMPOOL_STATS
	uint32_t unused;
	/*
	 * Alternative location for the most frequently updated mempool statistics (per-lcore),
	 * providing faster update access when using a mempool cache.
	 */
	struct {
		uint64_t put_bulk;         /**< Number of puts. */
		uint64_t put_objs;         /**< Number of objects successfully put. */
		uint64_t get_success_bulk; /**< Successful allocation number. */
		uint64_t get_success_objs; /**< Objects successfully allocated. */
	} stats;                       /**< Statistics */
#endif
	/**
	 * Cache objects
	 *
	 * The cache is allocated to twice this size to allow it to overflow
	 * in certain cases, avoiding needless emptying of the cache.
	 */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 2] __rte_cache_aligned;
} __rte_cache_aligned;

/**
 * A structure that stores the size of mempool elements.
 */
struct rte_mempool_objsz {
	uint32_t elt_size;     /**< Size of an element. */
	uint32_t header_size;  /**< Size of header (before elt). */
	uint32_t trailer_size; /**< Size of trailer (after elt). */
	uint32_t total_size;
	/**< Total size of an object (header + elt + trailer). */
};

/** Maximum length of a memory pool's name. */
#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
			      sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* "MP_<name>" */
#define RTE_MEMPOOL_MZ_FORMAT	RTE_MEMPOOL_MZ_PREFIX "%s"

#ifndef RTE_MEMPOOL_ALIGN
/**
 * Alignment of elements inside mempool.
 */
#define RTE_MEMPOOL_ALIGN	RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK	(RTE_MEMPOOL_ALIGN - 1)

/**
 * Mempool object header structure
 *
 * Each object stored in mempools is prefixed by this header structure;
 * it allows retrieving the mempool pointer from the object and iterating
 * on all objects attached to a mempool.
 */
struct rte_mempool_objhdr {
	RTE_STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
	struct rte_mempool *mp;  /**< The mempool owning the object. */
	rte_iova_t iova;         /**< IO address of the object. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie;         /**< Debug cookie. */
#endif
};

/**
 * A list of object headers type
 */
RTE_STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG

/**
 * Mempool object trailer structure (for debug mode)
 *
 * In debug mode, each object stored in mempools is suffixed by this
 * trailer structure containing a cookie preventing memory corruptions.
 */
struct rte_mempool_objtlr {
	uint64_t cookie; /**< Debug cookie. */
};

#endif

/**
 * A list of memory where objects are stored
 */
RTE_STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);

/**
 * Callback used to free a memory chunk
 */
typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
	void *opaque);

/**
 * Mempool objects memory header structure
 *
 * The memory chunks where objects are stored are registered to the
 * mempool using this structure.
 */
struct rte_mempool_memhdr {
	RTE_STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list. */
	struct rte_mempool *mp;  /**< The mempool owning the chunk */
	void *addr;              /**< Virtual address of the chunk */
	rte_iova_t iova;         /**< IO address of the chunk */
	size_t len;              /**< length of the chunk */
	rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */
	void *opaque;            /**< Argument passed to the free callback */
};

/**
 * Additional information about the mempool
 *
 * The structure is cache-line aligned to avoid ABI breakages in
 * a number of cases when something small is added.
 */
struct rte_mempool_info {
	/** Number of objects in the contiguous block */
	unsigned int contig_block_size;
} __rte_cache_aligned;

/**
 * The RTE mempool structure.
 */
struct rte_mempool {
	char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
	RTE_STD_C11
	union {
		void *pool_data;  /**< Ring or pool to store objects. */
		uint64_t pool_id; /**< External mempool identifier. */
	};
	void *pool_config;            /**< optional args for ops alloc. */
	const struct rte_memzone *mz; /**< Memzone where pool is alloc'd. */
	unsigned int flags;           /**< Flags of the mempool. */
	int socket_id;                /**< Socket id passed at create. */
	uint32_t size;                /**< Max size of the mempool. */
	uint32_t cache_size;
	/**< Size of per-lcore default local cache. */

	uint32_t elt_size;            /**< Size of an element. */
	uint32_t header_size;         /**< Size of header (before elt). */
	uint32_t trailer_size;        /**< Size of trailer (after elt). */

	unsigned private_data_size;   /**< Size of private data. */
	/**
	 * Index into rte_mempool_ops_table array of mempool ops
	 * structs, which contain callback function pointers.
	 * We're using an index here rather than pointers to the callbacks
	 * to facilitate any secondary processes that may want to use
	 * this mempool.
	 */
	int32_t ops_index;

	struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */

	uint32_t populated_size;      /**< Number of populated objects. */
	struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */
	uint32_t nb_mem_chunks;       /**< Number of memory chunks */
	struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */

#ifdef RTE_LIBRTE_MEMPOOL_STATS
	/** Per-lcore statistics.
	 *
	 * Plus one, for unregistered non-EAL threads.
	 */
	struct rte_mempool_debug_stats stats[RTE_MAX_LCORE + 1];
#endif
} __rte_cache_aligned;

/** Spreading among memory channels not required. */
#define RTE_MEMPOOL_F_NO_SPREAD 0x0001
/** Backward compatibility synonym for RTE_MEMPOOL_F_NO_SPREAD. */
#define MEMPOOL_F_NO_SPREAD RTE_MEMPOOL_F_NO_SPREAD
/** Do not align objects on cache lines. */
#define RTE_MEMPOOL_F_NO_CACHE_ALIGN 0x0002
/** Backward compatibility synonym for RTE_MEMPOOL_F_NO_CACHE_ALIGN. */
#define MEMPOOL_F_NO_CACHE_ALIGN RTE_MEMPOOL_F_NO_CACHE_ALIGN
/** Default put is "single-producer". */
#define RTE_MEMPOOL_F_SP_PUT 0x0004
/** Backward compatibility synonym for RTE_MEMPOOL_F_SP_PUT. */
#define MEMPOOL_F_SP_PUT RTE_MEMPOOL_F_SP_PUT
/** Default get is "single-consumer". */
#define RTE_MEMPOOL_F_SC_GET 0x0008
/** Backward compatibility synonym for RTE_MEMPOOL_F_SC_GET. */
#define MEMPOOL_F_SC_GET RTE_MEMPOOL_F_SC_GET
/** Internal: pool is created. */
#define RTE_MEMPOOL_F_POOL_CREATED 0x0010
/** Don't need IOVA contiguous objects. */
#define RTE_MEMPOOL_F_NO_IOVA_CONTIG 0x0020
/** Backward compatibility synonym for RTE_MEMPOOL_F_NO_IOVA_CONTIG. */
#define MEMPOOL_F_NO_IOVA_CONTIG RTE_MEMPOOL_F_NO_IOVA_CONTIG
/** Internal: no object from the pool can be used for device IO (DMA). */
#define RTE_MEMPOOL_F_NON_IO 0x0040

/**
 * This macro lists all the mempool flags an application may request.
 */
#define RTE_MEMPOOL_VALID_USER_FLAGS (RTE_MEMPOOL_F_NO_SPREAD \
			| RTE_MEMPOOL_F_NO_CACHE_ALIGN \
			| RTE_MEMPOOL_F_SP_PUT \
			| RTE_MEMPOOL_F_SC_GET \
			| RTE_MEMPOOL_F_NO_IOVA_CONTIG \
			)

/**
 * @internal When stats is enabled, store some statistics.
 */
#ifdef RTE_LIBRTE_MEMPOOL_STATS
#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {                  \
		unsigned int __lcore_id = rte_lcore_id();       \
		if (likely(__lcore_id < RTE_MAX_LCORE))         \
			(mp)->stats[__lcore_id].name += (n);    \
		else                                            \
			__atomic_fetch_add(&((mp)->stats[RTE_MAX_LCORE].name), \
					   (n), __ATOMIC_RELAXED); \
	} while (0)
#else
#define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {} while (0)
#endif

/**
 * @internal When stats is enabled, store some statistics in the mempool cache.
 */
#ifdef RTE_LIBRTE_MEMPOOL_STATS
#define RTE_MEMPOOL_CACHE_STAT_ADD(cache, name, n) ((cache)->stats.name += (n))
#else
#define RTE_MEMPOOL_CACHE_STAT_ADD(cache, name, n) do {} while (0)
#endif

/**
 * @internal Calculate the size of the mempool header.
 */
#define RTE_MEMPOOL_HEADER_SIZE(mp, cs) \
	(sizeof(*(mp)) + (((cs) == 0) ? 0 : \
	(sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE)))

/* return the header of a mempool object (internal) */
static inline struct rte_mempool_objhdr *
rte_mempool_get_header(void *obj)
{
	return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj,
		sizeof(struct rte_mempool_objhdr));
}

/**
 * Return a pointer to the mempool owning this object.
 */
static inline struct rte_mempool *rte_mempool_from_obj(void *obj)
{
	struct rte_mempool_objhdr *hdr = rte_mempool_get_header(obj);
	return hdr->mp;
}

/* return the trailer of a mempool object (internal) */
static inline struct rte_mempool_objtlr *rte_mempool_get_trailer(void *obj)
{
	struct rte_mempool *mp = rte_mempool_from_obj(obj);
	return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size);
}

/**
 * @internal Check cookies and update them, or panic on corruption.
 */
void rte_mempool_check_cookies(const struct rte_mempool *mp,
	void * const *obj_table_const, unsigned n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) \
	rte_mempool_check_cookies(mp, obj_table_const, n, free)
#else
#define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

/**
 * @internal Check contiguous object blocks and update cookies, or panic
 * on corruption.
 */
void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
	void * const *first_obj_table_const, unsigned int n, int free);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
						free) \
	rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
						free)
#else
#define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \
						free) \
	do {} while (0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */

#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */

/**
 * Prototype for implementation specific data provisioning function.
 */
typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);

/**
 * Free the opaque private data pointed to by mp->pool_data pointer.
 */
typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);

/**
 * Enqueue an object into the external pool.
 */
typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
		void * const *obj_table, unsigned int n);

/**
 * Dequeue an object from the external pool.
 */
typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
		void **obj_table, unsigned int n);

/**
 * Dequeue a number of contiguous object blocks from the external pool.
 */
typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n);

/**
 * Return the number of available objects in the external pool.
 */
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);

/**
 * Calculate memory size required to store given number of objects.
 */
typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);

/**
 * @internal Helper to calculate memory size required to store given
 * number of objects.
 */
ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,
		size_t *min_chunk_size, size_t *align);

/**
 * Default way to calculate memory size required to store given number
 * of objects.
 */
ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);

/**
 * Function to be called for each populated object.
 */
typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *vaddr, rte_iova_t iova);

/**
 * Function to be called to populate a mempool with objects using a
 * provided memory chunk.
 */
typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
		unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Align objects on addresses multiple of total_elt_sz.
 */
#define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001

/**
 * @internal Helper to populate a mempool using a provided memory chunk:
 * just slice objects one by one.
 */
int rte_mempool_op_populate_helper(struct rte_mempool *mp,
		unsigned int flags, unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Default way to populate a mempool using a provided memory chunk.
 */
int rte_mempool_op_populate_default(struct rte_mempool *mp,
		unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Get some additional information about a mempool.
 */
typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp,
		struct rte_mempool_info *info);

/** Structure defining mempool operations structure */
struct rte_mempool_ops {
	char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
	rte_mempool_alloc_t alloc;       /**< Allocate private data. */
	rte_mempool_free_t free;         /**< Free the external pool. */
	rte_mempool_enqueue_t enqueue;   /**< Enqueue an object. */
	rte_mempool_dequeue_t dequeue;   /**< Dequeue an object. */
	rte_mempool_get_count get_count; /**< Get qty of available objs. */
	/** Optional callback to calculate memory size required to
	 * store specified number of objects. */
	rte_mempool_calc_mem_size_t calc_mem_size;
	/** Optional callback to populate mempool objects using
	 * provided memory chunk. */
	rte_mempool_populate_t populate;
	/** Get mempool info. */
	rte_mempool_get_info_t get_info;
	/** Dequeue a number of contiguous object blocks. */
	rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
} __rte_cache_aligned;

#define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */

/**
 * Structure storing the table of registered ops structs, each of which
 * contains the function pointers for the mempool ops functions.
 */
struct rte_mempool_ops_table {
	rte_spinlock_t sl;  /**< Spinlock for add/delete. */
	uint32_t num_ops;   /**< Number of used ops structs in the table. */
	/** Storage for all possible ops structs. */
	struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
} __rte_cache_aligned;

/** Array of registered ops structs. */
extern struct rte_mempool_ops_table rte_mempool_ops_table;

/**
 * @internal Get the mempool ops struct from its index.
 */
static inline struct rte_mempool_ops *
rte_mempool_get_ops(int ops_index)
{
	RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX));

	return &rte_mempool_ops_table.ops[ops_index];
}

/**
 * @internal Wrapper for mempool_ops alloc callback.
 */
int
rte_mempool_ops_alloc(struct rte_mempool *mp);

/**
 * @internal Wrapper for mempool_ops dequeue callback.
 */
static inline int
rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
		void **obj_table, unsigned n)
{
	struct rte_mempool_ops *ops;
	int ret;

	rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n);
	ops = rte_mempool_get_ops(mp->ops_index);
	ret = ops->dequeue(mp, obj_table, n);
	if (ret == 0) {
		RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n);
	}
	return ret;
}

/**
 * @internal Wrapper for mempool_ops dequeue_contig_blocks callback.
 */
static inline int
rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
		void **first_obj_table, unsigned int n)
{
	struct rte_mempool_ops *ops;

	ops = rte_mempool_get_ops(mp->ops_index);
	RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
	rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n);
	return ops->dequeue_contig_blocks(mp, first_obj_table, n);
}

/**
 * @internal Wrapper for mempool_ops enqueue callback.
 */
static inline int
rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	struct rte_mempool_ops *ops;
	int ret;

	RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1);
	RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n);
	rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n);
	ops = rte_mempool_get_ops(mp->ops_index);
	ret = ops->enqueue(mp, obj_table, n);
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	if (unlikely(ret < 0))
		RTE_LOG(CRIT, MEMPOOL, "cannot enqueue %u objects to mempool %s\n",
			n, mp->name);
#endif
	return ret;
}

/**
 * @internal Wrapper for mempool_ops get_count callback.
 */
unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);

/**
 * @internal Wrapper for mempool_ops calc_mem_size callback.
 */
ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
		uint32_t obj_num, uint32_t pg_shift,
		size_t *min_chunk_size, size_t *align);

/**
 * @internal Wrapper for mempool_ops populate callback.
 */
int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
		void *vaddr, rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb,
		void *obj_cb_arg);

/**
 * Wrapper for mempool_ops get_info callback.
 */
int rte_mempool_ops_get_info(const struct rte_mempool *mp,
		struct rte_mempool_info *info);

/**
 * @internal Wrapper for mempool_ops free callback.
 */
void
rte_mempool_ops_free(struct rte_mempool *mp);

/**
 * Set the ops of a mempool.
 *
 * This can only be done on a mempool that is not populated, i.e. just
 * after a call to rte_mempool_create_empty().
 */
int
rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
		void *pool_config);

/**
 * @internal Register mempool operations.
 */
int rte_mempool_register_ops(const struct rte_mempool_ops *ops);

/**
 * Macro to statically register the ops of a mempool handler.
 * Note that the rte_mempool_register_ops fails silently here when
 * more than RTE_MEMPOOL_MAX_OPS_IDX is registered.
 */
#define RTE_MEMPOOL_REGISTER_OPS(ops) \
	RTE_INIT(mp_hdlr_init_##ops) \
	{ \
		rte_mempool_register_ops(&ops); \
	}

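/*
 * Illustrative sketch (not part of the original header): how a mempool
 * driver typically provides and registers its own ops with the macro
 * above. The "my_*" callbacks and the handler name are hypothetical.
 *
 *   static const struct rte_mempool_ops my_ops = {
 *       .name = "my_handler",
 *       .alloc = my_alloc,
 *       .free = my_free,
 *       .enqueue = my_enqueue,
 *       .dequeue = my_dequeue,
 *       .get_count = my_get_count,
 *   };
 *   RTE_MEMPOOL_REGISTER_OPS(my_ops);
 *
 * An application then selects the handler with
 * rte_mempool_set_ops_byname(mp, "my_handler", NULL) on a mempool
 * created with rte_mempool_create_empty(), before populating it.
 */
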

/**
 * An object callback function for mempool.
 *
 * Used by rte_mempool_create() and rte_mempool_obj_iter().
 */
typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp,
		void *opaque, void *obj, unsigned obj_idx);
typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */

/**
 * A memory callback function for mempool.
 *
 * Used by rte_mempool_mem_iter().
 */
typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp,
		void *opaque, struct rte_mempool_memhdr *memhdr,
		unsigned mem_idx);

/**
 * A mempool constructor callback function.
 *
 * Arguments are the mempool and the opaque pointer given by the user in
 * rte_mempool_create().
 */
typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);

/**
 * Create a new mempool named *name* in memory.
 *
 * This function uses rte_memzone_reserve() to allocate memory; the pool
 * contains n elements of elt_size. A default per-lcore cache of
 * cache_size elements is created, and private_data_size bytes are
 * reserved after the mempool structure for application use.
 */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
		   unsigned cache_size, unsigned private_data_size,
		   rte_mempool_ctor_t *mp_init, void *mp_init_arg,
		   rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
		   int socket_id, unsigned flags);

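/*
 * Usage sketch (illustrative, not part of the original header): create a
 * pool of 4095 objects of 2 KiB each with a 256-object per-lcore cache,
 * on any NUMA socket; the pool name and sizes are hypothetical. With the
 * default ring-based handler, n = 2^q - 1 gives the best memory usage.
 *
 *   struct rte_mempool *mp;
 *
 *   mp = rte_mempool_create("example_pool", 4095, 2048, 256, 0,
 *                           NULL, NULL, NULL, NULL,
 *                           SOCKET_ID_ANY, 0);
 *   if (mp == NULL)
 *       rte_panic("cannot create example_pool\n");
 */
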

/**
 * Create an empty mempool
 *
 * The mempool is allocated and initialized, but it is not populated: no
 * memory is allocated for the mempool elements. The user has to call
 * rte_mempool_populate_*() to add memory chunks to the pool. Once
 * populated, the user may also want to initialize each object with
 * rte_mempool_obj_iter().
 */
struct rte_mempool *
rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
	unsigned cache_size, unsigned private_data_size,
	int socket_id, unsigned flags);

/**
 * Free a mempool
 *
 * Unlink the mempool from the global list, free the memory chunks, and
 * all memory referenced by the mempool. The objects must not be used by
 * other cores as they will be freed.
 */
void
rte_mempool_free(struct rte_mempool *mp);

/**
 * Add physically contiguous memory for objects in the pool at init
 */
int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);

/**
 * Add virtually contiguous memory for objects in the pool at init
 */
int
rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);

/**
 * Add memory from anonymous mapping for objects in the pool at init
 */
int rte_mempool_populate_anon(struct rte_mempool *mp);

/**
 * Add memory for objects in the pool at init
 *
 * This is the default function used by rte_mempool_create() to populate
 * the mempool.
 */
int rte_mempool_populate_default(struct rte_mempool *mp);

/**
 * Call a function for each mempool element
 */
uint32_t rte_mempool_obj_iter(struct rte_mempool *mp,
	rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);

/**
 * Call a function for each mempool memory chunk
 */
uint32_t rte_mempool_mem_iter(struct rte_mempool *mp,
	rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg);

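/*
 * Usage sketch (illustrative, not part of the original header): the
 * two-step flow equivalent to rte_mempool_create(), but with an explicit
 * choice of handler; names and sizes are hypothetical. "ring_mp_mc" is
 * the default multi-producer/multi-consumer ring handler.
 *
 *   struct rte_mempool *mp;
 *
 *   mp = rte_mempool_create_empty("example_pool", 4095, 2048, 256, 0,
 *                                 SOCKET_ID_ANY, 0);
 *   if (mp == NULL)
 *       rte_panic("cannot create empty pool\n");
 *   if (rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL) != 0)
 *       rte_panic("cannot set mempool handler\n");
 *   if (rte_mempool_populate_default(mp) < 0)
 *       rte_panic("cannot populate pool\n");
 */
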

/**
 * Dump the status of the mempool to a file.
 */
void rte_mempool_dump(FILE *f, struct rte_mempool *mp);

/**
 * Create a user-owned mempool cache.
 *
 * This can be used by unregistered non-EAL threads to enable caching
 * when they interact with a mempool.
 */
struct rte_mempool_cache *
rte_mempool_cache_create(uint32_t size, int socket_id);

/**
 * Free a user-owned mempool cache.
 */
void
rte_mempool_cache_free(struct rte_mempool_cache *cache);

/**
 * Get a pointer to the per-lcore default mempool cache.
 *
 * @return
 *   A pointer to the mempool cache, or NULL if disabled or if this is an
 *   unregistered non-EAL thread.
 */
static __rte_always_inline struct rte_mempool_cache *
rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
{
	if (mp->cache_size == 0)
		return NULL;

	if (lcore_id >= RTE_MAX_LCORE)
		return NULL;

	rte_mempool_trace_default_cache(mp, lcore_id,
		&mp->local_cache[lcore_id]);
	return &mp->local_cache[lcore_id];
}

/**
 * Flush a user-owned mempool cache to the specified mempool.
 *
 * @param cache
 *   A pointer to the mempool cache, or NULL to flush the per-lcore
 *   default cache.
 */
static __rte_always_inline void
rte_mempool_cache_flush(struct rte_mempool_cache *cache,
		struct rte_mempool *mp)
{
	if (cache == NULL)
		cache = rte_mempool_default_cache(mp, rte_lcore_id());
	if (cache == NULL || cache->len == 0)
		return;
	rte_mempool_trace_cache_flush(cache, mp);
	rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
	cache->len = 0;
}

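/*
 * Usage sketch (illustrative, not part of the original header): an
 * unregistered non-EAL thread creating its own cache and pairing it with
 * the generic get/put functions declared below, flushing before exit.
 *
 *   struct rte_mempool_cache *cache;
 *   void *obj;
 *
 *   cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
 *   if (cache != NULL) {
 *       if (rte_mempool_generic_get(mp, &obj, 1, cache) == 0) {
 *           // ... use obj ...
 *           rte_mempool_generic_put(mp, &obj, 1, cache);
 *       }
 *       rte_mempool_cache_flush(cache, mp);
 *       rte_mempool_cache_free(cache);
 *   }
 */
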

/**
 * @internal Put several objects back in the mempool; used internally.
 */
static __rte_always_inline void
rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table,
			   unsigned int n, struct rte_mempool_cache *cache)
{
	void **cache_objs;

	/* No cache provided */
	if (unlikely(cache == NULL))
		goto driver_enqueue;

	/* Increment stats now, since adding to the cache cannot fail. */
	RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
	RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);

	/* The request itself is too big for the cache */
	if (unlikely(n > cache->flushthresh))
		goto driver_enqueue_stats_incremented;

	/*
	 * The cache follows the following algorithm:
	 *   1. If the objects cannot be added to the cache without crossing
	 *      the flush threshold, flush the cache to the backend.
	 *   2. Add the objects to the cache.
	 */

	if (cache->len + n <= cache->flushthresh) {
		cache_objs = &cache->objs[cache->len];
		cache->len += n;
	} else {
		cache_objs = &cache->objs[0];
		rte_mempool_ops_enqueue_bulk(mp, cache_objs, cache->len);
		cache->len = n;
	}

	/* Add the objects to the cache. */
	rte_memcpy(cache_objs, obj_table, sizeof(void *) * n);

	return;

driver_enqueue:

	/* Increment stats now, since adding to the mempool cannot fail. */
	RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1);
	RTE_MEMPOOL_STAT_ADD(mp, put_objs, n);

driver_enqueue_stats_incremented:

	/* push objects to the backend */
	rte_mempool_ops_enqueue_bulk(mp, obj_table, n);
}


/**
 * Put several objects back in the mempool.
 */
static __rte_always_inline void
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	rte_mempool_trace_generic_put(mp, obj_table, n, cache);
	RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 0);
	rte_mempool_do_generic_put(mp, obj_table, n, cache);
}

/**
 * Put several objects back in the mempool.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * mempool creation time (see flags).
 */
static __rte_always_inline void
rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
		     unsigned int n)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_trace_put_bulk(mp, obj_table, n, cache);
	rte_mempool_generic_put(mp, obj_table, n, cache);
}

/**
 * Put one object back in the mempool.
 */
static __rte_always_inline void
rte_mempool_put(struct rte_mempool *mp, void *obj)
{
	rte_mempool_put_bulk(mp, &obj, 1);
}

/**
 * @internal Get several objects from the mempool; used internally.
 *
 * @return
 *   - 0: Success; got n objects.
 *   - <0: Error; code of driver dequeue function.
 */
static __rte_always_inline int
rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table,
			   unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;
	unsigned int remaining = n;
	uint32_t index, len;
	void **cache_objs;

	/* No cache provided */
	if (unlikely(cache == NULL))
		goto driver_dequeue;

	/* Use the cache as much as we have to return hot objects first */
	len = RTE_MIN(remaining, cache->len);
	cache_objs = &cache->objs[cache->len];
	cache->len -= len;
	remaining -= len;
	for (index = 0; index < len; index++)
		*obj_table++ = *--cache_objs;

	if (remaining == 0) {
		/* The entire request is satisfied from the cache. */

		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
		RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);

		return 0;
	}

	/* if dequeue below would overflow mem allocated for cache */
	if (unlikely(remaining > RTE_MEMPOOL_CACHE_MAX_SIZE))
		goto driver_dequeue;

	/* Fill the cache from the backend; fetch size + remaining objects. */
	ret = rte_mempool_ops_dequeue_bulk(mp, cache->objs,
			cache->size + remaining);
	if (unlikely(ret < 0)) {
		/*
		 * We are buffer constrained, and not able to allocate
		 * cache + remaining.
		 * Do not fill the cache, just satisfy the remaining part of
		 * the request directly from the backend.
		 */
		goto driver_dequeue;
	}

	/* Satisfy the remaining part of the request from the filled cache. */
	cache_objs = &cache->objs[cache->size + remaining];
	for (index = 0; index < remaining; index++)
		*obj_table++ = *--cache_objs;

	cache->len = cache->size;

	RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
	RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);

	return 0;

driver_dequeue:

	/* Get remaining objects directly from the backend. */
	ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, remaining);

	if (ret < 0) {
		if (likely(cache != NULL)) {
			cache->len = n - remaining;
			/*
			 * No further action is required to roll the first part
			 * of the request back into the cache, as objects in
			 * the cache are intact.
			 */
		}

		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n);
	} else {
		if (likely(cache != NULL)) {
			RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_bulk, 1);
			RTE_MEMPOOL_CACHE_STAT_ADD(cache, get_success_objs, n);
		} else {
			RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
			RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n);
		}
	}

	return ret;
}

/**
 * Get several objects from the mempool.
 *
 * If cache is enabled, objects will be retrieved first from cache,
 * subsequently from the common pool.
 */
static __rte_always_inline int
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
			unsigned int n, struct rte_mempool_cache *cache)
{
	int ret;
	ret = rte_mempool_do_generic_get(mp, obj_table, n, cache);
	if (ret == 0)
		RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 1);
	rte_mempool_trace_generic_get(mp, obj_table, n, cache);
	return ret;
}

/**
 * Get several objects from the mempool.
 *
 * This function calls the multi-consumer or the single-consumer
 * version, depending on the default behaviour that was specified at
 * mempool creation time (see flags).
 */
static __rte_always_inline int
rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
	struct rte_mempool_cache *cache;
	cache = rte_mempool_default_cache(mp, rte_lcore_id());
	rte_mempool_trace_get_bulk(mp, obj_table, n, cache);
	return rte_mempool_generic_get(mp, obj_table, n, cache);
}

/**
 * Get one object from the mempool.
 */
static __rte_always_inline int
rte_mempool_get(struct rte_mempool *mp, void **obj_p)
{
	return rte_mempool_get_bulk(mp, obj_p, 1);
}

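/*
 * Usage sketch (illustrative, not part of the original header): the
 * common fast-path pattern on an EAL lcore, where the per-lcore default
 * cache is used automatically. Always check the return value, since the
 * get can fail when the pool is exhausted.
 *
 *   void *obj;
 *
 *   if (rte_mempool_get(mp, &obj) < 0)
 *       return -ENOENT;
 *   // ... use obj ...
 *   rte_mempool_put(mp, obj);
 */
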

/**
 * Get contiguous blocks of objects from the mempool.
 *
 * If cache is enabled, consider flushing it first, to reuse objects
 * as soon as possible.
 */
static __rte_always_inline int
rte_mempool_get_contig_blocks(struct rte_mempool *mp,
			      void **first_obj_table, unsigned int n)
{
	int ret;

	ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
	if (ret == 0) {
		RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_success_blks, n);
		RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table, n,
							1);
	} else {
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
	}

	rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
	return ret;
}

/**
 * Return the number of entries in the mempool.
 *
 * When cache is enabled, this function has to browse the length of
 * all lcores, so it should not be used in a data path, but only for
 * debug purposes.
 */
unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);

/**
 * Return the number of elements which have been allocated from the mempool.
 */
unsigned int
rte_mempool_in_use_count(const struct rte_mempool *mp);

/**
 * Test if the mempool is full.
 *
 * When cache is enabled, this function has to browse the length of all
 * lcores, so it should not be used in a data path, but only for debug
 * purposes. User-owned mempool caches are not accounted for.
 */
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
	return rte_mempool_avail_count(mp) == mp->size;
}

/**
 * Test if the mempool is empty.
 */
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
	return rte_mempool_avail_count(mp) == 0;
}

/**
 * Return the IO address of elt, which is an element of the pool mp.
 */
static inline rte_iova_t
rte_mempool_virt2iova(const void *elt)
{
	const struct rte_mempool_objhdr *hdr;
	hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
		sizeof(*hdr));
	return hdr->iova;
}

/**
 * Check the consistency of mempool objects.
 *
 * Verify the coherency of fields in the mempool structure. Also check
 * that the cookies of mempool objects (even the ones that are not
 * present in the pool) have a correct value. If not, a panic will occur.
 */
void rte_mempool_audit(struct rte_mempool *mp);

/**
 * Return a pointer to the private data in an mempool structure.
 */
static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
	return (char *)mp +
		RTE_MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
}

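/*
 * Usage sketch (illustrative, not part of the original header): reserving
 * per-pool private data at creation time and retrieving it afterwards.
 * The "struct my_priv" type is hypothetical.
 *
 *   struct my_priv { uint64_t counter; };
 *   struct my_priv *priv;
 *
 *   mp = rte_mempool_create("example_pool", 4095, 2048, 256,
 *                           sizeof(struct my_priv),
 *                           NULL, NULL, NULL, NULL,
 *                           SOCKET_ID_ANY, 0);
 *   priv = rte_mempool_get_priv(mp);
 *   priv->counter = 0;
 */
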

/**
 * Dump the status of all mempools on the console
 */
void rte_mempool_list_dump(FILE *f);

/**
 * Search a mempool from its name
 *
 * @return
 *   The pointer to the mempool matching the name, or NULL if not found.
 */
struct rte_mempool *rte_mempool_lookup(const char *name);

/**
 * Given a desired size of the mempool element and mempool flags,
 * calculate the optimal memory size and alignment for the mempool element.
 */
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
	struct rte_mempool_objsz *sz);

/**
 * Walk list of all memory pools
 */
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
		      void *arg);

/**
 * @internal Get page size used for mempool object allocation.
 */
__rte_internal
int
rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);

/**
 * Mempool event type.
 * @internal
 */
enum rte_mempool_event {
	/** Occurs after a mempool is fully populated. */
	RTE_MEMPOOL_EVENT_READY = 0,
	/** Occurs before the destruction of a mempool begins. */
	RTE_MEMPOOL_EVENT_DESTROY = 1,
};

/**
 * @internal
 * Mempool event callback.
 */
typedef void (rte_mempool_event_callback)(
		enum rte_mempool_event event,
		struct rte_mempool *mp,
		void *user_data);

/**
 * @internal
 * Register a callback function invoked on mempool life cycle events.
 */
__rte_internal
int
rte_mempool_event_callback_register(rte_mempool_event_callback *func,
				    void *user_data);

/**
 * @internal
 * Unregister a callback added with rte_mempool_event_callback_register().
 */
__rte_internal
int
rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,
				      void *user_data);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMPOOL_H_ */