Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright (c) 2023 Marvell.
3 : : */
4 : :
5 : : #include "test.h"
6 : : #include <string.h>
7 : : #include <rte_common.h>
8 : : #include <rte_malloc.h>
9 : : #include <rte_mempool.h>
10 : : #include <rte_mbuf.h>
11 : : #include <rte_random.h>
12 : :
13 : : #ifdef RTE_EXEC_ENV_WINDOWS
/* Windows stub: the event DMA adapter relies on vdev/dmadev support that is
 * unavailable on Windows, so the whole suite is reported as skipped.
 */
static int
test_event_dma_adapter(void)
{
	printf("event_dma_adapter not supported on Windows, skipping test\n");
	return TEST_SKIPPED;
}
20 : :
21 : : #else
22 : :
23 : : #include <rte_bus_vdev.h>
24 : : #include <rte_dmadev.h>
25 : : #include <rte_eventdev.h>
26 : : #include <rte_event_dma_adapter.h>
27 : : #include <rte_service.h>
28 : :
29 : : #define NUM_MBUFS (8191)
30 : : #define MBUF_CACHE_SIZE (256)
31 : : #define TEST_APP_PORT_ID 0
32 : : #define TEST_APP_EV_QUEUE_ID 0
33 : : #define TEST_APP_EV_PRIORITY 0
34 : : #define TEST_APP_EV_FLOWID 0xAABB
35 : : #define TEST_DMA_EV_QUEUE_ID 1
36 : : #define TEST_ADAPTER_ID 0
37 : : #define TEST_DMA_DEV_ID 0
38 : : #define TEST_DMA_VCHAN_ID 0
39 : : #define PACKET_LENGTH 1024
40 : : #define NB_TEST_PORTS 1
41 : : #define NB_TEST_QUEUES 2
42 : : #define NUM_CORES 2
43 : : #define DMA_OP_POOL_SIZE 128
44 : : #define TEST_MAX_OP 32
45 : : #define TEST_RINGSIZE 512
46 : :
47 : : #define MBUF_SIZE (RTE_PKTMBUF_HEADROOM + PACKET_LENGTH)
48 : :
49 : : /* Handle log statements in same manner as test macros */
50 : : #define LOG_DBG(...) RTE_LOG(DEBUG, EAL, __VA_ARGS__)
51 : :
52 : : struct event_dma_adapter_test_params {
53 : : struct rte_mempool *src_mbuf_pool;
54 : : struct rte_mempool *dst_mbuf_pool;
55 : : struct rte_mempool *op_mpool;
56 : : uint8_t dma_event_port_id;
57 : : uint8_t internal_port_op_fwd;
58 : : };
59 : :
60 : : struct rte_event dma_response_info = {
61 : : .queue_id = TEST_APP_EV_QUEUE_ID,
62 : : .sched_type = RTE_SCHED_TYPE_ATOMIC,
63 : : .flow_id = TEST_APP_EV_FLOWID,
64 : : .priority = TEST_APP_EV_PRIORITY
65 : : };
66 : :
67 : : static struct event_dma_adapter_test_params params;
68 : : static uint8_t dma_adapter_setup_done;
69 : : static uint32_t slcore_id;
70 : : static int evdev;
71 : :
72 : : static int
73 : 0 : send_recv_ev(struct rte_event *ev)
74 : : {
75 : : struct rte_event recv_ev[TEST_MAX_OP];
76 : : uint16_t nb_enqueued = 0;
77 : : int i = 0;
78 : :
79 [ # # ]: 0 : if (params.internal_port_op_fwd) {
80 : 0 : nb_enqueued = rte_event_dma_adapter_enqueue(evdev, TEST_APP_PORT_ID, ev,
81 : : TEST_MAX_OP);
82 : : } else {
83 [ # # ]: 0 : while (nb_enqueued < TEST_MAX_OP) {
84 : 0 : nb_enqueued += rte_event_enqueue_burst(evdev, TEST_APP_PORT_ID,
85 : 0 : &ev[nb_enqueued], TEST_MAX_OP -
86 : : nb_enqueued);
87 : : }
88 : : }
89 : :
90 [ # # ]: 0 : TEST_ASSERT_EQUAL(nb_enqueued, TEST_MAX_OP, "Failed to send event to dma adapter\n");
91 : :
92 [ # # ]: 0 : while (i < TEST_MAX_OP) {
93 [ # # ]: 0 : if (rte_event_dequeue_burst(evdev, TEST_APP_PORT_ID, &recv_ev[i], 1, 0) != 1)
94 : 0 : continue;
95 : 0 : i++;
96 : : }
97 : :
98 : : TEST_ASSERT_EQUAL(i, TEST_MAX_OP, "Test failed. Failed to dequeue events.\n");
99 : :
100 : : return TEST_SUCCESS;
101 : : }
102 : :
/* Fetch, print, and reset the adapter statistics for TEST_ADAPTER_ID.
 * Used both as a standalone test case and after the forward-mode data path
 * to show the enqueue/dequeue counters. Always succeeds; the values are
 * informational only and are not asserted on.
 */
static int
test_dma_adapter_stats(void)
{
	struct rte_event_dma_adapter_stats stats;

	rte_event_dma_adapter_stats_get(TEST_ADAPTER_ID, &stats);
	printf(" +------------------------------------------------------+\n");
	printf(" + DMA adapter stats for instance %u:\n", TEST_ADAPTER_ID);
	printf(" + Event port poll count 0x%" PRIx64 "\n",
		stats.event_poll_count);
	printf(" + Event dequeue count 0x%" PRIx64 "\n",
		stats.event_deq_count);
	printf(" + DMA dev enqueue count 0x%" PRIx64 "\n",
		stats.dma_enq_count);
	printf(" + DMA dev enqueue failed count 0x%" PRIx64 "\n",
		stats.dma_enq_fail_count);
	printf(" + DMA dev dequeue count 0x%" PRIx64 "\n",
		stats.dma_deq_count);
	printf(" + Event enqueue count 0x%" PRIx64 "\n",
		stats.event_enq_count);
	printf(" + Event enqueue retry count 0x%" PRIx64 "\n",
		stats.event_enq_retry_count);
	printf(" + Event enqueue fail count 0x%" PRIx64 "\n",
		stats.event_enq_fail_count);
	printf(" +------------------------------------------------------+\n");

	/* Reset so a later invocation starts from zero. */
	rte_event_dma_adapter_stats_reset(TEST_ADAPTER_ID);
	return TEST_SUCCESS;
}
132 : :
133 : : static int
134 : 0 : test_dma_adapter_params(void)
135 : : {
136 : : struct rte_event_dma_adapter_runtime_params out_params;
137 : : struct rte_event_dma_adapter_runtime_params in_params;
138 : : struct rte_event event;
139 : : uint32_t cap;
140 : : int err, rc;
141 : :
142 : 0 : err = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
143 [ # # ]: 0 : TEST_ASSERT_SUCCESS(err, "Failed to get adapter capabilities\n");
144 : :
145 [ # # ]: 0 : if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) {
146 : 0 : err = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
147 : : TEST_DMA_VCHAN_ID, &event);
148 : : } else
149 : 0 : err = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
150 : : TEST_DMA_VCHAN_ID, NULL);
151 : :
152 [ # # ]: 0 : TEST_ASSERT_SUCCESS(err, "Failed to add vchan\n");
153 : :
154 : 0 : err = rte_event_dma_adapter_runtime_params_init(&in_params);
155 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
156 : 0 : err = rte_event_dma_adapter_runtime_params_init(&out_params);
157 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
158 : :
159 : : /* Case 1: Get the default value of mbufs processed by adapter */
160 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
161 [ # # ]: 0 : if (err == -ENOTSUP) {
162 : : rc = TEST_SKIPPED;
163 : 0 : goto vchan_del;
164 : : }
165 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
166 : :
167 : : /* Case 2: Set max_nb = 32 (=BATCH_SEIZE) */
168 : 0 : in_params.max_nb = 32;
169 : :
170 : 0 : err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
171 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
172 : :
173 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
174 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
175 [ # # ]: 0 : TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
176 : : in_params.max_nb, out_params.max_nb);
177 : :
178 : : /* Case 3: Set max_nb = 192 */
179 : 0 : in_params.max_nb = 192;
180 : :
181 : 0 : err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
182 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
183 : :
184 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
185 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
186 [ # # ]: 0 : TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
187 : : in_params.max_nb, out_params.max_nb);
188 : :
189 : : /* Case 4: Set max_nb = 256 */
190 : 0 : in_params.max_nb = 256;
191 : :
192 : 0 : err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
193 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
194 : :
195 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
196 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
197 [ # # ]: 0 : TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
198 : : in_params.max_nb, out_params.max_nb);
199 : :
200 : : /* Case 5: Set max_nb = 30(<BATCH_SIZE) */
201 : 0 : in_params.max_nb = 30;
202 : :
203 : 0 : err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
204 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
205 : :
206 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
207 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
208 [ # # ]: 0 : TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
209 : : in_params.max_nb, out_params.max_nb);
210 : :
211 : : /* Case 6: Set max_nb = 512 */
212 : 0 : in_params.max_nb = 512;
213 : :
214 : 0 : err = rte_event_dma_adapter_runtime_params_set(TEST_ADAPTER_ID, &in_params);
215 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
216 : :
217 : 0 : err = rte_event_dma_adapter_runtime_params_get(TEST_ADAPTER_ID, &out_params);
218 [ # # ]: 0 : TEST_ASSERT(err == 0, "Expected 0 got %d", err);
219 [ # # ]: 0 : TEST_ASSERT(in_params.max_nb == out_params.max_nb, "Expected %u got %u",
220 : : in_params.max_nb, out_params.max_nb);
221 : :
222 : : rc = TEST_SUCCESS;
223 : 0 : vchan_del:
224 : 0 : err = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
225 : : TEST_DMA_VCHAN_ID);
226 [ # # ]: 0 : TEST_ASSERT_SUCCESS(err, "Failed to delete vchan\n");
227 : :
228 : : return rc;
229 : : }
230 : :
231 : : static int
232 : 0 : test_op_forward_mode(void)
233 : : {
234 : : struct rte_mbuf *src_mbuf[TEST_MAX_OP];
235 : : struct rte_mbuf *dst_mbuf[TEST_MAX_OP];
236 : : struct rte_event_dma_adapter_op *op;
237 : : struct rte_event ev[TEST_MAX_OP];
238 : : struct rte_event response_info;
239 : : int ret, i;
240 : :
241 : 0 : ret = rte_pktmbuf_alloc_bulk(params.src_mbuf_pool, src_mbuf, TEST_MAX_OP);
242 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "alloc src mbufs failed.\n");
243 : :
244 : 0 : ret = rte_pktmbuf_alloc_bulk(params.dst_mbuf_pool, dst_mbuf, TEST_MAX_OP);
245 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "alloc dst mbufs failed.\n");
246 : :
247 [ # # ]: 0 : for (i = 0; i < TEST_MAX_OP; i++) {
248 : 0 : memset(rte_pktmbuf_mtod(src_mbuf[i], void *), rte_rand(), PACKET_LENGTH);
249 : 0 : memset(rte_pktmbuf_mtod(dst_mbuf[i], void *), 0, PACKET_LENGTH);
250 : : }
251 : :
252 [ # # ]: 0 : for (i = 0; i < TEST_MAX_OP; i++) {
253 [ # # ]: 0 : rte_mempool_get(params.op_mpool, (void **)&op);
254 [ # # ]: 0 : TEST_ASSERT_NOT_NULL(op, "Failed to allocate dma operation struct\n");
255 : :
256 : 0 : op->src_seg = rte_malloc(NULL, sizeof(struct rte_dma_sge), 0);
257 : 0 : op->dst_seg = rte_malloc(NULL, sizeof(struct rte_dma_sge), 0);
258 : :
259 : : /* Update Op */
260 [ # # ]: 0 : op->src_seg->addr = rte_pktmbuf_iova(src_mbuf[i]);
261 : 0 : op->dst_seg->addr = rte_pktmbuf_iova(dst_mbuf[i]);
262 : 0 : op->src_seg->length = PACKET_LENGTH;
263 : 0 : op->dst_seg->length = PACKET_LENGTH;
264 : 0 : op->nb_src = 1;
265 : 0 : op->nb_dst = 1;
266 : 0 : op->flags = RTE_DMA_OP_FLAG_SUBMIT;
267 : 0 : op->op_mp = params.op_mpool;
268 : 0 : op->dma_dev_id = TEST_DMA_DEV_ID;
269 : 0 : op->vchan = TEST_DMA_VCHAN_ID;
270 : :
271 : 0 : response_info.event = dma_response_info.event;
272 [ # # ]: 0 : rte_memcpy((uint8_t *)op + sizeof(struct rte_event_dma_adapter_op), &response_info,
273 : : sizeof(struct rte_event));
274 : :
275 : : /* Fill in event info and update event_ptr with rte_event_dma_adapter_op */
276 : 0 : memset(&ev[i], 0, sizeof(struct rte_event));
277 : 0 : ev[i].event = 0;
278 : 0 : ev[i].event_type = RTE_EVENT_TYPE_DMADEV;
279 : 0 : ev[i].queue_id = TEST_DMA_EV_QUEUE_ID;
280 : 0 : ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC;
281 : 0 : ev[i].flow_id = 0xAABB;
282 : 0 : ev[i].event_ptr = op;
283 : : }
284 : :
285 : 0 : ret = send_recv_ev(ev);
286 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to send/receive event to dma adapter\n");
287 : :
288 : 0 : test_dma_adapter_stats();
289 : :
290 [ # # ]: 0 : for (i = 0; i < TEST_MAX_OP; i++) {
291 : 0 : op = ev[i].event_ptr;
292 : 0 : ret = memcmp(rte_pktmbuf_mtod(src_mbuf[i], void *),
293 : 0 : rte_pktmbuf_mtod(dst_mbuf[i], void *), PACKET_LENGTH);
294 : :
295 [ # # ]: 0 : TEST_ASSERT_EQUAL(ret, 0, "Data mismatch for dma adapter\n");
296 : :
297 : 0 : rte_free(op->src_seg);
298 : 0 : rte_free(op->dst_seg);
299 [ # # ]: 0 : rte_mempool_put(op->op_mp, op);
300 : : }
301 : :
302 : 0 : rte_pktmbuf_free_bulk(src_mbuf, TEST_MAX_OP);
303 : 0 : rte_pktmbuf_free_bulk(dst_mbuf, TEST_MAX_OP);
304 : :
305 : 0 : return TEST_SUCCESS;
306 : : }
307 : :
308 : : static int
309 : 0 : map_adapter_service_core(void)
310 : : {
311 : : uint32_t adapter_service_id;
312 : : int ret;
313 : :
314 [ # # ]: 0 : if (rte_event_dma_adapter_service_id_get(TEST_ADAPTER_ID, &adapter_service_id) == 0) {
315 : : uint32_t core_list[NUM_CORES];
316 : :
317 : 0 : ret = rte_service_lcore_list(core_list, NUM_CORES);
318 [ # # ]: 0 : TEST_ASSERT(ret >= 0, "Failed to get service core list!");
319 : :
320 [ # # ]: 0 : if (core_list[0] != slcore_id) {
321 [ # # ]: 0 : TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
322 : : "Failed to add service core");
323 [ # # ]: 0 : TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
324 : : "Failed to start service core");
325 : : }
326 : :
327 [ # # ]: 0 : TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(
328 : : adapter_service_id, slcore_id, 1),
329 : : "Failed to map adapter service");
330 : : }
331 : :
332 : : return TEST_SUCCESS;
333 : : }
334 : :
335 : : static int
336 : 0 : test_with_op_forward_mode(void)
337 : : {
338 : : uint32_t cap;
339 : : int ret;
340 : :
341 : 0 : ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
342 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
343 : :
344 [ # # ]: 0 : if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
345 : : !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
346 : 0 : map_adapter_service_core();
347 : : else {
348 [ # # ]: 0 : if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
349 : : return TEST_SKIPPED;
350 : : }
351 : :
352 [ # # ]: 0 : TEST_ASSERT_SUCCESS(rte_event_dma_adapter_start(TEST_ADAPTER_ID),
353 : : "Failed to start event dma adapter");
354 : :
355 : 0 : ret = test_op_forward_mode();
356 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "DMA - FORWARD mode test failed\n");
357 : : return TEST_SUCCESS;
358 : : }
359 : :
360 : : static int
361 : 0 : configure_dmadev(void)
362 : : {
363 : 0 : const struct rte_dma_conf conf = { .nb_vchans = 1};
364 : 0 : const struct rte_dma_vchan_conf qconf = {
365 : : .direction = RTE_DMA_DIR_MEM_TO_MEM,
366 : : .nb_desc = TEST_RINGSIZE,
367 : : };
368 : : struct rte_dma_info info;
369 : : unsigned int elt_size;
370 : : int ret;
371 : :
372 : 0 : ret = rte_dma_count_avail();
373 [ # # ]: 0 : RTE_TEST_ASSERT_FAIL(ret, "No dma devices found!\n");
374 : :
375 : 0 : ret = rte_dma_info_get(TEST_DMA_DEV_ID, &info);
376 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Error with rte_dma_info_get()\n");
377 : :
378 [ # # ]: 0 : if (info.max_vchans < 1)
379 : 0 : RTE_LOG(ERR, USER1, "Error, no channels available on device id %u\n",
380 : : TEST_DMA_DEV_ID);
381 : :
382 [ # # ]: 0 : if (rte_dma_configure(TEST_DMA_DEV_ID, &conf) != 0)
383 : 0 : RTE_LOG(ERR, USER1, "Error with rte_dma_configure()\n");
384 : :
385 [ # # ]: 0 : if (rte_dma_vchan_setup(TEST_DMA_DEV_ID, TEST_DMA_VCHAN_ID, &qconf) < 0)
386 : 0 : RTE_LOG(ERR, USER1, "Error with vchan configuration\n");
387 : :
388 : 0 : ret = rte_dma_info_get(TEST_DMA_DEV_ID, &info);
389 [ # # # # ]: 0 : if (ret != 0 || info.nb_vchans != 1)
390 : 0 : RTE_LOG(ERR, USER1, "Error, no configured vhcan reported on device id %u\n",
391 : : TEST_DMA_DEV_ID);
392 : :
393 : 0 : params.src_mbuf_pool = rte_pktmbuf_pool_create("DMA_ADAPTER_SRC_MBUFPOOL", NUM_MBUFS,
394 : : MBUF_CACHE_SIZE, 0, MBUF_SIZE,
395 : 0 : rte_socket_id());
396 [ # # ]: 0 : RTE_TEST_ASSERT_NOT_NULL(params.src_mbuf_pool, "Can't create DMA_SRC_MBUFPOOL\n");
397 : :
398 : 0 : params.dst_mbuf_pool = rte_pktmbuf_pool_create("DMA_ADAPTER_DST_MBUFPOOL", NUM_MBUFS,
399 : : MBUF_CACHE_SIZE, 0, MBUF_SIZE,
400 : 0 : rte_socket_id());
401 [ # # ]: 0 : RTE_TEST_ASSERT_NOT_NULL(params.dst_mbuf_pool, "Can't create DMA_DST_MBUFPOOL\n");
402 : :
403 : : elt_size = sizeof(struct rte_event_dma_adapter_op) + sizeof(struct rte_event);
404 : 0 : params.op_mpool = rte_mempool_create("EVENT_DMA_OP_POOL", DMA_OP_POOL_SIZE, elt_size, 0,
405 : 0 : 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
406 [ # # ]: 0 : RTE_TEST_ASSERT_NOT_NULL(params.op_mpool, "Can't create DMA_OP_POOL\n");
407 : :
408 : : return TEST_SUCCESS;
409 : : }
410 : :
411 : : static inline void
412 : 0 : evdev_set_conf_values(struct rte_event_dev_config *dev_conf, struct rte_event_dev_info *info)
413 : : {
414 : : memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
415 : 0 : dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
416 : 0 : dev_conf->nb_event_ports = NB_TEST_PORTS;
417 : 0 : dev_conf->nb_event_queues = NB_TEST_QUEUES;
418 : 0 : dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
419 : 0 : dev_conf->nb_event_port_dequeue_depth =
420 : 0 : info->max_event_port_dequeue_depth;
421 : 0 : dev_conf->nb_event_port_enqueue_depth =
422 : 0 : info->max_event_port_enqueue_depth;
423 : : dev_conf->nb_event_port_enqueue_depth =
424 : : info->max_event_port_enqueue_depth;
425 : 0 : dev_conf->nb_events_limit =
426 : 0 : info->max_num_events;
427 : 0 : }
428 : :
429 : : static int
430 : 0 : configure_eventdev(void)
431 : : {
432 : : struct rte_event_queue_conf queue_conf;
433 : : struct rte_event_dev_config devconf;
434 : : struct rte_event_dev_info info;
435 : : uint32_t queue_count;
436 : : uint32_t port_count;
437 : : uint8_t qid;
438 : : int ret;
439 : :
440 [ # # ]: 0 : if (!rte_event_dev_count()) {
441 : : /* If there is no hardware eventdev, or no software vdev was
442 : : * specified on the command line, create an instance of
443 : : * event_sw.
444 : : */
445 : 0 : LOG_DBG("Failed to find a valid event device... "
446 : : "testing with event_sw device\n");
447 [ # # ]: 0 : TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
448 : : "Error creating eventdev");
449 : 0 : evdev = rte_event_dev_get_dev_id("event_sw0");
450 : : }
451 : :
452 : 0 : ret = rte_event_dev_info_get(evdev, &info);
453 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info\n");
454 : :
455 : 0 : evdev_set_conf_values(&devconf, &info);
456 : :
457 : 0 : ret = rte_event_dev_configure(evdev, &devconf);
458 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev\n");
459 : :
460 : : /* Set up event queue */
461 : 0 : ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count);
462 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Queue count get failed\n");
463 [ # # ]: 0 : TEST_ASSERT_EQUAL(queue_count, 2, "Unexpected queue count\n");
464 : :
465 : 0 : qid = TEST_APP_EV_QUEUE_ID;
466 : 0 : ret = rte_event_queue_setup(evdev, qid, NULL);
467 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d\n", qid);
468 : :
469 : 0 : queue_conf.nb_atomic_flows = info.max_event_queue_flows;
470 : 0 : queue_conf.nb_atomic_order_sequences = 32;
471 : 0 : queue_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
472 : 0 : queue_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
473 : 0 : queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
474 : :
475 : 0 : qid = TEST_DMA_EV_QUEUE_ID;
476 : 0 : ret = rte_event_queue_setup(evdev, qid, &queue_conf);
477 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%u\n", qid);
478 : :
479 : : /* Set up event port */
480 : 0 : ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
481 : : &port_count);
482 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Port count get failed\n");
483 [ # # ]: 0 : TEST_ASSERT_EQUAL(port_count, 1, "Unexpected port count\n");
484 : :
485 : 0 : ret = rte_event_port_setup(evdev, TEST_APP_PORT_ID, NULL);
486 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d\n",
487 : : TEST_APP_PORT_ID);
488 : :
489 : 0 : qid = TEST_APP_EV_QUEUE_ID;
490 : 0 : ret = rte_event_port_link(evdev, TEST_APP_PORT_ID, &qid, NULL, 1);
491 [ # # ]: 0 : TEST_ASSERT(ret >= 0, "Failed to link queue port=%d\n",
492 : : TEST_APP_PORT_ID);
493 : :
494 : : return TEST_SUCCESS;
495 : : }
496 : :
/* Teardown counterpart of test_dma_adapter_create(): release the adapter
 * instance. Return value of rte_event_dma_adapter_free() is deliberately
 * ignored (teardown is best-effort).
 */
static void
test_dma_adapter_free(void)
{
	rte_event_dma_adapter_free(TEST_ADAPTER_ID);
}
502 : :
503 : : static int
504 : 0 : test_dma_adapter_create(void)
505 : : {
506 : 0 : struct rte_event_dev_info evdev_info = {0};
507 : 0 : struct rte_event_port_conf conf = {0};
508 : : int ret;
509 : :
510 : 0 : ret = rte_event_dev_info_get(evdev, &evdev_info);
511 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n");
512 : :
513 : 0 : conf.new_event_threshold = evdev_info.max_num_events;
514 : 0 : conf.dequeue_depth = evdev_info.max_event_port_dequeue_depth;
515 : 0 : conf.enqueue_depth = evdev_info.max_event_port_enqueue_depth;
516 : :
517 : : /* Create adapter with default port creation callback */
518 : 0 : ret = rte_event_dma_adapter_create(TEST_ADAPTER_ID, evdev, &conf, 0);
519 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n");
520 : :
521 : : return TEST_SUCCESS;
522 : : }
523 : :
524 : : static int
525 : 0 : test_dma_adapter_vchan_add_del(void)
526 : : {
527 : : struct rte_event event;
528 : : uint32_t cap;
529 : : int ret;
530 : :
531 : 0 : ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
532 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
533 : :
534 [ # # ]: 0 : if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) {
535 : 0 : ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
536 : : TEST_DMA_VCHAN_ID, &event);
537 : : } else
538 : 0 : ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
539 : : TEST_DMA_VCHAN_ID, NULL);
540 : :
541 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to create add vchan\n");
542 : :
543 : 0 : ret = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
544 : : TEST_DMA_VCHAN_ID);
545 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to delete vchan\n");
546 : :
547 : : return TEST_SUCCESS;
548 : : }
549 : :
/* Create and configure the DMA adapter for @mode, add the test vchan, and
 * (for software adapters) fetch the adapter's event port id into
 * params.dma_event_port_id. Returns -ENOTSUP when FORWARD mode is requested
 * but the device lacks the OP_FWD capability.
 */
static int
configure_event_dma_adapter(enum rte_event_dma_adapter_mode mode)
{
	struct rte_event_dev_info evdev_info = {0};
	struct rte_event_port_conf conf = {0};
	struct rte_event event;
	uint32_t cap;
	int ret;

	ret = rte_event_dma_adapter_caps_get(evdev, TEST_DMA_DEV_ID, &cap);
	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");

	/* Skip mode and capability mismatch check for SW eventdev */
	if (!(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
	    !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	    !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND))
		goto adapter_create;

	/* FORWARD mode requires internal-port OP_FWD support on HW devices. */
	if (mode == RTE_EVENT_DMA_ADAPTER_OP_FORWARD) {
		if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)
			params.internal_port_op_fwd = 1;
		else
			return -ENOTSUP;
	}

adapter_create:
	ret = rte_event_dev_info_get(evdev, &evdev_info);
	TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n");

	/* Size the adapter's port configuration to the device maxima. */
	conf.new_event_threshold = evdev_info.max_num_events;
	conf.dequeue_depth = evdev_info.max_event_port_dequeue_depth;
	conf.enqueue_depth = evdev_info.max_event_port_enqueue_depth;

	/* Create adapter with default port creation callback */
	ret = rte_event_dma_adapter_create(TEST_ADAPTER_ID, evdev, &conf, mode);
	TEST_ASSERT_SUCCESS(ret, "Failed to create event dma adapter\n");

	/* Bind an event to the vchan only when the device supports it. */
	if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND) {
		ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
						      TEST_DMA_VCHAN_ID, &event);
	} else
		ret = rte_event_dma_adapter_vchan_add(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
						      TEST_DMA_VCHAN_ID, NULL);

	TEST_ASSERT_SUCCESS(ret, "Failed to add vchan\n");

	/* SW adapter: remember its event port so the caller can link it. */
	if (!params.internal_port_op_fwd) {
		ret = rte_event_dma_adapter_event_port_get(TEST_ADAPTER_ID,
							   &params.dma_event_port_id);
		TEST_ASSERT_SUCCESS(ret, "Failed to get event port\n");
	}

	return TEST_SUCCESS;
}
604 : :
605 : : static void
606 : 0 : test_dma_adapter_stop(void)
607 : : {
608 : : uint32_t evdev_service_id, adapter_service_id;
609 : :
610 : : /* retrieve service ids & stop services */
611 [ # # ]: 0 : if (rte_event_dma_adapter_service_id_get(TEST_ADAPTER_ID,
612 : : &adapter_service_id) == 0) {
613 : 0 : rte_service_runstate_set(adapter_service_id, 0);
614 : 0 : rte_service_lcore_stop(slcore_id);
615 : 0 : rte_service_lcore_del(slcore_id);
616 : 0 : rte_event_dma_adapter_stop(TEST_ADAPTER_ID);
617 : : }
618 : :
619 [ # # ]: 0 : if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
620 : 0 : rte_service_runstate_set(evdev_service_id, 0);
621 : 0 : rte_service_lcore_stop(slcore_id);
622 : 0 : rte_service_lcore_del(slcore_id);
623 : 0 : rte_dma_stop(TEST_DMA_DEV_ID);
624 : 0 : rte_event_dev_stop(evdev);
625 : : } else {
626 : 0 : rte_dma_stop(TEST_DMA_DEV_ID);
627 : 0 : rte_event_dev_stop(evdev);
628 : : }
629 : 0 : }
630 : :
/* Configure the adapter for @mode (only on the first call, guarded by
 * dma_adapter_setup_done) and start the eventdev and DMA device. For a
 * software eventdev, a service core is added, started, and mapped to the
 * eventdev scheduling service before the devices are started.
 */
static int
test_dma_adapter_conf(enum rte_event_dma_adapter_mode mode)
{
	uint32_t evdev_service_id;
	uint8_t qid;
	int ret;

	if (!dma_adapter_setup_done) {
		ret = configure_event_dma_adapter(mode);
		if (ret)
			return ret;
		/* SW adapter: link its event port to the DMA event queue. */
		if (!params.internal_port_op_fwd) {
			qid = TEST_DMA_EV_QUEUE_ID;
			ret = rte_event_port_link(evdev,
					params.dma_event_port_id, &qid, NULL, 1);
			TEST_ASSERT(ret >= 0, "Failed to link queue %d "
					"port=%u\n", qid,
					params.dma_event_port_id);
		}
		dma_adapter_setup_done = 1;
	}

	/* retrieve service ids */
	if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
		/* add a service core and start it */
		TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
				"Failed to add service core");
		TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
				"Failed to start service core");

		/* map services to it */
		TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(evdev_service_id,
					slcore_id, 1), "Failed to map evdev service");

		/* set services to running */
		TEST_ASSERT_SUCCESS(rte_service_runstate_set(evdev_service_id,
					1), "Failed to start evdev service");
	}

	/* start the eventdev */
	TEST_ASSERT_SUCCESS(rte_event_dev_start(evdev),
			"Failed to start event device");

	/* start the dma dev */
	TEST_ASSERT_SUCCESS(rte_dma_start(TEST_DMA_DEV_ID),
			"Failed to start dma device");

	return TEST_SUCCESS;
}
680 : :
681 : : static int
682 : 0 : test_dma_adapter_conf_op_forward_mode(void)
683 : : {
684 : : enum rte_event_dma_adapter_mode mode;
685 : :
686 : : mode = RTE_EVENT_DMA_ADAPTER_OP_FORWARD;
687 : :
688 : 0 : return test_dma_adapter_conf(mode);
689 : : }
690 : :
691 : : static int
692 : 0 : testsuite_setup(void)
693 : : {
694 : : int ret;
695 : :
696 : 0 : slcore_id = rte_get_next_lcore(-1, 1, 0);
697 [ # # ]: 0 : TEST_ASSERT_NOT_EQUAL(slcore_id, RTE_MAX_LCORE, "At least 2 lcores "
698 : : "are required to run this autotest\n");
699 : :
700 : : /* Setup and start event device. */
701 : 0 : ret = configure_eventdev();
702 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "Failed to setup eventdev\n");
703 : :
704 : : /* Setup and start dma device. */
705 : 0 : ret = configure_dmadev();
706 [ # # ]: 0 : TEST_ASSERT_SUCCESS(ret, "dmadev initialization failed\n");
707 : :
708 : : return TEST_SUCCESS;
709 : : }
710 : :
711 : : static void
712 : 0 : dma_adapter_teardown(void)
713 : : {
714 : : int ret;
715 : :
716 : 0 : ret = rte_event_dma_adapter_stop(TEST_ADAPTER_ID);
717 [ # # ]: 0 : if (ret < 0)
718 : 0 : RTE_LOG(ERR, USER1, "Failed to stop adapter!");
719 : :
720 : 0 : ret = rte_event_dma_adapter_vchan_del(TEST_ADAPTER_ID, TEST_DMA_DEV_ID,
721 : : TEST_DMA_VCHAN_ID);
722 [ # # ]: 0 : if (ret < 0)
723 : 0 : RTE_LOG(ERR, USER1, "Failed to delete vchan!");
724 : :
725 : 0 : ret = rte_event_dma_adapter_free(TEST_ADAPTER_ID);
726 [ # # ]: 0 : if (ret < 0)
727 : 0 : RTE_LOG(ERR, USER1, "Failed to free adapter!");
728 : :
729 : 0 : dma_adapter_setup_done = 0;
730 : 0 : }
731 : :
732 : : static void
733 : 0 : dma_teardown(void)
734 : : {
735 : : /* Free mbuf mempool */
736 [ # # ]: 0 : if (params.src_mbuf_pool != NULL) {
737 : 0 : RTE_LOG(DEBUG, USER1, "DMA_ADAPTER_SRC_MBUFPOOL count %u\n",
738 : : rte_mempool_avail_count(params.src_mbuf_pool));
739 : 0 : rte_mempool_free(params.src_mbuf_pool);
740 : 0 : params.src_mbuf_pool = NULL;
741 : : }
742 : :
743 [ # # ]: 0 : if (params.dst_mbuf_pool != NULL) {
744 : 0 : RTE_LOG(DEBUG, USER1, "DMA_ADAPTER_DST_MBUFPOOL count %u\n",
745 : : rte_mempool_avail_count(params.dst_mbuf_pool));
746 : 0 : rte_mempool_free(params.dst_mbuf_pool);
747 : 0 : params.dst_mbuf_pool = NULL;
748 : : }
749 : :
750 : : /* Free ops mempool */
751 [ # # ]: 0 : if (params.op_mpool != NULL) {
752 : 0 : RTE_LOG(DEBUG, USER1, "EVENT_DMA_OP_POOL count %u\n",
753 : : rte_mempool_avail_count(params.op_mpool));
754 : 0 : rte_mempool_free(params.op_mpool);
755 : 0 : params.op_mpool = NULL;
756 : : }
757 : 0 : }
758 : :
/* Stop the event device; it is not closed, matching the suite's lifecycle. */
static void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
}
764 : :
/* Suite-level teardown: adapter first, then DMA pools, then the event device. */
static void
testsuite_teardown(void)
{
	dma_adapter_teardown();
	dma_teardown();
	eventdev_teardown();
}
772 : :
/* Test table: each entry is TEST_CASE_ST(setup, teardown, test). */
static struct unit_test_suite functional_testsuite = {
	.suite_name = "Event dma adapter test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {

		/* Adapter create is itself the test; free is the teardown. */
		TEST_CASE_ST(NULL, test_dma_adapter_free, test_dma_adapter_create),

		TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,
			     test_dma_adapter_vchan_add_del),

		TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,
			     test_dma_adapter_stats),

		TEST_CASE_ST(test_dma_adapter_create, test_dma_adapter_free,
			     test_dma_adapter_params),

		/* Full data-path test in FORWARD mode. */
		TEST_CASE_ST(test_dma_adapter_conf_op_forward_mode, test_dma_adapter_stop,
			     test_with_op_forward_mode),

		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
796 : :
/* Entry point registered with the test framework; runs the suite above. */
static int
test_event_dma_adapter(void)
{
	return unit_test_suite_runner(&functional_testsuite);
}
802 : :
803 : : #endif /* !RTE_EXEC_ENV_WINDOWS */
804 : :
805 : 238 : REGISTER_DRIVER_TEST(event_dma_adapter_autotest, test_event_dma_adapter);
|