Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright(c) 2001-2023 Intel Corporation
3 : : */
4 : :
5 : : #include "idpf_controlq.h"
6 : :
7 : : /**
8 : : * idpf_ctlq_setup_regs - initialize control queue registers
9 : : * @cq: pointer to the specific control queue
 10                 :            :  * @q_create_info: struct containing info for the queue being initialized
11 : : */
12 : : static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
13 : : struct idpf_ctlq_create_info *q_create_info)
14 : : {
15 : : /* set control queue registers in our local struct */
16 : 0 : cq->reg.head = q_create_info->reg.head;
17 : 0 : cq->reg.tail = q_create_info->reg.tail;
18 : 0 : cq->reg.len = q_create_info->reg.len;
19 : 0 : cq->reg.bah = q_create_info->reg.bah;
20 : 0 : cq->reg.bal = q_create_info->reg.bal;
21 : 0 : cq->reg.len_mask = q_create_info->reg.len_mask;
22 : 0 : cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
23 : 0 : cq->reg.head_mask = q_create_info->reg.head_mask;
24 : : }
25 : :
26 : : /**
27 : : * idpf_ctlq_init_regs - Initialize control queue registers
28 : : * @hw: pointer to hw struct
29 : : * @cq: pointer to the specific Control queue
30 : : * @is_rxq: true if receive control queue, false otherwise
31 : : *
32 : : * Initialize registers. The caller is expected to have already initialized the
33 : : * descriptor ring memory and buffer memory
34 : : */
35 : 0 : static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
36 : : bool is_rxq)
37 : : {
38 : : /* Update tail to post pre-allocated buffers for rx queues */
39 [ # # ]: 0 : if (is_rxq)
40 : 0 : wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
41 : :
      42                 :            :         /* For non-Mailbox control queues, only TAIL needs to be set */
43 [ # # ]: 0 : if (cq->q_id != -1)
44 : : return;
45 : :
46 : : /* Clear Head for both send or receive */
47 : 0 : wr32(hw, cq->reg.head, 0);
48 : :
49 : : /* set starting point */
50 : 0 : wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
51 : 0 : wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
52 : 0 : wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
53 : : }
54 : :
55 : : /**
      56                 :            :  * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buffers
57 : : * @cq: pointer to the specific Control queue
58 : : *
59 : : * Record the address of the receive queue DMA buffers in the descriptors.
60 : : * The buffers must have been previously allocated.
61 : : */
62 : 0 : static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
63 : : {
64 : : int i = 0;
65 : :
66 [ # # ]: 0 : for (i = 0; i < cq->ring_size; i++) {
67 : 0 : struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
68 : 0 : struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
69 : :
70 : : /* No buffer to post to descriptor, continue */
71 [ # # ]: 0 : if (!bi)
72 : 0 : continue;
73 : :
74 : 0 : desc->flags =
75 : : CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
76 : 0 : desc->opcode = 0;
77 : 0 : desc->datalen = CPU_TO_LE16(bi->size);
78 : 0 : desc->ret_val = 0;
79 : 0 : desc->cookie_high = 0;
80 : 0 : desc->cookie_low = 0;
81 : 0 : desc->params.indirect.addr_high =
82 : 0 : CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
83 : 0 : desc->params.indirect.addr_low =
84 : 0 : CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
85 : 0 : desc->params.indirect.param0 = 0;
86 : 0 : desc->params.indirect.param1 = 0;
87 : : }
88 : 0 : }
89 : :
90 : : /**
91 : : * idpf_ctlq_shutdown - shutdown the CQ
92 : : * @hw: pointer to hw struct
93 : : * @cq: pointer to the specific Control queue
94 : : *
      95                 :            :  * The main shutdown routine for any control queue
96 : : */
97 : 0 : static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
98 : : {
99 : 0 : idpf_acquire_lock(&cq->cq_lock);
100 : :
101 [ # # ]: 0 : if (!cq->ring_size)
102 : 0 : goto shutdown_sq_out;
103 : :
104 : : #ifdef SIMICS_BUILD
105 : : wr32(hw, cq->reg.head, 0);
106 : : wr32(hw, cq->reg.tail, 0);
107 : : wr32(hw, cq->reg.len, 0);
108 : : wr32(hw, cq->reg.bal, 0);
109 : : wr32(hw, cq->reg.bah, 0);
110 : : #endif /* SIMICS_BUILD */
111 : :
112 : : /* free ring buffers and the ring itself */
113 : 0 : idpf_ctlq_dealloc_ring_res(hw, cq);
114 : :
115 : : /* Set ring_size to 0 to indicate uninitialized queue */
116 : 0 : cq->ring_size = 0;
117 : :
118 : 0 : shutdown_sq_out:
119 : : idpf_release_lock(&cq->cq_lock);
120 : : idpf_destroy_lock(&cq->cq_lock);
121 : 0 : }
122 : :
123 : : /**
124 : : * idpf_ctlq_add - add one control queue
125 : : * @hw: pointer to hardware struct
126 : : * @qinfo: info for queue to be created
127 : : * @cq_out: (output) double pointer to control queue to be created
128 : : *
129 : : * Allocate and initialize a control queue and add it to the control queue list.
     130                 :            :  * The allocated and initialized control queue is passed back to the caller
     131                 :            :  * through cq_out if no errors occur.
132 : : *
133 : : * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add
134 : : */
135 : 0 : int idpf_ctlq_add(struct idpf_hw *hw,
136 : : struct idpf_ctlq_create_info *qinfo,
137 : : struct idpf_ctlq_info **cq_out)
138 : : {
139 : : struct idpf_ctlq_info *cq;
140 : : bool is_rxq = false;
141 : : int status = 0;
142 : :
     143 [ #  #  #  #  #  # ]:          0 :         if (!qinfo->len || !qinfo->buf_size ||
144 [ # # ]: 0 : qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
145 : : qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
146 : : return -EINVAL;
147 : :
148 : : cq = (struct idpf_ctlq_info *)
149 : 0 : idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
150 [ # # ]: 0 : if (!cq)
151 : : return -ENOMEM;
152 : :
153 : 0 : cq->cq_type = qinfo->type;
154 : 0 : cq->q_id = qinfo->id;
155 : 0 : cq->buf_size = qinfo->buf_size;
156 : 0 : cq->ring_size = qinfo->len;
157 : :
158 : 0 : cq->next_to_use = 0;
159 : 0 : cq->next_to_clean = 0;
160 : 0 : cq->next_to_post = cq->ring_size - 1;
161 : :
162 [ # # # ]: 0 : switch (qinfo->type) {
163 : 0 : case IDPF_CTLQ_TYPE_MAILBOX_RX:
164 : : is_rxq = true;
165 : : /* fallthrough */
166 : 0 : case IDPF_CTLQ_TYPE_MAILBOX_TX:
167 : 0 : status = idpf_ctlq_alloc_ring_res(hw, cq);
168 : : break;
169 : : default:
170 : : status = -EINVAL;
171 : : break;
172 : : }
173 : :
174 [ # # ]: 0 : if (status)
175 : 0 : goto init_free_q;
176 : :
177 [ # # ]: 0 : if (is_rxq) {
178 : 0 : idpf_ctlq_init_rxq_bufs(cq);
179 : : } else {
180 : : /* Allocate the array of msg pointers for TX queues */
181 : 0 : cq->bi.tx_msg = (struct idpf_ctlq_msg **)
182 : 0 : idpf_calloc(hw, qinfo->len,
183 : : sizeof(struct idpf_ctlq_msg *));
184 [ # # ]: 0 : if (!cq->bi.tx_msg) {
185 : : status = -ENOMEM;
186 : 0 : goto init_dealloc_q_mem;
187 : : }
188 : : }
189 : :
190 : : idpf_ctlq_setup_regs(cq, qinfo);
191 : :
192 : 0 : idpf_ctlq_init_regs(hw, cq, is_rxq);
193 : :
194 : : idpf_init_lock(&(cq->cq_lock));
195 : :
196 [ # # ]: 0 : LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
197 : :
198 : 0 : *cq_out = cq;
199 : 0 : return status;
200 : :
201 : : init_dealloc_q_mem:
202 : : /* free ring buffers and the ring itself */
203 : 0 : idpf_ctlq_dealloc_ring_res(hw, cq);
204 : 0 : init_free_q:
205 : 0 : idpf_free(hw, cq);
206 : : cq = NULL;
207 : :
208 : 0 : return status;
209 : : }
210 : :
211 : : /**
212 : : * idpf_ctlq_remove - deallocate and remove specified control queue
213 : : * @hw: pointer to hardware struct
214 : : * @cq: pointer to control queue to be removed
215 : : */
216 : 0 : void idpf_ctlq_remove(struct idpf_hw *hw,
217 : : struct idpf_ctlq_info *cq)
218 : : {
219 [ # # ]: 0 : LIST_REMOVE(cq, cq_list);
220 : 0 : idpf_ctlq_shutdown(hw, cq);
221 : 0 : idpf_free(hw, cq);
222 : 0 : }
223 : :
224 : : /**
225 : : * idpf_ctlq_init - main initialization routine for all control queues
226 : : * @hw: pointer to hardware struct
227 : : * @num_q: number of queues to initialize
228 : : * @q_info: array of structs containing info for each queue to be initialized
229 : : *
230 : : * This initializes any number and any type of control queues. This is an all
231 : : * or nothing routine; if one fails, all previously allocated queues will be
232 : : * destroyed. This must be called prior to using the individual add/remove
233 : : * APIs.
234 : : */
235 : 0 : int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
236 : : struct idpf_ctlq_create_info *q_info)
237 : : {
238 : 0 : struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
239 : : int ret_code = 0;
240 : : int i = 0;
241 : :
242 : 0 : LIST_INIT(&hw->cq_list_head);
243 : :
244 [ # # ]: 0 : for (i = 0; i < num_q; i++) {
245 : 0 : struct idpf_ctlq_create_info *qinfo = q_info + i;
246 : :
247 : 0 : ret_code = idpf_ctlq_add(hw, qinfo, &cq);
248 [ # # ]: 0 : if (ret_code)
249 : 0 : goto init_destroy_qs;
250 : : }
251 : :
252 : : return ret_code;
253 : :
254 : : init_destroy_qs:
255 [ # # ]: 0 : LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
256 : : idpf_ctlq_info, cq_list)
257 : 0 : idpf_ctlq_remove(hw, cq);
258 : :
259 : : return ret_code;
260 : : }
261 : :
262 : : /**
263 : : * idpf_ctlq_deinit - destroy all control queues
264 : : * @hw: pointer to hw struct
265 : : */
266 : 0 : void idpf_ctlq_deinit(struct idpf_hw *hw)
267 : : {
268 : : struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
269 : :
270 [ # # ]: 0 : LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
271 : : idpf_ctlq_info, cq_list)
272 : 0 : idpf_ctlq_remove(hw, cq);
273 : 0 : }
274 : :
275 : : /**
276 : : * idpf_ctlq_send - send command to Control Queue (CTQ)
277 : : * @hw: pointer to hw struct
278 : : * @cq: handle to control queue struct to send on
279 : : * @num_q_msg: number of messages to send on control queue
280 : : * @q_msg: pointer to array of queue messages to be sent
281 : : *
282 : : * The caller is expected to allocate DMAable buffers and pass them to the
283 : : * send routine via the q_msg struct / control queue specific data struct.
284 : : * The control queue will hold a reference to each send message until
285 : : * the completion for that message has been cleaned.
286 : : * Since all q_msgs being sent are store in native endianness, these values
     287                 :            :  * Since all q_msgs being sent are stored in native endianness, these values
288 : : */
289 : 0 : int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
290 : : u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
291 : : {
292 : : struct idpf_ctlq_desc *desc;
293 : : int num_desc_avail = 0;
294 : : int status = 0;
295 : : int i = 0;
296 : :
297 [ # # # # ]: 0 : if (!cq || !cq->ring_size)
298 : : return -ENOBUFS;
299 : :
300 : 0 : idpf_acquire_lock(&cq->cq_lock);
301 : :
302 : : /* Ensure there are enough descriptors to send all messages */
303 [ # # ]: 0 : num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
304 [ # # # # ]: 0 : if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
305 : : status = -ENOSPC;
306 : 0 : goto sq_send_command_out;
307 : : }
308 : :
309 [ # # ]: 0 : for (i = 0; i < num_q_msg; i++) {
310 : 0 : struct idpf_ctlq_msg *msg = &q_msg[i];
311 : :
312 : 0 : desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
313 : :
314 : 0 : desc->opcode = CPU_TO_LE16(msg->opcode);
315 : 0 : desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
316 : :
317 : 0 : desc->cookie_high = CPU_TO_LE32(msg->cookie.mbx.chnl_opcode);
318 : 0 : desc->cookie_low = CPU_TO_LE32(msg->cookie.mbx.chnl_retval);
319 : :
320 : 0 : desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
321 : : IDPF_CTLQ_FLAG_HOST_ID_S);
322 [ # # ]: 0 : if (msg->data_len) {
323 : 0 : struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
324 : :
325 : 0 : desc->datalen |= CPU_TO_LE16(msg->data_len);
326 : 0 : desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
327 : 0 : desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
328 : :
329 : : /* Update the address values in the desc with the pa
330 : : * value for respective buffer
331 : : */
332 : 0 : desc->params.indirect.addr_high =
333 : 0 : CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
334 : 0 : desc->params.indirect.addr_low =
335 : 0 : CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
336 : :
337 [ # # ]: 0 : idpf_memcpy(&desc->params, msg->ctx.indirect.context,
338 : : IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
339 : : #ifdef SIMICS_BUILD
340 : : /* MBX message with opcode idpf_mbq_opc_send_msg_to_pf
341 : : * need to set peer PF function id in param0 for Simics
342 : : */
343 : : if (msg->opcode == idpf_mbq_opc_send_msg_to_pf) {
344 : : desc->params.indirect.param0 =
345 : : CPU_TO_LE32(msg->func_id);
346 : : }
347 : : #endif
348 : : } else {
349 [ # # ]: 0 : idpf_memcpy(&desc->params, msg->ctx.direct,
350 : : IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
351 : : #ifdef SIMICS_BUILD
352 : : /* MBX message with opcode idpf_mbq_opc_send_msg_to_pf
353 : : * need to set peer PF function id in param0 for Simics
354 : : */
355 : : if (msg->opcode == idpf_mbq_opc_send_msg_to_pf) {
356 : : desc->params.direct.param0 =
357 : : CPU_TO_LE32(msg->func_id);
358 : : }
359 : : #endif
360 : : }
361 : :
362 : : /* Store buffer info */
363 : 0 : cq->bi.tx_msg[cq->next_to_use] = msg;
364 : :
365 : 0 : (cq->next_to_use)++;
366 [ # # ]: 0 : if (cq->next_to_use == cq->ring_size)
367 : 0 : cq->next_to_use = 0;
368 : : }
369 : :
370 : : /* Force memory write to complete before letting hardware
371 : : * know that there are new descriptors to fetch.
372 : : */
373 : 0 : idpf_wmb();
374 : :
375 : 0 : wr32(hw, cq->reg.tail, cq->next_to_use);
376 : :
377 : 0 : sq_send_command_out:
378 : : idpf_release_lock(&cq->cq_lock);
379 : :
380 : 0 : return status;
381 : : }
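/* Usage sketch (not part of the driver): posting a single indirect message on
 * the mailbox TX queue. "payload" is a caller-owned DMA-able buffer already
 * holding data_len bytes of message data, and "chnl_opcode" is the opcode
 * carried in the descriptor cookie; both are assumptions supplied by the
 * caller, not values defined in this file. The control queue keeps a pointer
 * to the message until it is reclaimed by idpf_ctlq_clean_sq().
 */
static int example_ctlq_send_one(struct idpf_hw *hw, struct idpf_ctlq_info *tx_cq,
				 u32 chnl_opcode, struct idpf_dma_mem *payload,
				 u16 data_len)
{
	struct idpf_ctlq_msg msg = { 0 };

	msg.opcode = idpf_mbq_opc_send_msg_to_pf; /* mailbox opcode seen above */
	msg.func_id = 0;                          /* placeholder destination id */
	msg.data_len = data_len;                  /* non-zero => buffered send  */
	msg.cookie.mbx.chnl_opcode = chnl_opcode;
	msg.ctx.indirect.payload = payload;       /* held until cleaned */

	/* idpf_ctlq_send() converts the fields to LE and bumps TAIL */
	return idpf_ctlq_send(hw, tx_cq, 1, &msg);
}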
382 : :
383 : : /**
384 : : * __idpf_ctlq_clean_sq - helper function to reclaim descriptors on HW write
385 : : * back for the requested queue
386 : : * @cq: pointer to the specific Control queue
387 : : * @clean_count: (input|output) number of descriptors to clean as input, and
388 : : * number of descriptors actually cleaned as output
389 : : * @msg_status: (output) pointer to msg pointer array to be populated; needs
390 : : * to be allocated by caller
     391                 :            :  * @force: (input) clean descriptors that are not yet marked done. Use with
     392                 :            :  * caution, in kernel mode only
393 : : *
394 : : * Returns an array of message pointers associated with the cleaned
395 : : * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
396 : : * descriptors. The status will be returned for each; any messages that failed
397 : : * to send will have a non-zero status. The caller is expected to free original
398 : : * ctlq_msgs and free or reuse the DMA buffers.
399 : : */
400 : 0 : static int __idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
401 : : struct idpf_ctlq_msg *msg_status[], bool force)
402 : : {
403 : : struct idpf_ctlq_desc *desc;
404 : : u16 i = 0, num_to_clean;
405 : : u16 ntc, desc_err;
406 : : int ret = 0;
407 : :
408 [ # # # # ]: 0 : if (!cq || !cq->ring_size)
409 : : return -ENOBUFS;
410 : :
411 [ # # ]: 0 : if (*clean_count == 0)
412 : : return 0;
413 [ # # ]: 0 : if (*clean_count > cq->ring_size)
414 : : return -EINVAL;
415 : :
416 : 0 : idpf_acquire_lock(&cq->cq_lock);
417 : :
418 : 0 : ntc = cq->next_to_clean;
419 : :
420 : 0 : num_to_clean = *clean_count;
421 : :
422 [ # # ]: 0 : for (i = 0; i < num_to_clean; i++) {
423 : : /* Fetch next descriptor and check if marked as done */
424 : 0 : desc = IDPF_CTLQ_DESC(cq, ntc);
425 [ # # # # ]: 0 : if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
426 : : break;
427 : :
428 : : /* strip off FW internal code */
429 : 0 : desc_err = LE16_TO_CPU(desc->ret_val) & 0xff;
430 : :
431 : 0 : msg_status[i] = cq->bi.tx_msg[ntc];
432 [ # # ]: 0 : if (!msg_status[i])
433 : : break;
434 : 0 : msg_status[i]->status = desc_err;
435 : :
436 [ # # ]: 0 : cq->bi.tx_msg[ntc] = NULL;
437 : :
438 : : /* Zero out any stale data */
439 : : idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
440 : :
441 : 0 : ntc++;
442 [ # # ]: 0 : if (ntc == cq->ring_size)
443 : : ntc = 0;
444 : : }
445 : :
446 : 0 : cq->next_to_clean = ntc;
447 : :
448 : : idpf_release_lock(&cq->cq_lock);
449 : :
450 : : /* Return number of descriptors actually cleaned */
451 : 0 : *clean_count = i;
452 : :
453 : 0 : return ret;
454 : : }
455 : :
456 : : /**
457 : : * idpf_ctlq_clean_sq_force - reclaim all descriptors on HW write back for the
458 : : * requested queue. Use only in kernel mode.
459 : : * @cq: pointer to the specific Control queue
460 : : * @clean_count: (input|output) number of descriptors to clean as input, and
461 : : * number of descriptors actually cleaned as output
462 : : * @msg_status: (output) pointer to msg pointer array to be populated; needs
463 : : * to be allocated by caller
464 : : *
465 : : * Returns an array of message pointers associated with the cleaned
466 : : * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
467 : : * descriptors. The status will be returned for each; any messages that failed
468 : : * to send will have a non-zero status. The caller is expected to free original
469 : : * ctlq_msgs and free or reuse the DMA buffers.
470 : : */
471 : 0 : int idpf_ctlq_clean_sq_force(struct idpf_ctlq_info *cq, u16 *clean_count,
472 : : struct idpf_ctlq_msg *msg_status[])
473 : : {
474 : 0 : return __idpf_ctlq_clean_sq(cq, clean_count, msg_status, true);
475 : : }
476 : :
477 : : /**
478 : : * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the
479 : : * requested queue
480 : : * @cq: pointer to the specific Control queue
481 : : * @clean_count: (input|output) number of descriptors to clean as input, and
482 : : * number of descriptors actually cleaned as output
483 : : * @msg_status: (output) pointer to msg pointer array to be populated; needs
484 : : * to be allocated by caller
485 : : *
486 : : * Returns an array of message pointers associated with the cleaned
487 : : * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
488 : : * descriptors. The status will be returned for each; any messages that failed
489 : : * to send will have a non-zero status. The caller is expected to free original
490 : : * ctlq_msgs and free or reuse the DMA buffers.
491 : : */
492 : 0 : int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
493 : : struct idpf_ctlq_msg *msg_status[])
494 : : {
495 : 0 : return __idpf_ctlq_clean_sq(cq, clean_count, msg_status, false);
496 : : }
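/* Usage sketch (not part of the driver): reclaiming completed TX descriptors
 * after a send. The caller-allocated pointer array must hold at least
 * clean_count entries; on return clean_count is the number actually cleaned
 * and each entry points at the original ctlq_msg given to idpf_ctlq_send().
 * The batch size of 16 is an arbitrary choice, and freeing or reusing the
 * message and its DMA payload is left to caller-specific code.
 */
static void example_ctlq_clean_tx(struct idpf_ctlq_info *tx_cq)
{
	struct idpf_ctlq_msg *done[16];
	u16 clean_count = 16;
	u16 i;

	if (idpf_ctlq_clean_sq(tx_cq, &clean_count, done))
		return;

	for (i = 0; i < clean_count; i++) {
		if (done[i]->status)
			/* non-zero status: the FW return code for a failed send */
			continue;
		/* caller now frees or reuses done[i]->ctx.indirect.payload
		 * and the ctlq_msg itself
		 */
	}
}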
497 : :
498 : : /**
499 : : * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring
500 : : * @hw: pointer to hw struct
501 : : * @cq: pointer to control queue handle
502 : : * @buff_count: (input|output) input is number of buffers caller is trying to
503 : : * return; output is number of buffers that were not posted
504 : : * @buffs: array of pointers to dma mem structs to be given to hardware
505 : : *
506 : : * Caller uses this function to return DMA buffers to the descriptor ring after
507 : : * consuming them; buff_count will be the number of buffers.
508 : : *
509 : : * Note: this function needs to be called after a receive call even
     510                 :            :  * if there are no DMA buffers to be returned, i.e. with buff_count = 0 and
     511                 :            :  * buffs = NULL, to support direct commands
512 : : */
513 : 0 : int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
514 : : u16 *buff_count, struct idpf_dma_mem **buffs)
515 : : {
516 : : struct idpf_ctlq_desc *desc;
517 : 0 : u16 ntp = cq->next_to_post;
518 : : bool buffs_avail = false;
519 : 0 : u16 tbp = ntp + 1;
520 : : int status = 0;
521 : : int i = 0;
522 : :
523 [ # # ]: 0 : if (*buff_count > cq->ring_size)
524 : : return -EINVAL;
525 : :
526 [ # # ]: 0 : if (*buff_count > 0)
527 : : buffs_avail = true;
528 : :
529 : 0 : idpf_acquire_lock(&cq->cq_lock);
530 : :
531 [ # # ]: 0 : if (tbp >= cq->ring_size)
532 : : tbp = 0;
533 : :
534 [ # # ]: 0 : if (tbp == cq->next_to_clean)
535 : : /* Nothing to do */
536 : 0 : goto post_buffs_out;
537 : :
538 : : /* Post buffers for as many as provided or up until the last one used */
539 [ # # ]: 0 : while (ntp != cq->next_to_clean) {
540 : 0 : desc = IDPF_CTLQ_DESC(cq, ntp);
541 : :
542 [ # # ]: 0 : if (cq->bi.rx_buff[ntp])
543 : 0 : goto fill_desc;
544 [ # # ]: 0 : if (!buffs_avail) {
545 : : /* If the caller hasn't given us any buffers or
546 : : * there are none left, search the ring itself
547 : : * for an available buffer to move to this
548 : : * entry starting at the next entry in the ring
549 : : */
550 : 0 : tbp = ntp + 1;
551 : :
552 : : /* Wrap ring if necessary */
553 [ # # ]: 0 : if (tbp >= cq->ring_size)
554 : : tbp = 0;
555 : :
556 [ # # ]: 0 : while (tbp != cq->next_to_clean) {
557 [ # # ]: 0 : if (cq->bi.rx_buff[tbp]) {
558 : 0 : cq->bi.rx_buff[ntp] =
559 : : cq->bi.rx_buff[tbp];
560 : 0 : cq->bi.rx_buff[tbp] = NULL;
561 : :
562 : : /* Found a buffer, no need to
563 : : * search anymore
564 : : */
565 : 0 : break;
566 : : }
567 : :
568 : : /* Wrap ring if necessary */
569 : 0 : tbp++;
570 [ # # ]: 0 : if (tbp >= cq->ring_size)
571 : : tbp = 0;
572 : : }
573 : :
574 [ # # ]: 0 : if (tbp == cq->next_to_clean)
575 : 0 : goto post_buffs_out;
576 : : } else {
577 : : /* Give back pointer to DMA buffer */
578 : 0 : cq->bi.rx_buff[ntp] = buffs[i];
579 : 0 : i++;
580 : :
581 [ # # ]: 0 : if (i >= *buff_count)
582 : : buffs_avail = false;
583 : : }
584 : :
585 : 0 : fill_desc:
586 : 0 : desc->flags =
587 : : CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
588 : :
589 : : /* Post buffers to descriptor */
590 : 0 : desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
591 : 0 : desc->params.indirect.addr_high =
592 : 0 : CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
593 : 0 : desc->params.indirect.addr_low =
594 : 0 : CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
595 : :
596 : 0 : ntp++;
597 [ # # ]: 0 : if (ntp == cq->ring_size)
598 : : ntp = 0;
599 : : }
600 : :
601 : 0 : post_buffs_out:
602 : : /* Only update tail if buffers were actually posted */
603 [ # # ]: 0 : if (cq->next_to_post != ntp) {
604 [ # # ]: 0 : if (ntp)
605 : : /* Update next_to_post to ntp - 1 since current ntp
606 : : * will not have a buffer
607 : : */
608 : 0 : cq->next_to_post = ntp - 1;
609 : : else
     610                 :            :                         /* Wrap to end of ring since current ntp is 0 */
611 : 0 : cq->next_to_post = cq->ring_size - 1;
612 : :
613 : 0 : wr32(hw, cq->reg.tail, cq->next_to_post);
614 : : }
615 : :
616 : : idpf_release_lock(&cq->cq_lock);
617 : :
618 : : /* return the number of buffers that were not posted */
619 : 0 : *buff_count = *buff_count - i;
620 : :
621 : 0 : return status;
622 : : }
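/* Usage sketch (not part of the driver): the zero-buffer form of
 * idpf_ctlq_post_rx_buffs() described in the note above. After receiving a
 * direct (non-buffered) message there is no DMA buffer to hand back, but the
 * call is still made so descriptors for buffers still on the ring get
 * re-posted and TAIL is updated.
 */
static int example_ctlq_rearm_rx(struct idpf_hw *hw, struct idpf_ctlq_info *rx_cq)
{
	u16 nbuf = 0;

	return idpf_ctlq_post_rx_buffs(hw, rx_cq, &nbuf, NULL);
}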
623 : :
624 : : /**
625 : : * idpf_ctlq_recv - receive control queue message call back
626 : : * @cq: pointer to control queue handle to receive on
627 : : * @num_q_msg: (input|output) input number of messages that should be received;
628 : : * output number of messages actually received
629 : : * @q_msg: (output) array of received control queue messages on this q;
630 : : * needs to be pre-allocated by caller for as many messages as requested
631 : : *
     632                 :            :  * Called by the interrupt handler or polling mechanism. The caller is
     633                 :            :  * expected to free the buffers.
634 : : */
635 : 0 : int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
636 : : struct idpf_ctlq_msg *q_msg)
637 : : {
638 : : u16 num_to_clean, ntc, ret_val, flags;
639 : : struct idpf_ctlq_desc *desc;
640 : : int ret_code = 0;
641 : : u16 i = 0;
642 : :
643 [ # # # # ]: 0 : if (!cq || !cq->ring_size)
644 : : return -ENOBUFS;
645 : :
646 [ # # ]: 0 : if (*num_q_msg == 0)
647 : : return 0;
648 [ # # ]: 0 : else if (*num_q_msg > cq->ring_size)
649 : : return -EINVAL;
650 : :
651 : : /* take the lock before we start messing with the ring */
652 : 0 : idpf_acquire_lock(&cq->cq_lock);
653 : :
654 : 0 : ntc = cq->next_to_clean;
655 : :
656 : 0 : num_to_clean = *num_q_msg;
657 : :
658 [ # # ]: 0 : for (i = 0; i < num_to_clean; i++) {
659 : : /* Fetch next descriptor and check if marked as done */
660 : 0 : desc = IDPF_CTLQ_DESC(cq, ntc);
661 : 0 : flags = LE16_TO_CPU(desc->flags);
662 : :
663 [ # # ]: 0 : if (!(flags & IDPF_CTLQ_FLAG_DD))
664 : : break;
665 : :
666 : 0 : ret_val = LE16_TO_CPU(desc->ret_val);
667 : :
668 : 0 : q_msg[i].vmvf_type = (flags &
669 : : (IDPF_CTLQ_FLAG_FTYPE_VM |
670 : 0 : IDPF_CTLQ_FLAG_FTYPE_PF)) >>
671 : : IDPF_CTLQ_FLAG_FTYPE_S;
672 : :
673 [ # # ]: 0 : if (flags & IDPF_CTLQ_FLAG_ERR)
674 : : ret_code = -EBADMSG;
675 : :
676 : 0 : q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
677 : 0 : q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
678 : :
679 : 0 : q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
680 : 0 : q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
681 : 0 : q_msg[i].status = ret_val;
682 : :
683 [ # # ]: 0 : if (desc->datalen) {
684 [ # # ]: 0 : idpf_memcpy(q_msg[i].ctx.indirect.context,
685 : : &desc->params.indirect,
686 : : IDPF_INDIRECT_CTX_SIZE,
687 : : IDPF_DMA_TO_NONDMA);
688 : :
689 : : /* Assign pointer to dma buffer to ctlq_msg array
690 : : * to be given to upper layer
691 : : */
692 : 0 : q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
693 : :
694 : : /* Zero out pointer to DMA buffer info;
695 : : * will be repopulated by post buffers API
696 : : */
697 : 0 : cq->bi.rx_buff[ntc] = NULL;
698 : : } else {
699 [ # # ]: 0 : idpf_memcpy(q_msg[i].ctx.direct,
700 : : desc->params.raw,
701 : : IDPF_DIRECT_CTX_SIZE,
702 : : IDPF_DMA_TO_NONDMA);
703 : : }
704 : :
705 : : /* Zero out stale data in descriptor */
706 : : idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
707 : : IDPF_DMA_MEM);
708 : :
709 : 0 : ntc++;
710 [ # # ]: 0 : if (ntc == cq->ring_size)
711 : : ntc = 0;
     712                 :            :         }
713 : :
714 : 0 : cq->next_to_clean = ntc;
715 : :
716 : : idpf_release_lock(&cq->cq_lock);
717 : :
718 : 0 : *num_q_msg = i;
719 [ # # ]: 0 : if (*num_q_msg == 0)
720 : : ret_code = -ENOMSG;
721 : :
722 : : return ret_code;
723 : : }