Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright(c) 2001-2023 Intel Corporation
3 : : */
4 : :
5 : : #include "ice_common.h"
6 : :
/* ICE_CQ_INIT_REGS - populate the register offsets/masks of a control queue
 * @qinfo: pointer to the struct ice_ctl_q_info to fill in
 * @prefix: register-name prefix selecting the queue instance
 *          (e.g. PF_FW for the AdminQ, PF_MBX for the Mailbox, PF_SB for
 *          the Sideband queue)
 *
 * The send queue (sq) maps onto the ATQ register set and the receive queue
 * (rq) onto the ARQ register set of the given prefix.
 */
#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)
28 : :
29 : : /**
30 : : * ice_adminq_init_regs - Initialize AdminQ registers
31 : : * @hw: pointer to the hardware structure
32 : : *
33 : : * This assumes the alloc_sq and alloc_rq functions have already been called
34 : : */
35 : 0 : static void ice_adminq_init_regs(struct ice_hw *hw)
36 : : {
37 : : struct ice_ctl_q_info *cq = &hw->adminq;
38 : :
39 [ # # ]: 0 : ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
40 : :
41 : 0 : ICE_CQ_INIT_REGS(cq, PF_FW);
42 : 0 : }
43 : :
44 : : /**
45 : : * ice_mailbox_init_regs - Initialize Mailbox registers
46 : : * @hw: pointer to the hardware structure
47 : : *
48 : : * This assumes the alloc_sq and alloc_rq functions have already been called
49 : : */
50 : : static void ice_mailbox_init_regs(struct ice_hw *hw)
51 : : {
52 : : struct ice_ctl_q_info *cq = &hw->mailboxq;
53 : :
54 : 0 : ICE_CQ_INIT_REGS(cq, PF_MBX);
55 : : }
56 : :
57 : : /**
58 : : * ice_sb_init_regs - Initialize Sideband registers
59 : : * @hw: pointer to the hardware structure
60 : : *
61 : : * This assumes the alloc_sq and alloc_rq functions have already been called
62 : : */
63 : 0 : static void ice_sb_init_regs(struct ice_hw *hw)
64 : : {
65 : : struct ice_ctl_q_info *cq = &hw->sbq;
66 : :
67 [ # # ]: 0 : ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
68 : :
69 : 0 : ICE_CQ_INIT_REGS(cq, PF_SB);
70 : 0 : }
71 : :
72 : : /**
73 : : * ice_check_sq_alive
74 : : * @hw: pointer to the HW struct
75 : : * @cq: pointer to the specific Control queue
76 : : *
77 : : * Returns true if Queue is enabled else false.
78 : : */
79 : 0 : bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
80 : : {
81 : : /* check both queue-length and queue-enable fields */
82 [ # # # # : 0 : if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
# # ]
83 : 0 : return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
84 : 0 : cq->sq.len_ena_mask)) ==
85 : 0 : (cq->num_sq_entries | cq->sq.len_ena_mask);
86 : :
87 : : return false;
88 : : }
89 : :
90 : : /**
91 : : * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
92 : : * @hw: pointer to the hardware structure
93 : : * @cq: pointer to the specific Control queue
94 : : */
95 : : static enum ice_status
96 : 0 : ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
97 : : {
98 : 0 : size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
99 : :
100 : 0 : cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
101 [ # # ]: 0 : if (!cq->sq.desc_buf.va)
102 : : return ICE_ERR_NO_MEMORY;
103 : :
104 : 0 : cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
105 : : sizeof(struct ice_sq_cd));
106 [ # # ]: 0 : if (!cq->sq.cmd_buf) {
107 : 0 : ice_free_dma_mem(hw, &cq->sq.desc_buf);
108 : 0 : return ICE_ERR_NO_MEMORY;
109 : : }
110 : :
111 : : return ICE_SUCCESS;
112 : : }
113 : :
114 : : /**
115 : : * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
116 : : * @hw: pointer to the hardware structure
117 : : * @cq: pointer to the specific Control queue
118 : : */
119 : : static enum ice_status
120 : : ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
121 : : {
122 : 0 : size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
123 : :
124 : 0 : cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
125 [ # # ]: 0 : if (!cq->rq.desc_buf.va)
126 : : return ICE_ERR_NO_MEMORY;
127 : : return ICE_SUCCESS;
128 : : }
129 : :
130 : : /**
131 : : * ice_free_cq_ring - Free control queue ring
132 : : * @hw: pointer to the hardware structure
133 : : * @ring: pointer to the specific control queue ring
134 : : *
135 : : * This assumes the posted buffers have already been cleaned
136 : : * and de-allocated
137 : : */
138 : : static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
139 : : {
140 : 0 : ice_free_dma_mem(hw, &ring->desc_buf);
141 : 0 : }
142 : :
143 : : /**
144 : : * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
145 : : * @hw: pointer to the hardware structure
146 : : * @cq: pointer to the specific Control queue
147 : : */
148 : : static enum ice_status
149 : 0 : ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
150 : : {
151 : : int i;
152 : :
153 : : /* We'll be allocating the buffer info memory first, then we can
154 : : * allocate the mapped buffers for the event processing
155 : : */
156 : 0 : cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
157 : : sizeof(cq->rq.desc_buf));
158 [ # # ]: 0 : if (!cq->rq.dma_head)
159 : : return ICE_ERR_NO_MEMORY;
160 : 0 : cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
161 : :
162 : : /* allocate the mapped buffers */
163 [ # # ]: 0 : for (i = 0; i < cq->num_rq_entries; i++) {
164 : : struct ice_aq_desc *desc;
165 : : struct ice_dma_mem *bi;
166 : :
167 : 0 : bi = &cq->rq.r.rq_bi[i];
168 : 0 : bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
169 [ # # ]: 0 : if (!bi->va)
170 : 0 : goto unwind_alloc_rq_bufs;
171 : :
172 : : /* now configure the descriptors for use */
173 : 0 : desc = ICE_CTL_Q_DESC(cq->rq, i);
174 : :
175 : 0 : desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
176 [ # # ]: 0 : if (cq->rq_buf_size > ICE_AQ_LG_BUF)
177 : 0 : desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
178 : 0 : desc->opcode = 0;
179 : : /* This is in accordance with Admin queue design, there is no
180 : : * register for buffer size configuration
181 : : */
182 : 0 : desc->datalen = CPU_TO_LE16(bi->size);
183 : 0 : desc->retval = 0;
184 : 0 : desc->cookie_high = 0;
185 : 0 : desc->cookie_low = 0;
186 : 0 : desc->params.generic.addr_high =
187 : 0 : CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
188 : 0 : desc->params.generic.addr_low =
189 : 0 : CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
190 : 0 : desc->params.generic.param0 = 0;
191 : 0 : desc->params.generic.param1 = 0;
192 : : }
193 : : return ICE_SUCCESS;
194 : :
195 : : unwind_alloc_rq_bufs:
196 : : /* don't try to free the one that failed... */
197 : 0 : i--;
198 [ # # ]: 0 : for (; i >= 0; i--)
199 : 0 : ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
200 : 0 : cq->rq.r.rq_bi = NULL;
201 : 0 : ice_free(hw, cq->rq.dma_head);
202 : 0 : cq->rq.dma_head = NULL;
203 : :
204 : 0 : return ICE_ERR_NO_MEMORY;
205 : : }
206 : :
207 : : /**
208 : : * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
209 : : * @hw: pointer to the hardware structure
210 : : * @cq: pointer to the specific Control queue
211 : : */
212 : : static enum ice_status
213 : 0 : ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
214 : : {
215 : : int i;
216 : :
217 : : /* No mapped memory needed yet, just the buffer info structures */
218 : 0 : cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
219 : : sizeof(cq->sq.desc_buf));
220 [ # # ]: 0 : if (!cq->sq.dma_head)
221 : : return ICE_ERR_NO_MEMORY;
222 : 0 : cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
223 : :
224 : : /* allocate the mapped buffers */
225 [ # # ]: 0 : for (i = 0; i < cq->num_sq_entries; i++) {
226 : : struct ice_dma_mem *bi;
227 : :
228 : 0 : bi = &cq->sq.r.sq_bi[i];
229 : 0 : bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
230 [ # # ]: 0 : if (!bi->va)
231 : 0 : goto unwind_alloc_sq_bufs;
232 : : }
233 : : return ICE_SUCCESS;
234 : :
235 : : unwind_alloc_sq_bufs:
236 : : /* don't try to free the one that failed... */
237 : 0 : i--;
238 [ # # ]: 0 : for (; i >= 0; i--)
239 : 0 : ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
240 : 0 : cq->sq.r.sq_bi = NULL;
241 : 0 : ice_free(hw, cq->sq.dma_head);
242 : 0 : cq->sq.dma_head = NULL;
243 : :
244 : 0 : return ICE_ERR_NO_MEMORY;
245 : : }
246 : :
247 : : static enum ice_status
248 : 0 : ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
249 : : {
250 : : /* Clear Head and Tail */
251 : 0 : wr32(hw, ring->head, 0);
252 : 0 : wr32(hw, ring->tail, 0);
253 : :
254 : : /* set starting point */
255 : 0 : wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
256 : 0 : wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
257 : 0 : wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));
258 : :
259 : : /* Check one register to verify that config was applied */
260 [ # # ]: 0 : if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
261 : 0 : return ICE_ERR_AQ_ERROR;
262 : :
263 : : return ICE_SUCCESS;
264 : : }
265 : :
/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* thin wrapper: the send queue needs no extra setup beyond the
	 * common register programming
	 */
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}
278 : :
279 : : /**
280 : : * ice_cfg_rq_regs - configure Control ARQ register
281 : : * @hw: pointer to the hardware structure
282 : : * @cq: pointer to the specific Control queue
283 : : *
284 : : * Configure base address and length registers for the receive (event queue)
285 : : */
286 : : static enum ice_status
287 : 0 : ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
288 : : {
289 : : enum ice_status status;
290 : :
291 : 0 : status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
292 [ # # ]: 0 : if (status)
293 : : return status;
294 : :
295 : : /* Update tail in the HW to post pre-allocated buffers */
296 : 0 : wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
297 : :
298 : 0 : return ICE_SUCCESS;
299 : : }
300 : :
/* ICE_FREE_CQ_BUFS - release all buffers attached to one control queue ring
 * @hw: pointer to the hardware structure
 * @qi: pointer to the struct ice_ctl_q_info
 * @ring: ring member name token, literally `sq` or `rq` (used for both the
 *        struct member and the `sq_bi`/`rq_bi` union member)
 *
 * Frees the per-entry DMA buffers, the command details list (SQ only; the
 * RQ's cmd_buf is NULL) and the dma_head array. Safe on partially
 * initialized queues because each pointer is checked first.
 */
#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free descriptors */						\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa)		\
				ice_free_dma_mem((hw),			\
					&(qi)->ring.r.ring##_bi[i]);	\
	}								\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
		ice_free(hw, (qi)->ring.cmd_buf);			\
	/* free DMA head */						\
	ice_free(hw, (qi)->ring.dma_head);				\
} while (0)
318 : :
/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! A non-zero count marks the queue as initialized */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	/* unwind: release the per-entry buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}
380 : :
/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! A non-zero count marks the queue as initialized */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	/* unwind: release the pre-posted buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}
442 : :
/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 *
 * Zeroes the queue registers so hardware stops processing, then frees the
 * buffers and the ring. Returns ICE_ERR_NOT_READY if the queue was never
 * initialized (count == 0).
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* serialize against senders using the same queue */
	ice_acquire_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	ice_release_lock(&cq->sq_lock);
	return ret_code;
}
481 : :
482 : : /**
483 : : * ice_aq_ver_check - Check the reported AQ API version.
484 : : * @hw: pointer to the hardware structure
485 : : *
486 : : * Checks if the driver should load on a given AQ API version.
487 : : *
488 : : * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
489 : : */
490 : 0 : static bool ice_aq_ver_check(struct ice_hw *hw)
491 : : {
492 [ # # ]: 0 : if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
493 : : /* Major API version is newer than expected, don't load */
494 [ # # ]: 0 : ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
495 : 0 : return false;
496 [ # # ]: 0 : } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
497 [ # # ]: 0 : if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
498 [ # # ]: 0 : ice_info(hw, "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
499 : : hw->api_maj_ver, hw->api_min_ver,
500 : : EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
501 [ # # ]: 0 : else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
502 [ # # ]: 0 : ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
503 : : hw->api_maj_ver, hw->api_min_ver,
504 : : EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
505 : : } else {
506 : : /* Major API version is older than expected, log a warning */
507 [ # # ]: 0 : ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
508 : : hw->api_maj_ver, hw->api_min_ver,
509 : : EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
510 : : }
511 : : return true;
512 : : }
513 : :
/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 *
 * Zeroes the queue registers so hardware stops posting events, then frees
 * the buffers and the ring. Returns ICE_ERR_NOT_READY if the queue was
 * never initialized (count == 0).
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* serialize against receivers using the same queue */
	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	ice_release_lock(&cq->rq_lock);
	return ret_code;
}
553 : :
554 : : /**
555 : : * ice_init_check_adminq - Check version for Admin Queue to know if its alive
556 : : * @hw: pointer to the hardware structure
557 : : */
558 : 0 : static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
559 : : {
560 : 0 : struct ice_ctl_q_info *cq = &hw->adminq;
561 : : enum ice_status status;
562 : :
563 [ # # ]: 0 : ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
564 : :
565 : 0 : status = ice_aq_get_fw_ver(hw, NULL);
566 [ # # ]: 0 : if (status)
567 : 0 : goto init_ctrlq_free_rq;
568 : :
569 [ # # ]: 0 : if (!ice_aq_ver_check(hw)) {
570 : : status = ICE_ERR_FW_API_VER;
571 : 0 : goto init_ctrlq_free_rq;
572 : : }
573 : :
574 : : return ICE_SUCCESS;
575 : :
576 : 0 : init_ctrlq_free_rq:
577 : 0 : ice_shutdown_rq(hw, cq);
578 : 0 : ice_shutdown_sq(hw, cq);
579 : 0 : return status;
580 : : }
581 : :
/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* select the queue and program its register offsets */
	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_SB:
		ice_sb_init_regs(hw);
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return ICE_SUCCESS;

init_ctrlq_free_sq:
	/* RQ init failed: undo the SQ init so the queue is fully torn down */
	ice_shutdown_sq(hw, cq);
	return ret_code;
}
647 : :
/**
 * ice_is_sbq_supported - is the sideband queue supported
 * @hw: pointer to the hardware structure
 *
 * Returns true if the sideband control queue interface is
 * supported for the device, false otherwise
 */
static bool ice_is_sbq_supported(struct ice_hw *hw)
{
	/* sideband support tracks the generic-MAC capability of the device */
	return ice_is_generic_mac(hw);
}
659 : :
/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 * @unloading: is the driver unloading itself
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
			       bool unloading)
{
	struct ice_ctl_q_info *cq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		/* only the AdminQ notifies firmware before going down, and
		 * only while its send queue is still alive
		 */
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, unloading);
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		/* unknown queue type: nothing to shut down */
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}
694 : :
/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 * @unloading: is the driver unloading itself
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
	/* Shutdown PHY Sideband (only present on devices that support it) */
	if (ice_is_sbq_supported(hw))
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}
715 : :
/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status status;
	u32 retry = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Init FW admin queue; retried because the firmware may report a
	 * transient critical error right after reset
	 */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != ICE_ERR_AQ_FW_CRITICAL)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
		/* tear down and wait before retrying */
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
		ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;
	/* sideband control queue (SBQ) interface is not supported on some
	 * devices. Initialize if supported, else fallback to the admin queue
	 * interface
	 */
	if (ice_is_sbq_supported(hw)) {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
		if (status)
			return status;
	}
	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
765 : :
/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_init_lock(&cq->sq_lock);
	ice_init_lock(&cq->rq_lock);
}
777 : :
/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	/* locks are created once here; runtime re-init must not recreate them */
	ice_init_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_init_ctrlq_locks(&hw->sbq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}
803 : :
/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_destroy_lock(&cq->sq_lock);
	ice_destroy_lock(&cq->rq_lock);
}
815 : :
/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw, true);

	ice_destroy_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_destroy_ctrlq_locks(&hw->sbq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}
835 : :
/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Walks the ring from next_to_clean up to the hardware head pointer,
 * zeroing each completed descriptor and its command details entry.
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	/* hardware advances head past each descriptor it has consumed;
	 * everything between ntc and head is done and can be recycled
	 */
	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
		ntc++;
		/* wrap around at the end of the ring */
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}
868 : :
869 : : /**
870 : : * ice_debug_cq
871 : : * @hw: pointer to the hardware structure
872 : : * @desc: pointer to control queue descriptor
873 : : * @buf: pointer to command buffer
874 : : * @buf_len: max length of buf
875 : : *
876 : : * Dumps debug log about control command with descriptor contents.
877 : : */
878 : 0 : static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
879 : : {
880 : : struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
881 : : u16 datalen, flags;
882 : :
883 [ # # ]: 0 : if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
884 : : return;
885 : :
886 [ # # ]: 0 : if (!desc)
887 : : return;
888 : :
889 : 0 : datalen = LE16_TO_CPU(cq_desc->datalen);
890 : 0 : flags = LE16_TO_CPU(cq_desc->flags);
891 : :
892 [ # # ]: 0 : ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
893 : : LE16_TO_CPU(cq_desc->opcode), flags, datalen,
894 : : LE16_TO_CPU(cq_desc->retval));
895 [ # # ]: 0 : ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
896 : : LE32_TO_CPU(cq_desc->cookie_high),
897 : : LE32_TO_CPU(cq_desc->cookie_low));
898 [ # # ]: 0 : ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
899 : : LE32_TO_CPU(cq_desc->params.generic.param0),
900 : : LE32_TO_CPU(cq_desc->params.generic.param1));
901 [ # # ]: 0 : ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
902 : : LE32_TO_CPU(cq_desc->params.generic.addr_high),
903 : : LE32_TO_CPU(cq_desc->params.generic.addr_low));
904 : : /* Dump buffer iff 1) one exists and 2) is either a response indicated
905 : : * by the DD and/or CMP flag set or a command with the RD flag set.
906 : : */
907 [ # # # # : 0 : if (buf && cq_desc->datalen != 0 &&
# # ]
908 : : (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
909 : : flags & ICE_AQ_FLAG_RD)) {
910 [ # # ]: 0 : ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
911 [ # # # # ]: 0 : ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
912 : : MIN_T(u16, buf_len, datalen));
913 : : }
914 : : }
915 : :
916 : : /**
917 : : * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
918 : : * @hw: pointer to the HW struct
919 : : * @cq: pointer to the specific Control queue
920 : : *
921 : : * Returns true if the firmware has processed all descriptors on the
922 : : * admin send queue. Returns false if there are still requests pending.
923 : : */
924 : : static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
925 : : {
926 : : /* AQ designers suggest use of head for better
927 : : * timing reliability than DD bit
928 : : */
929 : 0 : return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
930 : : }
931 : :
/**
 * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 *
 * The caller must already hold cq->sq_lock (see ice_sq_send_cmd); this
 * variant performs no locking of its own.
 */
enum ice_status
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	cq->sq_last_status = ICE_AQ_RC_OK;

	/* a zero ring count means the send queue was never initialized
	 * (or has been shut down)
	 */
	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	/* buf and buf_size must be provided together or not at all */
	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		/* mark the command as indirect; large buffers additionally
		 * need the LB flag so FW uses the long-buffer path
		 */
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	}

	/* sanity check the HW head register; an out-of-range value means
	 * the ring state is corrupt (e.g. device in a bad state)
	 */
	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	/* stash caller-supplied details (e.g. wb_desc request) in the slot
	 * matching the descriptor we are about to use
	 */
	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
		   ICE_NONDMA_TO_DMA);

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	/* advance next_to_use (wrapping) and ring the doorbell by writing
	 * the tail register - this hands the descriptor to FW
	 */
	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	/* poll for completion, sleeping between polls, up to the configured
	 * command timeout
	 */
	do {
		if (ice_sq_done(hw, cq))
			break;

		ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		ice_memcpy(desc, desc_on_ring, sizeof(*desc),
			   ICE_DMA_TO_NONDMA);
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = LE16_TO_CPU(desc->datalen);

			if (copy_size > buf_size) {
				/* FW reported more data than the caller's
				 * buffer can hold - do not copy, flag error
				 */
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				ice_memcpy(buf, dma_buf->va, copy_size,
					   ICE_DMA_TO_NONDMA);
			}
		}
		retval = LE16_TO_CPU(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  LE16_TO_CPU(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		/* only overwrite status if nothing earlier (e.g. the
		 * copy-size check above) already set an error
		 */
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		ice_memcpy(details->wb_desc, desc_on_ring,
			   sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if (!cmd_completed) {
		/* a set critical-error bit in either queue's LEN register
		 * indicates a FW fault rather than a plain timeout
		 */
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	return status;
}
1110 : :
1111 : : /**
1112 : : * ice_sq_send_cmd - send command to Control Queue (ATQ)
1113 : : * @hw: pointer to the HW struct
1114 : : * @cq: pointer to the specific Control queue
1115 : : * @desc: prefilled descriptor describing the command
1116 : : * @buf: buffer to use for indirect commands (or NULL for direct commands)
1117 : : * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1118 : : * @cd: pointer to command details structure
1119 : : *
1120 : : * This is the main send command routine for the ATQ. It runs the queue,
1121 : : * cleans the queue, etc.
1122 : : */
1123 : : enum ice_status
1124 : 0 : ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1125 : : struct ice_aq_desc *desc, void *buf, u16 buf_size,
1126 : : struct ice_sq_cd *cd)
1127 : : {
1128 : : enum ice_status status = ICE_SUCCESS;
1129 : :
1130 : : /* if reset is in progress return a soft error */
1131 [ # # ]: 0 : if (hw->reset_ongoing)
1132 : : return ICE_ERR_RESET_ONGOING;
1133 : :
1134 : 0 : ice_acquire_lock(&cq->sq_lock);
1135 : 0 : status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
1136 : : ice_release_lock(&cq->sq_lock);
1137 : :
1138 : 0 : return status;
1139 : : }
1140 : :
1141 : : /**
1142 : : * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
1143 : : * @desc: pointer to the temp descriptor (non DMA mem)
1144 : : * @opcode: the opcode can be used to decide which flags to turn off or on
1145 : : *
1146 : : * Fill the desc with default values
1147 : : */
1148 : 0 : void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
1149 : : {
1150 : : /* zero out the desc */
1151 : : ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
1152 : 0 : desc->opcode = CPU_TO_LE16(opcode);
1153 : 0 : desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
1154 : 0 : }
1155 : :
/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 *
 * Takes cq->rq_lock internally; callers must not hold it.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	enum ice_status ret_code = ICE_SUCCESS;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	ice_acquire_lock(&cq->rq_lock);

	/* a zero ring count means the receive queue was never initialized */
	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	/* an ERR flag means FW rejected/failed the event; still deliver the
	 * descriptor contents to the caller, but report the error
	 */
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  LE16_TO_CPU(desc->opcode), rq_last_status);
	}
	ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	/* never copy more than the caller's buffer can hold */
	e->msg_len = MIN_T(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
			   e->msg_len, ICE_DMA_TO_NONDMA);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);

	/* re-arm the descriptor with its DMA buffer so FW can reuse it */
	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16(bi->size);
	desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
	desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		/* account for ring wrap when head is behind next_to_clean */
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	ice_release_lock(&cq->rq_lock);

	return ret_code;
}
|