// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "hclge_cmd.h"
#include "hnae3.h"
#include "hclge_main.h"

#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)

#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)

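/* Free descriptor slots in the ring. One slot is always kept unused so
 * that a full ring (next_to_use just behind next_to_clean) can be told
 * apart from an empty one (next_to_use == next_to_clean).
 */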
static int hclge_ring_space(struct hclge_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

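/* The hardware head pointer is only plausible if it lies between
 * next_to_clean and next_to_use (inclusive), taking ring wrap-around
 * into account; any other value indicates a confused firmware.
 */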
static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

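/* The descriptor ring is allocated from DMA-coherent memory, since it
 * is written by the CPU and read (and written back) by the IMP firmware.
 */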
static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
					 size, &ring->desc_dma_addr,
					 GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	if (ring->desc) {
		dma_free_coherent(cmq_ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

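/* Set up one command queue ring: the CSQ carries commands from driver
 * to firmware, the CRQ carries messages from firmware to driver.
 */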
static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
{
	struct hclge_hw *hw = &hdev->hw;
	struct hclge_cmq_ring *ring =
		(ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->dev = hdev;

	ret = hclge_alloc_cmd_desc(ring);
	if (ret) {
		dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
			(ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
		return ret;
	}

	return 0;
}

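/* Re-arm a previously completed descriptor for another submission,
 * setting the WR flag for read commands and clearing it for writes.
 */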
void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
}

void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
				enum hclge_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hclge_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
}

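/* Program a ring's DMA base address and depth into the hardware, zero
 * its head and tail pointers, and enable the queue.
 */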
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hclge_dev *hdev = ring->dev;
	struct hclge_hw *hw = &hdev->hw;

	if (ring->ring_type == HCLGE_TYPE_CSQ) {
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
				HCLGE_NIC_CMQ_ENABLE);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
				HCLGE_NIC_CMQ_ENABLE);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
	}
}

static void hclge_cmd_init_regs(struct hclge_hw *hw)
{
	hclge_cmd_config_regs(&hw->cmq.csq);
	hclge_cmd_config_regs(&hw->cmq.crq);
}

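/* Reclaim CSQ descriptors the firmware has consumed by advancing
 * next_to_clean to the hardware head pointer. Returns the number of
 * descriptors cleaned, or -EIO if the reported head is implausible.
 */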
static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	u32 head;
	int clean;

	head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (!is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
			 csq->next_to_use, csq->next_to_clean);
		dev_warn(&hdev->pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		dev_warn(&hdev->pdev->dev,
			 "IMP firmware watchdog reset soon expected!\n");
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;

	return clean;
}

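/* The CSQ is done when the hardware head pointer has caught up with the
 * software tail, i.e. every submitted descriptor has been processed.
 */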
static int hclge_cmd_csq_done(struct hclge_hw *hw)
{
	u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool hclge_is_special_opcode(u16 opcode)
{
	/* These commands have several descriptors,
	 * and use the first one to save opcode and return value.
	 */
	u16 spec_opcode[3] = {HCLGE_OPC_STATS_64_BIT,
			      HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC};
	int i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue. It copies the
 * descriptors into the CSQ, rings the doorbell, optionally waits for
 * the firmware to complete them, and cleans the queue afterwards.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	int retval = 0;
	u16 opcode, desc_ret;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (num > hclge_ring_space(&hw->cmq.csq) ||
	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time
	 * which will be used for hardware to write back.
	 */
	ntc = hw->cmq.csq.next_to_use;
	opcode = le16_to_cpu(desc[0].opcode);
	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are sent, use the first one to check.
	 */
	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
		do {
			if (hclge_cmd_csq_done(hw)) {
				complete = true;
				break;
			}
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (!complete) {
		retval = -EAGAIN;
	} else {
		handle = 0;
		while (handle < num) {
			/* Get the result of hardware write back */
			desc_to_use = &hw->cmq.csq.desc[ntc];
			desc[handle] = *desc_to_use;

			if (likely(!hclge_is_special_opcode(opcode)))
				desc_ret = le16_to_cpu(desc[handle].retval);
			else
				desc_ret = le16_to_cpu(desc[0].retval);

			if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
				retval = 0;
			else
				retval = -EIO;
			hw->cmq.last_status = desc_ret;
			ntc++;
			handle++;
			if (ntc == hw->cmq.csq.desc_num)
				ntc = 0;
		}
	}

	/* Clean the command send queue */
	handle = hclge_cmd_csq_clean(hw);
	if (handle < 0)
		retval = handle;
	else if (handle != num)
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return retval;
}

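/* Query the IMP firmware for its version using a single read command. */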
static enum hclge_cmd_status hclge_cmd_query_firmware_version(
		struct hclge_hw *hw, u32 *version)
{
	struct hclge_query_version_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
	resp = (struct hclge_query_version_cmd *)desc.data;

	ret = hclge_cmd_send(hw, &desc, 1);
	if (!ret)
		*version = le32_to_cpu(resp->firmware);

	return ret;
}

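/* One-time setup of both rings: locks, depths, the send timeout and the
 * descriptor memory. The hardware registers themselves are programmed
 * later, in hclge_cmd_init().
 */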
int hclge_cmd_queue_init(struct hclge_dev *hdev)
{
	int ret;

	/* Setup the lock for command queue */
	spin_lock_init(&hdev->hw.cmq.csq.lock);
	spin_lock_init(&hdev->hw.cmq.crq.lock);

	/* Setup the queue entries for command queue */
	hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
	hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;

err_csq:
	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	return ret;
}

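/* Bring both rings back to a clean state and program the hardware
 * registers. If a new reset is already pending, leave the command
 * queue disabled instead of talking to the firmware.
 */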
int hclge_cmd_init(struct hclge_dev *hdev)
{
	u32 version;
	int ret;

	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock_bh(&hdev->hw.cmq.crq.lock);

	hdev->hw.cmq.csq.next_to_clean = 0;
	hdev->hw.cmq.csq.next_to_use = 0;
	hdev->hw.cmq.crq.next_to_clean = 0;
	hdev->hw.cmq.crq.next_to_use = 0;

	hclge_cmd_init_regs(&hdev->hw);

	spin_unlock_bh(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	/* Check if there is new reset pending, because the higher level
	 * reset may happen when lower level reset is being processed.
	 */
	if (hclge_is_reset_pending(hdev)) {
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		return -EBUSY;
	}

	ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"firmware version query failed %d\n", ret);
		return ret;
	}

	hdev->fw_version = version;

	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);

	return 0;
}

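/* Free a ring's descriptor memory under its lock, so the teardown does
 * not race with a late user of the ring.
 */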
static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
{
	spin_lock(&ring->lock);
	hclge_free_cmd_desc(ring);
	spin_unlock(&ring->lock);
}

void hclge_destroy_cmd_queue(struct hclge_hw *hw)
{
	hclge_destroy_queue(&hw->cmq.csq);
	hclge_destroy_queue(&hw->cmq.crq);
}