drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_abi.h"

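/* RoCE carries no real partition key table; both valid indices simply
 * report the default full-membership pkey (0xffff).
 */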
int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
        if (index > 1)
                return -EINVAL;

        *pkey = 0xffff;
        return 0;
}

int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
                     int index, union ib_gid *sgid)
{
        struct ocrdma_dev *dev;

        dev = get_ocrdma_dev(ibdev);
        memset(sgid, 0, sizeof(*sgid));
        if (index >= OCRDMA_MAX_SGID)
                return -EINVAL;

        memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));

        return 0;
}

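/* Report device capabilities to the IB core. Most limits come straight
 * from the mailbox-queried dev->attr; page_size_cap (0xffff000) advertises
 * support for page sizes from 4KB up to 128MB.
 */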
int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
                        struct ib_udata *uhw)
{
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        memset(attr, 0, sizeof(*attr));
        memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
               min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
        ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
        attr->max_mr_size = dev->attr.max_mr_size;
        attr->page_size_cap = 0xffff000;
        attr->vendor_id = dev->nic_info.pdev->vendor;
        attr->vendor_part_id = dev->nic_info.pdev->device;
        attr->hw_ver = dev->asic_id;
        attr->max_qp = dev->attr.max_qp;
        attr->max_ah = OCRDMA_MAX_AH;
        attr->max_qp_wr = dev->attr.max_wqe;

        attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
                                        IB_DEVICE_RC_RNR_NAK_GEN |
                                        IB_DEVICE_SHUTDOWN_PORT |
                                        IB_DEVICE_SYS_IMAGE_GUID |
                                        IB_DEVICE_LOCAL_DMA_LKEY |
                                        IB_DEVICE_MEM_MGT_EXTENSIONS;
        attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
        attr->max_sge_rd = 0;
        attr->max_cq = dev->attr.max_cq;
        attr->max_cqe = dev->attr.max_cqe;
        attr->max_mr = dev->attr.max_mr;
        attr->max_mw = dev->attr.max_mw;
        attr->max_pd = dev->attr.max_pd;
        attr->atomic_cap = 0;
        attr->max_fmr = 0;
        attr->max_map_per_fmr = 0;
        attr->max_qp_rd_atom =
            min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
        attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
        attr->max_srq = dev->attr.max_srq;
        attr->max_srq_sge = dev->attr.max_srq_sge;
        attr->max_srq_wr = dev->attr.max_rqe;
        attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
        attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
        attr->max_pkeys = 1;
        return 0;
}

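/* Map the PHY link speed reported by firmware to the closest IB
 * speed/width pair (e.g. 10Gbps -> QDR x1, 40Gbps -> QDR x4); anything
 * unrecognized falls back to SDR x1.
 */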
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
                                            u8 *ib_speed, u8 *ib_width)
{
        int status;
        u8 speed;

        status = ocrdma_mbx_get_link_speed(dev, &speed);
        if (status)
                speed = OCRDMA_PHYS_LINK_SPEED_ZERO;

        switch (speed) {
        case OCRDMA_PHYS_LINK_SPEED_1GBPS:
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
                break;

        case OCRDMA_PHYS_LINK_SPEED_10GBPS:
                *ib_speed = IB_SPEED_QDR;
                *ib_width = IB_WIDTH_1X;
                break;

        case OCRDMA_PHYS_LINK_SPEED_20GBPS:
                *ib_speed = IB_SPEED_DDR;
                *ib_width = IB_WIDTH_4X;
                break;

        case OCRDMA_PHYS_LINK_SPEED_40GBPS:
                *ib_speed = IB_SPEED_QDR;
                *ib_width = IB_WIDTH_4X;
                break;

        default:
                /* Unsupported */
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
        }
}

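/* Port state mirrors the underlying netdev: ACTIVE with phys_state 5
 * (LinkUp) while the interface is running and operationally up,
 * otherwise DOWN with phys_state 3 (Disabled).
 */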
int ocrdma_query_port(struct ib_device *ibdev,
                      u8 port, struct ib_port_attr *props)
{
        enum ib_port_state port_state;
        struct ocrdma_dev *dev;
        struct net_device *netdev;

        dev = get_ocrdma_dev(ibdev);
        if (port > 1) {
                pr_err("%s(%d) invalid_port=0x%x\n", __func__,
                       dev->id, port);
                return -EINVAL;
        }
        netdev = dev->nic_info.netdev;
        if (netif_running(netdev) && netif_oper_up(netdev)) {
                port_state = IB_PORT_ACTIVE;
                props->phys_state = 5;
        } else {
                port_state = IB_PORT_DOWN;
                props->phys_state = 3;
        }
        props->max_mtu = IB_MTU_4096;
        props->active_mtu = iboe_get_mtu(netdev->mtu);
        props->lid = 0;
        props->lmc = 0;
        props->sm_lid = 0;
        props->sm_sl = 0;
        props->state = port_state;
        props->port_cap_flags = IB_PORT_CM_SUP |
                                IB_PORT_REINIT_SUP |
                                IB_PORT_DEVICE_MGMT_SUP |
                                IB_PORT_VENDOR_CLASS_SUP |
                                IB_PORT_IP_BASED_GIDS;
        props->gid_tbl_len = OCRDMA_MAX_SGID;
        props->pkey_tbl_len = 1;
        props->bad_pkey_cntr = 0;
        props->qkey_viol_cntr = 0;
        get_link_speed_and_width(dev, &props->active_speed,
                                 &props->active_width);
        props->max_msg_sz = 0x80000000;
        props->max_vl_num = 4;
        return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
                       struct ib_port_modify *props)
{
        struct ocrdma_dev *dev;

        dev = get_ocrdma_dev(ibdev);
        if (port > 1) {
                pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
                return -EINVAL;
        }
        return 0;
}

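/* Each ucontext tracks a list of (physical address, length) keys that
 * userspace is allowed to mmap; ocrdma_mmap() refuses any request that
 * does not match a previously registered key.
 */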
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                           unsigned long len)
{
        struct ocrdma_mm *mm;

        mm = kzalloc(sizeof(*mm), GFP_KERNEL);
        if (mm == NULL)
                return -ENOMEM;
        mm->key.phy_addr = phy_addr;
        mm->key.len = len;
        INIT_LIST_HEAD(&mm->entry);

        mutex_lock(&uctx->mm_list_lock);
        list_add_tail(&mm->entry, &uctx->mm_head);
        mutex_unlock(&uctx->mm_list_lock);
        return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                            unsigned long len)
{
        struct ocrdma_mm *mm, *tmp;

        mutex_lock(&uctx->mm_list_lock);
        list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
                /* skip unless both the length and the address match */
                if (len != mm->key.len || phy_addr != mm->key.phy_addr)
                        continue;

                list_del(&mm->entry);
                kfree(mm);
                break;
        }
        mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                               unsigned long len)
{
        bool found = false;
        struct ocrdma_mm *mm;

        mutex_lock(&uctx->mm_list_lock);
        list_for_each_entry(mm, &uctx->mm_head, entry) {
                /* a key matches only when both length and address match */
                if (len != mm->key.len || phy_addr != mm->key.phy_addr)
                        continue;

                found = true;
                break;
        }
        mutex_unlock(&uctx->mm_list_lock);
        return found;
}

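/* PD numbers are pre-allocated in two bitmaps, one for DPP-capable PDs
 * and one for normal PDs; the _thrsh fields record the high-water mark
 * of each pool. Callers must hold dev->dev_lock.
 */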
static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
        u16 pd_bitmap_idx = 0;
        const unsigned long *pd_bitmap;

        if (dpp_pool) {
                pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
                pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
                                                    dev->pd_mgr->max_dpp_pd);
                __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
                dev->pd_mgr->pd_dpp_count++;
                if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
                        dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
        } else {
                pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
                pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
                                                    dev->pd_mgr->max_normal_pd);
                __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
                dev->pd_mgr->pd_norm_count++;
                if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
                        dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
        }
        return pd_bitmap_idx;
}

static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
                                     bool dpp_pool)
{
        u16 pd_count;
        u16 pd_bit_index;

        pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
                              dev->pd_mgr->pd_norm_count;
        if (pd_count == 0)
                return -EINVAL;

        if (dpp_pool) {
                pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
                if (pd_bit_index >= dev->pd_mgr->max_dpp_pd)
                        return -EINVAL;
                __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
                dev->pd_mgr->pd_dpp_count--;
        } else {
                pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
                if (pd_bit_index >= dev->pd_mgr->max_normal_pd)
                        return -EINVAL;
                __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
                dev->pd_mgr->pd_norm_count--;
        }

        return 0;
}

static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
                             bool dpp_pool)
{
        int status;

        mutex_lock(&dev->dev_lock);
        status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
        mutex_unlock(&dev->dev_lock);
        return status;
}

static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
        u16 pd_idx = 0;
        int status = 0;

        mutex_lock(&dev->dev_lock);
        if (pd->dpp_enabled) {
                /* try allocating DPP PD, if not available then normal PD */
                if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
                        pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
                        pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
                } else if (dev->pd_mgr->pd_norm_count <
                           dev->pd_mgr->max_normal_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
                        pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
                        pd->dpp_enabled = false;
                } else {
                        status = -EINVAL;
                }
        } else {
                if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
                        pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
                } else {
                        status = -EINVAL;
                }
        }
        mutex_unlock(&dev->dev_lock);
        return status;
}

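/* Allocate a PD, using the pre-allocated pools when firmware provides
 * them. For user contexts on SKH-R ASICs a DPP PD is attempted first;
 * if the mailbox allocation fails with DPP enabled, it is retried once
 * as a plain PD.
 */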
static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
                                          struct ocrdma_ucontext *uctx,
                                          struct ib_udata *udata)
{
        struct ocrdma_pd *pd = NULL;
        int status = 0;

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        if (udata && uctx && dev->attr.max_dpp_pds) {
                pd->dpp_enabled =
                        ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
                pd->num_dpp_qp =
                        pd->dpp_enabled ? (dev->nic_info.db_page_size /
                                           dev->attr.wqe_size) : 0;
        }

        if (dev->pd_mgr->pd_prealloc_valid) {
                status = ocrdma_get_pd_num(dev, pd);
                if (status == 0) {
                        return pd;
                } else {
                        kfree(pd);
                        return ERR_PTR(status);
                }
        }

retry:
        status = ocrdma_mbx_alloc_pd(dev, pd);
        if (status) {
                if (pd->dpp_enabled) {
                        pd->dpp_enabled = false;
                        pd->num_dpp_qp = 0;
                        goto retry;
                } else {
                        kfree(pd);
                        return ERR_PTR(status);
                }
        }

        return pd;
}

static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
                                 struct ocrdma_pd *pd)
{
        return uctx->cntxt_pd == pd;
}

static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
                              struct ocrdma_pd *pd)
{
        int status = 0;

        if (dev->pd_mgr->pd_prealloc_valid)
                status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
        else
                status = ocrdma_mbx_dealloc_pd(dev, pd);

        kfree(pd);
        return status;
}

static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
                                    struct ocrdma_ucontext *uctx,
                                    struct ib_udata *udata)
{
        int status = 0;

        uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
        if (IS_ERR(uctx->cntxt_pd)) {
                status = PTR_ERR(uctx->cntxt_pd);
                uctx->cntxt_pd = NULL;
                goto err;
        }

        uctx->cntxt_pd->uctx = uctx;
        uctx->cntxt_pd->ibpd.device = &dev->ibdev;
err:
        return status;
}

static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        struct ocrdma_pd *pd = uctx->cntxt_pd;
        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

        if (uctx->pd_in_use) {
                pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
                       __func__, dev->id, pd->id);
        }
        uctx->cntxt_pd = NULL;
        (void)_ocrdma_dealloc_pd(dev, pd);
        return 0;
}

static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        struct ocrdma_pd *pd = NULL;

        mutex_lock(&uctx->mm_list_lock);
        if (!uctx->pd_in_use) {
                uctx->pd_in_use = true;
                pd = uctx->cntxt_pd;
        }
        mutex_unlock(&uctx->mm_list_lock);

        return pd;
}

static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        mutex_lock(&uctx->mm_list_lock);
        uctx->pd_in_use = false;
        mutex_unlock(&uctx->mm_list_lock);
}

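/* Create a user context: allocate a DMA-coherent address-handle table
 * shared with the userspace library, register it as an mmap-able region,
 * attach a per-context PD, and return the layout details via udata.
 */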
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
                                          struct ib_udata *udata)
{
        int status;
        struct ocrdma_ucontext *ctx;
        struct ocrdma_alloc_ucontext_resp resp;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct pci_dev *pdev = dev->nic_info.pdev;
        u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

        if (!udata)
                return ERR_PTR(-EFAULT);
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&ctx->mm_head);
        mutex_init(&ctx->mm_list_lock);

        ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
                                            &ctx->ah_tbl.pa, GFP_KERNEL);
        if (!ctx->ah_tbl.va) {
                kfree(ctx);
                return ERR_PTR(-ENOMEM);
        }
        memset(ctx->ah_tbl.va, 0, map_len);
        ctx->ah_tbl.len = map_len;

        memset(&resp, 0, sizeof(resp));
        resp.ah_tbl_len = ctx->ah_tbl.len;
        resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);

        status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
        if (status)
                goto map_err;

        status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
        if (status)
                goto pd_err;

        resp.dev_id = dev->id;
        resp.max_inline_data = dev->attr.max_inline_data;
        resp.wqe_size = dev->attr.wqe_size;
        resp.rqe_size = dev->attr.rqe_size;
        resp.dpp_wqe_size = dev->attr.wqe_size;

        memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
        status = ib_copy_to_udata(udata, &resp, sizeof(resp));
        if (status)
                goto cpy_err;
        return &ctx->ibucontext;

cpy_err:
pd_err:
        ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
        dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
                          ctx->ah_tbl.pa);
        kfree(ctx);
        return ERR_PTR(status);
}

int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
        int status = 0;
        struct ocrdma_mm *mm, *tmp;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
        struct pci_dev *pdev = dev->nic_info.pdev;

        status = ocrdma_dealloc_ucontext_pd(uctx);

        ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
        dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
                          uctx->ah_tbl.pa);

        list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
                list_del(&mm->entry);
                kfree(mm);
        }
        kfree(uctx);
        return status;
}

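/* Userspace mappings fall into three classes: doorbell pages are mapped
 * uncached, DPP pages write-combined, and everything else (queue memory)
 * as normal cacheable memory.
 */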
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
        struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
        unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
        u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
        unsigned long len = (vma->vm_end - vma->vm_start);
        int status = 0;
        bool found;

        if (vma->vm_start & (PAGE_SIZE - 1))
                return -EINVAL;
        found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
        if (!found)
                return -EINVAL;

        if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
                dev->nic_info.db_total_size)) &&
                (len <= dev->nic_info.db_page_size)) {
                if (vma->vm_flags & VM_READ)
                        return -EPERM;

                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                            len, vma->vm_page_prot);
        } else if (dev->nic_info.dpp_unmapped_len &&
                (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
                (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
                        dev->nic_info.dpp_unmapped_len)) &&
                (len <= dev->nic_info.dpp_unmapped_len)) {
                if (vma->vm_flags & VM_READ)
                        return -EPERM;

                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
                status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                            len, vma->vm_page_prot);
        } else {
                status = remap_pfn_range(vma, vma->vm_start,
                                         vma->vm_pgoff, len, vma->vm_page_prot);
        }
        return status;
}

static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
                                struct ib_ucontext *ib_ctx,
                                struct ib_udata *udata)
{
        int status;
        u64 db_page_addr;
        u64 dpp_page_addr = 0;
        u32 db_page_size;
        struct ocrdma_alloc_pd_uresp rsp;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);

        memset(&rsp, 0, sizeof(rsp));
        rsp.id = pd->id;
        rsp.dpp_enabled = pd->dpp_enabled;
        db_page_addr = ocrdma_get_db_addr(dev, pd->id);
        db_page_size = dev->nic_info.db_page_size;

        status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
        if (status)
                return status;

        if (pd->dpp_enabled) {
                dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
                                (pd->id * PAGE_SIZE);
                status = ocrdma_add_mmap(uctx, dpp_page_addr,
                                         PAGE_SIZE);
                if (status)
                        goto dpp_map_err;
                rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
                rsp.dpp_page_addr_lo = dpp_page_addr;
        }

        status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
        if (status)
                goto ucopy_err;

        pd->uctx = uctx;
        return 0;

ucopy_err:
        /* pd->uctx is not set on failure; unwind using the local uctx */
        if (pd->dpp_enabled)
                ocrdma_del_mmap(uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
        ocrdma_del_mmap(uctx, db_page_addr, db_page_size);
        return status;
}

struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
                              struct ib_ucontext *context,
                              struct ib_udata *udata)
{
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct ocrdma_pd *pd;
        struct ocrdma_ucontext *uctx = NULL;
        int status;
        bool is_uctx_pd = false;

        if (udata && context) {
                uctx = get_ocrdma_ucontext(context);
                pd = ocrdma_get_ucontext_pd(uctx);
                if (pd) {
                        is_uctx_pd = true;
                        goto pd_mapping;
                }
        }

        pd = _ocrdma_alloc_pd(dev, uctx, udata);
        if (IS_ERR(pd)) {
                status = PTR_ERR(pd);
                goto exit;
        }

pd_mapping:
        if (udata && context) {
                status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
                if (status)
                        goto err;
        }
        return &pd->ibpd;

err:
        if (is_uctx_pd) {
                ocrdma_release_ucontext_pd(uctx);
        } else {
                /* don't let a successful dealloc overwrite the error code */
                (void)_ocrdma_dealloc_pd(dev, pd);
        }
exit:
        return ERR_PTR(status);
}

int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_ucontext *uctx = NULL;
        int status = 0;
        u64 usr_db;

        uctx = pd->uctx;
        if (uctx) {
                u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
                        (pd->id * PAGE_SIZE);
                if (pd->dpp_enabled)
                        ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
                usr_db = ocrdma_get_db_addr(dev, pd->id);
                ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);

                if (is_ucontext_pd(uctx, pd)) {
                        ocrdma_release_ucontext_pd(uctx);
                        return status;
                }
        }
        status = _ocrdma_dealloc_pd(dev, pd);
        return status;
}

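/* Translate IB access flags into hardware MR fields and allocate an
 * lkey via mailbox; the rkey mirrors the lkey when remote access is
 * requested.
 */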
static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                             u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
        int status;

        mr->hwmr.fr_mr = 0;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        mr->hwmr.num_pbls = num_pbls;

        status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
        if (status)
                return status;

        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;
        return 0;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
        int status;
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

        if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
                pr_err("%s err, invalid access rights\n", __func__);
                return ERR_PTR(-EINVAL);
        }

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
                                   OCRDMA_ADDR_CHECK_DISABLE);
        if (status) {
                kfree(mr);
                return ERR_PTR(status);
        }

        return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
                                   struct ocrdma_hw_mr *mr)
{
        struct pci_dev *pdev = dev->nic_info.pdev;
        int i = 0;

        if (mr->pbl_table) {
                for (i = 0; i < mr->num_pbls; i++) {
                        if (!mr->pbl_table[i].va)
                                continue;
                        dma_free_coherent(&pdev->dev, mr->pbl_size,
                                          mr->pbl_table[i].va,
                                          mr->pbl_table[i].pa);
                }
                kfree(mr->pbl_table);
                mr->pbl_table = NULL;
        }
}

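/* Size the PBL layout for num_pbes page entries: starting from
 * OCRDMA_MIN_HPAGE_SIZE, keep doubling the per-PBL size until the
 * resulting PBL count fits within dev->attr.max_num_mr_pbl.
 */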
static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                               u32 num_pbes)
{
        u32 num_pbls = 0;
        u32 idx = 0;
        int status = 0;
        u32 pbl_size;

        do {
                pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
                if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
                        status = -EFAULT;
                        break;
                }
                num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
                num_pbls = num_pbls / (pbl_size / sizeof(u64));
                idx++;
        } while (num_pbls >= dev->attr.max_num_mr_pbl);

        mr->hwmr.num_pbes = num_pbes;
        mr->hwmr.num_pbls = num_pbls;
        mr->hwmr.pbl_size = pbl_size;
        return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
        int status = 0;
        int i;
        u32 dma_len = mr->pbl_size;
        struct pci_dev *pdev = dev->nic_info.pdev;
        void *va;
        dma_addr_t pa;

        mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
                                GFP_KERNEL);
        if (!mr->pbl_table)
                return -ENOMEM;

        for (i = 0; i < mr->num_pbls; i++) {
                va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
                if (!va) {
                        ocrdma_free_mr_pbl_tbl(dev, mr);
                        status = -ENOMEM;
                        break;
                }
                memset(va, 0, dma_len);
                mr->pbl_table[i].va = va;
                mr->pbl_table[i].pa = pa;
        }
        return status;
}

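/* Walk the umem scatter-gather list and write one little-endian 64-bit
 * PBE (split into pa_lo/pa_hi) per page, rolling over to the next PBL
 * whenever the current one fills up.
 */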
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                            u32 num_pbes)
{
        struct ocrdma_pbe *pbe;
        struct scatterlist *sg;
        struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
        struct ib_umem *umem = mr->umem;
        int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;

        if (!mr->hwmr.num_pbes)
                return;

        pbe = (struct ocrdma_pbe *)pbl_tbl->va;
        pbe_cnt = 0;

        shift = ilog2(umem->page_size);

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                pages = sg_dma_len(sg) >> shift;
                for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
                        /* store the page address in pbe */
                        pbe->pa_lo =
                            cpu_to_le32(sg_dma_address(sg) +
                                        (umem->page_size * pg_cnt));
                        pbe->pa_hi =
                            cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
                                        umem->page_size * pg_cnt));
                        pbe_cnt += 1;
                        total_num_pbes += 1;
                        pbe++;

                        /* if done building pbes, issue the mbx cmd. */
                        if (total_num_pbes == num_pbes)
                                return;

                        /* if the given pbl is full storing the pbes,
                         * move to next pbl.
                         */
                        if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
                                pbl_tbl++;
                                pbe = (struct ocrdma_pbe *)pbl_tbl->va;
                                pbe_cnt = 0;
                        }
                }
        }
}

struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
                                 u64 usr_addr, int acc, struct ib_udata *udata)
{
        int status = -ENOMEM;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd;
        u32 num_pbes;

        pd = get_ocrdma_pd(ibpd);

        if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(status);
        mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
        if (IS_ERR(mr->umem)) {
                status = -EFAULT;
                goto umem_err;
        }
        num_pbes = ib_umem_page_count(mr->umem);
        status = ocrdma_get_pbl_info(dev, mr, num_pbes);
        if (status)
                goto umem_err;

        mr->hwmr.pbe_size = mr->umem->page_size;
        mr->hwmr.fbo = ib_umem_offset(mr->umem);
        mr->hwmr.va = usr_addr;
        mr->hwmr.len = len;
        mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
        if (status)
                goto umem_err;
        build_user_pbes(dev, mr, num_pbes);
        status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
        if (status)
                goto mbx_err;
        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;

        return &mr->ibmr;

mbx_err:
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
        /* drop the umem pin if it was taken before the failure */
        if (!IS_ERR_OR_NULL(mr->umem))
                ib_umem_release(mr->umem);
        kfree(mr);
        return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
        struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
        struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);

        (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

        /* it could be user registered memory. */
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);

        /* Don't stop cleanup, in case FW is unresponsive */
        if (dev->mqe_ctx.fw_error_state) {
                pr_err("%s(%d) fw not responding.\n",
                       __func__, dev->id);
        }
        return 0;
}

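/* Hand the CQ layout (queue pages, doorbell page, phase state) back to
 * userspace and register both regions for mmap; the doorbell mapping is
 * rolled back if registering the queue pages fails.
 */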
static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
                                struct ib_udata *udata,
                                struct ib_ucontext *ib_ctx)
{
        int status;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
        struct ocrdma_create_cq_uresp uresp;

        memset(&uresp, 0, sizeof(uresp));
        uresp.cq_id = cq->id;
        uresp.page_size = PAGE_ALIGN(cq->len);
        uresp.num_pages = 1;
        uresp.max_hw_cqe = cq->max_hw_cqe;
        uresp.page_addr[0] = virt_to_phys(cq->va);
        uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
        uresp.db_page_size = dev->nic_info.db_page_size;
        uresp.phase_change = cq->phase_change ? 1 : 0;
        status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (status) {
                pr_err("%s(%d) copy error cqid=0x%x.\n",
                       __func__, dev->id, cq->id);
                goto err;
        }
        status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
        if (status)
                goto err;
        status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
        if (status) {
                ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
                goto err;
        }
        cq->ucontext = uctx;
err:
        return status;
}

struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
                               const struct ib_cq_init_attr *attr,
                               struct ib_ucontext *ib_ctx,
                               struct ib_udata *udata)
{
        int entries = attr->cqe;
        struct ocrdma_cq *cq;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct ocrdma_ucontext *uctx = NULL;
        u16 pd_id = 0;
        int status;
        struct ocrdma_create_cq_ureq ureq;

        if (attr->flags)
                return ERR_PTR(-EINVAL);

        if (udata) {
                if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                        return ERR_PTR(-EFAULT);
        } else {
                ureq.dpp_cq = 0;
        }
        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&cq->cq_lock);
        spin_lock_init(&cq->comp_handler_lock);
        INIT_LIST_HEAD(&cq->sq_head);
        INIT_LIST_HEAD(&cq->rq_head);
        cq->first_arm = true;

        if (ib_ctx) {
                uctx = get_ocrdma_ucontext(ib_ctx);
                pd_id = uctx->cntxt_pd->id;
        }

        status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
        if (status) {
                kfree(cq);
                return ERR_PTR(status);
        }
        if (ib_ctx) {
                status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
                if (status)
                        goto ctx_err;
        }
        cq->phase = OCRDMA_CQE_VALID;
        dev->cq_tbl[cq->id] = cq;
        return &cq->ibcq;

ctx_err:
        ocrdma_mbx_destroy_cq(dev, cq);
        kfree(cq);
        return ERR_PTR(status);
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
                     struct ib_udata *udata)
{
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

        if (new_cnt < 1 || new_cnt > cq->max_hw_cqe)
                return -EINVAL;

        ibcq->cqe = new_cnt;
        return 0;
}

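/* Acknowledge whatever valid CQEs are still pending by ringing the CQ
 * doorbell with their count; runs under the CQ lock to synchronize with
 * a polling thread that the last interrupt may have scheduled.
 */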
static void ocrdma_flush_cq(struct ocrdma_cq *cq)
{
        int cqe_cnt;
        int valid_count = 0;
        unsigned long flags;

        struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
        struct ocrdma_cqe *cqe = NULL;

        cqe = cq->va;
        cqe_cnt = cq->cqe_cnt;

        /* Last irq might have scheduled a polling thread
         * sync-up with it before hard flushing.
         */
        spin_lock_irqsave(&cq->cq_lock, flags);
        while (cqe_cnt) {
                if (is_cqe_valid(cq, cqe))
                        valid_count++;
                cqe++;
                cqe_cnt--;
        }
        ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
        spin_unlock_irqrestore(&cq->cq_lock, flags);
}

int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_eq *eq = NULL;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        int pdid = 0;
        int indx;
        u32 irq;

        dev->cq_tbl[cq->id] = NULL;
        indx = ocrdma_get_eq_table_index(dev, cq->eqn);
        if (indx == -EINVAL)
                BUG();

        eq = &dev->eq_tbl[indx];
        irq = ocrdma_get_irq(dev, eq);
        synchronize_irq(irq);
        ocrdma_flush_cq(cq);

        (void)ocrdma_mbx_destroy_cq(dev, cq);
        if (cq->ucontext) {
                pdid = cq->ucontext->cntxt_pd->id;
                ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
                                PAGE_ALIGN(cq->len));
                ocrdma_del_mmap(cq->ucontext,
                                ocrdma_get_db_addr(dev, pdid),
                                dev->nic_info.db_page_size);
        }

        kfree(cq);
        return 0;
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
        int status = -EINVAL;

        if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
                dev->qp_tbl[qp->id] = qp;
                status = 0;
        }
        return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
        dev->qp_tbl[qp->id] = NULL;
}

static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
                                  struct ib_qp_init_attr *attrs)
{
        if ((attrs->qp_type != IB_QPT_GSI) &&
            (attrs->qp_type != IB_QPT_RC) &&
            (attrs->qp_type != IB_QPT_UC) &&
            (attrs->qp_type != IB_QPT_UD)) {
                pr_err("%s(%d) unsupported qp type=0x%x requested\n",
                       __func__, dev->id, attrs->qp_type);
                return -EINVAL;
        }
        /* Skip the check for QP1 to support CM size of 128 */
        if ((attrs->qp_type != IB_QPT_GSI) &&
            (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
                pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_send_wr);
                pr_err("%s(%d) supported send_wr=0x%x\n",
                       __func__, dev->id, dev->attr.max_wqe);
                return -EINVAL;
        }
        if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
                pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_recv_wr);
                pr_err("%s(%d) supported recv_wr=0x%x\n",
                       __func__, dev->id, dev->attr.max_rqe);
                return -EINVAL;
        }
        if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
                pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_inline_data);
                pr_err("%s(%d) supported inline data size=0x%x\n",
                       __func__, dev->id, dev->attr.max_inline_data);
                return -EINVAL;
        }
        if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
                pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_send_sge);
                pr_err("%s(%d) supported send_sge=0x%x\n",
                       __func__, dev->id, dev->attr.max_send_sge);
                return -EINVAL;
        }
        if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
                pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_recv_sge);
                pr_err("%s(%d) supported recv_sge=0x%x\n",
                       __func__, dev->id, dev->attr.max_recv_sge);
                return -EINVAL;
        }
        /* unprivileged user space cannot create special QP */
        if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
                pr_err("%s(%d) Userspace can't create special QPs of type=0x%x\n",
                       __func__, dev->id, attrs->qp_type);
                return -EINVAL;
        }
        /* allow creating only one GSI type of QP */
        if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
                pr_err("%s(%d) GSI special QPs already created.\n",
                       __func__, dev->id);
                return -EINVAL;
        }
        /* verify consumer QPs are not trying to use GSI QP's CQ */
        if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
                if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
                    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
                        pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
                               __func__, dev->id);
                        return -EINVAL;
                }
        }
        return 0;
}

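/* Return the created QP's layout to userspace: queue ids, page
 * addresses, doorbell page and offsets, and DPP credits when enabled.
 * The SQ/RQ pages are registered for mmap and unwound on copy failure.
 */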
static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
                                struct ib_udata *udata, int dpp_offset,
                                int dpp_credit_lmt, int srq)
{
        int status = 0;
        u64 usr_db;
        struct ocrdma_create_qp_uresp uresp;
        struct ocrdma_pd *pd = qp->pd;
        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

        memset(&uresp, 0, sizeof(uresp));
        usr_db = dev->nic_info.unmapped_db +
                        (pd->id * dev->nic_info.db_page_size);
        uresp.qp_id = qp->id;
        uresp.sq_dbid = qp->sq.dbid;
        uresp.num_sq_pages = 1;
        uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
        uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
        uresp.num_wqe_allocated = qp->sq.max_cnt;
        if (!srq) {
                uresp.rq_dbid = qp->rq.dbid;
                uresp.num_rq_pages = 1;
                uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
                uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
                uresp.num_rqe_allocated = qp->rq.max_cnt;
        }
        uresp.db_page_addr = usr_db;
        uresp.db_page_size = dev->nic_info.db_page_size;
        uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
        uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
        uresp.db_shift = OCRDMA_DB_RQ_SHIFT;

        if (qp->dpp_enabled) {
                uresp.dpp_credit = dpp_credit_lmt;
                uresp.dpp_offset = dpp_offset;
        }
        status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (status) {
                pr_err("%s(%d) user copy error.\n", __func__, dev->id);
                goto err;
        }
        status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
                                 uresp.sq_page_size);
        if (status)
                goto err;

        if (!srq) {
                status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
                                         uresp.rq_page_size);
                if (status)
                        goto rq_map_err;
        }
        return status;
rq_map_err:
        ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
        return status;
}

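/* Doorbell placement differs by ASIC generation: both generations use a
 * per-PD doorbell page, but GEN2 (SKH-R) uses different SQ/RQ offsets
 * within it.
 */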
static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
                             struct ocrdma_pd *pd)
{
        if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
                qp->sq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_GEN2_SQ_OFFSET;
                qp->rq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_GEN2_RQ_OFFSET;
        } else {
                qp->sq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_SQ_OFFSET;
                qp->rq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_RQ_OFFSET;
        }
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
        qp->wqe_wr_id_tbl =
            kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
                    GFP_KERNEL);
        if (qp->wqe_wr_id_tbl == NULL)
                return -ENOMEM;
        qp->rqe_wr_id_tbl =
            kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
        if (qp->rqe_wr_id_tbl == NULL)
                return -ENOMEM;

        return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
                                      struct ocrdma_pd *pd,
                                      struct ib_qp_init_attr *attrs)
{
        qp->pd = pd;
        spin_lock_init(&qp->q_lock);
        INIT_LIST_HEAD(&qp->sq_entry);
        INIT_LIST_HEAD(&qp->rq_entry);

        qp->qp_type = attrs->qp_type;
        qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
        qp->max_inline_data = attrs->cap.max_inline_data;
        qp->sq.max_sges = attrs->cap.max_send_sge;
        qp->rq.max_sges = attrs->cap.max_recv_sge;
        qp->state = OCRDMA_QPS_RST;
        qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
                                   struct ib_qp_init_attr *attrs)
{
        if (attrs->qp_type == IB_QPT_GSI) {
                dev->gsi_qp_created = 1;
                dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
                dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
        }
}

struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
                               struct ib_qp_init_attr *attrs,
                               struct ib_udata *udata)
{
        int status;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_create_qp_ureq ureq;
        u16 dpp_credit_lmt, dpp_offset;

        status = ocrdma_check_qp_params(ibpd, dev, attrs);
        if (status)
                goto gen_err;

        memset(&ureq, 0, sizeof(ureq));
        if (udata) {
                if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                        return ERR_PTR(-EFAULT);
        }
        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp) {
                status = -ENOMEM;
                goto gen_err;
        }
        ocrdma_set_qp_init_params(qp, pd, attrs);
        if (udata == NULL)
                qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
                                        OCRDMA_QP_FAST_REG);

        mutex_lock(&dev->dev_lock);
        status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
                                        ureq.dpp_cq_id,
                                        &dpp_offset, &dpp_credit_lmt);
        if (status)
                goto mbx_err;

        /* user space QP's wr_id table are managed in library */
        if (udata == NULL) {
                status = ocrdma_alloc_wr_id_tbl(qp);
                if (status)
                        goto map_err;
        }

        status = ocrdma_add_qpn_map(dev, qp);
        if (status)
                goto map_err;
        ocrdma_set_qp_db(dev, qp, pd);
        if (udata) {
                status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
                                              dpp_credit_lmt,
                                              (attrs->srq != NULL));
                if (status)
                        goto cpy_err;
        }
        ocrdma_store_gsi_qp_cq(dev, attrs);
        qp->ibqp.qp_num = qp->id;
        mutex_unlock(&dev->dev_lock);
        return &qp->ibqp;

cpy_err:
        ocrdma_del_qpn_map(dev, qp);
map_err:
        ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
        mutex_unlock(&dev->dev_lock);
        kfree(qp->wqe_wr_id_tbl);
        kfree(qp->rqe_wr_id_tbl);
        kfree(qp);
        pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
        return ERR_PTR(status);
}

int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask)
{
        int status = 0;
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev;
        enum ib_qp_state old_qps;

        qp = get_ocrdma_qp(ibqp);
        dev = get_ocrdma_dev(ibqp->device);
        if (attr_mask & IB_QP_STATE)
                status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
        /* if new and previous states are same hw doesn't need to
         * know about it.
         */
        if (status < 0)
                return status;
        status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);

        return status;
}

1461 int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1462                      int attr_mask, struct ib_udata *udata)
1463 {
1464         unsigned long flags;
1465         int status = -EINVAL;
1466         struct ocrdma_qp *qp;
1467         struct ocrdma_dev *dev;
1468         enum ib_qp_state old_qps, new_qps;
1469
1470         qp = get_ocrdma_qp(ibqp);
1471         dev = get_ocrdma_dev(ibqp->device);
1472
        /* synchronize with multiple contexts trying to change or
         * retrieve the qp state
         */
1474         mutex_lock(&dev->dev_lock);
        /* synchronize with the wqe/rqe posting and cqe processing contexts */
1476         spin_lock_irqsave(&qp->q_lock, flags);
1477         old_qps = get_ibqp_state(qp->state);
1478         if (attr_mask & IB_QP_STATE)
1479                 new_qps = attr->qp_state;
1480         else
1481                 new_qps = old_qps;
1482         spin_unlock_irqrestore(&qp->q_lock, flags);
1483
1484         if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
1485                                 IB_LINK_LAYER_ETHERNET)) {
1486                 pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
1487                        "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1488                        __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
1489                        old_qps, new_qps);
1490                 goto param_err;
1491         }
1492
1493         status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
1494         if (status > 0)
1495                 status = 0;
1496 param_err:
1497         mutex_unlock(&dev->dev_lock);
1498         return status;
1499 }
1500
1501 static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1502 {
1503         switch (mtu) {
1504         case 256:
1505                 return IB_MTU_256;
1506         case 512:
1507                 return IB_MTU_512;
1508         case 1024:
1509                 return IB_MTU_1024;
1510         case 2048:
1511                 return IB_MTU_2048;
1512         case 4096:
1513                 return IB_MTU_4096;
1514         default:
1515                 return IB_MTU_1024;
1516         }
1517 }
1518
1519 static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1520 {
1521         int ib_qp_acc_flags = 0;
1522
1523         if (qp_cap_flags & OCRDMA_QP_INB_WR)
1524                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1525         if (qp_cap_flags & OCRDMA_QP_INB_RD)
1526                 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1527         return ib_qp_acc_flags;
1528 }
1529
1530 int ocrdma_query_qp(struct ib_qp *ibqp,
1531                     struct ib_qp_attr *qp_attr,
1532                     int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1533 {
1534         int status;
1535         u32 qp_state;
1536         struct ocrdma_qp_params params;
1537         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1538         struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
1539
1540         memset(&params, 0, sizeof(params));
1541         mutex_lock(&dev->dev_lock);
1542         status = ocrdma_mbx_query_qp(dev, qp, &params);
1543         mutex_unlock(&dev->dev_lock);
1544         if (status)
1545                 goto mbx_err;
1546         if (qp->qp_type == IB_QPT_UD)
1547                 qp_attr->qkey = params.qkey;
        qp_attr->path_mtu =
                ocrdma_mtu_int_to_enum((params.path_mtu_pkey_indx &
                                        OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
                                       OCRDMA_QP_PARAMS_PATH_MTU_SHIFT);
1552         qp_attr->path_mig_state = IB_MIG_MIGRATED;
1553         qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1554         qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1555         qp_attr->dest_qp_num =
1556             params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1557
1558         qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1559         qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1560         qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1561         qp_attr->cap.max_send_sge = qp->sq.max_sges;
1562         qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1563         qp_attr->cap.max_inline_data = qp->max_inline_data;
1564         qp_init_attr->cap = qp_attr->cap;
1565         memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
1566                sizeof(params.dgid));
1567         qp_attr->ah_attr.grh.flow_label = params.rnt_rc_sl_fl &
1568             OCRDMA_QP_PARAMS_FLOW_LABEL_MASK;
1569         qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
1570         qp_attr->ah_attr.grh.hop_limit = (params.hop_lmt_rq_psn &
1571                                           OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1572                                                 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
1573         qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
1574                                               OCRDMA_QP_PARAMS_TCLASS_MASK) >>
1575                                                 OCRDMA_QP_PARAMS_TCLASS_SHIFT;
1576
1577         qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1578         qp_attr->ah_attr.port_num = 1;
1579         qp_attr->ah_attr.sl = (params.rnt_rc_sl_fl &
1580                                OCRDMA_QP_PARAMS_SL_MASK) >>
1581                                 OCRDMA_QP_PARAMS_SL_SHIFT;
1582         qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1583                             OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1584                                 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1585         qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1586                               OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1587                                 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1588         qp_attr->retry_cnt =
1589             (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1590                 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1591         qp_attr->min_rnr_timer = 0;
1592         qp_attr->pkey_index = 0;
1593         qp_attr->port_num = 1;
1594         qp_attr->ah_attr.src_path_bits = 0;
1595         qp_attr->ah_attr.static_rate = 0;
1596         qp_attr->alt_pkey_index = 0;
1597         qp_attr->alt_port_num = 0;
1598         qp_attr->alt_timeout = 0;
1599         memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1600         qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1601                     OCRDMA_QP_PARAMS_STATE_SHIFT;
1602         qp_attr->qp_state = get_ibqp_state(qp_state);
1603         qp_attr->cur_qp_state = qp_attr->qp_state;
1604         qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1605         qp_attr->max_dest_rd_atomic =
1606             params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1607         qp_attr->max_rd_atomic =
1608             params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1609         qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1610                                 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1611         /* Sync driver QP state with FW */
1612         ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
1613 mbx_err:
1614         return status;
1615 }
1616
1617 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
1618 {
1619         unsigned int i = idx / 32;
1620         u32 mask = (1U << (idx % 32));
1621
1622         srq->idx_bit_fields[i] ^= mask;
1623 }
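
/* For illustration: with the 32-bit words used above, idx 37 maps to
 * word 37 / 32 = 1 and mask 1 << (37 % 32) = 1 << 5; each toggle flips
 * that bit between 1 (free, matching the 0xff fill done at create time)
 * and 0 (in use).
 */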
1624
1625 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1626 {
1627         return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
1628 }
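
/* For illustration, assuming max_wqe_idx == max_cnt - 1 on a
 * power-of-two ring: with max_cnt = 16, head = 5 and tail = 2,
 * free = ((15 - 5) + 2) % 16 = 12. An empty ring (head == tail)
 * reports max_cnt - 1 free slots, i.e. one slot is sacrificed to
 * distinguish empty from full.
 */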
1629
1630 static int is_hw_sq_empty(struct ocrdma_qp *qp)
1631 {
1632         return (qp->sq.tail == qp->sq.head);
1633 }
1634
1635 static int is_hw_rq_empty(struct ocrdma_qp *qp)
1636 {
1637         return (qp->rq.tail == qp->rq.head);
1638 }
1639
1640 static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1641 {
1642         return q->va + (q->head * q->entry_size);
1643 }
1644
1645 static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1646                                       u32 idx)
1647 {
1648         return q->va + (idx * q->entry_size);
1649 }
1650
1651 static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1652 {
1653         q->head = (q->head + 1) & q->max_wqe_idx;
1654 }
1655
1656 static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1657 {
1658         q->tail = (q->tail + 1) & q->max_wqe_idx;
1659 }
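
/* Illustrative note: both increments rely on max_wqe_idx acting as a
 * 2^n - 1 wrap mask, e.g. with max_wqe_idx = 15 a head of 15 advances
 * to (15 + 1) & 15 = 0.
 */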
1660
1661 /* discard the cqe for a given QP */
1662 static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1663 {
1664         unsigned long cq_flags;
1665         unsigned long flags;
1666         int discard_cnt = 0;
1667         u32 cur_getp, stop_getp;
1668         struct ocrdma_cqe *cqe;
1669         u32 qpn = 0, wqe_idx = 0;
1670
1671         spin_lock_irqsave(&cq->cq_lock, cq_flags);
1672
        /* traverse the CQEs in the hw CQ, find the CQEs that match the
         * given qp and mark each match discarded by clearing its qpn.
         * The doorbell is rung in poll_cq() instead, as we do not
         * complete cqes out of order.
         */
1679
1680         cur_getp = cq->getp;
        /* find up to where we reap the cq. */
1682         stop_getp = cur_getp;
1683         do {
1684                 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1685                         break;
1686
1687                 cqe = cq->va + cur_getp;
                /* exit if (a) we are done reaping the whole hw cq, or
                 * (b) the qp's sq/rq becomes empty.
                 */
1692                 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1693                 /* if previously discarded cqe found, skip that too. */
1694                 /* check for matching qp */
1695                 if (qpn == 0 || qpn != qp->id)
1696                         goto skip_cqe;
1697
1698                 if (is_cqe_for_sq(cqe)) {
1699                         ocrdma_hwq_inc_tail(&qp->sq);
1700                 } else {
1701                         if (qp->srq) {
1702                                 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
1703                                         OCRDMA_CQE_BUFTAG_SHIFT) &
1704                                         qp->srq->rq.max_wqe_idx;
                                BUG_ON(wqe_idx < 1);
1707                                 spin_lock_irqsave(&qp->srq->q_lock, flags);
1708                                 ocrdma_hwq_inc_tail(&qp->srq->rq);
1709                                 ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
1710                                 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1711
1712                         } else {
1713                                 ocrdma_hwq_inc_tail(&qp->rq);
1714                         }
1715                 }
1716                 /* mark cqe discarded so that it is not picked up later
1717                  * in the poll_cq().
1718                  */
1719                 discard_cnt += 1;
1720                 cqe->cmn.qpn = 0;
1721 skip_cqe:
1722                 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1723         } while (cur_getp != stop_getp);
1724         spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1725 }
1726
1727 void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1728 {
1729         int found = false;
1730         unsigned long flags;
1731         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1732         /* sync with any active CQ poll */
1733
1734         spin_lock_irqsave(&dev->flush_q_lock, flags);
1735         found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1736         if (found)
1737                 list_del(&qp->sq_entry);
1738         if (!qp->srq) {
1739                 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1740                 if (found)
1741                         list_del(&qp->rq_entry);
1742         }
1743         spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1744 }
1745
1746 int ocrdma_destroy_qp(struct ib_qp *ibqp)
1747 {
1748         struct ocrdma_pd *pd;
1749         struct ocrdma_qp *qp;
1750         struct ocrdma_dev *dev;
1751         struct ib_qp_attr attrs;
1752         int attr_mask;
1753         unsigned long flags;
1754
1755         qp = get_ocrdma_qp(ibqp);
1756         dev = get_ocrdma_dev(ibqp->device);
1757
1758         pd = qp->pd;
1759
1760         /* change the QP state to ERROR */
1761         if (qp->state != OCRDMA_QPS_RST) {
1762                 attrs.qp_state = IB_QPS_ERR;
1763                 attr_mask = IB_QP_STATE;
1764                 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1765         }
        /* ensure that CQEs for a newly created QP (whose id may be the
         * same as that of the QP just being destroyed) don't get
         * discarded until the old QP's CQEs are discarded.
         */
1770         mutex_lock(&dev->dev_lock);
1771         (void) ocrdma_mbx_destroy_qp(dev, qp);
1772
        /*
         * acquire the CQ lock while destroy is in progress, in order to
         * protect against processing in-flight CQEs for this QP.
         */
1777         spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
1778         if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1779                 spin_lock(&qp->rq_cq->cq_lock);
1780
1781         ocrdma_del_qpn_map(dev, qp);
1782
1783         if (qp->rq_cq && (qp->rq_cq != qp->sq_cq))
1784                 spin_unlock(&qp->rq_cq->cq_lock);
1785         spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1786
1787         if (!pd->uctx) {
1788                 ocrdma_discard_cqes(qp, qp->sq_cq);
1789                 ocrdma_discard_cqes(qp, qp->rq_cq);
1790         }
1791         mutex_unlock(&dev->dev_lock);
1792
1793         if (pd->uctx) {
1794                 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1795                                 PAGE_ALIGN(qp->sq.len));
1796                 if (!qp->srq)
1797                         ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1798                                         PAGE_ALIGN(qp->rq.len));
1799         }
1800
1801         ocrdma_del_flush_qp(qp);
1802
1803         kfree(qp->wqe_wr_id_tbl);
1804         kfree(qp->rqe_wr_id_tbl);
1805         kfree(qp);
1806         return 0;
1807 }
1808
1809 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1810                                 struct ib_udata *udata)
1811 {
1812         int status;
1813         struct ocrdma_create_srq_uresp uresp;
1814
1815         memset(&uresp, 0, sizeof(uresp));
1816         uresp.rq_dbid = srq->rq.dbid;
1817         uresp.num_rq_pages = 1;
1818         uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
1819         uresp.rq_page_size = srq->rq.len;
1820         uresp.db_page_addr = dev->nic_info.unmapped_db +
1821             (srq->pd->id * dev->nic_info.db_page_size);
1822         uresp.db_page_size = dev->nic_info.db_page_size;
1823         uresp.num_rqe_allocated = srq->rq.max_cnt;
1824         if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1825                 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1826                 uresp.db_shift = 24;
1827         } else {
1828                 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1829                 uresp.db_shift = 16;
1830         }
1831
1832         status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1833         if (status)
1834                 return status;
        return ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
                               uresp.rq_page_size);
1840 }
1841
1842 struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1843                                  struct ib_srq_init_attr *init_attr,
1844                                  struct ib_udata *udata)
1845 {
1846         int status = -ENOMEM;
1847         struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1848         struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1849         struct ocrdma_srq *srq;
1850
1851         if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1852                 return ERR_PTR(-EINVAL);
1853         if (init_attr->attr.max_wr > dev->attr.max_rqe)
1854                 return ERR_PTR(-EINVAL);
1855
1856         srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1857         if (!srq)
1858                 return ERR_PTR(status);
1859
1860         spin_lock_init(&srq->q_lock);
1861         srq->pd = pd;
1862         srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
1863         status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1864         if (status)
1865                 goto err;
1866
        if (udata == NULL) {
                status = -ENOMEM;
                srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
                                             GFP_KERNEL);
                if (srq->rqe_wr_id_tbl == NULL)
                        goto arm_err;

                srq->bit_fields_len = (srq->rq.max_cnt / 32) +
                    (srq->rq.max_cnt % 32 ? 1 : 0);
                srq->idx_bit_fields =
                    kmalloc_array(srq->bit_fields_len, sizeof(u32),
                                  GFP_KERNEL);
                if (srq->idx_bit_fields == NULL)
                        goto arm_err;
                memset(srq->idx_bit_fields, 0xff,
                       srq->bit_fields_len * sizeof(u32));
1881         }
1882
1883         if (init_attr->attr.srq_limit) {
1884                 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1885                 if (status)
1886                         goto arm_err;
1887         }
1888
1889         if (udata) {
1890                 status = ocrdma_copy_srq_uresp(dev, srq, udata);
1891                 if (status)
1892                         goto arm_err;
1893         }
1894
1895         return &srq->ibsrq;
1896
1897 arm_err:
1898         ocrdma_mbx_destroy_srq(dev, srq);
1899 err:
1900         kfree(srq->rqe_wr_id_tbl);
1901         kfree(srq->idx_bit_fields);
1902         kfree(srq);
1903         return ERR_PTR(status);
1904 }
1905
1906 int ocrdma_modify_srq(struct ib_srq *ibsrq,
1907                       struct ib_srq_attr *srq_attr,
1908                       enum ib_srq_attr_mask srq_attr_mask,
1909                       struct ib_udata *udata)
1910 {
1911         int status = 0;
1912         struct ocrdma_srq *srq;
1913
1914         srq = get_ocrdma_srq(ibsrq);
1915         if (srq_attr_mask & IB_SRQ_MAX_WR)
1916                 status = -EINVAL;
1917         else
1918                 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1919         return status;
1920 }
1921
1922 int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1923 {
1924         int status;
1925         struct ocrdma_srq *srq;
1926
1927         srq = get_ocrdma_srq(ibsrq);
1928         status = ocrdma_mbx_query_srq(srq, srq_attr);
1929         return status;
1930 }
1931
1932 int ocrdma_destroy_srq(struct ib_srq *ibsrq)
1933 {
1934         int status;
1935         struct ocrdma_srq *srq;
1936         struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1937
1938         srq = get_ocrdma_srq(ibsrq);
1939
1940         status = ocrdma_mbx_destroy_srq(dev, srq);
1941
1942         if (srq->pd->uctx)
1943                 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1944                                 PAGE_ALIGN(srq->rq.len));
1945
1946         kfree(srq->idx_bit_fields);
1947         kfree(srq->rqe_wr_id_tbl);
1948         kfree(srq);
1949         return status;
1950 }
1951
1952 /* unprivileged verbs and their support functions. */
1953 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1954                                 struct ocrdma_hdr_wqe *hdr,
1955                                 struct ib_send_wr *wr)
1956 {
1957         struct ocrdma_ewqe_ud_hdr *ud_hdr =
1958                 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1959         struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
1960
1961         ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
1962         if (qp->qp_type == IB_QPT_GSI)
1963                 ud_hdr->qkey = qp->qkey;
1964         else
1965                 ud_hdr->qkey = wr->wr.ud.remote_qkey;
1966         ud_hdr->rsvd_ahid = ah->id;
1967         if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
1968                 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
1969 }
1970
1971 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1972                               struct ocrdma_sge *sge, int num_sge,
1973                               struct ib_sge *sg_list)
1974 {
1975         int i;
1976
1977         for (i = 0; i < num_sge; i++) {
1978                 sge[i].lrkey = sg_list[i].lkey;
1979                 sge[i].addr_lo = sg_list[i].addr;
1980                 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1981                 sge[i].len = sg_list[i].length;
1982                 hdr->total_len += sg_list[i].length;
1983         }
1984         if (num_sge == 0)
1985                 memset(sge, 0, sizeof(*sge));
1986 }
1987
1988 static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
1989 {
1990         uint32_t total_len = 0, i;
1991
1992         for (i = 0; i < num_sge; i++)
1993                 total_len += sg_list[i].length;
1994         return total_len;
1995 }
1996
1998 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1999                                     struct ocrdma_hdr_wqe *hdr,
2000                                     struct ocrdma_sge *sge,
2001                                     struct ib_send_wr *wr, u32 wqe_size)
2002 {
2003         int i;
2004         char *dpp_addr;
2005
2006         if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
2007                 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
2008                 if (unlikely(hdr->total_len > qp->max_inline_data)) {
                        pr_err("%s() supported_len=0x%x, unsupported len req=0x%x\n",
                               __func__, qp->max_inline_data,
                               hdr->total_len);
2012                         return -EINVAL;
2013                 }
2014                 dpp_addr = (char *)sge;
2015                 for (i = 0; i < wr->num_sge; i++) {
2016                         memcpy(dpp_addr,
2017                                (void *)(unsigned long)wr->sg_list[i].addr,
2018                                wr->sg_list[i].length);
2019                         dpp_addr += wr->sg_list[i].length;
2020                 }
2021
2022                 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
                if (hdr->total_len == 0)
2024                         wqe_size += sizeof(struct ocrdma_sge);
2025                 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
2026         } else {
2027                 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2028                 if (wr->num_sge)
2029                         wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
2030                 else
2031                         wqe_size += sizeof(struct ocrdma_sge);
2032                 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2033         }
2034         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2035         return 0;
2036 }
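
/* Note: the wqe size is carried in hdr->cw in units of
 * OCRDMA_WQE_STRIDE, e.g. a wqe_size of 4 * OCRDMA_WQE_STRIDE bytes is
 * encoded as the value 4 at OCRDMA_WQE_SIZE_SHIFT; ocrdma_build_rqe()
 * below uses the same encoding for rqes.
 */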
2037
2038 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2039                              struct ib_send_wr *wr)
2040 {
2041         int status;
2042         struct ocrdma_sge *sge;
2043         u32 wqe_size = sizeof(*hdr);
2044
2045         if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2046                 ocrdma_build_ud_hdr(qp, hdr, wr);
2047                 sge = (struct ocrdma_sge *)(hdr + 2);
2048                 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
2049         } else {
2050                 sge = (struct ocrdma_sge *)(hdr + 1);
2051         }
2052
2053         status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2054         return status;
2055 }
2056
2057 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2058                               struct ib_send_wr *wr)
2059 {
2060         int status;
2061         struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2062         struct ocrdma_sge *sge = ext_rw + 1;
2063         u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
2064
2065         status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2066         if (status)
2067                 return status;
2068         ext_rw->addr_lo = wr->wr.rdma.remote_addr;
2069         ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
2070         ext_rw->lrkey = wr->wr.rdma.rkey;
2071         ext_rw->len = hdr->total_len;
2072         return 0;
2073 }
2074
2075 static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2076                               struct ib_send_wr *wr)
2077 {
2078         struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2079         struct ocrdma_sge *sge = ext_rw + 1;
2080         u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
2081             sizeof(struct ocrdma_hdr_wqe);
2082
2083         ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2084         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2085         hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
2086         hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2087
2088         ext_rw->addr_lo = wr->wr.rdma.remote_addr;
2089         ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
2090         ext_rw->lrkey = wr->wr.rdma.rkey;
2091         ext_rw->len = hdr->total_len;
2092 }
2093
2094 static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
2095                             struct ocrdma_hw_mr *hwmr)
2096 {
2097         int i;
2098         u64 buf_addr = 0;
2099         int num_pbes;
2100         struct ocrdma_pbe *pbe;
2101
2102         pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2103         num_pbes = 0;
2104
        /* go through the OS phys regions & fill hw pbe entries into pbls. */
        for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
                /* one OS buf can need more than one pbe when the
                 * buffers are of different sizes; split the ib_buf
                 * into one or more pbes.
                 */
2111                 buf_addr = wr->wr.fast_reg.page_list->page_list[i];
2112                 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2113                 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
2114                 num_pbes += 1;
2115                 pbe++;
2116
                /* if the pbl is full of pbes, move to the next pbl. */
2120                 if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
2121                         pbl_tbl++;
2122                         pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2123                 }
2124         }
2126 }
2127
static int get_encoded_page_size(int pg_sz)
{
        /* Max size is 256M (4096 << 16) */
        int i = 0;

        for (; i < 17; i++)
                if (pg_sz == (4096 << i))
                        break;
        return i;
2136 }
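
/* For illustration: pg_sz = 8192 encodes to 1 since 8192 == 4096 << 1;
 * a pg_sz that is not a 4K-based power of two falls through the loop
 * and yields 17, one past the largest valid encoding (16, i.e. 256M).
 */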
2137
2139 static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2140                            struct ib_send_wr *wr)
2141 {
2142         u64 fbo;
2143         struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2144         struct ocrdma_mr *mr;
2145         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2146         u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2147
2148         wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2149
2150         if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
2151                 return -EINVAL;
2152
2153         hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
2154         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2155
        BUG_ON(wr->wr.fast_reg.page_list_len == 0);
2158         if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
2159                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
2160         if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
2161                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
2162         if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
2163                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2164         hdr->lkey = wr->wr.fast_reg.rkey;
2165         hdr->total_len = wr->wr.fast_reg.length;
2166
2167         fbo = wr->wr.fast_reg.iova_start -
2168             (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
2169
2170         fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
2171         fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
2172         fast_reg->fbo_hi = upper_32_bits(fbo);
2173         fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2174         fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
2175         fast_reg->size_sge =
2176                 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
2177         mr = (struct ocrdma_mr *) (unsigned long)
2178                 dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
2179         build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
2180         return 0;
2181 }
2182
2183 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2184 {
2185         u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
2186
2187         iowrite32(val, qp->sq_db);
2188 }
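
/* The doorbell word presumably packs the queue id in the low bits and
 * the number of newly posted wqes (always one per call here) at
 * OCRDMA_DB_SQ_SHIFT; the rq and srq doorbells below follow the same
 * pattern.
 */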
2189
2190 int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2191                      struct ib_send_wr **bad_wr)
2192 {
2193         int status = 0;
2194         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2195         struct ocrdma_hdr_wqe *hdr;
2196         unsigned long flags;
2197
2198         spin_lock_irqsave(&qp->q_lock, flags);
2199         if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2200                 spin_unlock_irqrestore(&qp->q_lock, flags);
2201                 *bad_wr = wr;
2202                 return -EINVAL;
2203         }
2204
2205         while (wr) {
2206                 if (qp->qp_type == IB_QPT_UD &&
2207                     (wr->opcode != IB_WR_SEND &&
2208                      wr->opcode != IB_WR_SEND_WITH_IMM)) {
2209                         *bad_wr = wr;
2210                         status = -EINVAL;
2211                         break;
2212                 }
2213                 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2214                     wr->num_sge > qp->sq.max_sges) {
2215                         *bad_wr = wr;
2216                         status = -ENOMEM;
2217                         break;
2218                 }
2219                 hdr = ocrdma_hwq_head(&qp->sq);
2220                 hdr->cw = 0;
2221                 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2222                         hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2223                 if (wr->send_flags & IB_SEND_FENCE)
2224                         hdr->cw |=
2225                             (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2226                 if (wr->send_flags & IB_SEND_SOLICITED)
2227                         hdr->cw |=
2228                             (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2229                 hdr->total_len = 0;
2230                 switch (wr->opcode) {
                case IB_WR_SEND_WITH_IMM:
                        hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
                        hdr->immdt = ntohl(wr->ex.imm_data);
                        /* fall through */
                case IB_WR_SEND:
                        hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
                        status = ocrdma_build_send(qp, hdr, wr);
                        break;
2238                 case IB_WR_SEND_WITH_INV:
2239                         hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2240                         hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2241                         hdr->lkey = wr->ex.invalidate_rkey;
2242                         status = ocrdma_build_send(qp, hdr, wr);
2243                         break;
2244                 case IB_WR_RDMA_WRITE_WITH_IMM:
2245                         hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2246                         hdr->immdt = ntohl(wr->ex.imm_data);
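                        /* fall through */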
2247                 case IB_WR_RDMA_WRITE:
2248                         hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2249                         status = ocrdma_build_write(qp, hdr, wr);
2250                         break;
2251                 case IB_WR_RDMA_READ:
2252                         ocrdma_build_read(qp, hdr, wr);
2253                         break;
2254                 case IB_WR_LOCAL_INV:
2255                         hdr->cw |=
2256                             (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
2257                         hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2258                                         sizeof(struct ocrdma_sge)) /
2259                                 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2260                         hdr->lkey = wr->ex.invalidate_rkey;
2261                         break;
2262                 case IB_WR_FAST_REG_MR:
2263                         status = ocrdma_build_fr(qp, hdr, wr);
2264                         break;
2265                 default:
2266                         status = -EINVAL;
2267                         break;
2268                 }
2269                 if (status) {
2270                         *bad_wr = wr;
2271                         break;
2272                 }
2273                 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2274                         qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2275                 else
2276                         qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2277                 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2278                 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2279                                    OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2280                 /* make sure wqe is written before adapter can access it */
2281                 wmb();
2282                 /* inform hw to start processing it */
2283                 ocrdma_ring_sq_db(qp);
2284
2285                 /* update pointer, counter for next wr */
2286                 ocrdma_hwq_inc_head(&qp->sq);
2287                 wr = wr->next;
2288         }
2289         spin_unlock_irqrestore(&qp->q_lock, flags);
2290         return status;
2291 }
2292
2293 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2294 {
2295         u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
2296
2297         iowrite32(val, qp->rq_db);
2298 }
2299
2300 static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
2301                              u16 tag)
2302 {
2303         u32 wqe_size = 0;
2304         struct ocrdma_sge *sge;
2305         if (wr->num_sge)
2306                 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2307         else
2308                 wqe_size = sizeof(*sge) + sizeof(*rqe);
2309
2310         rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2311                                 OCRDMA_WQE_SIZE_SHIFT);
2312         rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2313         rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2314         rqe->total_len = 0;
2315         rqe->rsvd_tag = tag;
2316         sge = (struct ocrdma_sge *)(rqe + 1);
2317         ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2318         ocrdma_cpu_to_le32(rqe, wqe_size);
2319 }
2320
2321 int ocrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2322                      struct ib_recv_wr **bad_wr)
2323 {
2324         int status = 0;
2325         unsigned long flags;
2326         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2327         struct ocrdma_hdr_wqe *rqe;
2328
2329         spin_lock_irqsave(&qp->q_lock, flags);
2330         if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
2331                 spin_unlock_irqrestore(&qp->q_lock, flags);
2332                 *bad_wr = wr;
2333                 return -EINVAL;
2334         }
2335         while (wr) {
2336                 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
2337                     wr->num_sge > qp->rq.max_sges) {
2338                         *bad_wr = wr;
2339                         status = -ENOMEM;
2340                         break;
2341                 }
2342                 rqe = ocrdma_hwq_head(&qp->rq);
2343                 ocrdma_build_rqe(rqe, wr, 0);
2344
2345                 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2346                 /* make sure rqe is written before adapter can access it */
2347                 wmb();
2348
2349                 /* inform hw to start processing it */
2350                 ocrdma_ring_rq_db(qp);
2351
2352                 /* update pointer, counter for next wr */
2353                 ocrdma_hwq_inc_head(&qp->rq);
2354                 wr = wr->next;
2355         }
2356         spin_unlock_irqrestore(&qp->q_lock, flags);
2357         return status;
2358 }
2359
/* cqes for an srq's rqes can potentially arrive out of order.
 * index gives the entry in the shadow table in which to store
 * the wr_id; the tag/index is returned in the cqe to refer back
 * to a given rqe.
 */
2365 static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2366 {
2367         int row = 0;
2368         int indx = 0;
2369
2370         for (row = 0; row < srq->bit_fields_len; row++) {
2371                 if (srq->idx_bit_fields[row]) {
2372                         indx = ffs(srq->idx_bit_fields[row]);
2373                         indx = (row * 32) + (indx - 1);
                        BUG_ON(indx >= srq->rq.max_cnt);
2376                         ocrdma_srq_toggle_bit(srq, indx);
2377                         break;
2378                 }
2379         }
2380
        BUG_ON(row == srq->bit_fields_len);
2383         return indx + 1; /* Use from index 1 */
2384 }
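
/* Worked example (illustrative): if idx_bit_fields[0] == 0xfffffffe
 * (slot 0 in use), ffs() returns 2, so indx = 1 and tag 2 is returned;
 * when the matching cqe later arrives carrying that tag,
 * ocrdma_update_free_srq_cqe() toggles bit (tag - 1) to free the slot.
 */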
2385
2386 static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2387 {
2388         u32 val = srq->rq.dbid | (1 << 16);
2389
2390         iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2391 }
2392
2393 int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
2394                          struct ib_recv_wr **bad_wr)
2395 {
2396         int status = 0;
2397         unsigned long flags;
2398         struct ocrdma_srq *srq;
2399         struct ocrdma_hdr_wqe *rqe;
2400         u16 tag;
2401
2402         srq = get_ocrdma_srq(ibsrq);
2403
2404         spin_lock_irqsave(&srq->q_lock, flags);
2405         while (wr) {
2406                 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2407                     wr->num_sge > srq->rq.max_sges) {
2408                         status = -ENOMEM;
2409                         *bad_wr = wr;
2410                         break;
2411                 }
2412                 tag = ocrdma_srq_get_idx(srq);
2413                 rqe = ocrdma_hwq_head(&srq->rq);
2414                 ocrdma_build_rqe(rqe, wr, tag);
2415
2416                 srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2417                 /* make sure rqe is written before adapter can perform DMA */
2418                 wmb();
2419                 /* inform hw to start processing it */
2420                 ocrdma_ring_srq_db(srq);
2421                 /* update pointer, counter for next wr */
2422                 ocrdma_hwq_inc_head(&srq->rq);
2423                 wr = wr->next;
2424         }
2425         spin_unlock_irqrestore(&srq->q_lock, flags);
2426         return status;
2427 }
2428
2429 static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2430 {
2431         enum ib_wc_status ibwc_status;
2432
2433         switch (status) {
2434         case OCRDMA_CQE_GENERAL_ERR:
2435                 ibwc_status = IB_WC_GENERAL_ERR;
2436                 break;
2437         case OCRDMA_CQE_LOC_LEN_ERR:
2438                 ibwc_status = IB_WC_LOC_LEN_ERR;
2439                 break;
2440         case OCRDMA_CQE_LOC_QP_OP_ERR:
2441                 ibwc_status = IB_WC_LOC_QP_OP_ERR;
2442                 break;
2443         case OCRDMA_CQE_LOC_EEC_OP_ERR:
2444                 ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2445                 break;
2446         case OCRDMA_CQE_LOC_PROT_ERR:
2447                 ibwc_status = IB_WC_LOC_PROT_ERR;
2448                 break;
2449         case OCRDMA_CQE_WR_FLUSH_ERR:
2450                 ibwc_status = IB_WC_WR_FLUSH_ERR;
2451                 break;
2452         case OCRDMA_CQE_MW_BIND_ERR:
2453                 ibwc_status = IB_WC_MW_BIND_ERR;
2454                 break;
2455         case OCRDMA_CQE_BAD_RESP_ERR:
2456                 ibwc_status = IB_WC_BAD_RESP_ERR;
2457                 break;
2458         case OCRDMA_CQE_LOC_ACCESS_ERR:
2459                 ibwc_status = IB_WC_LOC_ACCESS_ERR;
2460                 break;
2461         case OCRDMA_CQE_REM_INV_REQ_ERR:
2462                 ibwc_status = IB_WC_REM_INV_REQ_ERR;
2463                 break;
2464         case OCRDMA_CQE_REM_ACCESS_ERR:
2465                 ibwc_status = IB_WC_REM_ACCESS_ERR;
2466                 break;
2467         case OCRDMA_CQE_REM_OP_ERR:
2468                 ibwc_status = IB_WC_REM_OP_ERR;
2469                 break;
2470         case OCRDMA_CQE_RETRY_EXC_ERR:
2471                 ibwc_status = IB_WC_RETRY_EXC_ERR;
2472                 break;
2473         case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2474                 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2475                 break;
2476         case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2477                 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2478                 break;
2479         case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2480                 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2481                 break;
2482         case OCRDMA_CQE_REM_ABORT_ERR:
2483                 ibwc_status = IB_WC_REM_ABORT_ERR;
2484                 break;
2485         case OCRDMA_CQE_INV_EECN_ERR:
2486                 ibwc_status = IB_WC_INV_EECN_ERR;
2487                 break;
2488         case OCRDMA_CQE_INV_EEC_STATE_ERR:
2489                 ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2490                 break;
2491         case OCRDMA_CQE_FATAL_ERR:
2492                 ibwc_status = IB_WC_FATAL_ERR;
2493                 break;
2494         case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2495                 ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2496                 break;
2497         default:
2498                 ibwc_status = IB_WC_GENERAL_ERR;
2499                 break;
2500         }
2501         return ibwc_status;
2502 }
2503
2504 static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2505                       u32 wqe_idx)
2506 {
2507         struct ocrdma_hdr_wqe *hdr;
2508         struct ocrdma_sge *rw;
2509         int opcode;
2510
2511         hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2512
2513         ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2514         /* Undo the hdr->cw swap */
2515         opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2516         switch (opcode) {
2517         case OCRDMA_WRITE:
2518                 ibwc->opcode = IB_WC_RDMA_WRITE;
2519                 break;
2520         case OCRDMA_READ:
2521                 rw = (struct ocrdma_sge *)(hdr + 1);
2522                 ibwc->opcode = IB_WC_RDMA_READ;
2523                 ibwc->byte_len = rw->len;
2524                 break;
2525         case OCRDMA_SEND:
2526                 ibwc->opcode = IB_WC_SEND;
2527                 break;
2528         case OCRDMA_FR_MR:
2529                 ibwc->opcode = IB_WC_FAST_REG_MR;
2530                 break;
2531         case OCRDMA_LKEY_INV:
2532                 ibwc->opcode = IB_WC_LOCAL_INV;
2533                 break;
2534         default:
2535                 ibwc->status = IB_WC_GENERAL_ERR;
2536                 pr_err("%s() invalid opcode received = 0x%x\n",
2537                        __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
2538                 break;
2539         }
2540 }
2541
2542 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2543                                                 struct ocrdma_cqe *cqe)
2544 {
2545         if (is_cqe_for_sq(cqe)) {
2546                 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2547                                 cqe->flags_status_srcqpn) &
2548                                         ~OCRDMA_CQE_STATUS_MASK);
2549                 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2550                                 cqe->flags_status_srcqpn) |
2551                                 (OCRDMA_CQE_WR_FLUSH_ERR <<
2552                                         OCRDMA_CQE_STATUS_SHIFT));
2553         } else {
2554                 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2555                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2556                                         cqe->flags_status_srcqpn) &
2557                                                 ~OCRDMA_CQE_UD_STATUS_MASK);
2558                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2559                                         cqe->flags_status_srcqpn) |
2560                                         (OCRDMA_CQE_WR_FLUSH_ERR <<
2561                                                 OCRDMA_CQE_UD_STATUS_SHIFT));
2562                 } else {
2563                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2564                                         cqe->flags_status_srcqpn) &
2565                                                 ~OCRDMA_CQE_STATUS_MASK);
2566                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2567                                         cqe->flags_status_srcqpn) |
2568                                         (OCRDMA_CQE_WR_FLUSH_ERR <<
2569                                                 OCRDMA_CQE_STATUS_SHIFT));
2570                 }
2571         }
2572 }
2573
2574 static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2575                                   struct ocrdma_qp *qp, int status)
2576 {
2577         bool expand = false;
2578
2579         ibwc->byte_len = 0;
2580         ibwc->qp = &qp->ibqp;
2581         ibwc->status = ocrdma_to_ibwc_err(status);
2582
2583         ocrdma_flush_qp(qp);
2584         ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
2585
        /* if a wqe/rqe is pending for which a cqe still needs to be
         * returned, trigger expanding it into a flush cqe.
         */
2589         if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2590                 expand = true;
2591                 ocrdma_set_cqe_status_flushed(qp, cqe);
2592         }
2593         return expand;
2594 }
2595
2596 static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2597                                   struct ocrdma_qp *qp, int status)
2598 {
2599         ibwc->opcode = IB_WC_RECV;
2600         ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2601         ocrdma_hwq_inc_tail(&qp->rq);
2602
2603         return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2604 }
2605
2606 static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2607                                   struct ocrdma_qp *qp, int status)
2608 {
2609         ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2610         ocrdma_hwq_inc_tail(&qp->sq);
2611
2612         return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2613 }
2614
2616 static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2617                                  struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2618                                  bool *polled, bool *stop)
2619 {
2620         bool expand;
2621         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2622         int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2623                 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2624         if (status < OCRDMA_MAX_CQE_ERR)
2625                 atomic_inc(&dev->cqe_err_stats[status]);
2626
        /* when the hw sq is empty but the rq is not, keep the cqe
         * so that we get the cq event again.
         */
2630         if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2631                 /* when cq for rq and sq is same, it is safe to return
2632                  * flush cqe for RQEs.
2633                  */
2634                 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2635                         *polled = true;
2636                         status = OCRDMA_CQE_WR_FLUSH_ERR;
2637                         expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2638                 } else {
2639                         /* stop processing further cqe as this cqe is used for
2640                          * triggering cq event on buddy cq of RQ.
2641                          * When QP is destroyed, this cqe will be removed
2642                          * from the cq's hardware q.
2643                          */
2644                         *polled = false;
2645                         *stop = true;
2646                         expand = false;
2647                 }
2648         } else if (is_hw_sq_empty(qp)) {
2649                 /* Do nothing */
2650                 expand = false;
2651                 *polled = false;
2652                 *stop = false;
2653         } else {
2654                 *polled = true;
2655                 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2656         }
2657         return expand;
2658 }
2659
2660 static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2661                                      struct ocrdma_cqe *cqe,
2662                                      struct ib_wc *ibwc, bool *polled)
2663 {
2664         bool expand = false;
2665         int tail = qp->sq.tail;
2666         u32 wqe_idx;
2667
2668         if (!qp->wqe_wr_id_tbl[tail].signaled) {
2669                 *polled = false;    /* WC cannot be consumed yet */
2670         } else {
2671                 ibwc->status = IB_WC_SUCCESS;
2672                 ibwc->wc_flags = 0;
2673                 ibwc->qp = &qp->ibqp;
2674                 ocrdma_update_wc(qp, ibwc, tail);
2675                 *polled = true;
2676         }
2677         wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2678                         OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
2679         if (tail != wqe_idx)
2680                 expand = true; /* Coalesced CQE can't be consumed yet */
2681
2682         ocrdma_hwq_inc_tail(&qp->sq);
2683         return expand;
2684 }
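
/* Illustrative example: when the hw coalesces completions, one cqe may
 * cover several wqes. If sq.tail is 5 and the cqe's wqe_idx decodes to
 * 7, expand stays true and the same cqe is polled again until the tail
 * has caught up with wqe_idx.
 */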
2685
2686 static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2687                              struct ib_wc *ibwc, bool *polled, bool *stop)
2688 {
2689         int status;
2690         bool expand;
2691
2692         status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2693                 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2694
2695         if (status == OCRDMA_CQE_SUCCESS)
2696                 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2697         else
2698                 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2699         return expand;
2700 }
2701
2702 static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2703 {
2704         int status;
2705
2706         status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2707                 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2708         ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2709                                                 OCRDMA_CQE_SRCQP_MASK;
2710         ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
2711                                                 OCRDMA_CQE_PKEY_MASK;
2712         ibwc->wc_flags = IB_WC_GRH;
2713         ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2714                                         OCRDMA_CQE_UD_XFER_LEN_SHIFT);
2715         return status;
2716 }
2717
2718 static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2719                                        struct ocrdma_cqe *cqe,
2720                                        struct ocrdma_qp *qp)
2721 {
2722         unsigned long flags;
2723         struct ocrdma_srq *srq;
2724         u32 wqe_idx;
2725
2726         srq = get_ocrdma_srq(qp->ibqp.srq);
2727         wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
2728                 OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
        BUG_ON(wqe_idx < 1);
2731
2732         ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2733         spin_lock_irqsave(&srq->q_lock, flags);
2734         ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
2735         spin_unlock_irqrestore(&srq->q_lock, flags);
2736         ocrdma_hwq_inc_tail(&srq->rq);
2737 }
2738
2739 static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2740                                 struct ib_wc *ibwc, bool *polled, bool *stop,
2741                                 int status)
2742 {
2743         bool expand;
2744         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2745
2746         if (status < OCRDMA_MAX_CQE_ERR)
2747                 atomic_inc(&dev->cqe_err_stats[status]);
2748
        /* when the hw rq is empty but the sq is not, keep the cqe
         * so that we get the cq event again.
         */
2752         if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2753                 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2754                         *polled = true;
2755                         status = OCRDMA_CQE_WR_FLUSH_ERR;
2756                         expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2757                 } else {
2758                         *polled = false;
2759                         *stop = true;
2760                         expand = false;
2761                 }
2762         } else if (is_hw_rq_empty(qp)) {
2763                 /* Do nothing */
2764                 expand = false;
2765                 *polled = false;
2766                 *stop = false;
2767         } else {
2768                 *polled = true;
2769                 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2770         }
2771         return expand;
2772 }
2773
2774 static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2775                                      struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2776 {
2777         ibwc->opcode = IB_WC_RECV;
2778         ibwc->qp = &qp->ibqp;
2779         ibwc->status = IB_WC_SUCCESS;
2780
2781         if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
2782                 ocrdma_update_ud_rcqe(ibwc, cqe);
2783         else
2784                 ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2785
2786         if (is_cqe_imm(cqe)) {
2787                 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2788                 ibwc->wc_flags |= IB_WC_WITH_IMM;
2789         } else if (is_cqe_wr_imm(cqe)) {
2790                 ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2791                 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2792                 ibwc->wc_flags |= IB_WC_WITH_IMM;
2793         } else if (is_cqe_invalidated(cqe)) {
2794                 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2795                 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2796         }
2797         if (qp->ibqp.srq) {
2798                 ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
2799         } else {
2800                 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2801                 ocrdma_hwq_inc_tail(&qp->rq);
2802         }
2803 }

static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
                             struct ib_wc *ibwc, bool *polled, bool *stop)
{
        int status;
        bool expand = false;

        ibwc->wc_flags = 0;
        if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
                status = (le32_to_cpu(cqe->flags_status_srcqpn) &
                                        OCRDMA_CQE_UD_STATUS_MASK) >>
                                        OCRDMA_CQE_UD_STATUS_SHIFT;
        } else {
                status = (le32_to_cpu(cqe->flags_status_srcqpn) &
                             OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
        }

        if (status == OCRDMA_CQE_SUCCESS) {
                *polled = true;
                ocrdma_poll_success_rcqe(qp, cqe, ibwc);
        } else {
                expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
                                              status);
        }
        return expand;
}

static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
                                   u16 cur_getp)
{
        if (cq->phase_change) {
                if (cur_getp == 0)
                        cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
        } else {
                /* clear valid bit */
                cqe->flags_status_srcqpn = 0;
        }
}
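
/* Illustrative walk-through (editorial note), assuming a 4-entry CQ
 * with phase_change enabled: each time cur_getp wraps back to slot 0,
 * the expected OCRDMA_CQE_VALID value flips, so CQEs left over from the
 * previous lap around the ring no longer pass is_cqe_valid():
 *
 *      getp:                0 1 2 3 | 0 1 2 3 | 0 ...
 *      expected valid bit:     1    |    0    |  1 ...
 *
 * Without phase_change, validity is instead tracked by zeroing
 * flags_status_srcqpn once a CQE has been consumed.
 */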

static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
                            struct ib_wc *ibwc)
{
        u16 qpn = 0;
        int i = 0;
        bool expand = false;
        int polled_hw_cqes = 0;
        struct ocrdma_qp *qp = NULL;
        struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
        struct ocrdma_cqe *cqe;
        u16 cur_getp;
        bool polled = false;
        bool stop = false;

        cur_getp = cq->getp;
        while (num_entries) {
                cqe = cq->va + cur_getp;
                /* check whether the cqe is valid or not */
                if (!is_cqe_valid(cq, cqe))
                        break;
                qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
                /* ignore discarded cqe */
                if (qpn == 0)
                        goto skip_cqe;
                qp = dev->qp_tbl[qpn];
                BUG_ON(qp == NULL);

                if (is_cqe_for_sq(cqe)) {
                        expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
                                                  &stop);
                } else {
                        expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
                                                  &stop);
                }
                if (expand)
                        goto expand_cqe;
                if (stop)
                        goto stop_cqe;
                /* clear qpn to avoid duplicate processing by discard_cqe() */
                cqe->cmn.qpn = 0;
skip_cqe:
                polled_hw_cqes += 1;
                cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
                ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
                if (polled) {
                        num_entries -= 1;
                        i += 1;
                        ibwc = ibwc + 1;
                        polled = false;
                }
        }
stop_cqe:
        cq->getp = cur_getp;
        if (cq->deferred_arm) {
                ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol,
                                  polled_hw_cqes);
                cq->deferred_arm = false;
                cq->deferred_sol = false;
        } else {
                /* We need to pop the CQE. No need to arm */
                ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol,
                                  polled_hw_cqes);
                cq->deferred_sol = false;
        }

        return i;
}
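
/* Editorial note: every hardware CQE consumed by ocrdma_poll_hwcq()
 * must be acknowledged through ocrdma_ring_cq_db() so the adapter can
 * reuse those ring slots.  An arm request recorded by ocrdma_arm_cq()
 * is deliberately deferred and folded into this same doorbell write,
 * which keeps "pop N CQEs" and "re-arm" a single update from the
 * hardware's point of view.
 */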

/* Insert an error CQE if the SQ's or RQ's CQ of this QP matches the CQ
 * currently being polled.
 */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
                              struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
        int err_cqes = 0;

        while (num_entries) {
                if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
                        break;
                if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
                        ocrdma_update_wc(qp, ibwc, qp->sq.tail);
                        ocrdma_hwq_inc_tail(&qp->sq);
                } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
                        ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
                        ocrdma_hwq_inc_tail(&qp->rq);
                } else {
                        return err_cqes;
                }
                ibwc->byte_len = 0;
                ibwc->status = IB_WC_WR_FLUSH_ERR;
                ibwc = ibwc + 1;
                err_cqes += 1;
                num_entries -= 1;
        }
        return err_cqes;
}

int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        int cqes_to_poll = num_entries;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        int num_os_cqe = 0, err_cqes = 0;
        struct ocrdma_qp *qp;
        unsigned long flags;

        /* poll cqes from adapter CQ */
        spin_lock_irqsave(&cq->cq_lock, flags);
        num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
        spin_unlock_irqrestore(&cq->cq_lock, flags);
        cqes_to_poll -= num_os_cqe;

        if (cqes_to_poll) {
                wc = wc + num_os_cqe;
                /* The adapter returns a single error CQE when a QP moves
                 * to the error state.  For every WQE and RQE still
                 * pending on a SQ or RQ that uses this CQ, synthesize an
                 * error CQE with wc_status set to FLUSHED.
                 */
                spin_lock_irqsave(&dev->flush_q_lock, flags);
                list_for_each_entry(qp, &cq->sq_head, sq_entry) {
                        if (cqes_to_poll == 0)
                                break;
                        err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
                        cqes_to_poll -= err_cqes;
                        num_os_cqe += err_cqes;
                        wc = wc + err_cqes;
                }
                spin_unlock_irqrestore(&dev->flush_q_lock, flags);
        }
        return num_os_cqe;
}
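
/* Caller-side sketch (illustrative only, not driver code): a kernel ULP
 * reaches ocrdma_poll_cq() through the ib_poll_cq() verb and typically
 * drains the CQ in batches, where handle_wc() is a hypothetical
 * consumer helper:
 *
 *      struct ib_wc wc[16];
 *      int i, n;
 *
 *      do {
 *              n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc);
 *              for (i = 0; i < n; i++)
 *                      handle_wc(&wc[i]);
 *      } while (n > 0);
 */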

int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        u16 cq_id;
        unsigned long flags;
        bool arm_needed = false, sol_needed = false;

        cq_id = cq->id;

        spin_lock_irqsave(&cq->cq_lock, flags);
        if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
                arm_needed = true;
        if (cq_flags & IB_CQ_SOLICITED)
                sol_needed = true;

        if (cq->first_arm) {
                ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
                cq->first_arm = false;
        }

        cq->deferred_arm = true;
        cq->deferred_sol = sol_needed;
        spin_unlock_irqrestore(&cq->cq_lock, flags);

        return 0;
}
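
/* Caller-side sketch (illustrative only): since a completion can slip
 * in between the last poll and the arm request, consumers usually
 * re-poll after arming via the ib_req_notify_cq() verb, which lands in
 * ocrdma_arm_cq() (handle_wc() again being a hypothetical helper):
 *
 *      ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *      while (ib_poll_cq(cq, 1, &wc) > 0)
 *              handle_wc(&wc);
 */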

struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
{
        int status;
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

        if (max_page_list_len > dev->attr.max_pages_per_frmr)
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
        if (status)
                goto pbl_err;
        mr->hwmr.fr_mr = 1;
        mr->hwmr.remote_rd = 0;
        mr->hwmr.remote_wr = 0;
        mr->hwmr.local_rd = 0;
        mr->hwmr.local_wr = 0;
        mr->hwmr.mw_bind = 0;
        status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
        if (status)
                goto pbl_err;
        status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
        if (status)
                goto mbx_err;
        mr->ibmr.rkey = mr->hwmr.lkey;
        mr->ibmr.lkey = mr->hwmr.lkey;
        dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
                (unsigned long) mr;
        return &mr->ibmr;
mbx_err:
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
        kfree(mr);
        return ERR_PTR(status);
}

struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
                                                          *ibdev,
                                                          int page_list_len)
{
        struct ib_fast_reg_page_list *frmr_list;
        int size;

        size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
        frmr_list = kzalloc(size, GFP_KERNEL);
        if (!frmr_list)
                return ERR_PTR(-ENOMEM);
        frmr_list->page_list = (u64 *)(frmr_list + 1);
        return frmr_list;
}

void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
{
        kfree(page_list);
}
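
/* Caller-side sketch (illustrative only): in this kernel generation the
 * two functions above back the ib_alloc_fast_reg_mr() and
 * ib_alloc_fast_reg_page_list() verbs, e.g.:
 *
 *      struct ib_mr *frmr = ib_alloc_fast_reg_mr(pd, 16);
 *      struct ib_fast_reg_page_list *pl =
 *              ib_alloc_fast_reg_page_list(pd->device, 16);
 *
 *      if (IS_ERR(frmr) || IS_ERR(pl))
 *              goto err;
 */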

#define MAX_KERNEL_PBE_SIZE 65536
static inline int count_kernel_pbes(struct ib_phys_buf *buf_list,
                                    int buf_cnt, u32 *pbe_size)
{
        u64 total_size = 0;
        u64 buf_size = 0;
        int i;
        *pbe_size = roundup(buf_list[0].size, PAGE_SIZE);
        *pbe_size = roundup_pow_of_two(*pbe_size);

        /* find the smallest PBE size that we can have */
        for (i = 0; i < buf_cnt; i++) {
                /* the first addr may not be page aligned; its offset is
                 * handled by the fbo, so skip the alignment check.
                 */
                if ((i != 0) && ((buf_list[i].addr & ~PAGE_MASK) ||
                                 (buf_list[i].size & ~PAGE_MASK))) {
                        return 0;
                }

                /* if the configured PBE size is greater than the chosen
                 * one, reduce the PBE size.
                 */
                buf_size = roundup(buf_list[i].size, PAGE_SIZE);
                /* pbe_size must be a power-of-two multiple of the page
                 * size: 1, 2, 4, 8, ... pages.
                 */
                buf_size = roundup_pow_of_two(buf_size);
                if (*pbe_size > buf_size)
                        *pbe_size = buf_size;

                total_size += buf_size;
        }
        *pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ?
            (MAX_KERNEL_PBE_SIZE) : (*pbe_size);

        /* num_pbes = total_size / (*pbe_size); implemented as a shift below. */

        return total_size >> ilog2(*pbe_size);
}
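
/* Worked example (editorial note), assuming PAGE_SIZE == 4K and two
 * page-aligned buffers of 16K and 4K:
 *
 *      buf 0: roundup_pow_of_two(16K) = 16K  ->  pbe_size starts at 16K
 *      buf 1: roundup_pow_of_two(4K)  =  4K  ->  pbe_size shrinks to 4K
 *      total_size = 16K + 4K = 20K
 *      num_pbes   = 20K >> ilog2(4K) = 5
 */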

static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt,
                              u32 pbe_size, struct ocrdma_pbl *pbl_tbl,
                              struct ocrdma_hw_mr *hwmr)
{
        int i;
        int idx;
        int pbes_per_buf = 0;
        u64 buf_addr = 0;
        int num_pbes;
        struct ocrdma_pbe *pbe;
        int total_num_pbes = 0;

        if (!hwmr->num_pbes)
                return;

        pbe = (struct ocrdma_pbe *)pbl_tbl->va;
        num_pbes = 0;

        /* go through the OS phys regions & fill hw pbe entries into pbls. */
        for (i = 0; i < ib_buf_cnt; i++) {
                buf_addr = buf_list[i].addr;
                pbes_per_buf =
                    roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) /
                    pbe_size;
                hwmr->len += buf_list[i].size;
                /* one OS buffer may need several PBEs when the buffers
                 * are of different sizes; split the ib_buf into one or
                 * more pbes.
                 */
                for (idx = 0; idx < pbes_per_buf; idx++) {
                        /* we always program page-aligned addresses; the
                         * offset of the first unaligned address is
                         * covered by the fbo.
                         */
                        if (i == 0) {
                                /* for a non-zero fbo, program the start
                                 * of the page.
                                 */
                                pbe->pa_lo =
                                    cpu_to_le32((u32) (buf_addr & PAGE_MASK));
                                pbe->pa_hi =
                                    cpu_to_le32((u32) upper_32_bits(buf_addr));
                        } else {
                                pbe->pa_lo =
                                    cpu_to_le32((u32) (buf_addr & 0xffffffff));
                                pbe->pa_hi =
                                    cpu_to_le32((u32) upper_32_bits(buf_addr));
                        }
                        buf_addr += pbe_size;
                        num_pbes += 1;
                        total_num_pbes += 1;
                        pbe++;

                        if (total_num_pbes == hwmr->num_pbes)
                                goto mr_tbl_done;
                        /* if the current pbl is full, move on to the
                         * next pbl.
                         */
                        if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
                                pbl_tbl++;
                                pbe = (struct ocrdma_pbe *)pbl_tbl->va;
                                num_pbes = 0;
                        }
                }
        }
mr_tbl_done:
        return;
}
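
/* Example (editorial note): with pbe_size == 4K, a single 16K OS buffer
 * at physical address P becomes four PBEs covering P, P + 4K, P + 8K
 * and P + 12K.  Whenever a PBL page fills up (pbl_size / sizeof(u64)
 * entries), the loop above moves on to the next PBL in the table.
 */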

struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd,
                                   struct ib_phys_buf *buf_list,
                                   int buf_cnt, int acc, u64 *iova_start)
{
        int status = -ENOMEM;
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        u32 num_pbes;
        u32 pbe_size = 0;

        if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE))
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(status);

        num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size);
        if (num_pbes == 0) {
                status = -EINVAL;
                goto pbl_err;
        }
        status = ocrdma_get_pbl_info(dev, mr, num_pbes);
        if (status)
                goto pbl_err;

        mr->hwmr.pbe_size = pbe_size;
        mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK);
        mr->hwmr.va = *iova_start;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;

        status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
        if (status)
                goto pbl_err;
        build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table,
                          &mr->hwmr);
        status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
        if (status)
                goto mbx_err;

        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;
        return &mr->ibmr;

mbx_err:
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
        kfree(mr);
        return ERR_PTR(status);
}
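
/* Caller-side sketch (illustrative only): a kernel ULP reaches
 * ocrdma_reg_kernel_mr() through the ib_reg_phys_mr() verb of this
 * kernel generation, e.g. for a single page-sized region at a
 * hypothetical DMA address dma_addr:
 *
 *      struct ib_phys_buf buf = {
 *              .addr = dma_addr,
 *              .size = PAGE_SIZE,
 *      };
 *      u64 iova = dma_addr;
 *      struct ib_mr *mr;
 *
 *      mr = ib_reg_phys_mr(pd, &buf, 1,
 *                          IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
 *                          &iova);
 *      if (IS_ERR(mr))
 *              return PTR_ERR(mr);
 */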