drivers/infiniband/core/mad.c
/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
                         struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
                                        struct ib_mad_port_private *port_priv,
                                        const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                              struct ib_mad_agent_private *agent_priv,
                              u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                           struct ib_mad_agent_private *agent_priv);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is held by the caller.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *entry;

        list_for_each_entry(entry, &ib_mad_port_list, port_list) {
                if (entry->device == device && entry->port_num == port_num)
                        return entry;
        }
        return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port; takes the port list lock itself
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *entry;
        unsigned long flags;

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        entry = __ib_get_mad_port(device, port_num);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
        /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
        return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
                0 : mgmt_class;
}

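/*
 * QP0 carries the SMI (subnet management interface) and QP1 the GSI
 * (general services); get_spl_qp_index() maps the QP type to the
 * corresponding index into the per-port qp_info array.
 */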
static int get_spl_qp_index(enum ib_qp_type qp_type)
{
        switch (qp_type) {
        case IB_QPT_SMI:
                return 0;
        case IB_QPT_GSI:
                return 1;
        default:
                return -1;
        }
}

static int vendor_class_index(u8 mgmt_class)
{
        return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
        if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
            (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
                return 0;
        return 1;
}

static int is_vendor_oui(char *oui)
{
        if (oui[0] || oui[1] || oui[2])
                return 1;
        return 0;
}

static int is_vendor_method_in_use(
                struct ib_mad_mgmt_vendor_class *vendor_class,
                struct ib_mad_reg_req *mad_reg_req)
{
        struct ib_mad_mgmt_method_table *method;
        int i;

        for (i = 0; i < MAX_MGMT_OUI; i++) {
                if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
                        method = vendor_class->method_table[i];
                        if (method) {
                                if (method_in_use(&method, mad_reg_req))
                                        return 1;
                                else
                                        break;
                        }
                }
        }
        return 0;
}

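/*
 * A MAD is a response if the R bit of its method is set (e.g.
 * IB_MGMT_METHOD_GET_RESP is IB_MGMT_METHOD_GET with the 0x80 response
 * bit), if it is a TrapRepress, or, for the BM class, if the response
 * bit of attr_mod is set.
 */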
int ib_response_mad(const struct ib_mad_hdr *hdr)
{
        return ((hdr->method & IB_MGMT_METHOD_RESP) ||
                (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
                ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
                 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
                                           u8 port_num,
                                           enum ib_qp_type qp_type,
                                           struct ib_mad_reg_req *mad_reg_req,
                                           u8 rmpp_version,
                                           ib_mad_send_handler send_handler,
                                           ib_mad_recv_handler recv_handler,
                                           void *context,
                                           u32 registration_flags)
{
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_reg_req *reg_req = NULL;
        struct ib_mad_mgmt_class_table *class;
        struct ib_mad_mgmt_vendor_class_table *vendor;
        struct ib_mad_mgmt_vendor_class *vendor_class;
        struct ib_mad_mgmt_method_table *method;
        int ret2, qpn;
        unsigned long flags;
        u8 mgmt_class, vclass;

        /* Validate parameters */
        qpn = get_spl_qp_index(qp_type);
        if (qpn == -1) {
                dev_notice(&device->dev,
                           "ib_register_mad_agent: invalid QP Type %d\n",
                           qp_type);
                goto error1;
        }

        if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
                dev_notice(&device->dev,
                           "ib_register_mad_agent: invalid RMPP Version %u\n",
                           rmpp_version);
                goto error1;
        }

        /* Validate MAD registration request if supplied */
        if (mad_reg_req) {
                if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
                        dev_notice(&device->dev,
                                   "ib_register_mad_agent: invalid Class Version %u\n",
                                   mad_reg_req->mgmt_class_version);
                        goto error1;
                }
                if (!recv_handler) {
                        dev_notice(&device->dev,
                                   "ib_register_mad_agent: no recv_handler\n");
                        goto error1;
                }
                if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
                        /*
                         * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
                         * one in this range currently allowed
                         */
                        if (mad_reg_req->mgmt_class !=
                            IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                                dev_notice(&device->dev,
                                           "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
                                           mad_reg_req->mgmt_class);
                                goto error1;
                        }
                } else if (mad_reg_req->mgmt_class == 0) {
                        /*
                         * Class 0 is reserved in IBA and is used for
                         * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
                         */
                        dev_notice(&device->dev,
                                   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
                        goto error1;
                } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
                        /*
                         * If class is in "new" vendor range,
                         * ensure supplied OUI is not zero
                         */
                        if (!is_vendor_oui(mad_reg_req->oui)) {
                                dev_notice(&device->dev,
                                           "ib_register_mad_agent: No OUI specified for class 0x%x\n",
                                           mad_reg_req->mgmt_class);
                                goto error1;
                        }
                }
                /* Make sure class supplied is consistent with RMPP */
                if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
                        if (rmpp_version) {
                                dev_notice(&device->dev,
                                           "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
                                           mad_reg_req->mgmt_class);
                                goto error1;
                        }
                }

                /* Make sure class supplied is consistent with QP type */
                if (qp_type == IB_QPT_SMI) {
                        if ((mad_reg_req->mgmt_class !=
                                        IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
                            (mad_reg_req->mgmt_class !=
                                        IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
                                dev_notice(&device->dev,
                                           "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
                                           mad_reg_req->mgmt_class);
                                goto error1;
                        }
                } else {
                        if ((mad_reg_req->mgmt_class ==
                                        IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
                            (mad_reg_req->mgmt_class ==
                                        IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
                                dev_notice(&device->dev,
                                           "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
                                           mad_reg_req->mgmt_class);
                                goto error1;
                        }
                }
        } else {
                /* No registration request supplied */
                if (!send_handler)
                        goto error1;
                if (registration_flags & IB_MAD_USER_RMPP)
                        goto error1;
        }

        /* Validate device and port */
        port_priv = ib_get_mad_port(device, port_num);
        if (!port_priv) {
                dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
                ret = ERR_PTR(-ENODEV);
                goto error1;
        }

        /*
         * Verify the QP requested is supported. For example, Ethernet
         * devices will not have QP0.
         */
        if (!port_priv->qp_info[qpn].qp) {
                dev_notice(&device->dev,
                           "ib_register_mad_agent: QP %d not supported\n", qpn);
                ret = ERR_PTR(-EPROTONOSUPPORT);
                goto error1;
        }

        /* Allocate structures */
        mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
        if (!mad_agent_priv) {
                ret = ERR_PTR(-ENOMEM);
                goto error1;
        }

        mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
                                                 IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(mad_agent_priv->agent.mr)) {
                ret = ERR_PTR(-ENOMEM);
                goto error2;
        }

        if (mad_reg_req) {
                reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
                if (!reg_req) {
                        ret = ERR_PTR(-ENOMEM);
                        goto error3;
                }
        }

        /* Now, fill in the various structures */
        mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
        mad_agent_priv->reg_req = reg_req;
        mad_agent_priv->agent.rmpp_version = rmpp_version;
        mad_agent_priv->agent.device = device;
        mad_agent_priv->agent.recv_handler = recv_handler;
        mad_agent_priv->agent.send_handler = send_handler;
        mad_agent_priv->agent.context = context;
        mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
        mad_agent_priv->agent.port_num = port_num;
        mad_agent_priv->agent.flags = registration_flags;
        spin_lock_init(&mad_agent_priv->lock);
        INIT_LIST_HEAD(&mad_agent_priv->send_list);
        INIT_LIST_HEAD(&mad_agent_priv->wait_list);
        INIT_LIST_HEAD(&mad_agent_priv->done_list);
        INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
        INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
        INIT_LIST_HEAD(&mad_agent_priv->local_list);
        INIT_WORK(&mad_agent_priv->local_work, local_completions);
        atomic_set(&mad_agent_priv->refcount, 1);
        init_completion(&mad_agent_priv->comp);

        spin_lock_irqsave(&port_priv->reg_lock, flags);
        mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

        /*
         * Make sure MAD registration (if supplied)
         * is non overlapping with any existing ones
         */
        if (mad_reg_req) {
                mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
                if (!is_vendor_class(mgmt_class)) {
                        class = port_priv->version[mad_reg_req->
                                                   mgmt_class_version].class;
                        if (class) {
                                method = class->method_table[mgmt_class];
                                if (method) {
                                        if (method_in_use(&method,
                                                           mad_reg_req))
                                                goto error4;
                                }
                        }
                        ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
                                                  mgmt_class);
                } else {
                        /* "New" vendor class range */
                        vendor = port_priv->version[mad_reg_req->
                                                    mgmt_class_version].vendor;
                        if (vendor) {
                                vclass = vendor_class_index(mgmt_class);
                                vendor_class = vendor->vendor_class[vclass];
                                if (vendor_class) {
                                        if (is_vendor_method_in_use(
                                                        vendor_class,
                                                        mad_reg_req))
                                                goto error4;
                                }
                        }
                        ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
                }
                if (ret2) {
                        ret = ERR_PTR(ret2);
                        goto error4;
                }
        }

        /* Add mad agent into port's agent list */
        list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);

        return &mad_agent_priv->agent;

error4:
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);
        kfree(reg_req);
error3:
        ib_dereg_mr(mad_agent_priv->agent.mr);
error2:
        kfree(mad_agent_priv);
error1:
        return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
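
/*
 * Illustrative usage sketch (not part of the original file): roughly how
 * a client might register a GSI agent for Performance Management GET
 * MADs.  "device", my_send_handler, my_recv_handler and my_context are
 * placeholders supplied by the caller.
 *
 *      struct ib_mad_reg_req req = {
 *              .mgmt_class = IB_MGMT_CLASS_PERF_MGMT,
 *              .mgmt_class_version = 1,
 *      };
 *      struct ib_mad_agent *agent;
 *
 *      set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *      agent = ib_register_mad_agent(device, 1, IB_QPT_GSI, &req, 0,
 *                                    my_send_handler, my_recv_handler,
 *                                    my_context, 0);
 *      if (IS_ERR(agent))
 *              return PTR_ERR(agent);
 *      ...
 *      ib_unregister_mad_agent(agent);
 */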

static inline int is_snooping_sends(int mad_snoop_flags)
{
        return (mad_snoop_flags &
                (/*IB_MAD_SNOOP_POSTED_SENDS |
                 IB_MAD_SNOOP_RMPP_SENDS |*/
                 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
                 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
        return (mad_snoop_flags &
                (IB_MAD_SNOOP_RECVS /*|
                 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
                                struct ib_mad_snoop_private *mad_snoop_priv)
{
        struct ib_mad_snoop_private **new_snoop_table;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        /* Check for empty slot in array. */
        for (i = 0; i < qp_info->snoop_table_size; i++)
                if (!qp_info->snoop_table[i])
                        break;

        if (i == qp_info->snoop_table_size) {
                /* Grow table. */
                new_snoop_table = krealloc(qp_info->snoop_table,
                                           sizeof(mad_snoop_priv) *
                                           (qp_info->snoop_table_size + 1),
                                           GFP_ATOMIC);
                if (!new_snoop_table) {
                        i = -ENOMEM;
                        goto out;
                }

                qp_info->snoop_table = new_snoop_table;
                qp_info->snoop_table_size++;
        }
        qp_info->snoop_table[i] = mad_snoop_priv;
        atomic_inc(&qp_info->snoop_count);
out:
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
        return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
                                           u8 port_num,
                                           enum ib_qp_type qp_type,
                                           int mad_snoop_flags,
                                           ib_mad_snoop_handler snoop_handler,
                                           ib_mad_recv_handler recv_handler,
                                           void *context)
{
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent *ret;
        struct ib_mad_snoop_private *mad_snoop_priv;
        int qpn;

        /* Validate parameters */
        if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
            (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
                ret = ERR_PTR(-EINVAL);
                goto error1;
        }
        qpn = get_spl_qp_index(qp_type);
        if (qpn == -1) {
                ret = ERR_PTR(-EINVAL);
                goto error1;
        }
        port_priv = ib_get_mad_port(device, port_num);
        if (!port_priv) {
                ret = ERR_PTR(-ENODEV);
                goto error1;
        }
        /* Allocate structures */
        mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
        if (!mad_snoop_priv) {
                ret = ERR_PTR(-ENOMEM);
                goto error1;
        }

        /* Now, fill in the various structures */
        mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
        mad_snoop_priv->agent.device = device;
        mad_snoop_priv->agent.recv_handler = recv_handler;
        mad_snoop_priv->agent.snoop_handler = snoop_handler;
        mad_snoop_priv->agent.context = context;
        mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
        mad_snoop_priv->agent.port_num = port_num;
        mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
        init_completion(&mad_snoop_priv->comp);
        mad_snoop_priv->snoop_index = register_snoop_agent(
                                                &port_priv->qp_info[qpn],
                                                mad_snoop_priv);
        if (mad_snoop_priv->snoop_index < 0) {
                ret = ERR_PTR(mad_snoop_priv->snoop_index);
                goto error2;
        }

        atomic_set(&mad_snoop_priv->refcount, 1);
        return &mad_snoop_priv->agent;

error2:
        kfree(mad_snoop_priv);
error1:
        return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

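/*
 * Agent teardown uses a refcount + completion pattern: every in-flight
 * send, receive or work item holds a reference, deref_*() completes
 * ->comp when the last reference drops, and the unregister paths below
 * drop their own reference and then wait on ->comp before freeing.
 */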
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
        if (atomic_dec_and_test(&mad_agent_priv->refcount))
                complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
        if (atomic_dec_and_test(&mad_snoop_priv->refcount))
                complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
        struct ib_mad_port_private *port_priv;
        unsigned long flags;

        /* Note that we could still be handling received MADs */

        /*
         * Canceling all sends results in dropping received response
         * MADs, preventing us from queuing additional work
         */
        cancel_mads(mad_agent_priv);
        port_priv = mad_agent_priv->qp_info->port_priv;
        cancel_delayed_work(&mad_agent_priv->timed_work);

        spin_lock_irqsave(&port_priv->reg_lock, flags);
        remove_mad_reg_req(mad_agent_priv);
        list_del(&mad_agent_priv->agent_list);
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);

        flush_workqueue(port_priv->wq);
        ib_cancel_rmpp_recvs(mad_agent_priv);

        deref_mad_agent(mad_agent_priv);
        wait_for_completion(&mad_agent_priv->comp);

        kfree(mad_agent_priv->reg_req);
        ib_dereg_mr(mad_agent_priv->agent.mr);
        kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
        struct ib_mad_qp_info *qp_info;
        unsigned long flags;

        qp_info = mad_snoop_priv->qp_info;
        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
        atomic_dec(&qp_info->snoop_count);
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

        deref_snoop_agent(mad_snoop_priv);
        wait_for_completion(&mad_snoop_priv->comp);

        kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_snoop_private *mad_snoop_priv;

        /* If the TID is zero, the agent can only snoop. */
        if (mad_agent->hi_tid) {
                mad_agent_priv = container_of(mad_agent,
                                              struct ib_mad_agent_private,
                                              agent);
                unregister_mad_agent(mad_agent_priv);
        } else {
                mad_snoop_priv = container_of(mad_agent,
                                              struct ib_mad_snoop_private,
                                              agent);
                unregister_mad_snoop(mad_snoop_priv);
        }
        return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
        struct ib_mad_queue *mad_queue;
        unsigned long flags;

        BUG_ON(!mad_list->mad_queue);
        mad_queue = mad_list->mad_queue;
        spin_lock_irqsave(&mad_queue->lock, flags);
        list_del(&mad_list->list);
        mad_queue->count--;
        spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
                       struct ib_mad_send_buf *send_buf,
                       struct ib_mad_send_wc *mad_send_wc,
                       int mad_snoop_flags)
{
        struct ib_mad_snoop_private *mad_snoop_priv;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        for (i = 0; i < qp_info->snoop_table_size; i++) {
                mad_snoop_priv = qp_info->snoop_table[i];
                if (!mad_snoop_priv ||
                    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
                        continue;

                atomic_inc(&mad_snoop_priv->refcount);
                spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
                mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
                                                    send_buf, mad_send_wc);
                deref_snoop_agent(mad_snoop_priv);
                spin_lock_irqsave(&qp_info->snoop_lock, flags);
        }
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
                       struct ib_mad_recv_wc *mad_recv_wc,
                       int mad_snoop_flags)
{
        struct ib_mad_snoop_private *mad_snoop_priv;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&qp_info->snoop_lock, flags);
        for (i = 0; i < qp_info->snoop_table_size; i++) {
                mad_snoop_priv = qp_info->snoop_table[i];
                if (!mad_snoop_priv ||
                    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
                        continue;

                atomic_inc(&mad_snoop_priv->refcount);
                spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
                mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
                                                   mad_recv_wc);
                deref_snoop_agent(mad_snoop_priv);
                spin_lock_irqsave(&qp_info->snoop_lock, flags);
        }
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp,
                         u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
                         struct ib_wc *wc)
{
        memset(wc, 0, sizeof *wc);
        wc->wr_id = wr_id;
        wc->status = IB_WC_SUCCESS;
        wc->opcode = IB_WC_RECV;
        wc->pkey_index = pkey_index;
        wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
        wc->src_qp = IB_QP0;
        wc->qp = qp;
        wc->slid = slid;
        wc->sl = 0;
        wc->dlid_path_bits = 0;
        wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
        return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
        size_t size = sizeof(struct ib_mad_private) + mad_size;
        struct ib_mad_private *ret = kzalloc(size, flags);

        if (ret)
                ret->mad_size = mad_size;

        return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
        return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
        return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                                  struct ib_mad_send_wr_private *mad_send_wr)
{
        int ret = 0;
        struct ib_smp *smp = mad_send_wr->send_buf.mad;
        struct opa_smp *opa_smp = (struct opa_smp *)smp;
        unsigned long flags;
        struct ib_mad_local_private *local;
        struct ib_mad_private *mad_priv;
        struct ib_mad_port_private *port_priv;
        struct ib_mad_agent_private *recv_mad_agent = NULL;
        struct ib_device *device = mad_agent_priv->agent.device;
        u8 port_num;
        struct ib_wc mad_wc;
        struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
        size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
        u16 out_mad_pkey_index = 0;
        u16 drslid;
        bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
                                    mad_agent_priv->qp_info->port_priv->port_num);

        if (device->node_type == RDMA_NODE_IB_SWITCH &&
            smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                port_num = send_wr->wr.ud.port_num;
        else
                port_num = mad_agent_priv->agent.port_num;

        /*
         * Directed route handling starts if the initial LID routed part of
         * a request or the ending LID routed part of a response is empty.
         * If we are at the start of the LID routed part, don't update the
         * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
         */
        if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
                u32 opa_drslid;

                if ((opa_get_smp_direction(opa_smp)
                     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
                     OPA_LID_PERMISSIVE &&
                     opa_smi_handle_dr_smp_send(opa_smp, device->node_type,
                                                port_num) == IB_SMI_DISCARD) {
                        ret = -EINVAL;
                        dev_err(&device->dev, "OPA Invalid directed route\n");
                        goto out;
                }
                opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
                if (opa_drslid != OPA_LID_PERMISSIVE &&
                    opa_drslid & 0xffff0000) {
                        ret = -EINVAL;
                        dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
                                opa_drslid);
                        goto out;
                }
                drslid = (u16)(opa_drslid & 0x0000ffff);

                /* Check to post send on QP or process locally */
                if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
                    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
                        goto out;
        } else {
                if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
                     IB_LID_PERMISSIVE &&
                     smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
                     IB_SMI_DISCARD) {
                        ret = -EINVAL;
                        dev_err(&device->dev, "Invalid directed route\n");
                        goto out;
                }
                drslid = be16_to_cpu(smp->dr_slid);

                /* Check to post send on QP or process locally */
                if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
                    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
                        goto out;
        }

        local = kmalloc(sizeof *local, GFP_ATOMIC);
        if (!local) {
                ret = -ENOMEM;
                dev_err(&device->dev, "No memory for ib_mad_local_private\n");
                goto out;
        }
        local->mad_priv = NULL;
        local->recv_mad_agent = NULL;
        mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
        if (!mad_priv) {
                ret = -ENOMEM;
                dev_err(&device->dev, "No memory for local response MAD\n");
                kfree(local);
                goto out;
        }

        build_smp_wc(mad_agent_priv->agent.qp,
                     send_wr->wr_id, drslid,
                     send_wr->wr.ud.pkey_index,
                     send_wr->wr.ud.port_num, &mad_wc);

        if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
                mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
                                        + mad_send_wr->send_buf.data_len
                                        + sizeof(struct ib_grh);
        }

        /* No GRH for DR SMP */
        ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
                                  (const struct ib_mad_hdr *)smp, mad_size,
                                  (struct ib_mad_hdr *)mad_priv->mad,
                                  &mad_size, &out_mad_pkey_index);
        switch (ret) {
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
                if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
                    mad_agent_priv->agent.recv_handler) {
                        local->mad_priv = mad_priv;
                        local->recv_mad_agent = mad_agent_priv;
                        /*
                         * Reference MAD agent until receive
                         * side of local completion handled
                         */
                        atomic_inc(&mad_agent_priv->refcount);
                } else
                        kfree(mad_priv);
                break;
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
                kfree(mad_priv);
                break;
        case IB_MAD_RESULT_SUCCESS:
                /* Treat like an incoming receive MAD */
                port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
                                            mad_agent_priv->agent.port_num);
                if (port_priv) {
                        memcpy(mad_priv->mad, smp, mad_priv->mad_size);
                        recv_mad_agent = find_mad_agent(port_priv,
                                                        (const struct ib_mad_hdr *)mad_priv->mad);
                }
                if (!port_priv || !recv_mad_agent) {
                        /*
                         * No receiving agent so drop packet and
                         * generate send completion.
                         */
                        kfree(mad_priv);
                        break;
                }
                local->mad_priv = mad_priv;
                local->recv_mad_agent = recv_mad_agent;
                break;
        default:
                kfree(mad_priv);
                kfree(local);
                ret = -EINVAL;
                goto out;
        }

        local->mad_send_wr = mad_send_wr;
        if (opa) {
                local->mad_send_wr->send_wr.wr.ud.pkey_index = out_mad_pkey_index;
                local->return_wc_byte_len = mad_size;
        }
        /* Reference MAD agent until send side of local completion handled */
        atomic_inc(&mad_agent_priv->refcount);
        /* Queue local completion to local list */
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
        queue_work(mad_agent_priv->qp_info->port_priv->wq,
                   &mad_agent_priv->local_work);
        ret = 1;
out:
        return ret;
}

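/*
 * Padding example: with a 256-byte IB MAD and hdr_len 24, seg_size is
 * 232; data_len 200 leaves a 32-byte pad so header + data + pad fill a
 * whole segment, while data_len 232 needs no pad at all.
 */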
static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
        int seg_size, pad;

        seg_size = mad_size - hdr_len;
        if (data_len && seg_size) {
                pad = seg_size - data_len % seg_size;
                return pad == seg_size ? 0 : pad;
        } else
                return seg_size;
}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_segment *s, *t;

        list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
                list_del(&s->list);
                kfree(s);
        }
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
                                size_t mad_size, gfp_t gfp_mask)
{
        struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
        struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
        struct ib_rmpp_segment *seg = NULL;
        int left, seg_size, pad;

        send_buf->seg_size = mad_size - send_buf->hdr_len;
        send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
        seg_size = send_buf->seg_size;
        pad = send_wr->pad;

        /* Allocate data segments. */
        for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
                seg = kmalloc(sizeof(*seg) + seg_size, gfp_mask);
                if (!seg) {
                        dev_err(&send_buf->mad_agent->device->dev,
                                "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
                                sizeof(*seg) + seg_size, gfp_mask);
                        free_send_rmpp_list(send_wr);
                        return -ENOMEM;
                }
                seg->num = ++send_buf->seg_count;
                list_add_tail(&seg->list, &send_wr->rmpp_list);
        }

        /* Zero any padding */
        if (pad)
                memset(seg->data + seg_size - pad, 0, pad);

        rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
                                          agent.rmpp_version;
        rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

        send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
                                        struct ib_rmpp_segment, list);
        send_wr->last_ack_seg = send_wr->cur_seg;
        return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
        return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
                                           u32 remote_qpn, u16 pkey_index,
                                           int rmpp_active,
                                           int hdr_len, int data_len,
                                           gfp_t gfp_mask,
                                           u8 base_version)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
        int pad, message_size, ret, size;
        void *buf;
        size_t mad_size;
        bool opa;

        mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
                                      agent);

        opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

        if (opa && base_version == OPA_MGMT_BASE_VERSION)
                mad_size = sizeof(struct opa_mad);
        else
                mad_size = sizeof(struct ib_mad);

        pad = get_pad_size(hdr_len, data_len, mad_size);
        message_size = hdr_len + data_len + pad;

        if (ib_mad_kernel_rmpp_agent(mad_agent)) {
                if (!rmpp_active && message_size > mad_size)
                        return ERR_PTR(-EINVAL);
        } else
                if (rmpp_active || message_size > mad_size)
                        return ERR_PTR(-EINVAL);

        size = rmpp_active ? hdr_len : mad_size;
        buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        mad_send_wr = buf + size;
        INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
        mad_send_wr->send_buf.mad = buf;
        mad_send_wr->send_buf.hdr_len = hdr_len;
        mad_send_wr->send_buf.data_len = data_len;
        mad_send_wr->pad = pad;

        mad_send_wr->mad_agent_priv = mad_agent_priv;
        mad_send_wr->sg_list[0].length = hdr_len;
        mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;

        /* OPA MADs don't have to be the full 2048 bytes */
        if (opa && base_version == OPA_MGMT_BASE_VERSION &&
            data_len < mad_size - hdr_len)
                mad_send_wr->sg_list[1].length = data_len;
        else
                mad_send_wr->sg_list[1].length = mad_size - hdr_len;

        mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;

        mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
        mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
        mad_send_wr->send_wr.num_sge = 2;
        mad_send_wr->send_wr.opcode = IB_WR_SEND;
        mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
        mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
        mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
        mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;

        if (rmpp_active) {
                ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
                if (ret) {
                        kfree(buf);
                        return ERR_PTR(ret);
                }
        }

        mad_send_wr->send_buf.mad_agent = mad_agent;
        atomic_inc(&mad_agent_priv->refcount);
        return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
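
/*
 * Illustrative sketch (not part of the original file): sending one
 * non-RMPP MAD with a previously registered "agent" and an address
 * handle "ah", both placeholders; the caller fills in msg->mad before
 * posting.
 *
 *      struct ib_mad_send_buf *msg;
 *
 *      msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *                               IB_MGMT_MAD_HDR,
 *                               sizeof(struct ib_mad) - IB_MGMT_MAD_HDR,
 *                               GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *      if (IS_ERR(msg))
 *              return PTR_ERR(msg);
 *      msg->ah = ah;
 *      msg->timeout_ms = 100;
 *      msg->retries = 2;
 *      if (ib_post_send_mad(msg, NULL))
 *              ib_free_send_mad(msg);
 */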
1073
1074 int ib_get_mad_data_offset(u8 mgmt_class)
1075 {
1076         if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
1077                 return IB_MGMT_SA_HDR;
1078         else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1079                  (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1080                  (mgmt_class == IB_MGMT_CLASS_BIS))
1081                 return IB_MGMT_DEVICE_HDR;
1082         else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1083                  (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
1084                 return IB_MGMT_VENDOR_HDR;
1085         else
1086                 return IB_MGMT_MAD_HDR;
1087 }
1088 EXPORT_SYMBOL(ib_get_mad_data_offset);
1089
1090 int ib_is_mad_class_rmpp(u8 mgmt_class)
1091 {
1092         if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
1093             (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1094             (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1095             (mgmt_class == IB_MGMT_CLASS_BIS) ||
1096             ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1097              (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
1098                 return 1;
1099         return 0;
1100 }
1101 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
1102
1103 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
1104 {
1105         struct ib_mad_send_wr_private *mad_send_wr;
1106         struct list_head *list;
1107
1108         mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1109                                    send_buf);
1110         list = &mad_send_wr->cur_seg->list;
1111
1112         if (mad_send_wr->cur_seg->num < seg_num) {
1113                 list_for_each_entry(mad_send_wr->cur_seg, list, list)
1114                         if (mad_send_wr->cur_seg->num == seg_num)
1115                                 break;
1116         } else if (mad_send_wr->cur_seg->num > seg_num) {
1117                 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
1118                         if (mad_send_wr->cur_seg->num == seg_num)
1119                                 break;
1120         }
1121         return mad_send_wr->cur_seg->data;
1122 }
1123 EXPORT_SYMBOL(ib_get_rmpp_segment);
1124
1125 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1126 {
1127         if (mad_send_wr->send_buf.seg_count)
1128                 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1129                                            mad_send_wr->seg_num);
1130         else
1131                 return mad_send_wr->send_buf.mad +
1132                        mad_send_wr->send_buf.hdr_len;
1133 }
1134
1135 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1136 {
1137         struct ib_mad_agent_private *mad_agent_priv;
1138         struct ib_mad_send_wr_private *mad_send_wr;
1139
1140         mad_agent_priv = container_of(send_buf->mad_agent,
1141                                       struct ib_mad_agent_private, agent);
1142         mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1143                                    send_buf);
1144
1145         free_send_rmpp_list(mad_send_wr);
1146         kfree(send_buf->mad);
1147         deref_mad_agent(mad_agent_priv);
1148 }
1149 EXPORT_SYMBOL(ib_free_send_mad);
1150
1151 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1152 {
1153         struct ib_mad_qp_info *qp_info;
1154         struct list_head *list;
1155         struct ib_send_wr *bad_send_wr;
1156         struct ib_mad_agent *mad_agent;
1157         struct ib_sge *sge;
1158         unsigned long flags;
1159         int ret;
1160
1161         /* Set WR ID to find mad_send_wr upon completion */
1162         qp_info = mad_send_wr->mad_agent_priv->qp_info;
1163         mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
1164         mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1165
1166         mad_agent = mad_send_wr->send_buf.mad_agent;
1167         sge = mad_send_wr->sg_list;
1168         sge[0].addr = ib_dma_map_single(mad_agent->device,
1169                                         mad_send_wr->send_buf.mad,
1170                                         sge[0].length,
1171                                         DMA_TO_DEVICE);
1172         if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1173                 return -ENOMEM;
1174
1175         mad_send_wr->header_mapping = sge[0].addr;
1176
1177         sge[1].addr = ib_dma_map_single(mad_agent->device,
1178                                         ib_get_payload(mad_send_wr),
1179                                         sge[1].length,
1180                                         DMA_TO_DEVICE);
1181         if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1182                 ib_dma_unmap_single(mad_agent->device,
1183                                     mad_send_wr->header_mapping,
1184                                     sge[0].length, DMA_TO_DEVICE);
1185                 return -ENOMEM;
1186         }
1187         mad_send_wr->payload_mapping = sge[1].addr;
1188
1189         spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1190         if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1191                 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
1192                                    &bad_send_wr);
1193                 list = &qp_info->send_queue.list;
1194         } else {
1195                 ret = 0;
1196                 list = &qp_info->overflow_list;
1197         }
1198
1199         if (!ret) {
1200                 qp_info->send_queue.count++;
1201                 list_add_tail(&mad_send_wr->mad_list.list, list);
1202         }
1203         spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1204         if (ret) {
1205                 ib_dma_unmap_single(mad_agent->device,
1206                                     mad_send_wr->header_mapping,
1207                                     sge[0].length, DMA_TO_DEVICE);
1208                 ib_dma_unmap_single(mad_agent->device,
1209                                     mad_send_wr->payload_mapping,
1210                                     sge[1].length, DMA_TO_DEVICE);
1211         }
1212         return ret;
1213 }
1214
1215 /*
1216  * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1217  *  with the registered client
1218  */
1219 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1220                      struct ib_mad_send_buf **bad_send_buf)
1221 {
1222         struct ib_mad_agent_private *mad_agent_priv;
1223         struct ib_mad_send_buf *next_send_buf;
1224         struct ib_mad_send_wr_private *mad_send_wr;
1225         unsigned long flags;
1226         int ret = -EINVAL;
1227
1228         /* Walk list of send WRs and post each on send list */
1229         for (; send_buf; send_buf = next_send_buf) {
1230
1231                 mad_send_wr = container_of(send_buf,
1232                                            struct ib_mad_send_wr_private,
1233                                            send_buf);
1234                 mad_agent_priv = mad_send_wr->mad_agent_priv;
1235
1236                 if (!send_buf->mad_agent->send_handler ||
1237                     (send_buf->timeout_ms &&
1238                      !send_buf->mad_agent->recv_handler)) {
1239                         ret = -EINVAL;
1240                         goto error;
1241                 }
1242
1243                 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1244                         if (mad_agent_priv->agent.rmpp_version) {
1245                                 ret = -EINVAL;
1246                                 goto error;
1247                         }
1248                 }
1249
1250                 /*
1251                  * Save pointer to next work request to post in case the
1252                  * current one completes, and the user modifies the work
1253                  * request associated with the completion
1254                  */
1255                 next_send_buf = send_buf->next;
1256                 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1257
1258                 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1259                     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1260                         ret = handle_outgoing_dr_smp(mad_agent_priv,
1261                                                      mad_send_wr);
1262                         if (ret < 0)            /* error */
1263                                 goto error;
1264                         else if (ret == 1)      /* locally consumed */
1265                                 continue;
1266                 }
1267
1268                 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1269                 /* Timeout will be updated after send completes */
1270                 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1271                 mad_send_wr->max_retries = send_buf->retries;
1272                 mad_send_wr->retries_left = send_buf->retries;
1273                 send_buf->retries = 0;
1274                 /* Reference for work request to QP + response */
1275                 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1276                 mad_send_wr->status = IB_WC_SUCCESS;
1277
1278                 /* Reference MAD agent until send completes */
1279                 atomic_inc(&mad_agent_priv->refcount);
1280                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1281                 list_add_tail(&mad_send_wr->agent_list,
1282                               &mad_agent_priv->send_list);
1283                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1284
1285                 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1286                         ret = ib_send_rmpp_mad(mad_send_wr);
1287                         if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1288                                 ret = ib_send_mad(mad_send_wr);
1289                 } else
1290                         ret = ib_send_mad(mad_send_wr);
1291                 if (ret < 0) {
1292                         /* Fail send request */
1293                         spin_lock_irqsave(&mad_agent_priv->lock, flags);
1294                         list_del(&mad_send_wr->agent_list);
1295                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1296                         atomic_dec(&mad_agent_priv->refcount);
1297                         goto error;
1298                 }
1299         }
1300         return 0;
1301 error:
1302         if (bad_send_buf)
1303                 *bad_send_buf = send_buf;
1304         return ret;
1305 }
1306 EXPORT_SYMBOL(ib_post_send_mad);
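/*
 * Usage sketch (illustrative only, not part of this file): a client
 * that registered an agent with ib_register_mad_agent() typically
 * builds and posts a send roughly as follows.  AH creation and error
 * unwinding are elided; the names come from <rdma/ib_mad.h>.  Setting
 * timeout_ms is what arms the response matching done in this file:
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah = ah;
 *	msg->timeout_ms = 100;
 *	msg->retries = 3;
 *	... fill in msg->mad, starting with struct ib_mad_hdr ...
 *	if (ib_post_send_mad(msg, NULL))
 *		ib_free_send_mad(msg);
 */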
1307
1308 /*
1309  * ib_free_recv_mad - Return the data buffers used to receive
1310  * a MAD to the access layer
1311  */
1312 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1313 {
1314         struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1315         struct ib_mad_private_header *mad_priv_hdr;
1316         struct ib_mad_private *priv;
1317         struct list_head free_list;
1318
1319         INIT_LIST_HEAD(&free_list);
1320         list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1321
1322         list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1323                                         &free_list, list) {
1324                 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1325                                            recv_buf);
1326                 mad_priv_hdr = container_of(mad_recv_wc,
1327                                             struct ib_mad_private_header,
1328                                             recv_wc);
1329                 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1330                                     header);
1331                 kfree(priv);
1332         }
1333 }
1334 EXPORT_SYMBOL(ib_free_recv_mad);
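/*
 * Note: for an RMPP transfer the rmpp_list walked above links the
 * private buffer of every reassembled segment, so one call releases
 * the entire multi-segment MAD.  A client must call ib_free_recv_mad()
 * exactly once for each ib_mad_recv_wc handed to its recv_handler,
 * once it is done with the data.
 */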
1335
1336 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1337                                         u8 rmpp_version,
1338                                         ib_mad_send_handler send_handler,
1339                                         ib_mad_recv_handler recv_handler,
1340                                         void *context)
1341 {
1342         return ERR_PTR(-EINVAL);        /* XXX: for now */
1343 }
1344 EXPORT_SYMBOL(ib_redirect_mad_qp);
1345
1346 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1347                       struct ib_wc *wc)
1348 {
1349         dev_err(&mad_agent->device->dev,
1350                 "ib_process_mad_wc() not implemented yet\n");
1351         return 0;
1352 }
1353 EXPORT_SYMBOL(ib_process_mad_wc);
1354
1355 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1356                          struct ib_mad_reg_req *mad_reg_req)
1357 {
1358         int i;
1359
1360         for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1361                 if ((*method)->agent[i]) {
1362                         pr_err("Method %d already in use\n", i);
1363                         return -EINVAL;
1364                 }
1365         }
1366         return 0;
1367 }
1368
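/*
 * The registration tables below are built and torn down while the
 * port's reg_lock spinlock is held, hence the GFP_ATOMIC allocations
 * in these helpers.
 */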
1369 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1370 {
1371         /* Allocate management method table */
1372         *method = kzalloc(sizeof **method, GFP_ATOMIC);
1373         if (!*method) {
1374                 pr_err("No memory for ib_mad_mgmt_method_table\n");
1375                 return -ENOMEM;
1376         }
1377
1378         return 0;
1379 }
1380
1381 /*
1382  * Check to see if there are any methods still in use
1383  */
1384 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1385 {
1386         int i;
1387
1388         for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1389                 if (method->agent[i])
1390                         return 1;
1391         return 0;
1392 }
1393
1394 /*
1395  * Check to see if there are any method tables for this class still in use
1396  */
1397 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1398 {
1399         int i;
1400
1401         for (i = 0; i < MAX_MGMT_CLASS; i++)
1402                 if (class->method_table[i])
1403                         return 1;
1404         return 0;
1405 }
1406
1407 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1408 {
1409         int i;
1410
1411         for (i = 0; i < MAX_MGMT_OUI; i++)
1412                 if (vendor_class->method_table[i])
1413                         return 1;
1414         return 0;
1415 }
1416
1417 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1418                            const char *oui)
1419 {
1420         int i;
1421
1422         for (i = 0; i < MAX_MGMT_OUI; i++)
1423                 /* Is there a matching OUI for this vendor class? */
1424                 if (!memcmp(vendor_class->oui[i], oui, 3))
1425                         return i;
1426
1427         return -1;
1428 }
1429
1430 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1431 {
1432         int i;
1433
1434         for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1435                 if (vendor->vendor_class[i])
1436                         return 1;
1437
1438         return 0;
1439 }
1440
1441 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1442                                      struct ib_mad_agent_private *agent)
1443 {
1444         int i;
1445
1446         /* Remove any methods for this mad agent */
1447         for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1448                 if (method->agent[i] == agent) {
1449                         method->agent[i] = NULL;
1450                 }
1451         }
1452 }
1453
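/*
 * Layout of the registration tables used by the helpers below and by
 * find_mad_agent(): each port keeps a per-class-version array; a
 * version slot points to a class table indexed by (converted)
 * management class, which points to a method table indexed by method,
 * whose entries are the registered agents.  Vendor classes in range 2
 * add one level: version -> vendor table -> vendor class (up to
 * MAX_MGMT_OUI OUI slots) -> per-OUI method table -> agent.
 */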
1454 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1455                               struct ib_mad_agent_private *agent_priv,
1456                               u8 mgmt_class)
1457 {
1458         struct ib_mad_port_private *port_priv;
1459         struct ib_mad_mgmt_class_table **class;
1460         struct ib_mad_mgmt_method_table **method;
1461         int i, ret;
1462
1463         port_priv = agent_priv->qp_info->port_priv;
1464         class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1465         if (!*class) {
1466                 /* Allocate management class table for "new" class version */
1467                 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1468                 if (!*class) {
1469                         dev_err(&agent_priv->agent.device->dev,
1470                                 "No memory for ib_mad_mgmt_class_table\n");
1471                         ret = -ENOMEM;
1472                         goto error1;
1473                 }
1474
1475                 /* Allocate method table for this management class */
1476                 method = &(*class)->method_table[mgmt_class];
1477                 if ((ret = allocate_method_table(method)))
1478                         goto error2;
1479         } else {
1480                 method = &(*class)->method_table[mgmt_class];
1481                 if (!*method) {
1482                         /* Allocate method table for this management class */
1483                         if ((ret = allocate_method_table(method)))
1484                                 goto error1;
1485                 }
1486         }
1487
1488         /* Now, make sure methods are not already in use */
1489         if (method_in_use(method, mad_reg_req))
1490                 goto error3;
1491
1492         /* Finally, add in methods being registered */
1493         for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1494                 (*method)->agent[i] = agent_priv;
1495
1496         return 0;
1497
1498 error3:
1499         /* Remove any methods for this mad agent */
1500         remove_methods_mad_agent(*method, agent_priv);
1501         /* Now, check to see if there are any methods in use */
1502         if (!check_method_table(*method)) {
1503                 /* If not, release management method table */
1504                 kfree(*method);
1505                 *method = NULL;
1506         }
1507         ret = -EINVAL;
1508         goto error1;
1509 error2:
1510         kfree(*class);
1511         *class = NULL;
1512 error1:
1513         return ret;
1514 }
1515
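/*
 * Vendor range 2 classes carry a 3-byte OUI, so registration first
 * looks for an existing OUI slot matching the request (reusing its
 * method table) and only then claims a free slot; the two loops below
 * run in that order.
 */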
1516 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1517                            struct ib_mad_agent_private *agent_priv)
1518 {
1519         struct ib_mad_port_private *port_priv;
1520         struct ib_mad_mgmt_vendor_class_table **vendor_table;
1521         struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1522         struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1523         struct ib_mad_mgmt_method_table **method;
1524         int i, ret = -ENOMEM;
1525         u8 vclass;
1526
1527         /* "New" vendor (with OUI) class */
1528         vclass = vendor_class_index(mad_reg_req->mgmt_class);
1529         port_priv = agent_priv->qp_info->port_priv;
1530         vendor_table = &port_priv->version[
1531                                 mad_reg_req->mgmt_class_version].vendor;
1532         if (!*vendor_table) {
1533                 /* Allocate mgmt vendor class table for "new" class version */
1534                 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1535                 if (!vendor) {
1536                         dev_err(&agent_priv->agent.device->dev,
1537                                 "No memory for ib_mad_mgmt_vendor_class_table\n");
1538                         goto error1;
1539                 }
1540
1541                 *vendor_table = vendor;
1542         }
1543         if (!(*vendor_table)->vendor_class[vclass]) {
1544                 /* Allocate table for this management vendor class */
1545                 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1546                 if (!vendor_class) {
1547                         dev_err(&agent_priv->agent.device->dev,
1548                                 "No memory for ib_mad_mgmt_vendor_class\n");
1549                         goto error2;
1550                 }
1551
1552                 (*vendor_table)->vendor_class[vclass] = vendor_class;
1553         }
1554         for (i = 0; i < MAX_MGMT_OUI; i++) {
1555                 /* Is there a matching OUI for this vendor class? */
1556                 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1557                             mad_reg_req->oui, 3)) {
1558                         method = &(*vendor_table)->vendor_class[
1559                                                 vclass]->method_table[i];
1560                         BUG_ON(!*method);
1561                         goto check_in_use;
1562                 }
1563         }
1564         for (i = 0; i < MAX_MGMT_OUI; i++) {
1565                 /* Is an OUI slot available? */
1566                 if (!is_vendor_oui((*vendor_table)->vendor_class[
1567                                 vclass]->oui[i])) {
1568                         method = &(*vendor_table)->vendor_class[
1569                                 vclass]->method_table[i];
1570                         BUG_ON(*method);
1571                         /* Allocate method table for this OUI */
1572                         if ((ret = allocate_method_table(method)))
1573                                 goto error3;
1574                         memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1575                                mad_reg_req->oui, 3);
1576                         goto check_in_use;
1577                 }
1578         }
1579         dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1580         goto error3;
1581
1582 check_in_use:
1583         /* Now, make sure methods are not already in use */
1584         if (method_in_use(method, mad_reg_req))
1585                 goto error4;
1586
1587         /* Finally, add in methods being registered */
1588         for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1589                 (*method)->agent[i] = agent_priv;
1590
1591         return 0;
1592
1593 error4:
1594         /* Remove any methods for this mad agent */
1595         remove_methods_mad_agent(*method, agent_priv);
1596         /* Now, check to see if there are any methods in use */
1597         if (!check_method_table(*method)) {
1598                 /* If not, release management method table */
1599                 kfree(*method);
1600                 *method = NULL;
1601         }
1602         ret = -EINVAL;
1603 error3:
1604         if (vendor_class) {
1605                 (*vendor_table)->vendor_class[vclass] = NULL;
1606                 kfree(vendor_class);
1607         }
1608 error2:
1609         if (vendor) {
1610                 *vendor_table = NULL;
1611                 kfree(vendor);
1612         }
1613 error1:
1614         return ret;
1615 }
1616
1617 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1618 {
1619         struct ib_mad_port_private *port_priv;
1620         struct ib_mad_mgmt_class_table *class;
1621         struct ib_mad_mgmt_method_table *method;
1622         struct ib_mad_mgmt_vendor_class_table *vendor;
1623         struct ib_mad_mgmt_vendor_class *vendor_class;
1624         int index;
1625         u8 mgmt_class;
1626
1627         /*
1628          * Was a MAD registration request supplied
1629          * with the original registration?
1630          */
1631         if (!agent_priv->reg_req) {
1632                 goto out;
1633         }
1634
1635         port_priv = agent_priv->qp_info->port_priv;
1636         mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1637         class = port_priv->version[
1638                         agent_priv->reg_req->mgmt_class_version].class;
1639         if (!class)
1640                 goto vendor_check;
1641
1642         method = class->method_table[mgmt_class];
1643         if (method) {
1644                 /* Remove any methods for this mad agent */
1645                 remove_methods_mad_agent(method, agent_priv);
1646                 /* Now, check to see if there are any methods still in use */
1647                 if (!check_method_table(method)) {
1648                         /* If not, release management method table */
1649                         kfree(method);
1650                         class->method_table[mgmt_class] = NULL;
1651                         /* Any management classes left? */
1652                         if (!check_class_table(class)) {
1653                                 /* If not, release management class table */
1654                                 kfree(class);
1655                                 port_priv->version[
1656                                         agent_priv->reg_req->
1657                                         mgmt_class_version].class = NULL;
1658                         }
1659                 }
1660         }
1661
1662 vendor_check:
1663         if (!is_vendor_class(mgmt_class))
1664                 goto out;
1665
1666         /* normalize mgmt_class to vendor range 2 */
1667         mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1668         vendor = port_priv->version[
1669                         agent_priv->reg_req->mgmt_class_version].vendor;
1670
1671         if (!vendor)
1672                 goto out;
1673
1674         vendor_class = vendor->vendor_class[mgmt_class];
1675         if (vendor_class) {
1676                 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1677                 if (index < 0)
1678                         goto out;
1679                 method = vendor_class->method_table[index];
1680                 if (method) {
1681                         /* Remove any methods for this mad agent */
1682                         remove_methods_mad_agent(method, agent_priv);
1683                         /*
1684                          * Now, check to see if there are
1685                          * any methods still in use
1686                          */
1687                         if (!check_method_table(method)) {
1688                                 /* If not, release management method table */
1689                                 kfree(method);
1690                                 vendor_class->method_table[index] = NULL;
1691                                 memset(vendor_class->oui[index], 0, 3);
1692                                 /* Any OUIs left? */
1693                                 if (!check_vendor_class(vendor_class)) {
1694                                         /* If not, release vendor class table */
1695                                         kfree(vendor_class);
1696                                         vendor->vendor_class[mgmt_class] = NULL;
1697                                         /* Any other vendor classes left? */
1698                                         if (!check_vendor_table(vendor)) {
1699                                                 kfree(vendor);
1700                                                 port_priv->version[
1701                                                         agent_priv->reg_req->
1702                                                         mgmt_class_version].
1703                                                         vendor = NULL;
1704                                         }
1705                                 }
1706                         }
1707                 }
1708         }
1709
1710 out:
1711         return;
1712 }
1713
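/*
 * Route an incoming MAD to its agent.  Responses are matched on the
 * high 32 bits of the transaction ID (the hi_tid assigned to the
 * sending agent at registration time and echoed back by the
 * responder); requests are looked up in the registration tables by
 * version, class and method, plus OUI for vendor range 2 classes.
 */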
1714 static struct ib_mad_agent_private *
1715 find_mad_agent(struct ib_mad_port_private *port_priv,
1716                const struct ib_mad_hdr *mad_hdr)
1717 {
1718         struct ib_mad_agent_private *mad_agent = NULL;
1719         unsigned long flags;
1720
1721         spin_lock_irqsave(&port_priv->reg_lock, flags);
1722         if (ib_response_mad(mad_hdr)) {
1723                 u32 hi_tid;
1724                 struct ib_mad_agent_private *entry;
1725
1726                 /*
1727                  * Routing is based on high 32 bits of transaction ID
1728                  * of MAD.
1729                  */
1730                 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1731                 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1732                         if (entry->agent.hi_tid == hi_tid) {
1733                                 mad_agent = entry;
1734                                 break;
1735                         }
1736                 }
1737         } else {
1738                 struct ib_mad_mgmt_class_table *class;
1739                 struct ib_mad_mgmt_method_table *method;
1740                 struct ib_mad_mgmt_vendor_class_table *vendor;
1741                 struct ib_mad_mgmt_vendor_class *vendor_class;
1742                 const struct ib_vendor_mad *vendor_mad;
1743                 int index;
1744
1745                 /*
1746                  * Routing is based on version, class, and method
1747                  * For "newer" vendor MADs, also based on OUI
1748                  */
1749                 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1750                         goto out;
1751                 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1752                         class = port_priv->version[
1753                                         mad_hdr->class_version].class;
1754                         if (!class)
1755                                 goto out;
1756                         if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1757                             IB_MGMT_MAX_METHODS)
1758                                 goto out;
1759                         method = class->method_table[convert_mgmt_class(
1760                                                         mad_hdr->mgmt_class)];
1761                         if (method)
1762                                 mad_agent = method->agent[mad_hdr->method &
1763                                                           ~IB_MGMT_METHOD_RESP];
1764                 } else {
1765                         vendor = port_priv->version[
1766                                         mad_hdr->class_version].vendor;
1767                         if (!vendor)
1768                                 goto out;
1769                         vendor_class = vendor->vendor_class[vendor_class_index(
1770                                                 mad_hdr->mgmt_class)];
1771                         if (!vendor_class)
1772                                 goto out;
1773                         /* Find matching OUI */
1774                         vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1775                         index = find_vendor_oui(vendor_class, vendor_mad->oui);
1776                         if (index == -1)
1777                                 goto out;
1778                         method = vendor_class->method_table[index];
1779                         if (method) {
1780                                 mad_agent = method->agent[mad_hdr->method &
1781                                                           ~IB_MGMT_METHOD_RESP];
1782                         }
1783                 }
1784         }
1785
1786         if (mad_agent) {
1787                 if (mad_agent->agent.recv_handler)
1788                         atomic_inc(&mad_agent->refcount);
1789                 else {
1790                         dev_notice(&port_priv->device->dev,
1791                                    "No receive handler for client %p on port %d\n",
1792                                    &mad_agent->agent, port_priv->port_num);
1793                         mad_agent = NULL;
1794                 }
1795         }
1796 out:
1797         spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1798
1799         return mad_agent;
1800 }
1801
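/*
 * Sanity-check a received MAD: the base version must be one we
 * understand (IB, or OPA when the port supports it), and the
 * management class must match the QP it arrived on - SMP classes only
 * on QP0, all other classes only on the GSI QP.
 */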
1802 static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1803                         const struct ib_mad_qp_info *qp_info,
1804                         bool opa)
1805 {
1806         int valid = 0;
1807         u32 qp_num = qp_info->qp->qp_num;
1808
1809         /* Make sure MAD base version is understood */
1810         if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1811             (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1812                 pr_err("MAD received with unsupported base version %d %s\n",
1813                        mad_hdr->base_version, opa ? "(opa)" : "");
1814                 goto out;
1815         }
1816
1817         /* Filter SMI packets sent to other than QP0 */
1818         if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1819             (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1820                 if (qp_num == 0)
1821                         valid = 1;
1822         } else {
1823                 /* Filter GSI packets sent to QP0 */
1824                 if (qp_num != 0)
1825                         valid = 1;
1826         }
1827
1828 out:
1829         return valid;
1830 }
1831
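/*
 * A queued send may be matched against an incoming response (see
 * ib_find_send_mad() below) only if it is a plain MAD or the DATA
 * segment of a kernel-handled RMPP transfer; ACKs and other RMPP
 * control segments must not match.
 */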
1832 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1833                             const struct ib_mad_hdr *mad_hdr)
1834 {
1835         struct ib_rmpp_mad *rmpp_mad;
1836
1837         rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1838         return !mad_agent_priv->agent.rmpp_version ||
1839                 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1840                 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1841                                     IB_MGMT_RMPP_FLAG_ACTIVE) ||
1842                 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1843 }
1844
1845 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1846                                      const struct ib_mad_recv_wc *rwc)
1847 {
1848         return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1849                 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1850 }
1851
1852 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1853                                    const struct ib_mad_send_wr_private *wr,
1854                                    const struct ib_mad_recv_wc *rwc)
1855 {
1856         struct ib_ah_attr attr;
1857         u8 send_resp, rcv_resp;
1858         union ib_gid sgid;
1859         struct ib_device *device = mad_agent_priv->agent.device;
1860         u8 port_num = mad_agent_priv->agent.port_num;
1861         u8 lmc;
1862
1863         send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1864         rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1865
1866         if (send_resp == rcv_resp)
1867                 /* Both requests or both responses; treat as not matching */
1868                 return 0;
1869
1870         if (ib_query_ah(wr->send_buf.ah, &attr))
1871                 /* Assume not equal, to avoid false positives. */
1872                 return 0;
1873
1874         if (!!(attr.ah_flags & IB_AH_GRH) !=
1875             !!(rwc->wc->wc_flags & IB_WC_GRH))
1876                 /* One has a GRH, the other does not; assume different */
1877                 return 0;
1878
1879         if (!send_resp && rcv_resp) {
1880                 /* This is a request/response pair. */
1881                 if (!(attr.ah_flags & IB_AH_GRH)) {
1882                         if (ib_get_cached_lmc(device, port_num, &lmc))
1883                                 return 0;
1884                         return (!lmc || !((attr.src_path_bits ^
1885                                            rwc->wc->dlid_path_bits) &
1886                                           ((1 << lmc) - 1)));
1887                 } else {
1888                         if (ib_get_cached_gid(device, port_num,
1889                                               attr.grh.sgid_index, &sgid))
1890                                 return 0;
1891                         return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1892                                        16);
1893                 }
1894         }
1895
1896         if (!(attr.ah_flags & IB_AH_GRH))
1897                 return attr.dlid == rwc->wc->slid;
1898         else
1899                 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1900                                16);
1901 }
1902
1903 static inline int is_direct(u8 class)
1904 {
1905         return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1906 }
1907
1908 struct ib_mad_send_wr_private*
1909 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1910                  const struct ib_mad_recv_wc *wc)
1911 {
1912         struct ib_mad_send_wr_private *wr;
1913         const struct ib_mad_hdr *mad_hdr;
1914
1915         mad_hdr = &wc->recv_buf.mad->mad_hdr;
1916
1917         list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1918                 if ((wr->tid == mad_hdr->tid) &&
1919                     rcv_has_same_class(wr, wc) &&
1920                     /*
1921                      * Don't check GID for direct routed MADs.
1922                      * These might have permissive LIDs.
1923                      */
1924                     (is_direct(mad_hdr->mgmt_class) ||
1925                      rcv_has_same_gid(mad_agent_priv, wr, wc)))
1926                         return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1927         }
1928
1929         /*
1930          * It's possible to receive the response before we've
1931          * been notified that the send has completed
1932          */
1933         list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1934                 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1935                     wr->tid == mad_hdr->tid &&
1936                     wr->timeout &&
1937                     rcv_has_same_class(wr, wc) &&
1938                     /*
1939                      * Don't check GID for direct routed MADs.
1940                      * These might have permissive LIDs.
1941                      */
1942                     (is_direct(mad_hdr->mgmt_class) ||
1943                      rcv_has_same_gid(mad_agent_priv, wr, wc)))
1944                         /* Verify request has not been canceled */
1945                         return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1946         }
1947         return NULL;
1948 }
1949
1950 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1951 {
1952         mad_send_wr->timeout = 0;
1953         if (mad_send_wr->refcount == 1)
1954                 list_move_tail(&mad_send_wr->agent_list,
1955                               &mad_send_wr->mad_agent_priv->done_list);
1956 }
1957
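/*
 * Deliver a received MAD to its agent: run it through RMPP reassembly
 * when the kernel handles RMPP for this agent, then, for responses,
 * pair it with the outstanding request so that the response reaches
 * the client before the request's send completion is reported.
 */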
1958 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1959                                  struct ib_mad_recv_wc *mad_recv_wc)
1960 {
1961         struct ib_mad_send_wr_private *mad_send_wr;
1962         struct ib_mad_send_wc mad_send_wc;
1963         unsigned long flags;
1964
1965         INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1966         list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1967         if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1968                 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1969                                                       mad_recv_wc);
1970                 if (!mad_recv_wc) {
1971                         deref_mad_agent(mad_agent_priv);
1972                         return;
1973                 }
1974         }
1975
1976         /* Complete corresponding request */
1977         if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1978                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1979                 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1980                 if (!mad_send_wr) {
1981                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1982                         if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1983                            && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1984                            && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1985                                         & IB_MGMT_RMPP_FLAG_ACTIVE)) {
1986                                 /* user rmpp is in effect
1987                                  * and this is an active RMPP MAD
1988                                  */
1989                                 mad_recv_wc->wc->wr_id = 0;
1990                                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1991                                                                    mad_recv_wc);
1992                                 atomic_dec(&mad_agent_priv->refcount);
1993                         } else {
1994                                 /* Not user RMPP; revert to normal
1995                                  * behavior and drop the MAD */
1996                                 ib_free_recv_mad(mad_recv_wc);
1997                                 deref_mad_agent(mad_agent_priv);
1998                                 return;
1999                         }
2000                 } else {
2001                         ib_mark_mad_done(mad_send_wr);
2002                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2003
2004                         /* Defined behavior is to complete response before request */
2005                         mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
2006                         mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
2007                                                            mad_recv_wc);
2008                         atomic_dec(&mad_agent_priv->refcount);
2009
2010                         mad_send_wc.status = IB_WC_SUCCESS;
2011                         mad_send_wc.vendor_err = 0;
2012                         mad_send_wc.send_buf = &mad_send_wr->send_buf;
2013                         ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2014                 }
2015         } else {
2016                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
2017                                                    mad_recv_wc);
2018                 deref_mad_agent(mad_agent_priv);
2019         }
2020 }
2021
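/*
 * Process a directed-route SMP from the wire.  IB_SMI_HANDLE means the
 * MAD should continue through normal dispatch on this port;
 * IB_SMI_DISCARD means it was dropped or already forwarded out another
 * switch port via agent_send_response().
 */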
2022 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2023                                      const struct ib_mad_qp_info *qp_info,
2024                                      const struct ib_wc *wc,
2025                                      int port_num,
2026                                      struct ib_mad_private *recv,
2027                                      struct ib_mad_private *response)
2028 {
2029         enum smi_forward_action retsmi;
2030         struct ib_smp *smp = (struct ib_smp *)recv->mad;
2031
2032         if (smi_handle_dr_smp_recv(smp,
2033                                    port_priv->device->node_type,
2034                                    port_num,
2035                                    port_priv->device->phys_port_cnt) ==
2036                                    IB_SMI_DISCARD)
2037                 return IB_SMI_DISCARD;
2038
2039         retsmi = smi_check_forward_dr_smp(smp);
2040         if (retsmi == IB_SMI_LOCAL)
2041                 return IB_SMI_HANDLE;
2042
2043         if (retsmi == IB_SMI_SEND) { /* don't forward */
2044                 if (smi_handle_dr_smp_send(smp,
2045                                            port_priv->device->node_type,
2046                                            port_num) == IB_SMI_DISCARD)
2047                         return IB_SMI_DISCARD;
2048
2049                 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
2050                         return IB_SMI_DISCARD;
2051         } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
2052                 /* forward case for switches */
2053                 memcpy(response, recv, mad_priv_size(response));
2054                 response->header.recv_wc.wc = &response->header.wc;
2055                 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2056                 response->header.recv_wc.recv_buf.grh = &response->grh;
2057
2058                 agent_send_response((const struct ib_mad_hdr *)response->mad,
2059                                     &response->grh, wc,
2060                                     port_priv->device,
2061                                     smi_get_fwd_port(smp),
2062                                     qp_info->qp->qp_num,
2063                                     response->mad_size,
2064                                     false);
2065
2066                 return IB_SMI_DISCARD;
2067         }
2068         return IB_SMI_HANDLE;
2069 }
2070
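/*
 * If no agent claimed a Get/Set request, synthesize a GetResp with
 * status "unsupported method/attribute" rather than leaving the sender
 * to time out.  The response reuses the received MAD with the method,
 * status and, for directed route, the direction bit rewritten.
 */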
2071 static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2072                                     struct ib_mad_private *response,
2073                                     size_t *resp_len, bool opa)
2074 {
2075         const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2076         struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2077
2078         if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2079             recv_hdr->method == IB_MGMT_METHOD_SET) {
2080                 memcpy(response, recv, mad_priv_size(response));
2081                 response->header.recv_wc.wc = &response->header.wc;
2082                 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2083                 response->header.recv_wc.recv_buf.grh = &response->grh;
2084                 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2085                 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2086                 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2087                         resp_hdr->status |= IB_SMP_DIRECTION;
2088
2089                 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2090                         if (recv_hdr->mgmt_class ==
2091                             IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2092                             recv_hdr->mgmt_class ==
2093                             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2094                                 *resp_len = opa_get_smp_header_size(
2095                                                         (struct opa_smp *)recv->mad);
2096                         else
2097                                 *resp_len = sizeof(struct ib_mad_hdr);
2098                 }
2099
2100                 return true;
2101         } else {
2102                 return false;
2103         }
2104 }
2105
2106 static enum smi_action
2107 handle_opa_smi(struct ib_mad_port_private *port_priv,
2108                struct ib_mad_qp_info *qp_info,
2109                struct ib_wc *wc,
2110                int port_num,
2111                struct ib_mad_private *recv,
2112                struct ib_mad_private *response)
2113 {
2114         enum smi_forward_action retsmi;
2115         struct opa_smp *smp = (struct opa_smp *)recv->mad;
2116
2117         if (opa_smi_handle_dr_smp_recv(smp,
2118                                    port_priv->device->node_type,
2119                                    port_num,
2120                                    port_priv->device->phys_port_cnt) ==
2121                                    IB_SMI_DISCARD)
2122                 return IB_SMI_DISCARD;
2123
2124         retsmi = opa_smi_check_forward_dr_smp(smp);
2125         if (retsmi == IB_SMI_LOCAL)
2126                 return IB_SMI_HANDLE;
2127
2128         if (retsmi == IB_SMI_SEND) { /* don't forward */
2129                 if (opa_smi_handle_dr_smp_send(smp,
2130                                            port_priv->device->node_type,
2131                                            port_num) == IB_SMI_DISCARD)
2132                         return IB_SMI_DISCARD;
2133
2134                 if (opa_smi_check_local_smp(smp, port_priv->device) ==
2135                     IB_SMI_DISCARD)
2136                         return IB_SMI_DISCARD;
2137
2138         } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
2139                 /* forward case for switches */
2140                 memcpy(response, recv, mad_priv_size(response));
2141                 response->header.recv_wc.wc = &response->header.wc;
2142                 response->header.recv_wc.recv_buf.opa_mad =
2143                                 (struct opa_mad *)response->mad;
2144                 response->header.recv_wc.recv_buf.grh = &response->grh;
2145
2146                 agent_send_response((const struct ib_mad_hdr *)response->mad,
2147                                     &response->grh, wc,
2148                                     port_priv->device,
2149                                     opa_smi_get_fwd_port(smp),
2150                                     qp_info->qp->qp_num,
2151                                     recv->header.wc.byte_len,
2152                                     true);
2153
2154                 return IB_SMI_DISCARD;
2155         }
2156
2157         return IB_SMI_HANDLE;
2158 }
2159
2160 static enum smi_action
2161 handle_smi(struct ib_mad_port_private *port_priv,
2162            struct ib_mad_qp_info *qp_info,
2163            struct ib_wc *wc,
2164            int port_num,
2165            struct ib_mad_private *recv,
2166            struct ib_mad_private *response,
2167            bool opa)
2168 {
2169         struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2170
2171         if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
2172             mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
2173                 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2174                                       response);
2175
2176         return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2177 }
2178
2179 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
2180                                      struct ib_wc *wc)
2181 {
2182         struct ib_mad_qp_info *qp_info;
2183         struct ib_mad_private_header *mad_priv_hdr;
2184         struct ib_mad_private *recv, *response = NULL;
2185         struct ib_mad_list_head *mad_list;
2186         struct ib_mad_agent_private *mad_agent;
2187         int port_num;
2188         int ret = IB_MAD_RESULT_SUCCESS;
2189         size_t mad_size;
2190         u16 resp_mad_pkey_index = 0;
2191         bool opa;
2192
2193         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2194         qp_info = mad_list->mad_queue->qp_info;
2195         dequeue_mad(mad_list);
2196
2197         opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2198                                qp_info->port_priv->port_num);
2199
2200         mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2201                                     mad_list);
2202         recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2203         ib_dma_unmap_single(port_priv->device,
2204                             recv->header.mapping,
2205                             mad_priv_dma_size(recv),
2206                             DMA_FROM_DEVICE);
2207
2208         /* Set up MAD receive work completion from "normal" work completion */
2209         recv->header.wc = *wc;
2210         recv->header.recv_wc.wc = &recv->header.wc;
2211
2212         if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2213                 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2214                 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2215         } else {
2216                 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2217                 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2218         }
2219
2220         recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2221         recv->header.recv_wc.recv_buf.grh = &recv->grh;
2222
2223         if (atomic_read(&qp_info->snoop_count))
2224                 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2225
2226         /* Validate MAD */
2227         if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2228                 goto out;
2229
2230         mad_size = recv->mad_size;
2231         response = alloc_mad_private(mad_size, GFP_KERNEL);
2232         if (!response) {
2233                 dev_err(&port_priv->device->dev,
2234                         "ib_mad_recv_done_handler: no memory for response buffer\n");
2235                 goto out;
2236         }
2237
2238         if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
2239                 port_num = wc->port_num;
2240         else
2241                 port_num = port_priv->port_num;
2242
2243         if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
2244             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2245                 if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2246                                response, opa)
2247                     == IB_SMI_DISCARD)
2248                         goto out;
2249         }
2250
2251         /* Give driver "right of first refusal" on incoming MAD */
2252         if (port_priv->device->process_mad) {
2253                 ret = port_priv->device->process_mad(port_priv->device, 0,
2254                                                      port_priv->port_num,
2255                                                      wc, &recv->grh,
2256                                                      (const struct ib_mad_hdr *)recv->mad,
2257                                                      recv->mad_size,
2258                                                      (struct ib_mad_hdr *)response->mad,
2259                                                      &mad_size, &resp_mad_pkey_index);
2260
2261                 if (opa)
2262                         wc->pkey_index = resp_mad_pkey_index;
2263
2264                 if (ret & IB_MAD_RESULT_SUCCESS) {
2265                         if (ret & IB_MAD_RESULT_CONSUMED)
2266                                 goto out;
2267                         if (ret & IB_MAD_RESULT_REPLY) {
2268                                 agent_send_response((const struct ib_mad_hdr *)response->mad,
2269                                                     &recv->grh, wc,
2270                                                     port_priv->device,
2271                                                     port_num,
2272                                                     qp_info->qp->qp_num,
2273                                                     mad_size, opa);
2274                                 goto out;
2275                         }
2276                 }
2277         }
2278
2279         mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
2280         if (mad_agent) {
2281                 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2282                 /*
2283                  * ib_mad_complete_recv() now owns recv: it is freed there
2284                  * on error or handed to the client via the recv_handler
2285                  */
2286                 recv = NULL;
2287         } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2288                    generate_unmatched_resp(recv, response, &mad_size, opa)) {
2289                 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2290                                     port_priv->device, port_num,
2291                                     qp_info->qp->qp_num, mad_size, opa);
2292         }
2293
2294 out:
2295         /* Post another receive request for this QP */
2296         if (response) {
2297                 ib_mad_post_receive_mads(qp_info, response);
2298                 kfree(recv);
2299         } else
2300                 ib_mad_post_receive_mads(qp_info, recv);
2301 }
2302
2303 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2304 {
2305         struct ib_mad_send_wr_private *mad_send_wr;
2306         unsigned long delay;
2307
2308         if (list_empty(&mad_agent_priv->wait_list)) {
2309                 cancel_delayed_work(&mad_agent_priv->timed_work);
2310         } else {
2311                 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2312                                          struct ib_mad_send_wr_private,
2313                                          agent_list);
2314
2315                 if (time_after(mad_agent_priv->timeout,
2316                                mad_send_wr->timeout)) {
2317                         mad_agent_priv->timeout = mad_send_wr->timeout;
2318                         delay = mad_send_wr->timeout - jiffies;
2319                         if ((long)delay <= 0)
2320                                 delay = 1;
2321                         mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2322                                          &mad_agent_priv->timed_work, delay);
2323                 }
2324         }
2325 }
2326
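/*
 * Queue a send that is waiting for a response on the wait list, which
 * is kept sorted by absolute timeout; scanning backwards from the tail
 * finds the insertion point quickly since new deadlines are usually
 * the latest.  If the new entry becomes the earliest deadline, the
 * delayed work is rescheduled to fire for it.
 */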
2327 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2328 {
2329         struct ib_mad_agent_private *mad_agent_priv;
2330         struct ib_mad_send_wr_private *temp_mad_send_wr;
2331         struct list_head *list_item;
2332         unsigned long delay;
2333
2334         mad_agent_priv = mad_send_wr->mad_agent_priv;
2335         list_del(&mad_send_wr->agent_list);
2336
2337         delay = mad_send_wr->timeout;
2338         mad_send_wr->timeout += jiffies;
2339
2340         if (delay) {
2341                 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2342                         temp_mad_send_wr = list_entry(list_item,
2343                                                 struct ib_mad_send_wr_private,
2344                                                 agent_list);
2345                         if (time_after(mad_send_wr->timeout,
2346                                        temp_mad_send_wr->timeout))
2347                                 break;
2348                 }
2349         }
2350         else
2351                 list_item = &mad_agent_priv->wait_list;
2352         list_add(&mad_send_wr->agent_list, list_item);
2353
2354         /* Reschedule a work item if we have a shorter timeout */
2355         if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2356                 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2357                                  &mad_agent_priv->timed_work, delay);
2358 }
2359
2360 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2361                           int timeout_ms)
2362 {
2363         mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2364         wait_for_response(mad_send_wr);
2365 }
2366
2367 /*
2368  * Process a send work completion
2369  */
2370 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2371                              struct ib_mad_send_wc *mad_send_wc)
2372 {
2373         struct ib_mad_agent_private     *mad_agent_priv;
2374         unsigned long                   flags;
2375         int                             ret;
2376
2377         mad_agent_priv = mad_send_wr->mad_agent_priv;
2378         spin_lock_irqsave(&mad_agent_priv->lock, flags);
2379         if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2380                 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2381                 if (ret == IB_RMPP_RESULT_CONSUMED)
2382                         goto done;
2383         } else
2384                 ret = IB_RMPP_RESULT_UNHANDLED;
2385
2386         if (mad_send_wc->status != IB_WC_SUCCESS &&
2387             mad_send_wr->status == IB_WC_SUCCESS) {
2388                 mad_send_wr->status = mad_send_wc->status;
2389                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2390         }
2391
2392         if (--mad_send_wr->refcount > 0) {
2393                 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2394                     mad_send_wr->status == IB_WC_SUCCESS) {
2395                         wait_for_response(mad_send_wr);
2396                 }
2397                 goto done;
2398         }
2399
2400         /* Remove send from MAD agent and notify client of completion */
2401         list_del(&mad_send_wr->agent_list);
2402         adjust_timeout(mad_agent_priv);
2403         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2404
2405         if (mad_send_wr->status != IB_WC_SUCCESS)
2406                 mad_send_wc->status = mad_send_wr->status;
2407         if (ret == IB_RMPP_RESULT_INTERNAL)
2408                 ib_rmpp_send_handler(mad_send_wc);
2409         else
2410                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2411                                                    mad_send_wc);
2412
2413         /* Release reference on agent taken when sending */
2414         deref_mad_agent(mad_agent_priv);
2415         return;
2416 done:
2417         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2418 }
2419
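/*
 * Handle a send completion: unmap the send buffers and, if sends were
 * parked on the overflow list because the send queue was running at
 * max_active, move the oldest queued send onto the hardware queue and
 * post it.  A failed repost is reported as a local QP operation error
 * by looping back to the retry label.
 */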
2420 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
2421                                      struct ib_wc *wc)
2422 {
2423         struct ib_mad_send_wr_private   *mad_send_wr, *queued_send_wr;
2424         struct ib_mad_list_head         *mad_list;
2425         struct ib_mad_qp_info           *qp_info;
2426         struct ib_mad_queue             *send_queue;
2427         struct ib_send_wr               *bad_send_wr;
2428         struct ib_mad_send_wc           mad_send_wc;
2429         unsigned long flags;
2430         int ret;
2431
2432         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2433         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2434                                    mad_list);
2435         send_queue = mad_list->mad_queue;
2436         qp_info = send_queue->qp_info;
2437
2438 retry:
2439         ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2440                             mad_send_wr->header_mapping,
2441                             mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2442         ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2443                             mad_send_wr->payload_mapping,
2444                             mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2445         queued_send_wr = NULL;
2446         spin_lock_irqsave(&send_queue->lock, flags);
2447         list_del(&mad_list->list);
2448
2449         /* Move queued send to the send queue */
2450         if (send_queue->count-- > send_queue->max_active) {
2451                 mad_list = container_of(qp_info->overflow_list.next,
2452                                         struct ib_mad_list_head, list);
2453                 queued_send_wr = container_of(mad_list,
2454                                         struct ib_mad_send_wr_private,
2455                                         mad_list);
2456                 list_move_tail(&mad_list->list, &send_queue->list);
2457         }
2458         spin_unlock_irqrestore(&send_queue->lock, flags);
2459
2460         mad_send_wc.send_buf = &mad_send_wr->send_buf;
2461         mad_send_wc.status = wc->status;
2462         mad_send_wc.vendor_err = wc->vendor_err;
2463         if (atomic_read(&qp_info->snoop_count))
2464                 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2465                            IB_MAD_SNOOP_SEND_COMPLETIONS);
2466         ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2467
2468         if (queued_send_wr) {
2469                 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
2470                                    &bad_send_wr);
2471                 if (ret) {
2472                         dev_err(&port_priv->device->dev,
2473                                 "ib_post_send failed: %d\n", ret);
2474                         mad_send_wr = queued_send_wr;
2475                         wc->status = IB_WC_LOC_QP_OP_ERR;
2476                         goto retry;
2477                 }
2478         }
2479 }
2480
2481 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2482 {
2483         struct ib_mad_send_wr_private *mad_send_wr;
2484         struct ib_mad_list_head *mad_list;
2485         unsigned long flags;
2486
2487         spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2488         list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2489                 mad_send_wr = container_of(mad_list,
2490                                            struct ib_mad_send_wr_private,
2491                                            mad_list);
2492                 mad_send_wr->retry = 1;
2493         }
2494         spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2495 }
2496
2497 static void mad_error_handler(struct ib_mad_port_private *port_priv,
2498                               struct ib_wc *wc)
2499 {
2500         struct ib_mad_list_head *mad_list;
2501         struct ib_mad_qp_info *qp_info;
2502         struct ib_mad_send_wr_private *mad_send_wr;
2503         int ret;
2504
2505         /* Determine if failure was a send or receive */
2506         mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
2507         qp_info = mad_list->mad_queue->qp_info;
2508         if (mad_list->mad_queue == &qp_info->recv_queue)
2509                 /*
2510                  * Receive errors indicate that the QP has entered the error
2511                  * state - error handling/shutdown code will cleanup
2512                  */
2513                 return;
2514
2515         /*
2516          * Send errors will transition the QP to SQE - move
2517          * QP to RTS and repost flushed work requests
2518          */
2519         mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2520                                    mad_list);
2521         if (wc->status == IB_WC_WR_FLUSH_ERR) {
2522                 if (mad_send_wr->retry) {
2523                         /* Repost send */
2524                         struct ib_send_wr *bad_send_wr;
2525
2526                         mad_send_wr->retry = 0;
2527                         ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2528                                         &bad_send_wr);
2529                         if (ret)
2530                                 ib_mad_send_done_handler(port_priv, wc);
2531                 } else
2532                         ib_mad_send_done_handler(port_priv, wc);
        } else {
                struct ib_qp_attr *attr;

                /* Transition QP to RTS and fail offending send */
                attr = kmalloc(sizeof *attr, GFP_KERNEL);
                if (attr) {
                        attr->qp_state = IB_QPS_RTS;
                        attr->cur_qp_state = IB_QPS_SQE;
                        ret = ib_modify_qp(qp_info->qp, attr,
                                           IB_QP_STATE | IB_QP_CUR_STATE);
                        kfree(attr);
                        if (ret)
                                dev_err(&port_priv->device->dev,
                                        "mad_error_handler - ib_modify_qp to RTS: %d\n",
                                        ret);
                        else
                                mark_sends_for_retry(qp_info);
                }
                ib_mad_send_done_handler(port_priv, wc);
        }
}
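
/*
 * Editorial note on the recovery above (summary, not upstream text):
 * per the InfiniBand spec, a completion error on a UD send queue moves
 * the QP to the SQE state, where remaining posted sends complete with
 * IB_WC_WR_FLUSH_ERR.  Recovery is a single ib_modify_qp() from SQE
 * back to RTS (hence the IB_QP_CUR_STATE mask), after which the sends
 * that mark_sends_for_retry() flagged are reposted one by one as their
 * flush completions drain back through mad_error_handler().
 */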

/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(struct work_struct *work)
{
        struct ib_mad_port_private *port_priv;
        struct ib_wc wc;

        port_priv = container_of(work, struct ib_mad_port_private, work);
        ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

        while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
                if (wc.status == IB_WC_SUCCESS) {
                        switch (wc.opcode) {
                        case IB_WC_SEND:
                                ib_mad_send_done_handler(port_priv, &wc);
                                break;
                        case IB_WC_RECV:
                                ib_mad_recv_done_handler(port_priv, &wc);
                                break;
                        default:
                                BUG_ON(1);
                                break;
                        }
                } else {
                        mad_error_handler(port_priv, &wc);
                }
        }
}
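
/*
 * Editorial note on the notify-then-poll ordering above: rearming the
 * CQ with ib_req_notify_cq() *before* draining it means a completion
 * that arrives mid-loop still raises an event, which requeues this
 * work item via ib_mad_thread_completion_handler().  Polling first and
 * rearming afterwards would leave a window where a completion sits in
 * the CQ unnoticed until the next unrelated event.
 */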

static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
        unsigned long flags;
        struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
        struct ib_mad_send_wc mad_send_wc;
        struct list_head cancel_list;

        INIT_LIST_HEAD(&cancel_list);

        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
                                 &mad_agent_priv->send_list, agent_list) {
                if (mad_send_wr->status == IB_WC_SUCCESS) {
                        mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
                        mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
                }
        }

        /* Empty wait list to prevent receives from finding a request */
        list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

        /* Report all cancelled requests */
        mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
        mad_send_wc.vendor_err = 0;

        list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
                                 &cancel_list, agent_list) {
                mad_send_wc.send_buf = &mad_send_wr->send_buf;
                list_del(&mad_send_wr->agent_list);
                mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                   &mad_send_wc);
                atomic_dec(&mad_agent_priv->refcount);
        }
}

static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
             struct ib_mad_send_buf *send_buf)
{
        struct ib_mad_send_wr_private *mad_send_wr;

        list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
                            agent_list) {
                if (&mad_send_wr->send_buf == send_buf)
                        return mad_send_wr;
        }

        list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
                            agent_list) {
                if (is_rmpp_data_mad(mad_agent_priv,
                                     mad_send_wr->send_buf.mad) &&
                    &mad_send_wr->send_buf == send_buf)
                        return mad_send_wr;
        }
        return NULL;
}
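
/*
 * Editorial note on why send_list is only searched for RMPP data MADs:
 * an ordinary request appears to sit on send_list only for the short
 * window before its send completion, after which it moves to wait_list
 * where the first loop finds it.  An RMPP transfer, by contrast, can
 * keep its work request on send_list across many segments, so it must
 * remain discoverable by ib_modify_mad() while still "in flight".
 */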

int ib_modify_mad(struct ib_mad_agent *mad_agent,
                  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
        unsigned long flags;
        int active;

        mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
                                      agent);
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
        if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                return -EINVAL;
        }

        active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
        if (!timeout_ms) {
                mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
                mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
        }

        mad_send_wr->send_buf.timeout_ms = timeout_ms;
        if (active)
                mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
        else
                ib_reset_mad_timeout(mad_send_wr, timeout_ms);

        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
        return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
                   struct ib_mad_send_buf *send_buf)
{
        ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
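
/*
 * Usage sketch (hypothetical consumer code, not part of this file):
 * a client that posted a request with ib_post_send_mad() can adjust
 * or abandon it while the response is still outstanding:
 *
 *	ib_modify_mad(agent, send_buf, 5000);	// extend timeout to 5 s
 *	ib_cancel_mad(agent, send_buf);		// give up immediately
 *
 * Either way the buffer is still handed back through the agent's
 * send_handler - with IB_WC_WR_FLUSH_ERR status in the cancel case -
 * and is freed there with ib_free_send_mad().
 */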

static void local_completions(struct work_struct *work)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_local_private *local;
        struct ib_mad_agent_private *recv_mad_agent;
        unsigned long flags;
        int free_mad;
        struct ib_wc wc;
        struct ib_mad_send_wc mad_send_wc;
        bool opa;

        mad_agent_priv =
                container_of(work, struct ib_mad_agent_private, local_work);

        opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
                               mad_agent_priv->qp_info->port_priv->port_num);

        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        while (!list_empty(&mad_agent_priv->local_list)) {
                local = list_entry(mad_agent_priv->local_list.next,
                                   struct ib_mad_local_private,
                                   completion_list);
                list_del(&local->completion_list);
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                free_mad = 0;
                if (local->mad_priv) {
                        u8 base_version;

                        recv_mad_agent = local->recv_mad_agent;
                        if (!recv_mad_agent) {
                                dev_err(&mad_agent_priv->agent.device->dev,
                                        "No receive MAD agent for local completion\n");
                                free_mad = 1;
                                goto local_send_completion;
                        }

                        /*
                         * Defined behavior is to complete response
                         * before request
                         */
                        build_smp_wc(recv_mad_agent->agent.qp,
                                     (unsigned long) local->mad_send_wr,
                                     be16_to_cpu(IB_LID_PERMISSIVE),
                                     local->mad_send_wr->send_wr.wr.ud.pkey_index,
                                     recv_mad_agent->agent.port_num, &wc);

                        local->mad_priv->header.recv_wc.wc = &wc;

                        base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
                        if (opa && base_version == OPA_MGMT_BASE_VERSION) {
                                local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
                                local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
                        } else {
                                local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
                                local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
                        }

                        INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
                        list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
                                 &local->mad_priv->header.recv_wc.rmpp_list);
                        local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
                        local->mad_priv->header.recv_wc.recv_buf.mad =
                                                (struct ib_mad *)local->mad_priv->mad;
                        if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
                                snoop_recv(recv_mad_agent->qp_info,
                                           &local->mad_priv->header.recv_wc,
                                           IB_MAD_SNOOP_RECVS);
                        recv_mad_agent->agent.recv_handler(
                                                &recv_mad_agent->agent,
                                                &local->mad_priv->header.recv_wc);
                        spin_lock_irqsave(&recv_mad_agent->lock, flags);
                        atomic_dec(&recv_mad_agent->refcount);
                        spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
                }

local_send_completion:
                /* Complete send */
                mad_send_wc.status = IB_WC_SUCCESS;
                mad_send_wc.vendor_err = 0;
                mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
                if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
                        snoop_send(mad_agent_priv->qp_info,
                                   &local->mad_send_wr->send_buf,
                                   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
                mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                   &mad_send_wc);

                spin_lock_irqsave(&mad_agent_priv->lock, flags);
                atomic_dec(&mad_agent_priv->refcount);
                if (free_mad)
                        kfree(local->mad_priv);
                kfree(local);
        }
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
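
/*
 * Editorial note: this worker exists because MADs addressed to the
 * local port never reach the wire - the directed-route handling
 * earlier in this file (handle_outgoing_dr_smp()) queues them on
 * local_list instead, and both the synthetic "receive" and the send
 * completion are delivered here in process context, mirroring what
 * the hardware completion path would have produced.
 */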

static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
        int ret;

        if (!mad_send_wr->retries_left)
                return -ETIMEDOUT;

        mad_send_wr->retries_left--;
        mad_send_wr->send_buf.retries++;

        mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

        if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
                ret = ib_retry_rmpp(mad_send_wr);
                switch (ret) {
                case IB_RMPP_RESULT_UNHANDLED:
                        ret = ib_send_mad(mad_send_wr);
                        break;
                case IB_RMPP_RESULT_CONSUMED:
                        ret = 0;
                        break;
                default:
                        ret = -ECOMM;
                        break;
                }
        } else {
                ret = ib_send_mad(mad_send_wr);
        }

        if (!ret) {
                mad_send_wr->refcount++;
                list_add_tail(&mad_send_wr->agent_list,
                              &mad_send_wr->mad_agent_priv->send_list);
        }
        return ret;
}
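
/*
 * Worked example (editorial): with send_buf.timeout_ms = 100 and three
 * retries configured, a lost response costs at most ~400 ms before the
 * send fails with -ETIMEDOUT: the initial send plus three retries,
 * each re-armed above via msecs_to_jiffies() (100 ms is 25 jiffies at
 * the common HZ=250).
 */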

static void timeout_sends(struct work_struct *work)
{
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_mad_send_wc mad_send_wc;
        unsigned long flags, delay;

        mad_agent_priv = container_of(work, struct ib_mad_agent_private,
                                      timed_work.work);
        mad_send_wc.vendor_err = 0;

        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        while (!list_empty(&mad_agent_priv->wait_list)) {
                mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
                                         struct ib_mad_send_wr_private,
                                         agent_list);

                if (time_after(mad_send_wr->timeout, jiffies)) {
                        delay = mad_send_wr->timeout - jiffies;
                        if ((long)delay <= 0)
                                delay = 1;
                        queue_delayed_work(mad_agent_priv->qp_info->
                                           port_priv->wq,
                                           &mad_agent_priv->timed_work, delay);
                        break;
                }

                list_del(&mad_send_wr->agent_list);
                if (mad_send_wr->status == IB_WC_SUCCESS &&
                    !retry_send(mad_send_wr))
                        continue;

                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

                if (mad_send_wr->status == IB_WC_SUCCESS)
                        mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
                else
                        mad_send_wc.status = mad_send_wr->status;
                mad_send_wc.send_buf = &mad_send_wr->send_buf;
                mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                   &mad_send_wc);

                atomic_dec(&mad_agent_priv->refcount);
                spin_lock_irqsave(&mad_agent_priv->lock, flags);
        }
        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
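
/*
 * Editorial note: the early break above relies on wait_list being kept
 * sorted by expiry (wait_for_response() inserts entries in timeout
 * order), so the first un-expired entry bounds everything behind it
 * and the worker only ever needs to re-arm a single delayed work item.
 */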

static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
        struct ib_mad_port_private *port_priv = cq->cq_context;
        unsigned long flags;

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        if (!list_empty(&port_priv->port_list))
                queue_work(port_priv->wq, &port_priv->work);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
}

/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                    struct ib_mad_private *mad)
{
        unsigned long flags;
        int post, ret;
        struct ib_mad_private *mad_priv;
        struct ib_sge sg_list;
        struct ib_recv_wr recv_wr, *bad_recv_wr;
        struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

        /* Initialize common scatter list fields */
        sg_list.lkey = qp_info->port_priv->mr->lkey;

        /* Initialize common receive WR fields */
        recv_wr.next = NULL;
        recv_wr.sg_list = &sg_list;
        recv_wr.num_sge = 1;

        do {
                /* Allocate and map receive buffer */
                if (mad) {
                        mad_priv = mad;
                        mad = NULL;
                } else {
                        mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
                                                     GFP_ATOMIC);
                        if (!mad_priv) {
                                dev_err(&qp_info->port_priv->device->dev,
                                        "No memory for receive buffer\n");
                                ret = -ENOMEM;
                                break;
                        }
                }
                sg_list.length = mad_priv_dma_size(mad_priv);
                sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
                                                 &mad_priv->grh,
                                                 mad_priv_dma_size(mad_priv),
                                                 DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
                                                  sg_list.addr))) {
                        /* Don't leak the buffer on a mapping failure */
                        kfree(mad_priv);
                        ret = -ENOMEM;
                        break;
                }
                mad_priv->header.mapping = sg_list.addr;
                recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
                mad_priv->header.mad_list.mad_queue = recv_queue;

                /* Post receive WR */
                spin_lock_irqsave(&recv_queue->lock, flags);
                post = (++recv_queue->count < recv_queue->max_active);
                list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
                spin_unlock_irqrestore(&recv_queue->lock, flags);
                ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
                if (ret) {
                        spin_lock_irqsave(&recv_queue->lock, flags);
                        list_del(&mad_priv->header.mad_list.list);
                        recv_queue->count--;
                        spin_unlock_irqrestore(&recv_queue->lock, flags);
                        ib_dma_unmap_single(qp_info->port_priv->device,
                                            mad_priv->header.mapping,
                                            mad_priv_dma_size(mad_priv),
                                            DMA_FROM_DEVICE);
                        kfree(mad_priv);
                        dev_err(&qp_info->port_priv->device->dev,
                                "ib_post_recv failed: %d\n", ret);
                        break;
                }
        } while (post);

        return ret;
}
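
/*
 * Editorial sketch of the replenish cycle: each receive completion in
 * ib_mad_recv_done_handler() consumes one of these buffers and calls
 * back into ib_mad_post_receive_mads() - either recycling a processed
 * ib_mad_private (the "mad" argument above) or allocating a fresh one
 * - so the receive queue stays topped up to max_active without ever
 * sleeping, which is why the allocation uses GFP_ATOMIC.
 */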

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
        struct ib_mad_private_header *mad_priv_hdr;
        struct ib_mad_private *recv;
        struct ib_mad_list_head *mad_list;

        if (!qp_info->qp)
                return;

        while (!list_empty(&qp_info->recv_queue.list)) {
                mad_list = list_entry(qp_info->recv_queue.list.next,
                                      struct ib_mad_list_head, list);
                mad_priv_hdr = container_of(mad_list,
                                            struct ib_mad_private_header,
                                            mad_list);
                recv = container_of(mad_priv_hdr, struct ib_mad_private,
                                    header);

                /* Remove from posted receive MAD list */
                list_del(&mad_list->list);

                ib_dma_unmap_single(qp_info->port_priv->device,
                                    recv->header.mapping,
                                    mad_priv_dma_size(recv),
                                    DMA_FROM_DEVICE);
                kfree(recv);
        }

        qp_info->recv_queue.count = 0;
}

/*
 * Start the port
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
        int ret, i;
        struct ib_qp_attr *attr;
        struct ib_qp *qp;
        u16 pkey_index;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr) {
                dev_err(&port_priv->device->dev,
                        "Couldn't kmalloc ib_qp_attr\n");
                return -ENOMEM;
        }

        ret = ib_find_pkey(port_priv->device, port_priv->port_num,
                           IB_DEFAULT_PKEY_FULL, &pkey_index);
        if (ret)
                pkey_index = 0;

        for (i = 0; i < IB_MAD_QPS_CORE; i++) {
                qp = port_priv->qp_info[i].qp;
                if (!qp)
                        continue;

                /*
                 * PKey index for QP1 is irrelevant but
                 * one is needed for the Reset to Init transition
                 */
                attr->qp_state = IB_QPS_INIT;
                attr->pkey_index = pkey_index;
                attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE |
                                             IB_QP_PKEY_INDEX | IB_QP_QKEY);
                if (ret) {
                        dev_err(&port_priv->device->dev,
                                "Couldn't change QP%d state to INIT: %d\n",
                                i, ret);
                        goto out;
                }

                attr->qp_state = IB_QPS_RTR;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE);
                if (ret) {
                        dev_err(&port_priv->device->dev,
                                "Couldn't change QP%d state to RTR: %d\n",
                                i, ret);
                        goto out;
                }

                attr->qp_state = IB_QPS_RTS;
                attr->sq_psn = IB_MAD_SEND_Q_PSN;
                ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
                if (ret) {
                        dev_err(&port_priv->device->dev,
                                "Couldn't change QP%d state to RTS: %d\n",
                                i, ret);
                        goto out;
                }
        }

        ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
        if (ret) {
                dev_err(&port_priv->device->dev,
                        "Failed to request completion notification: %d\n",
                        ret);
                goto out;
        }

        for (i = 0; i < IB_MAD_QPS_CORE; i++) {
                if (!port_priv->qp_info[i].qp)
                        continue;

                ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
                if (ret) {
                        dev_err(&port_priv->device->dev,
                                "Couldn't post receive WRs\n");
                        goto out;
                }
        }
out:
        kfree(attr);
        return ret;
}
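
/*
 * Editorial summary of the bring-up sequence above: each special QP
 * walks RESET -> INIT (pkey index + qkey) -> RTR -> RTS (send PSN),
 * the minimal attribute set a UD QP needs at each step; connected-mode
 * attributes such as an AH, destination QPN, or timeouts don't apply.
 */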

static void qp_event_handler(struct ib_event *event, void *qp_context)
{
        struct ib_mad_qp_info *qp_info = qp_context;

        /* It's worse than that! He's dead, Jim! */
        dev_err(&qp_info->port_priv->device->dev,
                "Fatal error (%d) on MAD QP (%d)\n",
                event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
                           struct ib_mad_queue *mad_queue)
{
        mad_queue->qp_info = qp_info;
        mad_queue->count = 0;
        spin_lock_init(&mad_queue->lock);
        INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
                        struct ib_mad_qp_info *qp_info)
{
        qp_info->port_priv = port_priv;
        init_mad_queue(qp_info, &qp_info->send_queue);
        init_mad_queue(qp_info, &qp_info->recv_queue);
        INIT_LIST_HEAD(&qp_info->overflow_list);
        spin_lock_init(&qp_info->snoop_lock);
        qp_info->snoop_table = NULL;
        qp_info->snoop_table_size = 0;
        atomic_set(&qp_info->snoop_count, 0);
}

static int create_mad_qp(struct ib_mad_qp_info *qp_info,
                         enum ib_qp_type qp_type)
{
        struct ib_qp_init_attr qp_init_attr;

        memset(&qp_init_attr, 0, sizeof qp_init_attr);
        qp_init_attr.send_cq = qp_info->port_priv->cq;
        qp_init_attr.recv_cq = qp_info->port_priv->cq;
        qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
        qp_init_attr.cap.max_send_wr = mad_sendq_size;
        qp_init_attr.cap.max_recv_wr = mad_recvq_size;
        qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
        qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
        qp_init_attr.qp_type = qp_type;
        qp_init_attr.port_num = qp_info->port_priv->port_num;
        qp_init_attr.qp_context = qp_info;
        qp_init_attr.event_handler = qp_event_handler;
        qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
        if (IS_ERR(qp_info->qp)) {
                dev_err(&qp_info->port_priv->device->dev,
                        "Couldn't create ib_mad QP%d\n",
                        get_spl_qp_index(qp_type));
                return PTR_ERR(qp_info->qp);
        }
        /* Use minimum queue sizes unless the CQ is resized */
        qp_info->send_queue.max_active = mad_sendq_size;
        qp_info->recv_queue.max_active = mad_recvq_size;
        return 0;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
        if (!qp_info->qp)
                return;

        ib_destroy_qp(qp_info->qp);
        kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
                            int port_num)
{
        int ret, cq_size;
        struct ib_mad_port_private *port_priv;
        unsigned long flags;
        char name[sizeof "ib_mad123"];
        int has_smi;
        struct ib_cq_init_attr cq_attr = {};

        if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
                return -EFAULT;

        if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
                    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
                return -EFAULT;

        /* Create new device info */
        port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
        if (!port_priv) {
                dev_err(&device->dev, "No memory for ib_mad_port_private\n");
                return -ENOMEM;
        }

        port_priv->device = device;
        port_priv->port_num = port_num;
        spin_lock_init(&port_priv->reg_lock);
        INIT_LIST_HEAD(&port_priv->agent_list);
        init_mad_qp(port_priv, &port_priv->qp_info[0]);
        init_mad_qp(port_priv, &port_priv->qp_info[1]);

        cq_size = mad_sendq_size + mad_recvq_size;
        has_smi = rdma_cap_ib_smi(device, port_num);
        if (has_smi)
                cq_size *= 2;

        cq_attr.cqe = cq_size;
        port_priv->cq = ib_create_cq(port_priv->device,
                                     ib_mad_thread_completion_handler,
                                     NULL, port_priv, &cq_attr);
        if (IS_ERR(port_priv->cq)) {
                dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
                ret = PTR_ERR(port_priv->cq);
                goto error3;
        }

        port_priv->pd = ib_alloc_pd(device);
        if (IS_ERR(port_priv->pd)) {
                dev_err(&device->dev, "Couldn't create ib_mad PD\n");
                ret = PTR_ERR(port_priv->pd);
                goto error4;
        }

        port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(port_priv->mr)) {
                dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
                ret = PTR_ERR(port_priv->mr);
                goto error5;
        }

        if (has_smi) {
                ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
                if (ret)
                        goto error6;
        }
        ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
        if (ret)
                goto error7;

        snprintf(name, sizeof name, "ib_mad%d", port_num);
        port_priv->wq = create_singlethread_workqueue(name);
        if (!port_priv->wq) {
                ret = -ENOMEM;
                goto error8;
        }
        INIT_WORK(&port_priv->work, ib_mad_completion_handler);

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        list_add_tail(&port_priv->port_list, &ib_mad_port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        ret = ib_mad_port_start(port_priv);
        if (ret) {
                dev_err(&device->dev, "Couldn't start port\n");
                goto error9;
        }

        return 0;

error9:
        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        list_del_init(&port_priv->port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        destroy_workqueue(port_priv->wq);
error8:
        destroy_mad_qp(&port_priv->qp_info[1]);
error7:
        destroy_mad_qp(&port_priv->qp_info[0]);
error6:
        ib_dereg_mr(port_priv->mr);
error5:
        ib_dealloc_pd(port_priv->pd);
error4:
        ib_destroy_cq(port_priv->cq);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
        kfree(port_priv);

        return ret;
}
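
/*
 * Editorial note on the unwind labels: the error path tears resources
 * down in exact reverse order of creation (workqueue, GSI QP, SMI QP,
 * MR, PD, CQ), and the gaps in the label numbering are apparently left
 * over from earlier revisions of this function that had more steps.
 */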

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
        struct ib_mad_port_private *port_priv;
        unsigned long flags;

        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        port_priv = __ib_get_mad_port(device, port_num);
        if (port_priv == NULL) {
                spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
                dev_err(&device->dev, "Port %d not found\n", port_num);
                return -ENODEV;
        }
        list_del_init(&port_priv->port_list);
        spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

        destroy_workqueue(port_priv->wq);
        destroy_mad_qp(&port_priv->qp_info[1]);
        destroy_mad_qp(&port_priv->qp_info[0]);
        ib_dereg_mr(port_priv->mr);
        ib_dealloc_pd(port_priv->pd);
        ib_destroy_cq(port_priv->cq);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
        /* XXX: Handle deallocation of MAD registration tables */

        kfree(port_priv);

        return 0;
}

static void ib_mad_init_device(struct ib_device *device)
{
        int start, end, i;

        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                start = 0;
                end   = 0;
        } else {
                start = 1;
                end   = device->phys_port_cnt;
        }

        for (i = start; i <= end; i++) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;

                if (ib_mad_port_open(device, i)) {
                        dev_err(&device->dev, "Couldn't open port %d\n", i);
                        goto error;
                }
                if (ib_agent_port_open(device, i)) {
                        dev_err(&device->dev,
                                "Couldn't open port %d for agents\n", i);
                        goto error_agent;
                }
        }
        return;

error_agent:
        if (ib_mad_port_close(device, i))
                dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
        while (--i >= start) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;

                if (ib_agent_port_close(device, i))
                        dev_err(&device->dev,
                                "Couldn't close port %d for agents\n", i);
                if (ib_mad_port_close(device, i))
                        dev_err(&device->dev, "Couldn't close port %d\n", i);
        }
}

static void ib_mad_remove_device(struct ib_device *device)
{
        int start, end, i;

        if (device->node_type == RDMA_NODE_IB_SWITCH) {
                start = 0;
                end   = 0;
        } else {
                start = 1;
                end   = device->phys_port_cnt;
        }

        for (i = start; i <= end; i++) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;

                if (ib_agent_port_close(device, i))
                        dev_err(&device->dev,
                                "Couldn't close port %d for agents\n", i);
                if (ib_mad_port_close(device, i))
                        dev_err(&device->dev, "Couldn't close port %d\n", i);
        }
}

static struct ib_client mad_client = {
        .name   = "mad",
        .add    = ib_mad_init_device,
        .remove = ib_mad_remove_device
};
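
/*
 * Editorial note on the ib_client pattern: ib_register_client() calls
 * .add for every IB device already in the system and again for each
 * device registered afterwards; .remove runs symmetrically on device
 * removal or client unregistration, so all per-port setup and teardown
 * hangs off these two callbacks.
 */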

static int __init ib_mad_init_module(void)
{
        mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
        mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

        mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
        mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

        INIT_LIST_HEAD(&ib_mad_port_list);

        if (ib_register_client(&mad_client)) {
                pr_err("Couldn't register ib_mad client\n");
                return -EINVAL;
        }

        return 0;
}
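
/*
 * Editorial example of the clamping above: loading the module with a
 * hypothetical send_queue_size=4 or send_queue_size=1000000 is silently
 * pulled back into the [IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE] range
 * by the min()/max() pair, so an out-of-range module parameter can
 * never produce an absurdly small or large MAD QP.
 */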

static void __exit ib_mad_cleanup_module(void)
{
        ib_unregister_client(&mad_client);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);