2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
6 * Copyright (c) 2014 Intel Corporation. All rights reserved.
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
38 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40 #include <linux/dma-mapping.h>
41 #include <linux/slab.h>
42 #include <linux/module.h>
43 #include <rdma/ib_cache.h>
51 MODULE_LICENSE("Dual BSD/GPL");
52 MODULE_DESCRIPTION("kernel IB MAD API");
53 MODULE_AUTHOR("Hal Rosenstock");
54 MODULE_AUTHOR("Sean Hefty");
56 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
57 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
59 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
60 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
61 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
62 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
64 static struct list_head ib_mad_port_list;
65 static u32 ib_mad_client_id = 0;
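/*
 * ib_mad_client_id is a monotonically increasing counter: each agent
 * registered below takes the next value as its hi_tid, and that value is
 * what routes response MADs back to the agent in find_mad_agent().  It is
 * only advanced while holding the port's reg_lock in
 * ib_register_mad_agent().
 */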
68 static DEFINE_SPINLOCK(ib_mad_port_list_lock);
70 /* Forward declarations */
71 static int method_in_use(struct ib_mad_mgmt_method_table **method,
72 struct ib_mad_reg_req *mad_reg_req);
73 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
74 static struct ib_mad_agent_private *find_mad_agent(
75 struct ib_mad_port_private *port_priv,
76 const struct ib_mad_hdr *mad);
77 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
78 struct ib_mad_private *mad);
79 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
80 static void timeout_sends(struct work_struct *work);
81 static void local_completions(struct work_struct *work);
82 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
83 struct ib_mad_agent_private *agent_priv,
85 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
86 struct ib_mad_agent_private *agent_priv);
89 * Returns an ib_mad_port_private structure or NULL for a device/port
90 * Assumes ib_mad_port_list_lock is being held
92 static inline struct ib_mad_port_private *
93 __ib_get_mad_port(struct ib_device *device, int port_num)
95 struct ib_mad_port_private *entry;
97 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
98 if (entry->device == device && entry->port_num == port_num)
105 * Wrapper function to return an ib_mad_port_private structure or NULL for a device/port
108 static inline struct ib_mad_port_private *
109 ib_get_mad_port(struct ib_device *device, int port_num)
111 struct ib_mad_port_private *entry;
114 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
115 entry = __ib_get_mad_port(device, port_num);
116 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
121 static inline u8 convert_mgmt_class(u8 mgmt_class)
123 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
124 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
128 static int get_spl_qp_index(enum ib_qp_type qp_type)
141 static int vendor_class_index(u8 mgmt_class)
143 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
146 static int is_vendor_class(u8 mgmt_class)
148 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
149 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
154 static int is_vendor_oui(char *oui)
156 if (oui[0] || oui[1] || oui[2])
161 static int is_vendor_method_in_use(
162 struct ib_mad_mgmt_vendor_class *vendor_class,
163 struct ib_mad_reg_req *mad_reg_req)
165 struct ib_mad_mgmt_method_table *method;
168 for (i = 0; i < MAX_MGMT_OUI; i++) {
169 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
170 method = vendor_class->method_table[i];
172 if (method_in_use(&method, mad_reg_req))
182 int ib_response_mad(const struct ib_mad_hdr *hdr)
184 return ((hdr->method & IB_MGMT_METHOD_RESP) ||
185 (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
186 ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
187 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
189 EXPORT_SYMBOL(ib_response_mad);
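/*
 * A MAD counts as a response when the R bit of the method is set, when the
 * method is TrapRepress, or when it is a BM class MAD with the response
 * bit set in attr_mod.
 */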
192 * ib_register_mad_agent - Register to send/receive MADs
194 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
196 enum ib_qp_type qp_type,
197 struct ib_mad_reg_req *mad_reg_req,
199 ib_mad_send_handler send_handler,
200 ib_mad_recv_handler recv_handler,
202 u32 registration_flags)
204 struct ib_mad_port_private *port_priv;
205 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
206 struct ib_mad_agent_private *mad_agent_priv;
207 struct ib_mad_reg_req *reg_req = NULL;
208 struct ib_mad_mgmt_class_table *class;
209 struct ib_mad_mgmt_vendor_class_table *vendor;
210 struct ib_mad_mgmt_vendor_class *vendor_class;
211 struct ib_mad_mgmt_method_table *method;
214 u8 mgmt_class, vclass;
216 /* Validate parameters */
217 qpn = get_spl_qp_index(qp_type);
219 dev_notice(&device->dev,
220 "ib_register_mad_agent: invalid QP Type %d\n",
225 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
226 dev_notice(&device->dev,
227 "ib_register_mad_agent: invalid RMPP Version %u\n",
232 /* Validate MAD registration request if supplied */
234 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
235 dev_notice(&device->dev,
236 "ib_register_mad_agent: invalid Class Version %u\n",
237 mad_reg_req->mgmt_class_version);
241 dev_notice(&device->dev,
242 "ib_register_mad_agent: no recv_handler\n");
245 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
247 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
248 * one in this range currently allowed
250 if (mad_reg_req->mgmt_class !=
251 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
252 dev_notice(&device->dev,
253 "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
254 mad_reg_req->mgmt_class);
257 } else if (mad_reg_req->mgmt_class == 0) {
259 * Class 0 is reserved in IBA and is used for
260 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
262 dev_notice(&device->dev,
263 "ib_register_mad_agent: Invalid Mgmt Class 0\n");
265 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
267 * If class is in "new" vendor range,
268 * ensure supplied OUI is not zero
270 if (!is_vendor_oui(mad_reg_req->oui)) {
271 dev_notice(&device->dev,
272 "ib_register_mad_agent: No OUI specified for class 0x%x\n",
273 mad_reg_req->mgmt_class);
277 /* Make sure class supplied is consistent with RMPP */
278 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
280 dev_notice(&device->dev,
281 "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
282 mad_reg_req->mgmt_class);
287 /* Make sure class supplied is consistent with QP type */
288 if (qp_type == IB_QPT_SMI) {
289 if ((mad_reg_req->mgmt_class !=
290 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
291 (mad_reg_req->mgmt_class !=
292 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
293 dev_notice(&device->dev,
294 "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
295 mad_reg_req->mgmt_class);
299 if ((mad_reg_req->mgmt_class ==
300 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
301 (mad_reg_req->mgmt_class ==
302 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
303 dev_notice(&device->dev,
304 "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
305 mad_reg_req->mgmt_class);
310 /* No registration request supplied */
313 if (registration_flags & IB_MAD_USER_RMPP)
317 /* Validate device and port */
318 port_priv = ib_get_mad_port(device, port_num);
320 dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
321 ret = ERR_PTR(-ENODEV);
325 /* Verify the QP requested is supported. For example, Ethernet devices
326 * will not have QP0 */
327 if (!port_priv->qp_info[qpn].qp) {
328 dev_notice(&device->dev,
329 "ib_register_mad_agent: QP %d not supported\n", qpn);
330 ret = ERR_PTR(-EPROTONOSUPPORT);
334 /* Allocate structures */
335 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
336 if (!mad_agent_priv) {
337 ret = ERR_PTR(-ENOMEM);
341 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
342 IB_ACCESS_LOCAL_WRITE);
343 if (IS_ERR(mad_agent_priv->agent.mr)) {
344 ret = ERR_PTR(-ENOMEM);
349 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
351 ret = ERR_PTR(-ENOMEM);
356 /* Now, fill in the various structures */
357 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
358 mad_agent_priv->reg_req = reg_req;
359 mad_agent_priv->agent.rmpp_version = rmpp_version;
360 mad_agent_priv->agent.device = device;
361 mad_agent_priv->agent.recv_handler = recv_handler;
362 mad_agent_priv->agent.send_handler = send_handler;
363 mad_agent_priv->agent.context = context;
364 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
365 mad_agent_priv->agent.port_num = port_num;
366 mad_agent_priv->agent.flags = registration_flags;
367 spin_lock_init(&mad_agent_priv->lock);
368 INIT_LIST_HEAD(&mad_agent_priv->send_list);
369 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
370 INIT_LIST_HEAD(&mad_agent_priv->done_list);
371 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
372 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
373 INIT_LIST_HEAD(&mad_agent_priv->local_list);
374 INIT_WORK(&mad_agent_priv->local_work, local_completions);
375 atomic_set(&mad_agent_priv->refcount, 1);
376 init_completion(&mad_agent_priv->comp);
378 spin_lock_irqsave(&port_priv->reg_lock, flags);
379 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
382 * Make sure MAD registration (if supplied)
383 * is non-overlapping with any existing ones
386 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
387 if (!is_vendor_class(mgmt_class)) {
388 class = port_priv->version[mad_reg_req->
389 mgmt_class_version].class;
391 method = class->method_table[mgmt_class];
393 if (method_in_use(&method,
398 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
401 /* "New" vendor class range */
402 vendor = port_priv->version[mad_reg_req->
403 mgmt_class_version].vendor;
405 vclass = vendor_class_index(mgmt_class);
406 vendor_class = vendor->vendor_class[vclass];
408 if (is_vendor_method_in_use(
414 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
422 /* Add mad agent into port's agent list */
423 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
424 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
426 return &mad_agent_priv->agent;
429 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
432 ib_dereg_mr(mad_agent_priv->agent.mr);
434 kfree(mad_agent_priv);
438 EXPORT_SYMBOL(ib_register_mad_agent);
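/*
 * A minimal registration sketch for a kernel client (the handler names,
 * class and version values below are illustrative, not taken from this
 * file):
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class = IB_MGMT_CLASS_SUBN_ADM,
 *		.mgmt_class_version = 2,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req, 0,
 *				      my_send_handler, my_recv_handler,
 *				      my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *
 * The agent returned here must eventually be torn down with
 * ib_unregister_mad_agent(), which waits for all outstanding work to
 * finish before freeing it.
 */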
440 static inline int is_snooping_sends(int mad_snoop_flags)
442 return (mad_snoop_flags &
443 (/*IB_MAD_SNOOP_POSTED_SENDS |
444 IB_MAD_SNOOP_RMPP_SENDS |*/
445 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
446 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
449 static inline int is_snooping_recvs(int mad_snoop_flags)
451 return (mad_snoop_flags &
452 (IB_MAD_SNOOP_RECVS /*|
453 IB_MAD_SNOOP_RMPP_RECVS*/));
456 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
457 struct ib_mad_snoop_private *mad_snoop_priv)
459 struct ib_mad_snoop_private **new_snoop_table;
463 spin_lock_irqsave(&qp_info->snoop_lock, flags);
464 /* Check for empty slot in array. */
465 for (i = 0; i < qp_info->snoop_table_size; i++)
466 if (!qp_info->snoop_table[i])
469 if (i == qp_info->snoop_table_size) {
471 new_snoop_table = krealloc(qp_info->snoop_table,
472 sizeof mad_snoop_priv *
473 (qp_info->snoop_table_size + 1),
475 if (!new_snoop_table) {
480 qp_info->snoop_table = new_snoop_table;
481 qp_info->snoop_table_size++;
483 qp_info->snoop_table[i] = mad_snoop_priv;
484 atomic_inc(&qp_info->snoop_count);
486 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
490 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
492 enum ib_qp_type qp_type,
494 ib_mad_snoop_handler snoop_handler,
495 ib_mad_recv_handler recv_handler,
498 struct ib_mad_port_private *port_priv;
499 struct ib_mad_agent *ret;
500 struct ib_mad_snoop_private *mad_snoop_priv;
503 /* Validate parameters */
504 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
505 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
506 ret = ERR_PTR(-EINVAL);
509 qpn = get_spl_qp_index(qp_type);
511 ret = ERR_PTR(-EINVAL);
514 port_priv = ib_get_mad_port(device, port_num);
516 ret = ERR_PTR(-ENODEV);
519 /* Allocate structures */
520 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
521 if (!mad_snoop_priv) {
522 ret = ERR_PTR(-ENOMEM);
526 /* Now, fill in the various structures */
527 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
528 mad_snoop_priv->agent.device = device;
529 mad_snoop_priv->agent.recv_handler = recv_handler;
530 mad_snoop_priv->agent.snoop_handler = snoop_handler;
531 mad_snoop_priv->agent.context = context;
532 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
533 mad_snoop_priv->agent.port_num = port_num;
534 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
535 init_completion(&mad_snoop_priv->comp);
536 mad_snoop_priv->snoop_index = register_snoop_agent(
537 &port_priv->qp_info[qpn],
539 if (mad_snoop_priv->snoop_index < 0) {
540 ret = ERR_PTR(mad_snoop_priv->snoop_index);
544 atomic_set(&mad_snoop_priv->refcount, 1);
545 return &mad_snoop_priv->agent;
548 kfree(mad_snoop_priv);
552 EXPORT_SYMBOL(ib_register_mad_snoop);
554 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
556 if (atomic_dec_and_test(&mad_agent_priv->refcount))
557 complete(&mad_agent_priv->comp);
560 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
562 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
563 complete(&mad_snoop_priv->comp);
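/*
 * Both agent flavors share one lifetime scheme: the refcount starts at 1
 * when the agent is created, every in-flight use takes an extra reference,
 * and unregistration drops the initial reference and then sleeps on ->comp
 * until the final deref_*_agent() call completes it.
 */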
566 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
568 struct ib_mad_port_private *port_priv;
571 /* Note that we could still be handling received MADs */
574 * Canceling all sends results in dropping received response
575 * MADs, preventing us from queuing additional work
577 cancel_mads(mad_agent_priv);
578 port_priv = mad_agent_priv->qp_info->port_priv;
579 cancel_delayed_work(&mad_agent_priv->timed_work);
581 spin_lock_irqsave(&port_priv->reg_lock, flags);
582 remove_mad_reg_req(mad_agent_priv);
583 list_del(&mad_agent_priv->agent_list);
584 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
586 flush_workqueue(port_priv->wq);
587 ib_cancel_rmpp_recvs(mad_agent_priv);
589 deref_mad_agent(mad_agent_priv);
590 wait_for_completion(&mad_agent_priv->comp);
592 kfree(mad_agent_priv->reg_req);
593 ib_dereg_mr(mad_agent_priv->agent.mr);
594 kfree(mad_agent_priv);
597 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
599 struct ib_mad_qp_info *qp_info;
602 qp_info = mad_snoop_priv->qp_info;
603 spin_lock_irqsave(&qp_info->snoop_lock, flags);
604 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
605 atomic_dec(&qp_info->snoop_count);
606 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
608 deref_snoop_agent(mad_snoop_priv);
609 wait_for_completion(&mad_snoop_priv->comp);
611 kfree(mad_snoop_priv);
615 * ib_unregister_mad_agent - Unregisters a client from using MAD services
617 int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
619 struct ib_mad_agent_private *mad_agent_priv;
620 struct ib_mad_snoop_private *mad_snoop_priv;
622 /* If the TID is zero, the agent can only snoop. */
623 if (mad_agent->hi_tid) {
624 mad_agent_priv = container_of(mad_agent,
625 struct ib_mad_agent_private,
627 unregister_mad_agent(mad_agent_priv);
629 mad_snoop_priv = container_of(mad_agent,
630 struct ib_mad_snoop_private,
632 unregister_mad_snoop(mad_snoop_priv);
636 EXPORT_SYMBOL(ib_unregister_mad_agent);
638 static void dequeue_mad(struct ib_mad_list_head *mad_list)
640 struct ib_mad_queue *mad_queue;
643 BUG_ON(!mad_list->mad_queue);
644 mad_queue = mad_list->mad_queue;
645 spin_lock_irqsave(&mad_queue->lock, flags);
646 list_del(&mad_list->list);
648 spin_unlock_irqrestore(&mad_queue->lock, flags);
651 static void snoop_send(struct ib_mad_qp_info *qp_info,
652 struct ib_mad_send_buf *send_buf,
653 struct ib_mad_send_wc *mad_send_wc,
656 struct ib_mad_snoop_private *mad_snoop_priv;
660 spin_lock_irqsave(&qp_info->snoop_lock, flags);
661 for (i = 0; i < qp_info->snoop_table_size; i++) {
662 mad_snoop_priv = qp_info->snoop_table[i];
663 if (!mad_snoop_priv ||
664 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
667 atomic_inc(&mad_snoop_priv->refcount);
668 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
669 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
670 send_buf, mad_send_wc);
671 deref_snoop_agent(mad_snoop_priv);
672 spin_lock_irqsave(&qp_info->snoop_lock, flags);
674 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
677 static void snoop_recv(struct ib_mad_qp_info *qp_info,
678 struct ib_mad_recv_wc *mad_recv_wc,
681 struct ib_mad_snoop_private *mad_snoop_priv;
685 spin_lock_irqsave(&qp_info->snoop_lock, flags);
686 for (i = 0; i < qp_info->snoop_table_size; i++) {
687 mad_snoop_priv = qp_info->snoop_table[i];
688 if (!mad_snoop_priv ||
689 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
692 atomic_inc(&mad_snoop_priv->refcount);
693 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
694 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
696 deref_snoop_agent(mad_snoop_priv);
697 spin_lock_irqsave(&qp_info->snoop_lock, flags);
699 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
702 static void build_smp_wc(struct ib_qp *qp,
703 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
706 memset(wc, 0, sizeof *wc);
708 wc->status = IB_WC_SUCCESS;
709 wc->opcode = IB_WC_RECV;
710 wc->pkey_index = pkey_index;
711 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
716 wc->dlid_path_bits = 0;
717 wc->port_num = port_num;
720 static size_t mad_priv_size(const struct ib_mad_private *mp)
722 return sizeof(struct ib_mad_private) + mp->mad_size;
725 static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
727 size_t size = sizeof(struct ib_mad_private) + mad_size;
728 struct ib_mad_private *ret = kzalloc(size, flags);
731 ret->mad_size = mad_size;
736 static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
738 return rdma_max_mad_size(port_priv->device, port_priv->port_num);
741 static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
743 return sizeof(struct ib_grh) + mp->mad_size;
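/*
 * An ib_mad_private buffer holds the private header, the GRH and then
 * mad_size bytes of MAD; mad_priv_size() covers the whole allocation while
 * mad_priv_dma_size() covers only the GRH + MAD portion that is actually
 * DMA mapped for receives.
 */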
747 * Return 0 if SMP is to be sent
748 * Return 1 if SMP was consumed locally (whether or not solicited)
749 * Return < 0 if error
751 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
752 struct ib_mad_send_wr_private *mad_send_wr)
755 struct ib_smp *smp = mad_send_wr->send_buf.mad;
756 struct opa_smp *opa_smp = (struct opa_smp *)smp;
758 struct ib_mad_local_private *local;
759 struct ib_mad_private *mad_priv;
760 struct ib_mad_port_private *port_priv;
761 struct ib_mad_agent_private *recv_mad_agent = NULL;
762 struct ib_device *device = mad_agent_priv->agent.device;
765 struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
766 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
767 u16 out_mad_pkey_index = 0;
769 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
770 mad_agent_priv->qp_info->port_priv->port_num);
772 if (device->node_type == RDMA_NODE_IB_SWITCH &&
773 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
774 port_num = send_wr->wr.ud.port_num;
776 port_num = mad_agent_priv->agent.port_num;
779 * Directed route handling starts if the initial LID routed part of
780 * a request or the ending LID routed part of a response is empty.
781 * If we are at the start of the LID routed part, don't update the
782 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
784 if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
787 if ((opa_get_smp_direction(opa_smp)
788 ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
789 OPA_LID_PERMISSIVE &&
790 opa_smi_handle_dr_smp_send(opa_smp, device->node_type,
791 port_num) == IB_SMI_DISCARD) {
793 dev_err(&device->dev, "OPA Invalid directed route\n");
796 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
797 if (opa_drslid != OPA_LID_PERMISSIVE &&
798 opa_drslid & 0xffff0000) {
800 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
804 drslid = (u16)(opa_drslid & 0x0000ffff);
806 /* Check to post send on QP or process locally */
807 if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
808 opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
811 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
813 smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
816 dev_err(&device->dev, "Invalid directed route\n");
819 drslid = be16_to_cpu(smp->dr_slid);
821 /* Check to post send on QP or process locally */
822 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
823 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
827 local = kmalloc(sizeof *local, GFP_ATOMIC);
830 dev_err(&device->dev, "No memory for ib_mad_local_private\n");
833 local->mad_priv = NULL;
834 local->recv_mad_agent = NULL;
835 mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
838 dev_err(&device->dev, "No memory for local response MAD\n");
843 build_smp_wc(mad_agent_priv->agent.qp,
844 send_wr->wr_id, drslid,
845 send_wr->wr.ud.pkey_index,
846 send_wr->wr.ud.port_num, &mad_wc);
848 if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
849 mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
850 + mad_send_wr->send_buf.data_len
851 + sizeof(struct ib_grh);
854 /* No GRH for DR SMP */
855 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
856 (const struct ib_mad_hdr *)smp, mad_size,
857 (struct ib_mad_hdr *)mad_priv->mad,
858 &mad_size, &out_mad_pkey_index);
861 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
862 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
863 mad_agent_priv->agent.recv_handler) {
864 local->mad_priv = mad_priv;
865 local->recv_mad_agent = mad_agent_priv;
867 * Reference MAD agent until receive
868 * side of local completion handled
870 atomic_inc(&mad_agent_priv->refcount);
874 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
877 case IB_MAD_RESULT_SUCCESS:
878 /* Treat like an incoming receive MAD */
879 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
880 mad_agent_priv->agent.port_num);
882 memcpy(mad_priv->mad, smp, mad_priv->mad_size);
883 recv_mad_agent = find_mad_agent(port_priv,
884 (const struct ib_mad_hdr *)mad_priv->mad);
886 if (!port_priv || !recv_mad_agent) {
888 * No receiving agent so drop packet and
889 * generate send completion.
894 local->mad_priv = mad_priv;
895 local->recv_mad_agent = recv_mad_agent;
904 local->mad_send_wr = mad_send_wr;
906 local->mad_send_wr->send_wr.wr.ud.pkey_index = out_mad_pkey_index;
907 local->return_wc_byte_len = mad_size;
909 /* Reference MAD agent until send side of local completion handled */
910 atomic_inc(&mad_agent_priv->refcount);
911 /* Queue local completion to local list */
912 spin_lock_irqsave(&mad_agent_priv->lock, flags);
913 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
914 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
915 queue_work(mad_agent_priv->qp_info->port_priv->wq,
916 &mad_agent_priv->local_work);
922 static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
926 seg_size = mad_size - hdr_len;
927 if (data_len && seg_size) {
928 pad = seg_size - data_len % seg_size;
929 return pad == seg_size ? 0 : pad;
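/*
 * Worked example: for a 256 byte IB MAD with hdr_len 56 (an SA MAD),
 * seg_size is 200, so a 300 byte payload needs 100 bytes of padding to end
 * exactly on a segment boundary (300 % 200 = 100, pad = 200 - 100 = 100).
 */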
934 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
936 struct ib_rmpp_segment *s, *t;
938 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
944 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
945 size_t mad_size, gfp_t gfp_mask)
947 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
948 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
949 struct ib_rmpp_segment *seg = NULL;
950 int left, seg_size, pad;
952 send_buf->seg_size = mad_size - send_buf->hdr_len;
953 send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
954 seg_size = send_buf->seg_size;
957 /* Allocate data segments. */
958 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
959 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
961 dev_err(&send_buf->mad_agent->device->dev,
962 "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
963 sizeof (*seg) + seg_size, gfp_mask);
964 free_send_rmpp_list(send_wr);
967 seg->num = ++send_buf->seg_count;
968 list_add_tail(&seg->list, &send_wr->rmpp_list);
971 /* Zero any padding */
973 memset(seg->data + seg_size - pad, 0, pad);
975 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
977 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
978 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
980 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
981 struct ib_rmpp_segment, list);
982 send_wr->last_ack_seg = send_wr->cur_seg;
986 int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
988 return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
990 EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
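/*
 * RMPP segmentation and reassembly are handled in the kernel only when the
 * agent registered a non-zero rmpp_version without IB_MAD_USER_RMPP;
 * otherwise RMPP MADs are passed through unmodified and the caller owns
 * the protocol.
 */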
992 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
993 u32 remote_qpn, u16 pkey_index,
995 int hdr_len, int data_len,
999 struct ib_mad_agent_private *mad_agent_priv;
1000 struct ib_mad_send_wr_private *mad_send_wr;
1001 int pad, message_size, ret, size;
1006 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
1009 opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);
1011 if (opa && base_version == OPA_MGMT_BASE_VERSION)
1012 mad_size = sizeof(struct opa_mad);
1014 mad_size = sizeof(struct ib_mad);
1016 pad = get_pad_size(hdr_len, data_len, mad_size);
1017 message_size = hdr_len + data_len + pad;
1019 if (ib_mad_kernel_rmpp_agent(mad_agent)) {
1020 if (!rmpp_active && message_size > mad_size)
1021 return ERR_PTR(-EINVAL);
1023 if (rmpp_active || message_size > mad_size)
1024 return ERR_PTR(-EINVAL);
1026 size = rmpp_active ? hdr_len : mad_size;
1027 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
1029 return ERR_PTR(-ENOMEM);
1031 mad_send_wr = buf + size;
1032 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
1033 mad_send_wr->send_buf.mad = buf;
1034 mad_send_wr->send_buf.hdr_len = hdr_len;
1035 mad_send_wr->send_buf.data_len = data_len;
1036 mad_send_wr->pad = pad;
1038 mad_send_wr->mad_agent_priv = mad_agent_priv;
1039 mad_send_wr->sg_list[0].length = hdr_len;
1040 mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
1042 /* OPA MADs don't have to be the full 2048 bytes */
1043 if (opa && base_version == OPA_MGMT_BASE_VERSION &&
1044 data_len < mad_size - hdr_len)
1045 mad_send_wr->sg_list[1].length = data_len;
1047 mad_send_wr->sg_list[1].length = mad_size - hdr_len;
1049 mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
1051 mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
1052 mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
1053 mad_send_wr->send_wr.num_sge = 2;
1054 mad_send_wr->send_wr.opcode = IB_WR_SEND;
1055 mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
1056 mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
1057 mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
1058 mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
1061 ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
1064 return ERR_PTR(ret);
1068 mad_send_wr->send_buf.mad_agent = mad_agent;
1069 atomic_inc(&mad_agent_priv->refcount);
1070 return &mad_send_wr->send_buf;
1072 EXPORT_SYMBOL(ib_create_send_mad);
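/*
 * ib_create_send_mad() makes a single allocation: the MAD (or just its
 * header for an RMPP send) followed by the ib_mad_send_wr_private at
 * buf + size.  RMPP payload lives in the separately allocated segments on
 * rmpp_list, which is why the data SGE is mapped from ib_get_payload() at
 * post time: it points into this buffer for ordinary sends and into the
 * current segment for RMPP sends.
 */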
1074 int ib_get_mad_data_offset(u8 mgmt_class)
1076 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
1077 return IB_MGMT_SA_HDR;
1078 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1079 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1080 (mgmt_class == IB_MGMT_CLASS_BIS))
1081 return IB_MGMT_DEVICE_HDR;
1082 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1083 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
1084 return IB_MGMT_VENDOR_HDR;
1086 return IB_MGMT_MAD_HDR;
1088 EXPORT_SYMBOL(ib_get_mad_data_offset);
1090 int ib_is_mad_class_rmpp(u8 mgmt_class)
1092 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
1093 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1094 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1095 (mgmt_class == IB_MGMT_CLASS_BIS) ||
1096 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1097 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
1101 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
1103 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
1105 struct ib_mad_send_wr_private *mad_send_wr;
1106 struct list_head *list;
1108 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1110 list = &mad_send_wr->cur_seg->list;
1112 if (mad_send_wr->cur_seg->num < seg_num) {
1113 list_for_each_entry(mad_send_wr->cur_seg, list, list)
1114 if (mad_send_wr->cur_seg->num == seg_num)
1116 } else if (mad_send_wr->cur_seg->num > seg_num) {
1117 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
1118 if (mad_send_wr->cur_seg->num == seg_num)
1121 return mad_send_wr->cur_seg->data;
1123 EXPORT_SYMBOL(ib_get_rmpp_segment);
1125 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1127 if (mad_send_wr->send_buf.seg_count)
1128 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1129 mad_send_wr->seg_num);
1131 return mad_send_wr->send_buf.mad +
1132 mad_send_wr->send_buf.hdr_len;
1135 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1137 struct ib_mad_agent_private *mad_agent_priv;
1138 struct ib_mad_send_wr_private *mad_send_wr;
1140 mad_agent_priv = container_of(send_buf->mad_agent,
1141 struct ib_mad_agent_private, agent);
1142 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1145 free_send_rmpp_list(mad_send_wr);
1146 kfree(send_buf->mad);
1147 deref_mad_agent(mad_agent_priv);
1149 EXPORT_SYMBOL(ib_free_send_mad);
1151 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1153 struct ib_mad_qp_info *qp_info;
1154 struct list_head *list;
1155 struct ib_send_wr *bad_send_wr;
1156 struct ib_mad_agent *mad_agent;
1158 unsigned long flags;
1161 /* Set WR ID to find mad_send_wr upon completion */
1162 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1163 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
1164 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1166 mad_agent = mad_send_wr->send_buf.mad_agent;
1167 sge = mad_send_wr->sg_list;
1168 sge[0].addr = ib_dma_map_single(mad_agent->device,
1169 mad_send_wr->send_buf.mad,
1172 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1175 mad_send_wr->header_mapping = sge[0].addr;
1177 sge[1].addr = ib_dma_map_single(mad_agent->device,
1178 ib_get_payload(mad_send_wr),
1181 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1182 ib_dma_unmap_single(mad_agent->device,
1183 mad_send_wr->header_mapping,
1184 sge[0].length, DMA_TO_DEVICE);
1187 mad_send_wr->payload_mapping = sge[1].addr;
1189 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1190 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1191 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
1193 list = &qp_info->send_queue.list;
1196 list = &qp_info->overflow_list;
1200 qp_info->send_queue.count++;
1201 list_add_tail(&mad_send_wr->mad_list.list, list);
1203 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1205 ib_dma_unmap_single(mad_agent->device,
1206 mad_send_wr->header_mapping,
1207 sge[0].length, DMA_TO_DEVICE);
1208 ib_dma_unmap_single(mad_agent->device,
1209 mad_send_wr->payload_mapping,
1210 sge[1].length, DMA_TO_DEVICE);
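/*
 * ib_send_mad() implements simple flow control on the send queue: while
 * fewer than max_active work requests are outstanding the WR is posted to
 * the QP immediately, otherwise it is parked on qp_info->overflow_list and
 * posted later as completions free up slots.
 */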
1216 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1217 * with the registered client
1219 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1220 struct ib_mad_send_buf **bad_send_buf)
1222 struct ib_mad_agent_private *mad_agent_priv;
1223 struct ib_mad_send_buf *next_send_buf;
1224 struct ib_mad_send_wr_private *mad_send_wr;
1225 unsigned long flags;
1228 /* Walk list of send WRs and post each on send list */
1229 for (; send_buf; send_buf = next_send_buf) {
1231 mad_send_wr = container_of(send_buf,
1232 struct ib_mad_send_wr_private,
1234 mad_agent_priv = mad_send_wr->mad_agent_priv;
1236 if (!send_buf->mad_agent->send_handler ||
1237 (send_buf->timeout_ms &&
1238 !send_buf->mad_agent->recv_handler)) {
1243 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1244 if (mad_agent_priv->agent.rmpp_version) {
1251 * Save pointer to next work request to post in case the
1252 * current one completes, and the user modifies the work
1253 * request associated with the completion
1255 next_send_buf = send_buf->next;
1256 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1258 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1259 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1260 ret = handle_outgoing_dr_smp(mad_agent_priv,
1262 if (ret < 0) /* error */
1264 else if (ret == 1) /* locally consumed */
1268 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1269 /* Timeout will be updated after send completes */
1270 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1271 mad_send_wr->max_retries = send_buf->retries;
1272 mad_send_wr->retries_left = send_buf->retries;
1273 send_buf->retries = 0;
1274 /* Reference for work request to QP + response */
1275 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1276 mad_send_wr->status = IB_WC_SUCCESS;
1278 /* Reference MAD agent until send completes */
1279 atomic_inc(&mad_agent_priv->refcount);
1280 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1281 list_add_tail(&mad_send_wr->agent_list,
1282 &mad_agent_priv->send_list);
1283 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1285 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1286 ret = ib_send_rmpp_mad(mad_send_wr);
1287 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1288 ret = ib_send_mad(mad_send_wr);
1290 ret = ib_send_mad(mad_send_wr);
1292 /* Fail send request */
1293 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1294 list_del(&mad_send_wr->agent_list);
1295 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1296 atomic_dec(&mad_agent_priv->refcount);
1303 *bad_send_buf = send_buf;
1306 EXPORT_SYMBOL(ib_post_send_mad);
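/*
 * Each buffer posted above pins its agent with an extra reference and
 * starts with a work-request refcount of 1, or 2 when a response is
 * expected (timeout_ms != 0).  The request stays on the agent's send_list
 * so that a response racing with the send completion can still be matched
 * by ib_find_send_mad().
 */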
1309 * ib_free_recv_mad - Returns data buffers used to receive
1310 * a MAD to the access layer
1312 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1314 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1315 struct ib_mad_private_header *mad_priv_hdr;
1316 struct ib_mad_private *priv;
1317 struct list_head free_list;
1319 INIT_LIST_HEAD(&free_list);
1320 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1322 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1324 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1326 mad_priv_hdr = container_of(mad_recv_wc,
1327 struct ib_mad_private_header,
1329 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1334 EXPORT_SYMBOL(ib_free_recv_mad);
1336 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1338 ib_mad_send_handler send_handler,
1339 ib_mad_recv_handler recv_handler,
1342 return ERR_PTR(-EINVAL); /* XXX: for now */
1344 EXPORT_SYMBOL(ib_redirect_mad_qp);
1346 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1349 dev_err(&mad_agent->device->dev,
1350 "ib_process_mad_wc() not implemented yet\n");
1353 EXPORT_SYMBOL(ib_process_mad_wc);
1355 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1356 struct ib_mad_reg_req *mad_reg_req)
1360 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1361 if ((*method)->agent[i]) {
1362 pr_err("Method %d already in use\n", i);
1369 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1371 /* Allocate management method table */
1372 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1374 pr_err("No memory for ib_mad_mgmt_method_table\n");
1382 * Check to see if there are any methods still in use
1384 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1388 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1389 if (method->agent[i])
1395 * Check to see if there are any method tables for this class still in use
1397 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1401 for (i = 0; i < MAX_MGMT_CLASS; i++)
1402 if (class->method_table[i])
1407 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1411 for (i = 0; i < MAX_MGMT_OUI; i++)
1412 if (vendor_class->method_table[i])
1417 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1422 for (i = 0; i < MAX_MGMT_OUI; i++)
1423 /* Is there a matching OUI for this vendor class? */
1424 if (!memcmp(vendor_class->oui[i], oui, 3))
1430 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1434 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1435 if (vendor->vendor_class[i])
1441 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1442 struct ib_mad_agent_private *agent)
1446 /* Remove any methods for this mad agent */
1447 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1448 if (method->agent[i] == agent) {
1449 method->agent[i] = NULL;
1454 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1455 struct ib_mad_agent_private *agent_priv,
1458 struct ib_mad_port_private *port_priv;
1459 struct ib_mad_mgmt_class_table **class;
1460 struct ib_mad_mgmt_method_table **method;
1463 port_priv = agent_priv->qp_info->port_priv;
1464 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1466 /* Allocate management class table for "new" class version */
1467 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1469 dev_err(&agent_priv->agent.device->dev,
1470 "No memory for ib_mad_mgmt_class_table\n");
1475 /* Allocate method table for this management class */
1476 method = &(*class)->method_table[mgmt_class];
1477 if ((ret = allocate_method_table(method)))
1480 method = &(*class)->method_table[mgmt_class];
1482 /* Allocate method table for this management class */
1483 if ((ret = allocate_method_table(method)))
1488 /* Now, make sure methods are not already in use */
1489 if (method_in_use(method, mad_reg_req))
1492 /* Finally, add in methods being registered */
1493 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1494 (*method)->agent[i] = agent_priv;
1499 /* Remove any methods for this mad agent */
1500 remove_methods_mad_agent(*method, agent_priv);
1501 /* Now, check to see if there are any methods in use */
1502 if (!check_method_table(*method)) {
1503 /* If not, release management method table */
1516 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1517 struct ib_mad_agent_private *agent_priv)
1519 struct ib_mad_port_private *port_priv;
1520 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1521 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1522 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1523 struct ib_mad_mgmt_method_table **method;
1524 int i, ret = -ENOMEM;
1527 /* "New" vendor (with OUI) class */
1528 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1529 port_priv = agent_priv->qp_info->port_priv;
1530 vendor_table = &port_priv->version[
1531 mad_reg_req->mgmt_class_version].vendor;
1532 if (!*vendor_table) {
1533 /* Allocate mgmt vendor class table for "new" class version */
1534 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1536 dev_err(&agent_priv->agent.device->dev,
1537 "No memory for ib_mad_mgmt_vendor_class_table\n");
1541 *vendor_table = vendor;
1543 if (!(*vendor_table)->vendor_class[vclass]) {
1544 /* Allocate table for this management vendor class */
1545 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1546 if (!vendor_class) {
1547 dev_err(&agent_priv->agent.device->dev,
1548 "No memory for ib_mad_mgmt_vendor_class\n");
1552 (*vendor_table)->vendor_class[vclass] = vendor_class;
1554 for (i = 0; i < MAX_MGMT_OUI; i++) {
1555 /* Is there a matching OUI for this vendor class? */
1556 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1557 mad_reg_req->oui, 3)) {
1558 method = &(*vendor_table)->vendor_class[
1559 vclass]->method_table[i];
1564 for (i = 0; i < MAX_MGMT_OUI; i++) {
1565 /* OUI slot available? */
1566 if (!is_vendor_oui((*vendor_table)->vendor_class[
1568 method = &(*vendor_table)->vendor_class[
1569 vclass]->method_table[i];
1571 /* Allocate method table for this OUI */
1572 if ((ret = allocate_method_table(method)))
1574 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1575 mad_reg_req->oui, 3);
1579 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1583 /* Now, make sure methods are not already in use */
1584 if (method_in_use(method, mad_reg_req))
1587 /* Finally, add in methods being registered */
1588 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1589 (*method)->agent[i] = agent_priv;
1594 /* Remove any methods for this mad agent */
1595 remove_methods_mad_agent(*method, agent_priv);
1596 /* Now, check to see if there are any methods in use */
1597 if (!check_method_table(*method)) {
1598 /* If not, release management method table */
1605 (*vendor_table)->vendor_class[vclass] = NULL;
1606 kfree(vendor_class);
1610 *vendor_table = NULL;
1617 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1619 struct ib_mad_port_private *port_priv;
1620 struct ib_mad_mgmt_class_table *class;
1621 struct ib_mad_mgmt_method_table *method;
1622 struct ib_mad_mgmt_vendor_class_table *vendor;
1623 struct ib_mad_mgmt_vendor_class *vendor_class;
1628 * Was a MAD registration request supplied
1629 * with the original registration?
1631 if (!agent_priv->reg_req) {
1635 port_priv = agent_priv->qp_info->port_priv;
1636 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1637 class = port_priv->version[
1638 agent_priv->reg_req->mgmt_class_version].class;
1642 method = class->method_table[mgmt_class];
1644 /* Remove any methods for this mad agent */
1645 remove_methods_mad_agent(method, agent_priv);
1646 /* Now, check to see if there are any methods still in use */
1647 if (!check_method_table(method)) {
1648 /* If not, release management method table */
1650 class->method_table[mgmt_class] = NULL;
1651 /* Any management classes left? */
1652 if (!check_class_table(class)) {
1653 /* If not, release management class table */
1656 agent_priv->reg_req->
1657 mgmt_class_version].class = NULL;
1663 if (!is_vendor_class(mgmt_class))
1666 /* normalize mgmt_class to vendor range 2 */
1667 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1668 vendor = port_priv->version[
1669 agent_priv->reg_req->mgmt_class_version].vendor;
1674 vendor_class = vendor->vendor_class[mgmt_class];
1676 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1679 method = vendor_class->method_table[index];
1681 /* Remove any methods for this mad agent */
1682 remove_methods_mad_agent(method, agent_priv);
1684 * Now, check to see if there are
1685 * any methods still in use
1687 if (!check_method_table(method)) {
1688 /* If not, release management method table */
1690 vendor_class->method_table[index] = NULL;
1691 memset(vendor_class->oui[index], 0, 3);
1692 /* Any OUIs left? */
1693 if (!check_vendor_class(vendor_class)) {
1694 /* If not, release vendor class table */
1695 kfree(vendor_class);
1696 vendor->vendor_class[mgmt_class] = NULL;
1697 /* Any other vendor classes left? */
1698 if (!check_vendor_table(vendor)) {
1701 agent_priv->reg_req->
1702 mgmt_class_version].
1714 static struct ib_mad_agent_private *
1715 find_mad_agent(struct ib_mad_port_private *port_priv,
1716 const struct ib_mad_hdr *mad_hdr)
1718 struct ib_mad_agent_private *mad_agent = NULL;
1719 unsigned long flags;
1721 spin_lock_irqsave(&port_priv->reg_lock, flags);
1722 if (ib_response_mad(mad_hdr)) {
1724 struct ib_mad_agent_private *entry;
1727 * Routing is based on high 32 bits of transaction ID
1730 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1731 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1732 if (entry->agent.hi_tid == hi_tid) {
1738 struct ib_mad_mgmt_class_table *class;
1739 struct ib_mad_mgmt_method_table *method;
1740 struct ib_mad_mgmt_vendor_class_table *vendor;
1741 struct ib_mad_mgmt_vendor_class *vendor_class;
1742 const struct ib_vendor_mad *vendor_mad;
1746 * Routing is based on version, class, and method
1747 * For "newer" vendor MADs, also based on OUI
1749 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1751 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1752 class = port_priv->version[
1753 mad_hdr->class_version].class;
1756 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1757 IB_MGMT_MAX_METHODS)
1759 method = class->method_table[convert_mgmt_class(
1760 mad_hdr->mgmt_class)];
1762 mad_agent = method->agent[mad_hdr->method &
1763 ~IB_MGMT_METHOD_RESP];
1765 vendor = port_priv->version[
1766 mad_hdr->class_version].vendor;
1769 vendor_class = vendor->vendor_class[vendor_class_index(
1770 mad_hdr->mgmt_class)];
1773 /* Find matching OUI */
1774 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1775 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1778 method = vendor_class->method_table[index];
1780 mad_agent = method->agent[mad_hdr->method &
1781 ~IB_MGMT_METHOD_RESP];
1787 if (mad_agent->agent.recv_handler)
1788 atomic_inc(&mad_agent->refcount);
1790 dev_notice(&port_priv->device->dev,
1791 "No receive handler for client %p on port %d\n",
1792 &mad_agent->agent, port_priv->port_num);
1797 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
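/*
 * Dispatch rule used above: response MADs are routed by the high 32 bits
 * of the TID straight back to the originating agent, while unsolicited
 * MADs are routed by class version, management class and method, with an
 * additional OUI lookup for vendor classes in the 0x30-0x4f range.
 */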
1802 static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1803 const struct ib_mad_qp_info *qp_info,
1807 u32 qp_num = qp_info->qp->qp_num;
1809 /* Make sure MAD base version is understood */
1810 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1811 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1812 pr_err("MAD received with unsupported base version %d %s\n",
1813 mad_hdr->base_version, opa ? "(opa)" : "");
1817 /* Filter SMI packets sent to other than QP0 */
1818 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1819 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1823 /* Filter GSI packets sent to QP0 */
1832 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1833 const struct ib_mad_hdr *mad_hdr)
1835 struct ib_rmpp_mad *rmpp_mad;
1837 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1838 return !mad_agent_priv->agent.rmpp_version ||
1839 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1840 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1841 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1842 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1845 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1846 const struct ib_mad_recv_wc *rwc)
1848 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1849 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1852 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1853 const struct ib_mad_send_wr_private *wr,
1854 const struct ib_mad_recv_wc *rwc )
1856 struct ib_ah_attr attr;
1857 u8 send_resp, rcv_resp;
1859 struct ib_device *device = mad_agent_priv->agent.device;
1860 u8 port_num = mad_agent_priv->agent.port_num;
1863 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1864 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1866 if (send_resp == rcv_resp)
1867 /* both requests, or both responses. GIDs different */
1870 if (ib_query_ah(wr->send_buf.ah, &attr))
1871 /* Assume not equal, to avoid false positives. */
1874 if (!!(attr.ah_flags & IB_AH_GRH) !=
1875 !!(rwc->wc->wc_flags & IB_WC_GRH))
1876 /* one has GID, other does not. Assume different */
1879 if (!send_resp && rcv_resp) {
1880 /* wr is the request and rwc is its response. */
1881 if (!(attr.ah_flags & IB_AH_GRH)) {
1882 if (ib_get_cached_lmc(device, port_num, &lmc))
1884 return (!lmc || !((attr.src_path_bits ^
1885 rwc->wc->dlid_path_bits) &
1888 if (ib_get_cached_gid(device, port_num,
1889 attr.grh.sgid_index, &sgid))
1891 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1896 if (!(attr.ah_flags & IB_AH_GRH))
1897 return attr.dlid == rwc->wc->slid;
1899 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1903 static inline int is_direct(u8 class)
1905 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1908 struct ib_mad_send_wr_private*
1909 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1910 const struct ib_mad_recv_wc *wc)
1912 struct ib_mad_send_wr_private *wr;
1913 const struct ib_mad_hdr *mad_hdr;
1915 mad_hdr = &wc->recv_buf.mad->mad_hdr;
1917 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1918 if ((wr->tid == mad_hdr->tid) &&
1919 rcv_has_same_class(wr, wc) &&
1921 * Don't check GID for direct routed MADs.
1922 * These might have permissive LIDs.
1924 (is_direct(mad_hdr->mgmt_class) ||
1925 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1926 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1930 * It's possible to receive the response before we've
1931 * been notified that the send has completed
1933 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1934 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1935 wr->tid == mad_hdr->tid &&
1937 rcv_has_same_class(wr, wc) &&
1939 * Don't check GID for direct routed MADs.
1940 * These might have permissive LIDs.
1942 (is_direct(mad_hdr->mgmt_class) ||
1943 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1944 /* Verify request has not been canceled */
1945 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1950 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1952 mad_send_wr->timeout = 0;
1953 if (mad_send_wr->refcount == 1)
1954 list_move_tail(&mad_send_wr->agent_list,
1955 &mad_send_wr->mad_agent_priv->done_list);
1958 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1959 struct ib_mad_recv_wc *mad_recv_wc)
1961 struct ib_mad_send_wr_private *mad_send_wr;
1962 struct ib_mad_send_wc mad_send_wc;
1963 unsigned long flags;
1965 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1966 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1967 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1968 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1971 deref_mad_agent(mad_agent_priv);
1976 /* Complete corresponding request */
1977 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1978 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1979 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1981 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1982 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1983 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1984 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1985 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
1986 /* user RMPP is in effect and this is an active RMPP MAD */
1989 mad_recv_wc->wc->wr_id = 0;
1990 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1992 atomic_dec(&mad_agent_priv->refcount);
1994 /* not user RMPP: revert to normal behavior and drop the MAD */
1996 ib_free_recv_mad(mad_recv_wc);
1997 deref_mad_agent(mad_agent_priv);
2001 ib_mark_mad_done(mad_send_wr);
2002 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2004 /* Defined behavior is to complete response before request */
2005 mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
2006 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
2008 atomic_dec(&mad_agent_priv->refcount);
2010 mad_send_wc.status = IB_WC_SUCCESS;
2011 mad_send_wc.vendor_err = 0;
2012 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2013 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2016 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
2018 deref_mad_agent(mad_agent_priv);
2022 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2023 const struct ib_mad_qp_info *qp_info,
2024 const struct ib_wc *wc,
2026 struct ib_mad_private *recv,
2027 struct ib_mad_private *response)
2029 enum smi_forward_action retsmi;
2030 struct ib_smp *smp = (struct ib_smp *)recv->mad;
2032 if (smi_handle_dr_smp_recv(smp,
2033 port_priv->device->node_type,
2035 port_priv->device->phys_port_cnt) ==
2037 return IB_SMI_DISCARD;
2039 retsmi = smi_check_forward_dr_smp(smp);
2040 if (retsmi == IB_SMI_LOCAL)
2041 return IB_SMI_HANDLE;
2043 if (retsmi == IB_SMI_SEND) { /* don't forward */
2044 if (smi_handle_dr_smp_send(smp,
2045 port_priv->device->node_type,
2046 port_num) == IB_SMI_DISCARD)
2047 return IB_SMI_DISCARD;
2049 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
2050 return IB_SMI_DISCARD;
2051 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
2052 /* forward case for switches */
2053 memcpy(response, recv, mad_priv_size(response));
2054 response->header.recv_wc.wc = &response->header.wc;
2055 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2056 response->header.recv_wc.recv_buf.grh = &response->grh;
2058 agent_send_response((const struct ib_mad_hdr *)response->mad,
2061 smi_get_fwd_port(smp),
2062 qp_info->qp->qp_num,
2066 return IB_SMI_DISCARD;
2068 return IB_SMI_HANDLE;
2071 static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2072 struct ib_mad_private *response,
2073 size_t *resp_len, bool opa)
2075 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2076 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2078 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2079 recv_hdr->method == IB_MGMT_METHOD_SET) {
2080 memcpy(response, recv, mad_priv_size(response));
2081 response->header.recv_wc.wc = &response->header.wc;
2082 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2083 response->header.recv_wc.recv_buf.grh = &response->grh;
2084 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2085 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2086 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2087 resp_hdr->status |= IB_SMP_DIRECTION;
2089 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2090 if (recv_hdr->mgmt_class ==
2091 IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2092 recv_hdr->mgmt_class ==
2093 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2094 *resp_len = opa_get_smp_header_size(
2095 (struct opa_smp *)recv->mad);
2097 *resp_len = sizeof(struct ib_mad_hdr);
static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_wc *wc, int port_num,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct opa_smp *smp = (struct opa_smp *)recv->mad;

	if (opa_smi_handle_dr_smp_recv(smp,
				       port_priv->device->node_type,
				       port_num,
				       port_priv->device->phys_port_cnt) ==
				       IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = opa_smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (opa_smi_handle_dr_smp_send(smp,
					       port_priv->device->node_type,
					       port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (opa_smi_check_local_smp(smp, port_priv->device) ==
		    IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

	} else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.opa_mad =
				(struct opa_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    opa_smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    recv->header.wc.byte_len,
				    true);

		return IB_SMI_DISCARD;
	}

	return IB_SMI_HANDLE;
}
static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   int port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{
	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
	    mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
				      response);

	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}
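
/*
 * Receive completion path: unmap the buffer, build the ib_mad_recv_wc,
 * validate the MAD, let SMI/OPA-SMI processing and then the device driver
 * (process_mad) act on it first, and finally hand it to the matching agent
 * or answer it as an unmatched request. A receive buffer is reposted on the
 * way out.
 */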
static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;
	size_t mad_size;
	u16 resp_mad_pkey_index = 0;
	bool opa;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
			       qp_info->port_priv->port_num);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;

	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
	} else {
		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
	}

	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
		goto out;

	mad_size = recv->mad_size;
	response = alloc_mad_private(mad_size, GFP_KERNEL);
	if (!response) {
		dev_err(&port_priv->device->dev,
			"ib_mad_recv_done_handler no memory for response buffer\n");
		goto out;
	}

	if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
			       response, opa)
		    == IB_SMI_DISCARD)
			goto out;
	}

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     (const struct ib_mad_hdr *)recv->mad,
						     recv->mad_size,
						     (struct ib_mad_hdr *)response->mad,
						     &mad_size, &resp_mad_pkey_index);

		if (opa)
			wc->pkey_index = resp_mad_pkey_index;

		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    mad_size, opa);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, mad_size, opa);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
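
/*
 * Sends that are waiting for a response sit on the agent's wait_list,
 * ordered by expiry time. The agent's delayed work item (timed_work) is
 * always armed for the earliest timeout; adjust_timeout() below cancels it
 * when the list empties and pulls it in when the head entry expires sooner
 * than currently scheduled.
 */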
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						      struct ib_mad_send_wr_private,
						      agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				 &mad_agent_priv->timed_work, delay);
}

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
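
/*
 * Reference counting on a send: a request that expects a response holds an
 * extra reference (mad_send_wr->refcount) until the response arrives or the
 * request times out, so the completion handler below only reports back to
 * the client once the last reference is dropped; until then the request is
 * parked on the wait_list via wait_for_response().
 */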
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
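
/*
 * The hardware send queue is limited to max_active outstanding work
 * requests; any excess is parked on qp_info->overflow_list. When a send
 * completes, the handler below moves the oldest overflow entry onto the
 * send queue and posts it.
 */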
static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
				     struct ib_wc *wc)
{
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					      struct ib_mad_send_wr_private,
					      mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
				   &bad_send_wr);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
static void mad_error_handler(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/* Determine if failure was a send or receive */
	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
	qp_info = mad_list->mad_queue->qp_info;
	if (mad_list->mad_queue == &qp_info->recv_queue)
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
					   &bad_send_wr);
			if (ret)
				ib_mad_send_done_handler(port_priv, wc);
		} else
			ib_mad_send_done_handler(port_priv, wc);
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"mad_error_handler - ib_modify_qp to RTS : %d\n",
					ret);
			else
				mark_sends_for_retry(qp_info);
		}
		ib_mad_send_done_handler(port_priv, wc);
	}
}
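
/*
 * Completions are not processed in interrupt context: the CQ callback
 * (ib_mad_thread_completion_handler further down) only queues
 * port_priv->work on the port's single-threaded workqueue, and the handler
 * below drains the CQ from that work item.
 */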
/*
 * IB MAD completion callback
 */
static void ib_mad_completion_handler(struct work_struct *work)
{
	struct ib_mad_port_private *port_priv;
	struct ib_wc wc;

	port_priv = container_of(work, struct ib_mad_port_private, work);
	ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_mad_send_done_handler(port_priv, &wc);
				break;
			case IB_WC_RECV:
				ib_mad_recv_done_handler(port_priv, &wc);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else
			mad_error_handler(port_priv, &wc);
	}
}
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}
static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv,
				     mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);
void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
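
/*
 * Typical caller pattern for the two exported helpers above (an illustrative
 * sketch, not code from this file): after posting a request with
 * ib_post_send_mad(), a client may shorten the response timeout or abort the
 * request entirely:
 *
 *	ib_modify_mad(agent, send_buf, 50);	- shorten timeout to ~50 ms
 *	ib_cancel_mad(agent, send_buf);		- same as passing a timeout of 0
 *
 * A cancelled request is reported to the client's send_handler with status
 * IB_WC_WR_FLUSH_ERR instead of completing normally.
 */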
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;
	bool opa;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
			       mad_agent_priv->qp_info->port_priv->port_num);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			u8 base_version;

			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     (unsigned long) local->mad_send_wr,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     local->mad_send_wr->send_wr.wr.ud.pkey_index,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;

			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
			} else {
				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
			}

			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						(struct ib_mad *)local->mad_priv->mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kfree(local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
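
/*
 * timeout_sends() below runs from the agent's delayed work item: requests
 * whose timeout has expired are retried via retry_send() while they still
 * have retries left, and are otherwise completed back to the client with
 * IB_WC_RESP_TIMEOUT_ERR.
 */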
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	if (!list_empty(&port_priv->port_list))
		queue_work(port_priv->wq, &port_priv->work);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
}
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.lkey = (*qp_info->port_priv->mr).lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
						     GFP_ATOMIC);
			if (!mad_priv) {
				dev_err(&qp_info->port_priv->device->dev,
					"No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.length = mad_priv_dma_size(mad_priv);
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 mad_priv_dma_size(mad_priv),
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    mad_priv_dma_size(mad_priv),
					    DMA_FROM_DEVICE);
			kfree(mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}
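
/*
 * ib_mad_port_start() below brings both special QPs (QP0/QP1, where present)
 * through the usual Reset -> Init -> RTR -> RTS transitions, arms the CQ,
 * and posts the initial set of receive buffers.
 */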
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		dev_err(&port_priv->device->dev,
			"Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}
/*
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;
	struct ib_cq_init_attr cq_attr = {};

	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
		return -EFAULT;

	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
		return -EFAULT;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_cap_ib_smi(device, port_num);
	if (has_smi)
		cq_size *= 2;

	cq_attr.cqe = cq_size;
	port_priv->cq = ib_create_cq(port_priv->device,
				     ib_mad_thread_completion_handler,
				     NULL, port_priv, &cq_attr);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(port_priv->mr)) {
		dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
		ret = PTR_ERR(port_priv->mr);
		goto error5;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = create_singlethread_workqueue(name);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}
	INIT_WORK(&port_priv->work, ib_mad_completion_handler);

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dereg_mr(port_priv->mr);
error5:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}
/*
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dereg_mr(port_priv->mr);
	ib_dealloc_pd(port_priv->pd);
	ib_destroy_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}
static void ib_mad_init_device(struct ib_device *device)
{
	int start, end, i;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		start = 0;
		end   = 0;
	} else {
		start = 1;
		end   = device->phys_port_cnt;
	}

	for (i = start; i <= end; i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_mad_port_open(device, i)) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	while (--i >= start) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}
static void ib_mad_remove_device(struct ib_device *device)
{
	int start, end, i;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		start = 0;
		end   = 0;
	} else {
		start = 1;
		end   = device->phys_port_cnt;
	}

	for (i = start; i <= end; i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};
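
/*
 * Module init clamps the send/receive queue module parameters to the
 * [IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE] range before any port is opened,
 * then registers the "mad" client so ib_mad_init_device() runs for every
 * existing and future RDMA device.
 */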
static int __init ib_mad_init_module(void)
{
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		return -EINVAL;
	}

	return 0;
}

static void __exit ib_mad_cleanup_module(void)
{
	ib_unregister_client(&mad_client);
}

module_init(ib_mad_init_module);
module_exit(ib_mad_cleanup_module);