// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *	      Frank Pavlic <fpavlic@de.ibm.com>,
 *	      Thomas Spatzier <tspat@de.ibm.com>,
 *	      Frank Blaschka <frank.blaschka@de.ibm.com>
 */
#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"
struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
[QETH_DBF_SETUP] = {"qeth_setup",
8, 1, 8, 5, &debug_hex_ascii_view, NULL},
[QETH_DBF_MSG] = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
&debug_sprintf_view, NULL},
[QETH_DBF_CTRL] = {"qeth_control",
8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
EXPORT_SYMBOL_GPL(qeth_dbf);

struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
enum iucv_tx_notify notification);
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
static void qeth_close_dev_handler(struct work_struct *work)
struct qeth_card *card;
card = container_of(work, struct qeth_card, close_dev_work);
QETH_CARD_TEXT(card, 2, "cldevhdl");
ccwgroup_set_offline(card->gdev);

static const char *qeth_get_cardname(struct qeth_card *card)
if (IS_VM_NIC(card)) {
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
return " Virtual NIC QDIO";
case QETH_CARD_TYPE_IQD:
return " Virtual NIC Hiper";
case QETH_CARD_TYPE_OSM:
return " Virtual NIC QDIO - OSM";
case QETH_CARD_TYPE_OSX:
return " Virtual NIC QDIO - OSX";
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
return " OSD Express";
case QETH_CARD_TYPE_IQD:
return " HiperSockets";
case QETH_CARD_TYPE_OSN:
case QETH_CARD_TYPE_OSM:
case QETH_CARD_TYPE_OSX:

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
if (IS_VM_NIC(card)) {
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
return "Virt.NIC QDIO";
case QETH_CARD_TYPE_IQD:
return "Virt.NIC Hiper";
case QETH_CARD_TYPE_OSM:
return "Virt.NIC OSM";
case QETH_CARD_TYPE_OSX:
return "Virt.NIC OSX";
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
switch (card->info.link_type) {
case QETH_LINK_TYPE_FAST_ETH:
case QETH_LINK_TYPE_HSTR:
case QETH_LINK_TYPE_GBIT_ETH:
case QETH_LINK_TYPE_10GBIT_ETH:
case QETH_LINK_TYPE_25GBIT_ETH:
case QETH_LINK_TYPE_LANE_ETH100:
return "OSD_FE_LANE";
case QETH_LINK_TYPE_LANE_TR:
return "OSD_TR_LANE";
case QETH_LINK_TYPE_LANE_ETH1000:
return "OSD_GbE_LANE";
case QETH_LINK_TYPE_LANE:
return "OSD_ATM_LANE";
return "OSD_Express";
case QETH_CARD_TYPE_IQD:
return "HiperSockets";
case QETH_CARD_TYPE_OSN:
case QETH_CARD_TYPE_OSM:
case QETH_CARD_TYPE_OSX:
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask)
spin_lock_irqsave(&card->thread_mask_lock, flags);
card->thread_allowed_mask = threads;
if (clear_start_mask)
card->thread_start_mask &= threads;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
wake_up(&card->wait_q);
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
spin_lock_irqsave(&card->thread_mask_lock, flags);
rc = (card->thread_running_mask & threads);
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
EXPORT_SYMBOL_GPL(qeth_threads_running);

void qeth_clear_working_pool_list(struct qeth_card *card)
struct qeth_buffer_pool_entry *pool_entry, *tmp;
QETH_CARD_TEXT(card, 5, "clwrklst");
list_for_each_entry_safe(pool_entry, tmp,
&card->qdio.in_buf_pool.entry_list, list){
list_del(&pool_entry->list);
EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);

static int qeth_alloc_buffer_pool(struct qeth_card *card)
struct qeth_buffer_pool_entry *pool_entry;
QETH_CARD_TEXT(card, 5, "alocpool");
for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
qeth_free_buffer_pool(card);
for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
ptr = (void *) __get_free_page(GFP_KERNEL);
free_page((unsigned long)
pool_entry->elements[--j]);
qeth_free_buffer_pool(card);
pool_entry->elements[j] = ptr;
list_add(&pool_entry->init_list,
&card->qdio.init_pool.entry_list);

int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
QETH_CARD_TEXT(card, 2, "realcbp");
/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
qeth_clear_working_pool_list(card);
qeth_free_buffer_pool(card);
card->qdio.in_buf_pool.buf_count = bufcnt;
card->qdio.init_pool.buf_count = bufcnt;
return qeth_alloc_buffer_pool(card);
EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
q->bufs[i].buffer = q->qdio_bufs[i];
QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));

static int qeth_cq_init(struct qeth_card *card)
if (card->options.cq == QETH_CQ_ENABLED) {
QETH_CARD_TEXT(card, 2, "cqinit");
qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
QDIO_MAX_BUFFERS_PER_Q);
card->qdio.c_q->next_buf_to_init = 127;
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
card->qdio.no_in_queues - 1, 0,
QETH_CARD_TEXT_(card, 2, "1err%d", rc);

static int qeth_alloc_cq(struct qeth_card *card)
if (card->options.cq == QETH_CQ_ENABLED) {
struct qdio_outbuf_state *outbuf_states;
QETH_CARD_TEXT(card, 2, "cqon");
card->qdio.c_q = qeth_alloc_qdio_queue();
if (!card->qdio.c_q) {
card->qdio.no_in_queues = 2;
card->qdio.out_bufstates =
kcalloc(card->qdio.no_out_queues *
QDIO_MAX_BUFFERS_PER_Q,
sizeof(struct qdio_outbuf_state),
outbuf_states = card->qdio.out_bufstates;
if (outbuf_states == NULL) {
for (i = 0; i < card->qdio.no_out_queues; ++i) {
card->qdio.out_qs[i]->bufstates = outbuf_states;
outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
QETH_CARD_TEXT(card, 2, "nocq");
card->qdio.c_q = NULL;
card->qdio.no_in_queues = 1;
QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
qeth_free_qdio_queue(card->qdio.c_q);
card->qdio.c_q = NULL;
dev_err(&card->gdev->dev, "Failed to create completion queue\n");

static void qeth_free_cq(struct qeth_card *card)
if (card->qdio.c_q) {
--card->qdio.no_in_queues;
qeth_free_qdio_queue(card->qdio.c_q);
card->qdio.c_q = NULL;
kfree(card->qdio.out_bufstates);
card->qdio.out_bufstates = NULL;
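
/* Map the SBALF 15 completion code reported for a CQ buffer to the matching
 * af_iucv TX notification, picking the delayed variants when the completion
 * arrived after the buffer had already been reported as pending.
 */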
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
enum iucv_tx_notify n;
n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
TX_NOTIFY_UNREACHABLE;
n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
TX_NOTIFY_GENERALERROR;

static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
if (q->card->options.cq != QETH_CQ_ENABLED)
if (q->bufs[bidx]->next_pending != NULL) {
struct qeth_qdio_out_buffer *head = q->bufs[bidx];
struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
if (forced_cleanup ||
atomic_read(&c->state) ==
QETH_QDIO_BUF_HANDLED_DELAYED) {
struct qeth_qdio_out_buffer *f = c;
QETH_CARD_TEXT(f->q->card, 5, "fp");
QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
/* release here to avoid interleaving between
outbound tasklet and inbound tasklet
regarding notifications and lifecycle */
qeth_tx_complete_buf(c, forced_cleanup, 0);
WARN_ON_ONCE(head->next_pending != f);
head->next_pending = c;
kmem_cache_free(qeth_qdio_outbuf_cache, f);
if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
QETH_QDIO_BUF_HANDLED_DELAYED)) {
/* for recovery situations */
qeth_init_qdio_out_buf(q, bidx);
QETH_CARD_TEXT(q->card, 2, "clprecov");
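
/* Process an asynchronous operation block (AOB) delivered via the completion
 * queue: look up the TX buffer it refers to, notify the attached af_iucv
 * sockets, free any dangling header-cache allocations and mark the buffer as
 * handled so that qeth_cleanup_handled_pending() can recycle it.
 */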
static void qeth_qdio_handle_aob(struct qeth_card *card,
unsigned long phys_aob_addr)
struct qeth_qdio_out_buffer *buffer;
enum iucv_tx_notify notification;
aob = (struct qaob *) phys_to_virt(phys_aob_addr);
QETH_CARD_TEXT(card, 5, "haob");
QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
buffer = (struct qeth_qdio_out_buffer *) aob->user1;
QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
notification = TX_NOTIFY_OK;
WARN_ON_ONCE(atomic_read(&buffer->state) !=
QETH_QDIO_BUF_PENDING);
atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
notification = TX_NOTIFY_DELAYED_OK;
if (aob->aorc != 0) {
QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
notification = qeth_compute_cq_notification(aob->aorc, 1);
qeth_notify_skbs(buffer->q, buffer, notification);
/* Free dangling allocations. The attached skbs are handled by
* qeth_cleanup_handled_pending().
i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
if (aob->sba[i] && buffer->is_header[i])
kmem_cache_free(qeth_core_header_cache,
(void *) aob->sba[i]);
atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
qdio_release_aob(aob);

static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
return card->options.cq == QETH_CQ_ENABLED &&
card->qdio.c_q != NULL &&
queue == card->qdio.no_in_queues - 1;

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
ccw->cmd_code = cmd_code;
ccw->flags = flags | CCW_FLAG_SLI;
ccw->cda = (__u32) __pa(data);

static int __qeth_issue_next_read(struct qeth_card *card)
struct qeth_cmd_buffer *iob = card->read_cmd;
struct qeth_channel *channel = iob->channel;
struct ccw1 *ccw = __ccw_from_cmd(iob);
QETH_CARD_TEXT(card, 5, "issnxrd");
if (channel->state != CH_STATE_UP)
memset(iob->data, 0, iob->length);
qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
iob->callback = qeth_issue_next_read_cb;
/* keep the cmd alive after completion: */
QETH_CARD_TEXT(card, 6, "noirqpnd");
rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
channel->active_cmd = iob;
QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
rc, CARD_DEVID(card));
qeth_unlock_channel(card, channel);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);

static int qeth_issue_next_read(struct qeth_card *card)
spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
ret = __qeth_issue_next_read(card);
spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
static void qeth_enqueue_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
spin_lock_irq(&card->lock);
list_add_tail(&iob->list, &card->cmd_waiter_list);
spin_unlock_irq(&card->lock);

static void qeth_dequeue_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
spin_lock_irq(&card->lock);
list_del(&iob->list);
spin_unlock_irq(&card->lock);

void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
complete(&iob->done);
EXPORT_SYMBOL_GPL(qeth_notify_cmd);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
struct qeth_card *card)
const char *ipa_name;
int com = cmd->hdr.command;
ipa_name = qeth_get_ipa_cmd_name(com);
QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
ipa_name, com, CARD_DEVID(card), rc,
qeth_get_ipa_msg(rc));
QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
ipa_name, com, CARD_DEVID(card));

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
QETH_CARD_TEXT(card, 5, "chkipad");
if (IS_IPA_REPLY(cmd)) {
if (cmd->hdr.command != IPA_CMD_SETCCID &&
cmd->hdr.command != IPA_CMD_DELCCID &&
cmd->hdr.command != IPA_CMD_MODCCID &&
cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
/* handle unsolicited event: */
switch (cmd->hdr.command) {
case IPA_CMD_STOPLAN:
if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
dev_err(&card->gdev->dev,
"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
QETH_CARD_IFNAME(card));
schedule_work(&card->close_dev_work);
dev_warn(&card->gdev->dev,
"The link for interface %s on CHPID 0x%X failed\n",
QETH_CARD_IFNAME(card), card->info.chpid);
qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
netif_carrier_off(card->dev);
case IPA_CMD_STARTLAN:
dev_info(&card->gdev->dev,
"The link for %s on CHPID 0x%X has been restored\n",
QETH_CARD_IFNAME(card), card->info.chpid);
if (card->info.hwtrap)
card->info.hwtrap = 2;
qeth_schedule_recovery(card);
case IPA_CMD_SETBRIDGEPORT_IQD:
case IPA_CMD_SETBRIDGEPORT_OSA:
case IPA_CMD_ADDRESS_CHANGE_NOTIF:
if (card->discipline->control_event_handler(card, cmd))
case IPA_CMD_MODCCID:
case IPA_CMD_REGISTER_LOCAL_ADDR:
QETH_CARD_TEXT(card, 3, "irla");
case IPA_CMD_UNREGISTER_LOCAL_ADDR:
QETH_CARD_TEXT(card, 3, "urla");
QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");

void qeth_clear_ipacmd_list(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 4, "clipalst");
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(iob, &card->cmd_waiter_list, list)
qeth_notify_cmd(iob, -EIO);
spin_unlock_irqrestore(&card->lock, flags);
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
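
/* Inspect a control-channel response for an IDX TERMINATE indication and
 * translate the termination cause into an errno.
 */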
static int qeth_check_idx_response(struct qeth_card *card,
unsigned char *buffer)
QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
QETH_CARD_TEXT(card, 2, "ckidxres");
QETH_CARD_TEXT(card, 2, " idxterm");
QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
dev_err(&card->gdev->dev,
"The device does not support the configured transport mode\n");
return -EPROTONOSUPPORT;

void qeth_put_cmd(struct qeth_cmd_buffer *iob)
if (refcount_dec_and_test(&iob->ref_count)) {
EXPORT_SYMBOL_GPL(qeth_put_cmd);

static void qeth_release_buffer_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
qeth_notify_cmd(iob, rc);

struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
unsigned int length, unsigned int ccws,
struct qeth_cmd_buffer *iob;
if (length > QETH_BUFSIZE)
iob = kzalloc(sizeof(*iob), GFP_KERNEL);
iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
GFP_KERNEL | GFP_DMA);
init_completion(&iob->done);
spin_lock_init(&iob->lock);
INIT_LIST_HEAD(&iob->list);
refcount_set(&iob->ref_count, 1);
iob->channel = channel;
iob->timeout = timeout;
iob->length = length;
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);

static void qeth_issue_next_read_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
struct qeth_cmd_buffer *request = NULL;
struct qeth_ipa_cmd *cmd = NULL;
struct qeth_reply *reply = NULL;
struct qeth_cmd_buffer *tmp;
QETH_CARD_TEXT(card, 4, "sndctlcb");
rc = qeth_check_idx_response(card, iob->data);
qeth_schedule_recovery(card);
qeth_clear_ipacmd_list(card);
cmd = __ipa_reply(iob);
cmd = qeth_check_ipa_data(card, cmd);
if (IS_OSN(card) && card->osn_info.assist_cb &&
cmd->hdr.command != IPA_CMD_STARTLAN) {
card->osn_info.assist_cb(card->dev, cmd);
/* match against pending cmd requests */
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
if (tmp->match && tmp->match(tmp, iob)) {
/* take the object outside the lock */
qeth_get_cmd(request);
spin_unlock_irqrestore(&card->lock, flags);
reply = &request->reply;
if (!reply->callback) {
spin_lock_irqsave(&request->lock, flags);
/* Bail out when the requestor has already left: */
rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
spin_unlock_irqrestore(&request->lock, flags);
qeth_notify_cmd(request, rc);
qeth_put_cmd(request);
memcpy(&card->seqno.pdu_hdr_ack,
QETH_PDU_HEADER_SEQ_NO(iob->data),
__qeth_issue_next_read(card);
static int qeth_set_thread_start_bit(struct qeth_card *card,
unsigned long thread)
spin_lock_irqsave(&card->thread_mask_lock, flags);
if (!(card->thread_allowed_mask & thread) ||
(card->thread_start_mask & thread)) {
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
card->thread_start_mask |= thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);

static void qeth_clear_thread_start_bit(struct qeth_card *card,
unsigned long thread)
spin_lock_irqsave(&card->thread_mask_lock, flags);
card->thread_start_mask &= ~thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
wake_up(&card->wait_q);

static void qeth_clear_thread_running_bit(struct qeth_card *card,
unsigned long thread)
spin_lock_irqsave(&card->thread_mask_lock, flags);
card->thread_running_mask &= ~thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
wake_up_all(&card->wait_q);

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
spin_lock_irqsave(&card->thread_mask_lock, flags);
if (card->thread_start_mask & thread) {
if ((card->thread_allowed_mask & thread) &&
!(card->thread_running_mask & thread)) {
card->thread_start_mask &= ~thread;
card->thread_running_mask |= thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
wait_event(card->wait_q,
(rc = __qeth_do_run_thread(card, thread)) >= 0);

void qeth_schedule_recovery(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "startrec");
if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
schedule_work(&card->kernel_thread_starter);

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
sense = (char *) irb->ecw;
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
QETH_CARD_TEXT(card, 2, "CGENCHK");
dev_warn(&cdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
CCW_DEVID(cdev), dstat, cstat);
print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
if (dstat & DEV_STAT_UNIT_CHECK) {
if (sense[SENSE_RESETTING_EVENT_BYTE] &
SENSE_RESETTING_EVENT_FLAG) {
QETH_CARD_TEXT(card, 2, "REVIND");
if (sense[SENSE_COMMAND_REJECT_BYTE] &
SENSE_COMMAND_REJECT_FLAG) {
QETH_CARD_TEXT(card, 2, "CMDREJi");
if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
QETH_CARD_TEXT(card, 2, "AFFE");
if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
QETH_CARD_TEXT(card, 2, "ZEROSEN");
QETH_CARD_TEXT(card, 2, "DGENCHK");

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
switch (PTR_ERR(irb)) {
QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
dev_warn(&cdev->dev, "A hardware operation timed out"
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
PTR_ERR(irb), CCW_DEVID(cdev));
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT(card, 2, " rc???");
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
struct qeth_cmd_buffer *iob = NULL;
struct ccwgroup_device *gdev;
struct qeth_channel *channel;
struct qeth_card *card;
/* while we hold the ccwdev lock, this stays valid: */
gdev = dev_get_drvdata(&cdev->dev);
card = dev_get_drvdata(&gdev->dev);
QETH_CARD_TEXT(card, 5, "irq");
if (card->read.ccwdev == cdev) {
channel = &card->read;
QETH_CARD_TEXT(card, 5, "read");
} else if (card->write.ccwdev == cdev) {
channel = &card->write;
QETH_CARD_TEXT(card, 5, "write");
channel = &card->data;
QETH_CARD_TEXT(card, 5, "data");
QETH_CARD_TEXT(card, 5, "irqunsol");
} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
QETH_CARD_TEXT(card, 5, "irqunexp");
"Received IRQ with intparm %lx, expected %px\n",
intparm, channel->active_cmd);
if (channel->active_cmd)
qeth_cancel_cmd(channel->active_cmd, -EIO);
iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
channel->active_cmd = NULL;
qeth_unlock_channel(card, channel);
rc = qeth_check_irb_error(card, cdev, irb);
/* IO was terminated, free its resources. */
qeth_cancel_cmd(iob, rc);
if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
channel->state = CH_STATE_STOPPED;
wake_up(&card->wait_q);
if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
channel->state = CH_STATE_HALTED;
wake_up(&card->wait_q);
if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
SCSW_FCTL_HALT_FUNC))) {
qeth_cancel_cmd(iob, -ECANCELED);
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
if ((dstat & DEV_STAT_UNIT_EXCEP) ||
(dstat & DEV_STAT_UNIT_CHECK) ||
if (irb->esw.esw0.erw.cons) {
dev_warn(&channel->ccwdev->dev,
"The qeth device driver failed to recover "
"an error on the device\n");
QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
CCW_DEVID(channel->ccwdev), cstat,
print_hex_dump(KERN_WARNING, "qeth: irb ",
DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
print_hex_dump(KERN_WARNING, "qeth: sense data ",
DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
rc = qeth_get_problem(card, cdev, irb);
card->read_or_write_problem = 1;
qeth_cancel_cmd(iob, rc);
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
if (irb->scsw.cmd.count > iob->length) {
qeth_cancel_cmd(iob, -EIO);
iob->callback(card, iob,
iob->length - irb->scsw.cmd.count);
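
/* Deliver a TX notification to every af_iucv skb queued on this buffer. */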
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
struct qeth_qdio_out_buffer *buf,
enum iucv_tx_notify notification)
struct sk_buff *skb;
skb_queue_walk(&buf->skb_list, skb) {
QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
iucv_sk(skb->sk)->sk_txnotify(skb, notification);

static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
struct qeth_qdio_out_q *queue = buf->q;
struct sk_buff *skb;
/* release may never happen from within CQ tasklet scope */
WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
if (buf->next_element_to_fill == 0)
QETH_TXQ_STAT_INC(queue, bufs);
QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
unsigned int bytes = qdisc_pkt_len(skb);
bool is_tso = skb_is_gso(skb);
unsigned int packets;
packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
QETH_TXQ_STAT_ADD(queue, tx_errors, packets);
QETH_TXQ_STAT_ADD(queue, tx_packets, packets);
QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes);
if (skb->ip_summed == CHECKSUM_PARTIAL)
QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
if (skb_is_nonlinear(skb))
QETH_TXQ_STAT_INC(queue, skbs_sg);
QETH_TXQ_STAT_INC(queue, skbs_tso);
QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
napi_consume_skb(skb, budget);

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
bool error, int budget)
/* is PCI flag set on buffer? */
if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
atomic_dec(&queue->set_pci_flags_count);
qeth_tx_complete_buf(buf, error, budget);
for (i = 0; i < queue->max_elements; ++i) {
if (buf->buffer->element[i].addr && buf->is_header[i])
kmem_cache_free(qeth_core_header_cache,
buf->buffer->element[i].addr);
buf->is_header[i] = 0;
qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
buf->next_element_to_fill = 0;
atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
qeth_cleanup_handled_pending(q, j, 1);
qeth_clear_output_buffer(q, q->bufs[j], true, 0);
kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);

void qeth_drain_output_queues(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "clearqdbf");
/* clear outbound buffers to free skbs */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
if (card->qdio.out_qs[i])
qeth_drain_output_queue(card->qdio.out_qs[i], false);
EXPORT_SYMBOL_GPL(qeth_drain_output_queues);

static void qeth_free_buffer_pool(struct qeth_card *card)
struct qeth_buffer_pool_entry *pool_entry, *tmp;
list_for_each_entry_safe(pool_entry, tmp,
&card->qdio.init_pool.entry_list, init_list){
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
free_page((unsigned long)pool_entry->elements[i]);
list_del(&pool_entry->init_list);
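
/* Switch an OSA device between a single TX queue and the full set of TX
 * queues; any already-allocated QDIO queues are freed so that they get
 * reallocated with the new count.
 */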
static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
unsigned int count = single ? 1 : card->dev->num_tx_queues;
rc = netif_set_real_num_tx_queues(card->dev, count);
if (card->qdio.no_out_queues == count)
if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
qeth_free_qdio_queues(card);
dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
card->qdio.no_out_queues = count;

static int qeth_update_from_chp_desc(struct qeth_card *card)
struct ccw_device *ccwdev;
struct channel_path_desc_fmt0 *chp_dsc;
QETH_CARD_TEXT(card, 2, "chp_desc");
ccwdev = card->data.ccwdev;
chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
card->info.func_level = 0x4100 + chp_dsc->desc;
if (IS_OSD(card) || IS_OSX(card))
/* CHPP field bit 6 == 1 -> single queue */
rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);

static void qeth_init_qdio_info(struct qeth_card *card)
QETH_CARD_TEXT(card, 4, "intqdinf");
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
card->qdio.no_in_queues = 1;
card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);

static void qeth_set_initial_options(struct qeth_card *card)
card->options.route4.type = NO_ROUTER;
card->options.route6.type = NO_ROUTER;
card->options.rx_sg_cb = QETH_RX_SG_CB;
card->options.isolation = ISOLATION_MODE_NONE;
card->options.cq = QETH_CQ_DISABLED;
card->options.layer = QETH_DISCIPLINE_UNDETERMINED;

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
unsigned long flags;
spin_lock_irqsave(&card->thread_mask_lock, flags);
QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
(u8) card->thread_start_mask,
(u8) card->thread_allowed_mask,
(u8) card->thread_running_mask);
rc = (card->thread_start_mask & thread);
spin_unlock_irqrestore(&card->thread_mask_lock, flags);

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
struct task_struct *ts;
struct qeth_card *card = container_of(work, struct qeth_card,
kernel_thread_starter);
QETH_CARD_TEXT(card, 2, "strthrd");
if (card->read.state != CH_STATE_UP &&
card->write.state != CH_STATE_UP)
if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
ts = kthread_run(qeth_do_reset, card, "qeth_recover");
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card,
QETH_RECOVER_THREAD);

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "setupcrd");
card->info.type = CARD_RDEV(card)->id.driver_info;
card->state = CARD_STATE_DOWN;
spin_lock_init(&card->lock);
spin_lock_init(&card->thread_mask_lock);
mutex_init(&card->conf_mutex);
mutex_init(&card->discipline_mutex);
INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
INIT_LIST_HEAD(&card->cmd_waiter_list);
init_waitqueue_head(&card->wait_q);
qeth_set_initial_options(card);
/* IP address takeover */
INIT_LIST_HEAD(&card->ipato.entries);
qeth_init_qdio_info(card);
INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
struct qeth_card *card = container_of(slr, struct qeth_card,
qeth_service_level);
if (card->info.mcl_level[0])
seq_printf(m, "qeth: %s firmware level %s\n",
CARD_BUS_ID(card), card->info.mcl_level);

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
struct qeth_card *card;
QETH_DBF_TEXT(SETUP, 2, "alloccrd");
card = kzalloc(sizeof(*card), GFP_KERNEL);
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
dev_set_drvdata(&gdev->dev, card);
CARD_RDEV(card) = gdev->cdev[0];
CARD_WDEV(card) = gdev->cdev[1];
CARD_DDEV(card) = gdev->cdev[2];
card->event_wq = alloc_ordered_workqueue("%s_event", 0,
dev_name(&gdev->dev));
if (!card->event_wq)
card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
if (!card->read_cmd)
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
destroy_workqueue(card->event_wq);
dev_set_drvdata(&gdev->dev, NULL);

static int qeth_clear_channel(struct qeth_card *card,
struct qeth_channel *channel)
QETH_CARD_TEXT(card, 3, "clearch");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
rc = wait_event_interruptible_timeout(card->wait_q,
channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
if (rc == -ERESTARTSYS)
if (channel->state != CH_STATE_STOPPED)
channel->state = CH_STATE_DOWN;

static int qeth_halt_channel(struct qeth_card *card,
struct qeth_channel *channel)
QETH_CARD_TEXT(card, 3, "haltch");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
rc = wait_event_interruptible_timeout(card->wait_q,
channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
if (rc == -ERESTARTSYS)
if (channel->state != CH_STATE_HALTED)

int qeth_stop_channel(struct qeth_channel *channel)
struct ccw_device *cdev = channel->ccwdev;
rc = ccw_device_set_offline(cdev);
spin_lock_irq(get_ccwdev_lock(cdev));
if (channel->active_cmd) {
dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
channel->active_cmd);
channel->active_cmd = NULL;
cdev->handler = NULL;
spin_unlock_irq(get_ccwdev_lock(cdev));
EXPORT_SYMBOL_GPL(qeth_stop_channel);

static int qeth_start_channel(struct qeth_channel *channel)
struct ccw_device *cdev = channel->ccwdev;
channel->state = CH_STATE_DOWN;
atomic_set(&channel->irq_pending, 0);
spin_lock_irq(get_ccwdev_lock(cdev));
cdev->handler = qeth_irq;
spin_unlock_irq(get_ccwdev_lock(cdev));
rc = ccw_device_set_online(cdev);
spin_lock_irq(get_ccwdev_lock(cdev));
cdev->handler = NULL;
spin_unlock_irq(get_ccwdev_lock(cdev));

static int qeth_halt_channels(struct qeth_card *card)
int rc1 = 0, rc2 = 0, rc3 = 0;
QETH_CARD_TEXT(card, 3, "haltchs");
rc1 = qeth_halt_channel(card, &card->read);
rc2 = qeth_halt_channel(card, &card->write);
rc3 = qeth_halt_channel(card, &card->data);

static int qeth_clear_channels(struct qeth_card *card)
int rc1 = 0, rc2 = 0, rc3 = 0;
QETH_CARD_TEXT(card, 3, "clearchs");
rc1 = qeth_clear_channel(card, &card->read);
rc2 = qeth_clear_channel(card, &card->write);
rc3 = qeth_clear_channel(card, &card->data);

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
QETH_CARD_TEXT(card, 3, "clhacrd");
rc = qeth_halt_channels(card);
return qeth_clear_channels(card);

int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
QETH_CARD_TEXT(card, 3, "qdioclr");
switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
QETH_QDIO_CLEANING)) {
case QETH_QDIO_ESTABLISHED:
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_HALT);
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_CLEAR);
QETH_CARD_TEXT_(card, 3, "1err%d", rc);
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
case QETH_QDIO_CLEANING:
rc = qeth_clear_halt_card(card, use_halt);
QETH_CARD_TEXT_(card, 3, "2err%d", rc);
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
struct diag26c_vnic_resp *response = NULL;
struct diag26c_vnic_req *request = NULL;
struct ccw_dev_id id;
QETH_CARD_TEXT(card, 2, "vmlayer");
cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
if (!request || !response) {
ccw_device_get_id(CARD_RDEV(card), &id);
request->resp_buf_len = sizeof(*response);
request->resp_version = DIAG26C_VERSION6_VM65918;
request->req_format = DIAG26C_VNIC_INFO;
memcpy(&request->sys_name, userid, 8);
request->devno = id.devno;
QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
rc = diag26c(request, response, DIAG26C_PORT_VNIC);
QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
if (request->resp_buf_len < sizeof(*response) ||
response->version != request->resp_version) {
if (response->protocol == VNIC_INFO_PROT_L2)
disc = QETH_DISCIPLINE_LAYER2;
else if (response->protocol == VNIC_INFO_PROT_L3)
disc = QETH_DISCIPLINE_LAYER3;
QETH_CARD_TEXT_(card, 2, "err%x", rc);

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
if (IS_OSM(card) || IS_OSN(card))
disc = QETH_DISCIPLINE_LAYER2;
else if (IS_VM_NIC(card))
disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
qeth_vm_detect_layer(card);
case QETH_DISCIPLINE_LAYER2:
QETH_CARD_TEXT(card, 3, "force l2");
case QETH_DISCIPLINE_LAYER3:
QETH_CARD_TEXT(card, 3, "force l3");
QETH_CARD_TEXT(card, 3, "force no");

static void qeth_set_blkt_defaults(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "cfgblkt");
if (card->info.use_v1_blkt) {
card->info.blkt.time_total = 0;
card->info.blkt.inter_packet = 0;
card->info.blkt.inter_packet_jumbo = 0;
card->info.blkt.time_total = 250;
card->info.blkt.inter_packet = 5;
card->info.blkt.inter_packet_jumbo = 15;

static void qeth_init_tokens(struct qeth_card *card)
card->token.issuer_rm_w = 0x00010103UL;
card->token.cm_filter_w = 0x00010108UL;
card->token.cm_connection_w = 0x0001010aUL;
card->token.ulp_filter_w = 0x0001010bUL;
card->token.ulp_connection_w = 0x0001010dUL;

static void qeth_init_func_level(struct qeth_card *card)
switch (card->info.type) {
case QETH_CARD_TYPE_IQD:
card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
case QETH_CARD_TYPE_OSD:
case QETH_CARD_TYPE_OSN:
card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;

static void qeth_idx_finalize_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
QETH_SEQ_NO_LENGTH);
if (iob->channel == &card->write)
card->seqno.trans_hdr++;
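
/* Derive the IDX function level we expect to see from the peer, based on the
 * function level we advertised ourselves.
 */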
static int qeth_peer_func_level(int level)
if ((level & 0xff) == 8)
return (level & 0xff) + 0x400;
if (((level >> 8) & 3) == 1)
return (level & 0xff) + 0x200;

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
qeth_idx_finalize_cmd(card, iob);
memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
&card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
card->seqno.pdu_hdr++;
memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
&card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
iob->callback = qeth_release_buffer_cb;

static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
struct qeth_cmd_buffer *reply)
/* MPC cmds are issued strictly in sequence. */
return !IS_IPA(reply->data);

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
unsigned int data_length)
struct qeth_cmd_buffer *iob;
iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
memcpy(iob->data, data, data_length);
qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
iob->finalize = qeth_mpc_finalize_cmd;
iob->match = qeth_mpc_match_reply;

/**
 * qeth_send_control_data() - send control command to the card
 * @card: qeth_card structure pointer
 * @iob: qeth_cmd_buffer pointer
 * @reply_cb: callback function pointer
 * @cb_card: pointer to the qeth_card structure
 * @cb_reply: pointer to the qeth_reply structure
 * @cb_cmd: pointer to the original iob for non-IPA
 * commands, or to the qeth_ipa_cmd structure
 * for the IPA commands.
 * @reply_param: private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 * > 0 if more reply blocks are expected,
 * 0 if the last or only reply block is received, and
 * < 0 indicates an error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */
static int qeth_send_control_data(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
int (*reply_cb)(struct qeth_card *cb_card,
struct qeth_reply *cb_reply,
unsigned long cb_cmd),
struct qeth_channel *channel = iob->channel;
struct qeth_reply *reply = &iob->reply;
long timeout = iob->timeout;
QETH_CARD_TEXT(card, 2, "sendctl");
reply->callback = reply_cb;
reply->param = reply_param;
timeout = wait_event_interruptible_timeout(card->wait_q,
qeth_trylock_channel(channel),
return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
iob->finalize(card, iob);
QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
qeth_enqueue_cmd(card, iob);
/* This pairs with iob->callback, and keeps the iob alive after IO: */
QETH_CARD_TEXT(card, 6, "noirqpnd");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
(addr_t) iob, 0, 0, timeout);
channel->active_cmd = iob;
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
CARD_DEVID(card), rc);
QETH_CARD_TEXT_(card, 2, " err%d", rc);
qeth_dequeue_cmd(card, iob);
qeth_unlock_channel(card, channel);
timeout = wait_for_completion_interruptible_timeout(&iob->done,
rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
qeth_dequeue_cmd(card, iob);
/* Wait until the callback for a late reply has completed: */
spin_lock_irq(&iob->lock);
/* Zap any callback that's still pending: */
spin_unlock_irq(&iob->lock);

struct qeth_node_desc {
struct node_descriptor nd1;
struct node_descriptor nd2;
struct node_descriptor nd3;

static void qeth_read_conf_data_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
QETH_CARD_TEXT(card, 2, "cfgunit");
if (data_length < sizeof(*nd)) {
card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
nd->nd1.plant[1] == _ascebc['M'];
tag = (u8 *)&nd->nd1.tag;
card->info.chpid = tag[0];
card->info.unit_addr2 = tag[1];
tag = (u8 *)&nd->nd2.tag;
card->info.cula = tag[1];
card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
nd->nd3.model[1] == 0xF0 &&
nd->nd3.model[2] >= 0xF1 &&
nd->nd3.model[2] <= 0xF4;
qeth_notify_cmd(iob, rc);

static int qeth_read_conf_data(struct qeth_card *card)
struct qeth_channel *channel = &card->data;
struct qeth_cmd_buffer *iob;
/* scan for RCD command in extended SenseID data */
ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
if (!ciw || ciw->cmd == 0)
if (ciw->count < sizeof(struct qeth_node_desc))
iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
iob->callback = qeth_read_conf_data_cb;
qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
return qeth_send_control_data(card, iob, NULL, NULL);

static int qeth_idx_check_activate_response(struct qeth_card *card,
struct qeth_channel *channel,
struct qeth_cmd_buffer *iob)
rc = qeth_check_idx_response(card, iob->data);
if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
/* negative reply: */
QETH_CARD_TEXT_(card, 2, "idxneg%c",
QETH_IDX_ACT_CAUSE_CODE(iob->data));
switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
case QETH_IDX_ACT_ERR_EXCL:
dev_err(&channel->ccwdev->dev,
"The adapter is used exclusively by another host\n");
case QETH_IDX_ACT_ERR_AUTH:
case QETH_IDX_ACT_ERR_AUTH_USER:
dev_err(&channel->ccwdev->dev,
"Setting the device online failed because of insufficient authorization\n");
QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
CCW_DEVID(channel->ccwdev));
static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
struct qeth_channel *channel = iob->channel;
QETH_CARD_TEXT(card, 2, "idxrdcb");
rc = qeth_idx_check_activate_response(card, channel, iob);
memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if (peer_level != qeth_peer_func_level(card->info.func_level)) {
QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
CCW_DEVID(channel->ccwdev),
card->info.func_level, peer_level);
memcpy(&card->token.issuer_rm_r,
QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
memcpy(&card->info.mcl_level[0],
QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
qeth_notify_cmd(iob, rc);

static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
unsigned int data_length)
struct qeth_channel *channel = iob->channel;
QETH_CARD_TEXT(card, 2, "idxwrcb");
rc = qeth_idx_check_activate_response(card, channel, iob);
memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if ((peer_level & ~0x0100) !=
qeth_peer_func_level(card->info.func_level)) {
QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
CCW_DEVID(channel->ccwdev),
card->info.func_level, peer_level);
qeth_notify_cmd(iob, rc);
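
/* Build the parts common to both IDX ACTIVATE requests: a write CCW carrying
 * the request chained to a read CCW for the response, plus the port number,
 * issuer token, function level and QDIO device address.
 */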
static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *iob)
u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
u8 port = ((u8)card->dev->dev_port) | 0x80;
struct ccw1 *ccw = __ccw_from_cmd(iob);
struct ccw_dev_id dev_id;
qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
ccw_device_get_id(CARD_DDEV(card), &dev_id);
iob->finalize = qeth_idx_finalize_cmd;
port |= QETH_IDX_ACT_INVAL_FRAME;
memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
&card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
&card->info.func_level, 2);
memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);

static int qeth_idx_activate_read_channel(struct qeth_card *card)
struct qeth_channel *channel = &card->read;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "idxread");
iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
qeth_idx_setup_activate_cmd(card, iob);
iob->callback = qeth_idx_activate_read_channel_cb;
rc = qeth_send_control_data(card, iob, NULL, NULL);
channel->state = CH_STATE_UP;

static int qeth_idx_activate_write_channel(struct qeth_card *card)
struct qeth_channel *channel = &card->write;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "idxwrite");
iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
qeth_idx_setup_activate_cmd(card, iob);
iob->callback = qeth_idx_activate_write_channel_cb;
rc = qeth_send_control_data(card, iob, NULL, NULL);
channel->state = CH_STATE_UP;

static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "cmenblcb");
iob = (struct qeth_cmd_buffer *) data;
memcpy(&card->token.cm_filter_r,
QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);

static int qeth_cm_enable(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "cmenable");
iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
&card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);

static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "cmsetpcb");
iob = (struct qeth_cmd_buffer *) data;
memcpy(&card->token.cm_connection_r,
QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
QETH_MPC_TOKEN_LENGTH);

static int qeth_cm_setup(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "cmsetup");
iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
&card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
&card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);

static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
struct net_device *dev = card->dev;
unsigned int new_mtu;
/* IQD needs accurate max MTU to set up its RX buffers: */
/* tolerate quirky HW: */
max_mtu = ETH_MAX_MTU;
/* move any device with default MTU to new max MTU: */
new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
/* adjust RX buffer size to new max MTU: */
card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
if (dev->max_mtu && dev->max_mtu != max_mtu)
qeth_free_qdio_queues(card);
/* default MTUs for first setup: */
else if (IS_LAYER2(card))
new_mtu = ETH_DATA_LEN;
new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
dev->max_mtu = max_mtu;
dev->mtu = min(new_mtu, max_mtu);
static int qeth_get_mtu_outof_framesize(int framesize)
switch (framesize) {

static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
__u16 mtu, framesize;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "ulpenacb");
iob = (struct qeth_cmd_buffer *) data;
memcpy(&card->token.ulp_filter_r,
QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
mtu = qeth_get_mtu_outof_framesize(framesize);
mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
*(u16 *)reply->param = mtu;
memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
card->info.link_type = link_type;
card->info.link_type = 0;
QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);

static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
return QETH_PROT_OSN2;
return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;

static int qeth_ulp_enable(struct qeth_card *card)
u8 prot_type = qeth_mpc_select_prot_type(card);
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "ulpenabl");
iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
&card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
return qeth_update_max_mtu(card, max_mtu);

static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "ulpstpcb");
iob = (struct qeth_cmd_buffer *) data;
memcpy(&card->token.ulp_connection_r,
QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
QETH_CARD_TEXT(card, 2, "olmlimit");
dev_err(&card->gdev->dev, "A connection could not be "
"established because of an OLM limit\n");

static int qeth_ulp_setup(struct qeth_card *card)
struct qeth_cmd_buffer *iob;
struct ccw_dev_id dev_id;
QETH_CARD_TEXT(card, 2, "ulpsetup");
iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
&card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
&card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
ccw_device_get_id(CARD_DDEV(card), &dev_id);
memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
temp = (card->info.cula << 8) + card->info.unit_addr2;
memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);

static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
struct qeth_qdio_out_buffer *newbuf;
newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
newbuf->buffer = q->qdio_bufs[bidx];
skb_queue_head_init(&newbuf->skb_list);
lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
newbuf->next_pending = q->bufs[bidx];
atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
q->bufs[bidx] = newbuf;

static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
qeth_drain_output_queue(q, true);
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);

static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {

static void qeth_tx_completion_timer(struct timer_list *timer)
struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
napi_schedule(&queue->napi);
QETH_TXQ_STAT_INC(queue, completion_timer);
2318 static int qeth_alloc_qdio_queues(struct qeth_card *card)
2322 QETH_CARD_TEXT(card, 2, "allcqdbf");
2324 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2325 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2328 QETH_CARD_TEXT(card, 2, "inq");
2329 card->qdio.in_q = qeth_alloc_qdio_queue();
2330 if (!card->qdio.in_q)
2333 /* inbound buffer pool */
2334 if (qeth_alloc_buffer_pool(card))
2338 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2339 struct qeth_qdio_out_q *queue;
2341 queue = qeth_alloc_output_queue();
2344 QETH_CARD_TEXT_(card, 2, "outq %i", i);
2345 QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
2346 card->qdio.out_qs[i] = queue;
2348 queue->queue_no = i;
2349 timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2351 /* give outbound qeth_qdio_buffers their qdio_buffers */
2352 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2353 WARN_ON(queue->bufs[j]);
2354 if (qeth_init_qdio_out_buf(queue, j))
2355 goto out_freeoutqbufs;
2360 if (qeth_alloc_cq(card))
2368 kmem_cache_free(qeth_qdio_outbuf_cache,
2369 card->qdio.out_qs[i]->bufs[j]);
2370 card->qdio.out_qs[i]->bufs[j] = NULL;
2374 qeth_free_output_queue(card->qdio.out_qs[--i]);
2375 card->qdio.out_qs[i] = NULL;
2377 qeth_free_buffer_pool(card);
2379 qeth_free_qdio_queue(card->qdio.in_q);
2380 card->qdio.in_q = NULL;
2382 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
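/*
 * qeth_free_qdio_queues() below tears down everything that
 * qeth_alloc_qdio_queues() set up: pending buffer-reclaim work, RX skbs
 * attached to the input buffers, the input queue, the inbound buffer pool
 * and all output queues. The atomic_xchg() back to QETH_QDIO_UNINITIALIZED
 * makes repeated calls harmless.
 */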
2386 static void qeth_free_qdio_queues(struct qeth_card *card)
2390 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2391 QETH_QDIO_UNINITIALIZED)
2395 cancel_delayed_work_sync(&card->buffer_reclaim_work);
2396 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2397 if (card->qdio.in_q->bufs[j].rx_skb)
2398 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2400 qeth_free_qdio_queue(card->qdio.in_q);
2401 card->qdio.in_q = NULL;
2402 /* inbound buffer pool */
2403 qeth_free_buffer_pool(card);
2404 /* free outbound qdio_qs */
2405 for (i = 0; i < card->qdio.no_out_queues; i++) {
2406 qeth_free_output_queue(card->qdio.out_qs[i]);
2407 card->qdio.out_qs[i] = NULL;
2411 static void qeth_create_qib_param_field(struct qeth_card *card,
2415 param_field[0] = _ascebc['P'];
2416 param_field[1] = _ascebc['C'];
2417 param_field[2] = _ascebc['I'];
2418 param_field[3] = _ascebc['T'];
2419 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2420 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2421 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2424 static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2427 param_field[16] = _ascebc['B'];
2428 param_field[17] = _ascebc['L'];
2429 param_field[18] = _ascebc['K'];
2430 param_field[19] = _ascebc['T'];
2431 *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2432 *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2433 *((unsigned int *) (&param_field[28])) =
2434 card->info.blkt.inter_packet_jumbo;
2437 static int qeth_qdio_activate(struct qeth_card *card)
2439 QETH_CARD_TEXT(card, 3, "qdioact");
2440 return qdio_activate(CARD_DDEV(card));
2443 static int qeth_dm_act(struct qeth_card *card)
2445 struct qeth_cmd_buffer *iob;
2447 QETH_CARD_TEXT(card, 2, "dmact");
2449 iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2453 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2454 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2455 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2456 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2457 return qeth_send_control_data(card, iob, NULL, NULL);
2460 static int qeth_mpc_initialize(struct qeth_card *card)
2464 QETH_CARD_TEXT(card, 2, "mpcinit");
2466 rc = qeth_issue_next_read(card);
2468 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2471 rc = qeth_cm_enable(card);
2473 QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2476 rc = qeth_cm_setup(card);
2478 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2481 rc = qeth_ulp_enable(card);
2483 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2486 rc = qeth_ulp_setup(card);
2488 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2491 rc = qeth_alloc_qdio_queues(card);
2493 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2496 rc = qeth_qdio_establish(card);
2498 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2499 qeth_free_qdio_queues(card);
2502 rc = qeth_qdio_activate(card);
2504 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2507 rc = qeth_dm_act(card);
2509 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
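/*
 * The sequence above brings the control connection and the data path up in
 * a fixed order: queue the next read on the control channel, then CM
 * enable/setup, ULP enable/setup, allocation of the QDIO queues,
 * qdio_establish(), qdio_activate() and finally DM_ACT. Each step logs an
 * "<n>err" trace entry with its return code before bailing out.
 */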
2516 void qeth_print_status_message(struct qeth_card *card)
2518 switch (card->info.type) {
2519 case QETH_CARD_TYPE_OSD:
2520 case QETH_CARD_TYPE_OSM:
2521 case QETH_CARD_TYPE_OSX:
2522 /* VM will use a non-zero first character to indicate
2523 * a HiperSockets-like reporting of the level;
2524 * OSA sets the first character to zero.
2526 if (!card->info.mcl_level[0]) {
2527 sprintf(card->info.mcl_level, "%02x%02x",
2528 card->info.mcl_level[2],
2529 card->info.mcl_level[3]);
2533 case QETH_CARD_TYPE_IQD:
2534 if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
2535 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2536 card->info.mcl_level[0]];
2537 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2538 card->info.mcl_level[1]];
2539 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2540 card->info.mcl_level[2]];
2541 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2542 card->info.mcl_level[3]];
2543 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2547 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2549 dev_info(&card->gdev->dev,
2550 "Device is a%s card%s%s%s\nwith link type %s.\n",
2551 qeth_get_cardname(card),
2552 (card->info.mcl_level[0]) ? " (level: " : "",
2553 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2554 (card->info.mcl_level[0]) ? ")" : "",
2555 qeth_get_cardname_short(card));
2557 EXPORT_SYMBOL_GPL(qeth_print_status_message);
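/*
 * Inbound buffer-pool handling (descriptive note): a pool entry is only
 * considered free when none of its pages is referenced elsewhere
 * (page_count() == 1). Pages with a higher refcount are presumably still
 * attached to SG skbs that the stack has not consumed yet; in that case
 * qeth_find_free_buffer_pool_entry() below swaps them for freshly
 * allocated pages and counts this via the rx_sg_alloc_page statistic.
 */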
2559 static void qeth_initialize_working_pool_list(struct qeth_card *card)
2561 struct qeth_buffer_pool_entry *entry;
2563 QETH_CARD_TEXT(card, 5, "inwrklst");
2565 list_for_each_entry(entry,
2566 &card->qdio.init_pool.entry_list, init_list) {
2567 qeth_put_buffer_pool_entry(card, entry);
2571 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2572 struct qeth_card *card)
2574 struct list_head *plh;
2575 struct qeth_buffer_pool_entry *entry;
2579 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2582 list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
2583 entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
2585 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2586 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2592 list_del_init(&entry->list);
2597 /* no free buffer in pool so take first one and swap pages */
2598 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2599 struct qeth_buffer_pool_entry, list);
2600 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2601 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2602 page = alloc_page(GFP_ATOMIC);
2606 free_page((unsigned long)entry->elements[i]);
2607 entry->elements[i] = page_address(page);
2608 QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2612 list_del_init(&entry->list);
2616 static int qeth_init_input_buffer(struct qeth_card *card,
2617 struct qeth_qdio_buffer *buf)
2619 struct qeth_buffer_pool_entry *pool_entry;
2622 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2623 buf->rx_skb = netdev_alloc_skb(card->dev,
2625 sizeof(struct ipv6hdr));
2630 pool_entry = qeth_find_free_buffer_pool_entry(card);
2635 * since the buffer is accessed only from the input_tasklet
2636 * there shouldn't be a need to synchronize; also, since we use
2637 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2641 buf->pool_entry = pool_entry;
2642 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2643 buf->buffer->element[i].length = PAGE_SIZE;
2644 buf->buffer->element[i].addr = pool_entry->elements[i];
2645 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2646 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2648 buf->buffer->element[i].eflags = 0;
2649 buf->buffer->element[i].sflags = 0;
2654 static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
2655 struct qeth_qdio_out_q *queue)
2657 if (!IS_IQD(card) ||
2658 qeth_iqd_is_mcast_queue(card, queue) ||
2659 card->options.cq == QETH_CQ_ENABLED ||
2660 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
2663 return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
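/*
 * TX bulking note: multiple PRIMED buffers are only flushed with a single
 * SIGA on HiperSockets (IQD) unicast queues that don't use the completion
 * queue; the per-flush limit comes from the SSQD's mmwc field, with 1
 * (i.e. no bulking) as the fallback.
 */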
2666 static int qeth_init_qdio_queues(struct qeth_card *card)
2671 QETH_CARD_TEXT(card, 2, "initqdqs");
2674 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2675 memset(&card->rx, 0, sizeof(struct qeth_rx));
2676 qeth_initialize_working_pool_list(card);
2677 /* give only as many buffers to hardware as we have buffer pool entries */
2678 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
2679 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2680 card->qdio.in_q->next_buf_to_init =
2681 card->qdio.in_buf_pool.buf_count - 1;
2682 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
2683 card->qdio.in_buf_pool.buf_count - 1);
2685 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2690 rc = qeth_cq_init(card);
2695 /* outbound queue */
2696 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2697 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
2699 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2700 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
2701 queue->next_buf_to_fill = 0;
2703 queue->prev_hdr = NULL;
2704 queue->bulk_start = 0;
2705 queue->bulk_count = 0;
2706 queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
2707 atomic_set(&queue->used_buffers, 0);
2708 atomic_set(&queue->set_pci_flags_count, 0);
2709 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
2710 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
2715 static void qeth_ipa_finalize_cmd(struct qeth_card *card,
2716 struct qeth_cmd_buffer *iob)
2718 qeth_mpc_finalize_cmd(card, iob);
2720 /* override with IPA-specific values: */
2721 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
2724 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2726 bool (*match)(struct qeth_cmd_buffer *iob,
2727 struct qeth_cmd_buffer *reply))
2729 u8 prot_type = qeth_mpc_select_prot_type(card);
2730 u16 total_length = iob->length;
2732 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
2734 iob->finalize = qeth_ipa_finalize_cmd;
2737 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2738 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
2739 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
2740 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
2741 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
2742 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
2743 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2744 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
2746 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
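/*
 * IPA PDU framing (descriptive note): qeth_prepare_ipa_cmd() starts from
 * the IPA_PDU_HEADER template and patches in the total length, the
 * protocol type, the ULP connection token and the command length, which is
 * replicated into the three PDU length fields (PDU1/PDU2/PDU3).
 */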
2748 static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
2749 struct qeth_cmd_buffer *reply)
2751 struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
2753 return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
2756 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
2757 enum qeth_ipa_cmds cmd_code,
2758 enum qeth_prot_versions prot,
2759 unsigned int data_length)
2761 enum qeth_link_types link_type = card->info.link_type;
2762 struct qeth_cmd_buffer *iob;
2763 struct qeth_ipacmd_hdr *hdr;
2765 data_length += offsetof(struct qeth_ipa_cmd, data);
2766 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
2771 qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);
2773 hdr = &__ipa_cmd(iob)->hdr;
2774 hdr->command = cmd_code;
2775 hdr->initiator = IPA_CMD_INITIATOR_HOST;
2776 /* hdr->seqno is set by qeth_send_control_data() */
2777 hdr->adapter_type = (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1;
2778 hdr->rel_adapter_no = (u8) card->dev->dev_port;
2779 hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
2780 hdr->param_count = 1;
2781 hdr->prot_version = prot;
2784 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
2786 static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
2787 struct qeth_reply *reply, unsigned long data)
2789 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2791 return (cmd->hdr.return_code) ? -EIO : 0;
2795 * qeth_send_ipa_cmd() - send an IPA command
2797 * See qeth_send_control_data() for explanation of the arguments.
2800 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2801 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
2807 QETH_CARD_TEXT(card, 4, "sendipa");
2809 if (card->read_or_write_problem) {
2814 if (reply_cb == NULL)
2815 reply_cb = qeth_send_ipa_cmd_cb;
2816 rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
2818 qeth_clear_ipacmd_list(card);
2819 qeth_schedule_recovery(card);
2823 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
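/*
 * Typical usage of the two helpers above (a sketch only, mirroring
 * qeth_send_startlan() further below; "my_reply_cb" is a placeholder):
 *
 *	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
 *	if (!iob)
 *		return -ENOMEM;
 *	return qeth_send_ipa_cmd(card, iob, my_reply_cb, NULL);
 *
 * Passing a NULL reply callback selects qeth_send_ipa_cmd_cb(), which only
 * checks hdr.return_code.
 */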
2825 static int qeth_send_startlan_cb(struct qeth_card *card,
2826 struct qeth_reply *reply, unsigned long data)
2828 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2830 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
2833 return (cmd->hdr.return_code) ? -EIO : 0;
2836 static int qeth_send_startlan(struct qeth_card *card)
2838 struct qeth_cmd_buffer *iob;
2840 QETH_CARD_TEXT(card, 2, "strtlan");
2842 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
2845 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
2848 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
2850 if (!cmd->hdr.return_code)
2851 cmd->hdr.return_code =
2852 cmd->data.setadapterparms.hdr.return_code;
2853 return cmd->hdr.return_code;
2856 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
2857 struct qeth_reply *reply, unsigned long data)
2859 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2861 QETH_CARD_TEXT(card, 3, "quyadpcb");
2862 if (qeth_setadpparms_inspect_rc(cmd))
2865 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
2866 card->info.link_type =
2867 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
2868 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
2870 card->options.adp.supported =
2871 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
2875 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
2876 enum qeth_ipa_setadp_cmd adp_cmd,
2877 unsigned int data_length)
2879 struct qeth_ipacmd_setadpparms_hdr *hdr;
2880 struct qeth_cmd_buffer *iob;
2882 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
2884 offsetof(struct qeth_ipacmd_setadpparms,
2889 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
2890 hdr->cmdlength = sizeof(*hdr) + data_length;
2891 hdr->command_code = adp_cmd;
2892 hdr->used_total = 1;
2897 static int qeth_query_setadapterparms(struct qeth_card *card)
2900 struct qeth_cmd_buffer *iob;
2902 QETH_CARD_TEXT(card, 3, "queryadp");
2903 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
2904 SETADP_DATA_SIZEOF(query_cmds_supp));
2907 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
2911 static int qeth_query_ipassists_cb(struct qeth_card *card,
2912 struct qeth_reply *reply, unsigned long data)
2914 struct qeth_ipa_cmd *cmd;
2916 QETH_CARD_TEXT(card, 2, "qipasscb");
2918 cmd = (struct qeth_ipa_cmd *) data;
2920 switch (cmd->hdr.return_code) {
2921 case IPA_RC_SUCCESS:
2923 case IPA_RC_NOTSUPP:
2924 case IPA_RC_L2_UNSUPPORTED_CMD:
2925 QETH_CARD_TEXT(card, 2, "ipaunsup");
2926 card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
2927 card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
2930 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
2931 CARD_DEVID(card), cmd->hdr.return_code);
2935 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
2936 card->options.ipa4 = cmd->hdr.assists;
2937 else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
2938 card->options.ipa6 = cmd->hdr.assists;
2940 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
2945 static int qeth_query_ipassists(struct qeth_card *card,
2946 enum qeth_prot_versions prot)
2949 struct qeth_cmd_buffer *iob;
2951 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
2952 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
2955 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
2959 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
2960 struct qeth_reply *reply, unsigned long data)
2962 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2963 struct qeth_query_switch_attributes *attrs;
2964 struct qeth_switch_info *sw_info;
2966 QETH_CARD_TEXT(card, 2, "qswiatcb");
2967 if (qeth_setadpparms_inspect_rc(cmd))
2970 sw_info = (struct qeth_switch_info *)reply->param;
2971 attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
2972 sw_info->capabilities = attrs->capabilities;
2973 sw_info->settings = attrs->settings;
2974 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
2979 int qeth_query_switch_attributes(struct qeth_card *card,
2980 struct qeth_switch_info *sw_info)
2982 struct qeth_cmd_buffer *iob;
2984 QETH_CARD_TEXT(card, 2, "qswiattr");
2985 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
2987 if (!netif_carrier_ok(card->dev))
2989 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
2992 return qeth_send_ipa_cmd(card, iob,
2993 qeth_query_switch_attributes_cb, sw_info);
2996 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
2997 enum qeth_diags_cmds sub_cmd,
2998 unsigned int data_length)
3000 struct qeth_ipacmd_diagass *cmd;
3001 struct qeth_cmd_buffer *iob;
3003 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
3004 DIAG_HDR_LEN + data_length);
3008 cmd = &__ipa_cmd(iob)->data.diagass;
3009 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
3010 cmd->subcmd = sub_cmd;
3013 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
3015 static int qeth_query_setdiagass_cb(struct qeth_card *card,
3016 struct qeth_reply *reply, unsigned long data)
3018 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3019 u16 rc = cmd->hdr.return_code;
3022 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3026 card->info.diagass_support = cmd->data.diagass.ext;
3030 static int qeth_query_setdiagass(struct qeth_card *card)
3032 struct qeth_cmd_buffer *iob;
3034 QETH_CARD_TEXT(card, 2, "qdiagass");
3035 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3038 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
3041 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3043 unsigned long info = get_zeroed_page(GFP_KERNEL);
3044 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3045 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3046 struct ccw_dev_id ccwid;
3049 tid->chpid = card->info.chpid;
3050 ccw_device_get_id(CARD_RDEV(card), &ccwid);
3051 tid->ssid = ccwid.ssid;
3052 tid->devno = ccwid.devno;
3055 level = stsi(NULL, 0, 0, 0);
3056 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3057 tid->lparnr = info222->lpar_number;
3058 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3059 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3060 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3066 static int qeth_hw_trap_cb(struct qeth_card *card,
3067 struct qeth_reply *reply, unsigned long data)
3069 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3070 u16 rc = cmd->hdr.return_code;
3073 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3079 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3081 struct qeth_cmd_buffer *iob;
3082 struct qeth_ipa_cmd *cmd;
3084 QETH_CARD_TEXT(card, 2, "diagtrap");
3085 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3088 cmd = __ipa_cmd(iob);
3089 cmd->data.diagass.type = 1;
3090 cmd->data.diagass.action = action;
3092 case QETH_DIAGS_TRAP_ARM:
3093 cmd->data.diagass.options = 0x0003;
3094 cmd->data.diagass.ext = 0x00010000 +
3095 sizeof(struct qeth_trap_id);
3096 qeth_get_trap_id(card,
3097 (struct qeth_trap_id *)cmd->data.diagass.cdata);
3099 case QETH_DIAGS_TRAP_DISARM:
3100 cmd->data.diagass.options = 0x0001;
3102 case QETH_DIAGS_TRAP_CAPTURE:
3105 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
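/*
 * Note on qeth_hw_trap() above: when arming the trap, a struct
 * qeth_trap_id (CHPID, subchannel set, device number, LPAR number and, if
 * available, the z/VM guest name gathered via stsi()) is appended to the
 * command so the adapter can attribute the captured trace to the issuing
 * system; the other trap actions do not carry this payload.
 */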
3108 static int qeth_check_qdio_errors(struct qeth_card *card,
3109 struct qdio_buffer *buf,
3110 unsigned int qdio_error,
3111 const char *dbftext)
3114 QETH_CARD_TEXT(card, 2, dbftext);
3115 QETH_CARD_TEXT_(card, 2, " F15=%02X",
3116 buf->element[15].sflags);
3117 QETH_CARD_TEXT_(card, 2, " F14=%02X",
3118 buf->element[14].sflags);
3119 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3120 if ((buf->element[15].sflags) == 0x12) {
3121 QETH_CARD_STAT_INC(card, rx_fifo_errors);
3129 static void qeth_queue_input_buffer(struct qeth_card *card, int index)
3131 struct qeth_qdio_q *queue = card->qdio.in_q;
3132 struct list_head *lh;
3138 count = (index < queue->next_buf_to_init)?
3139 card->qdio.in_buf_pool.buf_count -
3140 (queue->next_buf_to_init - index) :
3141 card->qdio.in_buf_pool.buf_count -
3142 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
3143 /* only requeue at a certain threshold to avoid SIGAs */
3144 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3145 for (i = queue->next_buf_to_init;
3146 i < queue->next_buf_to_init + count; ++i) {
3147 if (qeth_init_input_buffer(card,
3148 &queue->bufs[QDIO_BUFNR(i)])) {
3155 if (newcount < count) {
3156 /* we are in a memory shortage, so we switch back to
3157 traditional skb allocation and drop packets */
3158 atomic_set(&card->force_alloc_skb, 3);
3161 atomic_add_unless(&card->force_alloc_skb, -1, 0);
3166 list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3168 if (i == card->qdio.in_buf_pool.buf_count) {
3169 QETH_CARD_TEXT(card, 2, "qsarbw");
3170 card->reclaim_index = index;
3171 schedule_delayed_work(
3172 &card->buffer_reclaim_work,
3173 QETH_RECLAIM_WORK_TIME);
3179 * according to the old code, requeueing all 128 buffers should be
3180 * avoided in order to benefit from PCI avoidance.
3181 * this function keeps at least one buffer (the buffer at
3182 * 'index') un-requeued -> this buffer is the first buffer that
3183 * will be requeued the next time
3185 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3186 queue->next_buf_to_init, count);
3188 QETH_CARD_TEXT(card, 2, "qinberr");
3190 queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
3195 static void qeth_buffer_reclaim_work(struct work_struct *work)
3197 struct qeth_card *card = container_of(work, struct qeth_card,
3198 buffer_reclaim_work.work);
3200 QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
3201 qeth_queue_input_buffer(card, card->reclaim_index);
3204 static void qeth_handle_send_error(struct qeth_card *card,
3205 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3207 int sbalf15 = buffer->buffer->element[15].sflags;
3209 QETH_CARD_TEXT(card, 6, "hdsnderr");
3210 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3215 if ((sbalf15 >= 15) && (sbalf15 <= 31))
3218 QETH_CARD_TEXT(card, 1, "lnkfail");
3219 QETH_CARD_TEXT_(card, 1, "%04x %02x",
3220 (u16)qdio_err, (u8)sbalf15);
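/*
 * TX error classification (descriptive note): a value of 15..31 in the
 * sflags of SBAL element 15 is treated as a temporary condition and not
 * reported; other error states are logged via the "lnkfail" trace entries
 * together with the QDIO error and the sflags value.
 */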
3224 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3225 * @queue: queue to check for packing buffer
3227 * Returns number of buffers that were prepared for flush.
3229 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3231 struct qeth_qdio_out_buffer *buffer;
3233 buffer = queue->bufs[queue->next_buf_to_fill];
3234 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3235 (buffer->next_element_to_fill > 0)) {
3236 /* it's a packing buffer */
3237 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3238 queue->next_buf_to_fill =
3239 QDIO_BUFNR(queue->next_buf_to_fill + 1);
3246 * Switches to packing state if the number of used buffers on a queue
3247 * reaches a certain limit.
3249 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3251 if (!queue->do_pack) {
3252 if (atomic_read(&queue->used_buffers)
3253 >= QETH_HIGH_WATERMARK_PACK){
3254 /* switch non-PACKING -> PACKING */
3255 QETH_CARD_TEXT(queue->card, 6, "np->pack");
3256 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3263 * Switches from packing to non-packing mode. If there is a packing
3264 * buffer on the queue this buffer will be prepared to be flushed.
3265 * In that case 1 is returned to inform the caller. If no buffer
3266 * has to be flushed, zero is returned.
3268 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3270 if (queue->do_pack) {
3271 if (atomic_read(&queue->used_buffers)
3272 <= QETH_LOW_WATERMARK_PACK) {
3273 /* switch PACKING -> non-PACKING */
3274 QETH_CARD_TEXT(queue->card, 6, "pack->np");
3275 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3277 return qeth_prep_flush_pack_buffer(queue);
3283 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3286 struct qeth_card *card = queue->card;
3287 struct qeth_qdio_out_buffer *buf;
3290 unsigned int qdio_flags;
3292 for (i = index; i < index + count; ++i) {
3293 unsigned int bidx = QDIO_BUFNR(i);
3295 buf = queue->bufs[bidx];
3296 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3297 SBAL_EFLAGS_LAST_ENTRY;
3299 if (queue->bufstates)
3300 queue->bufstates[bidx].user = buf;
3302 if (IS_IQD(queue->card))
3305 if (!queue->do_pack) {
3306 if ((atomic_read(&queue->used_buffers) >=
3307 (QETH_HIGH_WATERMARK_PACK -
3308 QETH_WATERMARK_PACK_FUZZ)) &&
3309 !atomic_read(&queue->set_pci_flags_count)) {
3310 /* it's likely that we'll go to packing mode soon */
3312 atomic_inc(&queue->set_pci_flags_count);
3313 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3316 if (!atomic_read(&queue->set_pci_flags_count)) {
3318 * there's no outstanding PCI any more, so we
3319 * have to request a PCI to be sure that the PCI
3320 * will wake up at some time in the future, so that we
3321 * can flush packed buffers that might still be
3322 * hanging around, which can happen if no
3323 * further send was requested by the stack
3325 atomic_inc(&queue->set_pci_flags_count);
3326 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3331 qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3332 if (atomic_read(&queue->set_pci_flags_count))
3333 qdio_flags |= QDIO_FLAG_PCI_OUT;
3334 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
3335 queue->queue_no, index, count);
3337 /* Fake the TX completion interrupt: */
3339 napi_schedule(&queue->napi);
3342 /* ignore temporary SIGA errors without busy condition */
3345 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3346 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3347 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3348 QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3349 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3351 /* this must not happen under normal circumstances. If it
3352 * does happen, something is really wrong -> recover */
3353 qeth_schedule_recovery(queue->card);
3358 static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3360 qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3362 queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3363 queue->prev_hdr = NULL;
3364 queue->bulk_count = 0;
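/*
 * qeth_flush_queue() pushes out the buffers collected by the bulking logic
 * in __qeth_xmit(): everything from bulk_start for bulk_count buffers is
 * flushed, bulk_start is advanced past them and the bulking state
 * (prev_hdr, bulk_count) is reset for the next batch.
 */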
3367 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3371 int q_was_packing = 0;
3374 * check if we have to switch to non-packing mode or if
3375 * we have to get a pci flag out on the queue
3377 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3378 !atomic_read(&queue->set_pci_flags_count)) {
3379 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
3380 QETH_OUT_Q_UNLOCKED) {
3382 * If we get in here, there was no action in
3383 * do_send_packet. So, we check if there is a
3384 * packing buffer to be flushed here.
3386 index = queue->next_buf_to_fill;
3387 q_was_packing = queue->do_pack;
3388 /* queue->do_pack may change */
3390 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
3392 !atomic_read(&queue->set_pci_flags_count))
3393 flush_cnt += qeth_prep_flush_pack_buffer(queue);
3395 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3397 qeth_flush_buffers(queue, index, flush_cnt);
3398 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3403 static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
3404 unsigned long card_ptr)
3406 struct qeth_card *card = (struct qeth_card *)card_ptr;
3408 if (card->dev->flags & IFF_UP)
3409 napi_schedule_irqoff(&card->napi);
3412 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3416 if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
3420 if (card->options.cq == cq) {
3425 qeth_free_qdio_queues(card);
3426 card->options.cq = cq;
3433 EXPORT_SYMBOL_GPL(qeth_configure_cq);
3435 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3436 unsigned int queue, int first_element,
3439 struct qeth_qdio_q *cq = card->qdio.c_q;
3443 if (!qeth_is_cq(card, queue))
3446 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3447 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3448 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3451 netif_tx_stop_all_queues(card->dev);
3452 qeth_schedule_recovery(card);
3456 for (i = first_element; i < first_element + count; ++i) {
3457 struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
3460 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3461 buffer->element[e].addr) {
3462 unsigned long phys_aob_addr;
3464 phys_aob_addr = (unsigned long) buffer->element[e].addr;
3465 qeth_qdio_handle_aob(card, phys_aob_addr);
3468 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3470 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3471 card->qdio.c_q->next_buf_to_init,
3474 dev_warn(&card->gdev->dev,
3475 "QDIO reported an error, rc=%i\n", rc);
3476 QETH_CARD_TEXT(card, 2, "qcqherr");
3479 cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
3482 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3483 unsigned int qdio_err, int queue,
3484 int first_elem, int count,
3485 unsigned long card_ptr)
3487 struct qeth_card *card = (struct qeth_card *)card_ptr;
3489 QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3490 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3492 if (qeth_is_cq(card, queue))
3493 qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
3495 qeth_schedule_recovery(card);
3498 static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3499 unsigned int qdio_error, int __queue,
3500 int first_element, int count,
3501 unsigned long card_ptr)
3503 struct qeth_card *card = (struct qeth_card *) card_ptr;
3504 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3505 struct net_device *dev = card->dev;
3506 struct netdev_queue *txq;
3509 QETH_CARD_TEXT(card, 6, "qdouhdl");
3510 if (qdio_error & QDIO_ERROR_FATAL) {
3511 QETH_CARD_TEXT(card, 2, "achkcond");
3512 netif_tx_stop_all_queues(dev);
3513 qeth_schedule_recovery(card);
3517 for (i = first_element; i < (first_element + count); ++i) {
3518 struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
3520 qeth_handle_send_error(card, buf, qdio_error);
3521 qeth_clear_output_buffer(queue, buf, qdio_error, 0);
3524 atomic_sub(count, &queue->used_buffers);
3525 qeth_check_outbound_queue(queue);
3527 txq = netdev_get_tx_queue(dev, __queue);
3528 /* xmit may have observed the full-condition, but not yet stopped the
3529 * txq. In that case the code below won't trigger. So before returning,
3530 * xmit will re-check the txq's fill level and wake it up if needed.
3532 if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
3533 netif_tx_wake_queue(txq);
3537 * Note: Function assumes that we have 4 outbound queues.
3539 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
3541 struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3544 switch (card->qdio.do_prio_queueing) {
3545 case QETH_PRIO_Q_ING_TOS:
3546 case QETH_PRIO_Q_ING_PREC:
3547 switch (qeth_get_ip_version(skb)) {
3549 tos = ipv4_get_dsfield(ip_hdr(skb));
3552 tos = ipv6_get_dsfield(ipv6_hdr(skb));
3555 return card->qdio.default_out_queue;
3557 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3558 return ~tos >> 6 & 3;
3559 if (tos & IPTOS_MINCOST)
3561 if (tos & IPTOS_RELIABILITY)
3563 if (tos & IPTOS_THROUGHPUT)
3565 if (tos & IPTOS_LOWDELAY)
3568 case QETH_PRIO_Q_ING_SKB:
3569 if (skb->priority > 5)
3571 return ~skb->priority >> 1 & 3;
3572 case QETH_PRIO_Q_ING_VLAN:
3573 if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3574 return ~ntohs(veth->h_vlan_TCI) >>
3575 (VLAN_PRIO_SHIFT + 1) & 3;
3580 return card->qdio.default_out_queue;
3582 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3585 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
3588 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3589 * fragmented part of the SKB. Returns zero for linear SKB.
3591 static int qeth_get_elements_for_frags(struct sk_buff *skb)
3593 int cnt, elements = 0;
3595 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3596 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3598 elements += qeth_get_elements_for_range(
3599 (addr_t)skb_frag_address(frag),
3600 (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3606 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
3607 * to transmit an skb.
3608 * @skb: the skb to operate on.
3609 * @data_offset: skip this part of the skb's linear data
3611 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3612 * skb's data (both its linear part and paged fragments).
3614 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
3616 unsigned int elements = qeth_get_elements_for_frags(skb);
3617 addr_t end = (addr_t)skb->data + skb_headlen(skb);
3618 addr_t start = (addr_t)skb->data + data_offset;
3621 elements += qeth_get_elements_for_range(start, end);
3624 EXPORT_SYMBOL_GPL(qeth_count_elements);
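/*
 * Illustrative example for qeth_count_elements() (assuming 4 KiB pages):
 * an skb whose 3000-byte linear area starts 2000 bytes into a page crosses
 * one page boundary and therefore needs two buffer elements; every page
 * fragment then adds as many further elements as pages it spans.
 */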
3626 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
3630 * qeth_add_hw_header() - add a HW header to an skb.
3631 * @skb: skb that the HW header should be added to.
3632 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3633 * it contains a valid pointer to a qeth_hdr.
3634 * @hdr_len: length of the HW header.
3635 * @proto_len: length of protocol headers that need to be in same page as the
3638 * Returns the pushed length. If the header can't be pushed on
3639 * (e.g. because it would cross a page boundary), it is allocated from
3640 * the cache instead and 0 is returned.
3641 * The number of needed buffer elements is returned in @elements.
3642 * A failure to create the hdr is indicated by a return value < 0.
3644 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
3645 struct sk_buff *skb, struct qeth_hdr **hdr,
3646 unsigned int hdr_len, unsigned int proto_len,
3647 unsigned int *elements)
3649 const unsigned int contiguous = proto_len ? proto_len : 1;
3650 const unsigned int max_elements = queue->max_elements;
3651 unsigned int __elements;
3657 start = (addr_t)skb->data - hdr_len;
3658 end = (addr_t)skb->data;
3660 if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3661 /* Push HW header into same page as first protocol header. */
3663 /* ... but TSO always needs a separate element for headers: */
3664 if (skb_is_gso(skb))
3665 __elements = 1 + qeth_count_elements(skb, proto_len);
3667 __elements = qeth_count_elements(skb, 0);
3668 } else if (!proto_len && PAGE_ALIGNED(skb->data)) {
3669 /* Push HW header into preceding page, flush with skb->data. */
3671 __elements = 1 + qeth_count_elements(skb, 0);
3673 /* Use header cache, copy protocol headers up. */
3675 __elements = 1 + qeth_count_elements(skb, proto_len);
3678 /* Compress skb to fit into one IO buffer: */
3679 if (__elements > max_elements) {
3680 if (!skb_is_nonlinear(skb)) {
3681 /* Drop it, no easy way of shrinking it further. */
3682 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
3683 max_elements, __elements, skb->len);
3687 rc = skb_linearize(skb);
3689 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
3693 QETH_TXQ_STAT_INC(queue, skbs_linearized);
3694 /* Linearization changed the layout, re-evaluate: */
3698 *elements = __elements;
3699 /* Add the header: */
3701 *hdr = skb_push(skb, hdr_len);
3705 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
3707 *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
3710 /* Copy protocol headers behind HW header: */
3711 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
3715 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
3716 struct sk_buff *curr_skb,
3717 struct qeth_hdr *curr_hdr)
3719 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
3720 struct qeth_hdr *prev_hdr = queue->prev_hdr;
3725 /* All packets must have the same target: */
3726 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
3727 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
3729 return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
3730 eth_hdr(curr_skb)->h_dest) &&
3731 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
3734 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
3735 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
3738 static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
3739 struct qeth_qdio_out_buffer *buf,
3740 bool is_first_elem, unsigned int offset)
3742 struct qdio_buffer *buffer = buf->buffer;
3743 int element = buf->next_element_to_fill;
3744 int length = skb_headlen(skb) - offset;
3745 char *data = skb->data + offset;
3746 unsigned int elem_length, cnt;
3748 /* map linear part into buffer element(s) */
3749 while (length > 0) {
3750 elem_length = min_t(unsigned int, length,
3751 PAGE_SIZE - offset_in_page(data));
3753 buffer->element[element].addr = data;
3754 buffer->element[element].length = elem_length;
3755 length -= elem_length;
3756 if (is_first_elem) {
3757 is_first_elem = false;
3758 if (length || skb_is_nonlinear(skb))
3759 /* skb needs additional elements */
3760 buffer->element[element].eflags =
3761 SBAL_EFLAGS_FIRST_FRAG;
3763 buffer->element[element].eflags = 0;
3765 buffer->element[element].eflags =
3766 SBAL_EFLAGS_MIDDLE_FRAG;
3769 data += elem_length;
3773 /* map page frags into buffer element(s) */
3774 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3775 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3777 data = skb_frag_address(frag);
3778 length = skb_frag_size(frag);
3779 while (length > 0) {
3780 elem_length = min_t(unsigned int, length,
3781 PAGE_SIZE - offset_in_page(data));
3783 buffer->element[element].addr = data;
3784 buffer->element[element].length = elem_length;
3785 buffer->element[element].eflags =
3786 SBAL_EFLAGS_MIDDLE_FRAG;
3788 length -= elem_length;
3789 data += elem_length;
3794 if (buffer->element[element - 1].eflags)
3795 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
3796 buf->next_element_to_fill = element;
3801 * qeth_fill_buffer() - map skb into an output buffer
3802 * @buf: buffer to transport the skb
3803 * @skb: skb to map into the buffer
3804 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
3805 * from qeth_core_header_cache.
3806 * @offset: when mapping the skb, start at skb->data + offset
3807 * @hd_len: if > 0, build a dedicated header element of this size
3809 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
3810 struct sk_buff *skb, struct qeth_hdr *hdr,
3811 unsigned int offset, unsigned int hd_len)
3813 struct qdio_buffer *buffer = buf->buffer;
3814 bool is_first_elem = true;
3816 __skb_queue_tail(&buf->skb_list, skb);
3818 /* build dedicated header element */
3820 int element = buf->next_element_to_fill;
3821 is_first_elem = false;
3823 buffer->element[element].addr = hdr;
3824 buffer->element[element].length = hd_len;
3825 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3826 /* remember to free cache-allocated qeth_hdr: */
3827 buf->is_header[element] = ((void *)hdr != skb->data);
3828 buf->next_element_to_fill++;
3831 return __qeth_fill_buffer(skb, buf, is_first_elem, offset);
3834 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3835 struct sk_buff *skb, unsigned int elements,
3836 struct qeth_hdr *hdr, unsigned int offset,
3837 unsigned int hd_len)
3839 unsigned int bytes = qdisc_pkt_len(skb);
3840 struct qeth_qdio_out_buffer *buffer;
3841 unsigned int next_element;
3842 struct netdev_queue *txq;
3843 bool stopped = false;
3846 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
3847 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
3849 /* Just a sanity check, the wake/stop logic should ensure that we always
3850 * get a free buffer.
3852 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
3855 flush = !qeth_iqd_may_bulk(queue, skb, hdr);
3858 (buffer->next_element_to_fill + elements > queue->max_elements)) {
3859 if (buffer->next_element_to_fill > 0) {
3860 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3861 queue->bulk_count++;
3864 if (queue->bulk_count >= queue->bulk_max)
3868 qeth_flush_queue(queue);
3870 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
3871 queue->bulk_count)];
3873 /* Sanity-check again: */
3874 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
3878 if (buffer->next_element_to_fill == 0 &&
3879 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
3880 /* If a TX completion happens right _here_ and fails to wake
3881 * the txq, then our re-check below will catch the race.
3883 QETH_TXQ_STAT_INC(queue, stopped);
3884 netif_tx_stop_queue(txq);
3888 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
3889 buffer->bytes += bytes;
3890 queue->prev_hdr = hdr;
3892 flush = __netdev_tx_sent_queue(txq, bytes,
3893 !stopped && netdev_xmit_more());
3895 if (flush || next_element >= queue->max_elements) {
3896 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3897 queue->bulk_count++;
3899 if (queue->bulk_count >= queue->bulk_max)
3903 qeth_flush_queue(queue);
3906 if (stopped && !qeth_out_queue_is_full(queue))
3907 netif_tx_start_queue(txq);
3911 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3912 struct sk_buff *skb, struct qeth_hdr *hdr,
3913 unsigned int offset, unsigned int hd_len,
3914 int elements_needed)
3916 struct qeth_qdio_out_buffer *buffer;
3917 unsigned int next_element;
3918 struct netdev_queue *txq;
3919 bool stopped = false;
3921 int flush_count = 0;
3926 /* spin until we get the queue ... */
3927 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
3928 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
3929 start_index = queue->next_buf_to_fill;
3930 buffer = queue->bufs[queue->next_buf_to_fill];
3932 /* Just a sanity check, the wake/stop logic should ensure that we always
3933 * get a free buffer.
3935 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
3936 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3940 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
3942 /* check if we need to switch packing state of this queue */
3943 qeth_switch_to_packing_if_needed(queue);
3944 if (queue->do_pack) {
3946 /* does packet fit in current buffer? */
3947 if (buffer->next_element_to_fill + elements_needed >
3948 queue->max_elements) {
3949 /* ... no -> set state PRIMED */
3950 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3952 queue->next_buf_to_fill =
3953 QDIO_BUFNR(queue->next_buf_to_fill + 1);
3954 buffer = queue->bufs[queue->next_buf_to_fill];
3956 /* We stepped forward, so sanity-check again: */
3957 if (atomic_read(&buffer->state) !=
3958 QETH_QDIO_BUF_EMPTY) {
3959 qeth_flush_buffers(queue, start_index,
3961 atomic_set(&queue->state,
3962 QETH_OUT_Q_UNLOCKED);
3969 if (buffer->next_element_to_fill == 0 &&
3970 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
3971 /* If a TX completion happens right _here_ and fails to wake
3972 * the txq, then our re-check below will catch the race.
3974 QETH_TXQ_STAT_INC(queue, stopped);
3975 netif_tx_stop_queue(txq);
3979 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
3982 QETH_TXQ_STAT_INC(queue, skbs_pack);
3983 if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
3985 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3986 queue->next_buf_to_fill =
3987 QDIO_BUFNR(queue->next_buf_to_fill + 1);
3991 qeth_flush_buffers(queue, start_index, flush_count);
3992 else if (!atomic_read(&queue->set_pci_flags_count))
3993 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
3995 * queue->state will go from LOCKED -> UNLOCKED or from
3996 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
3997 * (switch packing state or flush buffer to get another pci flag out).
3998 * In that case we will enter this loop
4000 while (atomic_dec_return(&queue->state)) {
4001 start_index = queue->next_buf_to_fill;
4002 /* check if we can go back to non-packing state */
4003 tmp = qeth_switch_to_nonpacking_if_needed(queue);
4005 * check if we need to flush a packing buffer to get a pci
4006 * flag out on the queue
4008 if (!tmp && !atomic_read(&queue->set_pci_flags_count))
4009 tmp = qeth_prep_flush_pack_buffer(queue);
4011 qeth_flush_buffers(queue, start_index, tmp);
4016 /* at this point the queue is UNLOCKED again */
4018 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
4020 if (stopped && !qeth_out_queue_is_full(queue))
4021 netif_tx_start_queue(txq);
4024 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
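/*
 * Two TX fill paths exist in this file: qeth_do_send_packet() above
 * implements the classic packing path, where the queue is serialized via
 * the queue->state cmpxchg lock and several small skbs may be packed into
 * one buffer before it is PRIMED; __qeth_xmit() further above is the
 * bulking variant, and qeth_xmit() below picks between the two.
 */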
4026 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4027 unsigned int payload_len, struct sk_buff *skb,
4028 unsigned int proto_len)
4030 struct qeth_hdr_ext_tso *ext = &hdr->ext;
4032 ext->hdr_tot_len = sizeof(*ext);
4033 ext->imb_hdr_no = 1;
4035 ext->hdr_version = 1;
4037 ext->payload_len = payload_len;
4038 ext->mss = skb_shinfo(skb)->gso_size;
4039 ext->dg_hdr_len = proto_len;
4042 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4043 struct qeth_qdio_out_q *queue, int ipv,
4044 void (*fill_header)(struct qeth_qdio_out_q *queue,
4045 struct qeth_hdr *hdr, struct sk_buff *skb,
4046 int ipv, unsigned int data_len))
4048 unsigned int proto_len, hw_hdr_len;
4049 unsigned int frame_len = skb->len;
4050 bool is_tso = skb_is_gso(skb);
4051 unsigned int data_offset = 0;
4052 struct qeth_hdr *hdr = NULL;
4053 unsigned int hd_len = 0;
4054 unsigned int elements;
4058 hw_hdr_len = sizeof(struct qeth_hdr_tso);
4059 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4061 hw_hdr_len = sizeof(struct qeth_hdr);
4062 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4065 rc = skb_cow_head(skb, hw_hdr_len);
4069 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4073 if (is_tso || !push_len) {
4074 /* HW header needs its own buffer element. */
4075 hd_len = hw_hdr_len + proto_len;
4076 data_offset = push_len + proto_len;
4078 memset(hdr, 0, hw_hdr_len);
4079 fill_header(queue, hdr, skb, ipv, frame_len);
4081 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4082 frame_len - proto_len, skb, proto_len);
4085 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
4088 /* TODO: drop skb_orphan() once TX completion is fast enough */
4090 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4094 if (rc && !push_len)
4095 kmem_cache_free(qeth_core_header_cache, hdr);
4099 EXPORT_SYMBOL_GPL(qeth_xmit);
4101 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4102 struct qeth_reply *reply, unsigned long data)
4104 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4105 struct qeth_ipacmd_setadpparms *setparms;
4107 QETH_CARD_TEXT(card, 4, "prmadpcb");
4109 setparms = &(cmd->data.setadapterparms);
4110 if (qeth_setadpparms_inspect_rc(cmd)) {
4111 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4112 setparms->data.mode = SET_PROMISC_MODE_OFF;
4114 card->info.promisc_mode = setparms->data.mode;
4115 return (cmd->hdr.return_code) ? -EIO : 0;
4118 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
4120 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
4121 SET_PROMISC_MODE_OFF;
4122 struct qeth_cmd_buffer *iob;
4123 struct qeth_ipa_cmd *cmd;
4125 QETH_CARD_TEXT(card, 4, "setprom");
4126 QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4128 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4129 SETADP_DATA_SIZEOF(mode));
4132 cmd = __ipa_cmd(iob);
4133 cmd->data.setadapterparms.data.mode = mode;
4134 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4136 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4138 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4139 struct qeth_reply *reply, unsigned long data)
4141 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4142 struct qeth_ipacmd_setadpparms *adp_cmd;
4144 QETH_CARD_TEXT(card, 4, "chgmaccb");
4145 if (qeth_setadpparms_inspect_rc(cmd))
4148 adp_cmd = &cmd->data.setadapterparms;
4149 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4150 return -EADDRNOTAVAIL;
4152 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4153 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4154 return -EADDRNOTAVAIL;
4156 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4160 int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4163 struct qeth_cmd_buffer *iob;
4164 struct qeth_ipa_cmd *cmd;
4166 QETH_CARD_TEXT(card, 4, "chgmac");
4168 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4169 SETADP_DATA_SIZEOF(change_addr));
4172 cmd = __ipa_cmd(iob);
4173 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4174 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4175 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4176 card->dev->dev_addr);
4177 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4181 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4183 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4184 struct qeth_reply *reply, unsigned long data)
4186 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4187 struct qeth_set_access_ctrl *access_ctrl_req;
4188 int fallback = *(int *)reply->param;
4190 QETH_CARD_TEXT(card, 4, "setaccb");
4191 if (cmd->hdr.return_code)
4193 qeth_setadpparms_inspect_rc(cmd);
4195 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4196 QETH_CARD_TEXT_(card, 2, "rc=%d",
4197 cmd->data.setadapterparms.hdr.return_code);
4198 if (cmd->data.setadapterparms.hdr.return_code !=
4199 SET_ACCESS_CTRL_RC_SUCCESS)
4200 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4201 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4202 cmd->data.setadapterparms.hdr.return_code);
4203 switch (cmd->data.setadapterparms.hdr.return_code) {
4204 case SET_ACCESS_CTRL_RC_SUCCESS:
4205 if (card->options.isolation == ISOLATION_MODE_NONE) {
4206 dev_info(&card->gdev->dev,
4207 "QDIO data connection isolation is deactivated\n");
4209 dev_info(&card->gdev->dev,
4210 "QDIO data connection isolation is activated\n");
4213 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4214 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4217 card->options.isolation = card->options.prev_isolation;
4219 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4220 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4223 card->options.isolation = card->options.prev_isolation;
4225 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4226 dev_err(&card->gdev->dev, "Adapter does not "
4227 "support QDIO data connection isolation\n");
4229 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4230 dev_err(&card->gdev->dev,
4231 "Adapter is dedicated. "
4232 "QDIO data connection isolation not supported\n");
4234 card->options.isolation = card->options.prev_isolation;
4236 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4237 dev_err(&card->gdev->dev,
4238 "TSO does not permit QDIO data connection isolation\n");
4240 card->options.isolation = card->options.prev_isolation;
4242 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4243 dev_err(&card->gdev->dev, "The adjacent switch port does not "
4244 "support reflective relay mode\n");
4246 card->options.isolation = card->options.prev_isolation;
4248 case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4249 dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4250 "enabled at the adjacent switch port");
4252 card->options.isolation = card->options.prev_isolation;
4254 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4255 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4256 "at the adjacent switch failed\n");
4259 /* this should never happen */
4261 card->options.isolation = card->options.prev_isolation;
4264 return (cmd->hdr.return_code) ? -EIO : 0;
4267 static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4268 enum qeth_ipa_isolation_modes isolation, int fallback)
4271 struct qeth_cmd_buffer *iob;
4272 struct qeth_ipa_cmd *cmd;
4273 struct qeth_set_access_ctrl *access_ctrl_req;
4275 QETH_CARD_TEXT(card, 4, "setacctl");
4277 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4278 SETADP_DATA_SIZEOF(set_access_ctrl));
4281 cmd = __ipa_cmd(iob);
4282 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4283 access_ctrl_req->subcmd_code = isolation;
4285 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4287 QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4291 int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
4295 QETH_CARD_TEXT(card, 4, "setactlo");
4297 if ((IS_OSD(card) || IS_OSX(card)) &&
4298 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4299 rc = qeth_setadpparms_set_access_ctrl(card,
4300 card->options.isolation, fallback);
4302 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
4303 rc, CARD_DEVID(card));
4306 } else if (card->options.isolation != ISOLATION_MODE_NONE) {
4307 card->options.isolation = ISOLATION_MODE_NONE;
4309 dev_err(&card->gdev->dev, "Adapter does not "
4310 "support QDIO data connection isolation\n");
4316 void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
4318 struct qeth_card *card;
4320 card = dev->ml_priv;
4321 QETH_CARD_TEXT(card, 4, "txtimeo");
4322 qeth_schedule_recovery(card);
4324 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
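/*
 * qeth_mdio_read() below does not talk to a real PHY: it synthesizes
 * plausible MII register contents (link up, 10/100 half/full duplex
 * capabilities, a PHY ID derived from the MAC address) so that generic MII
 * ioctls receive sensible answers even though no PHY is exposed to the
 * driver.
 */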
4326 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4328 struct qeth_card *card = dev->ml_priv;
4332 case MII_BMCR: /* Basic mode control register */
4334 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4335 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4336 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4337 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4338 rc |= BMCR_SPEED100;
4340 case MII_BMSR: /* Basic mode status register */
4341 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4342 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4345 case MII_PHYSID1: /* PHYS ID 1 */
4346 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4348 rc = (rc >> 5) & 0xFFFF;
4350 case MII_PHYSID2: /* PHYS ID 2 */
4351 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4353 case MII_ADVERTISE: /* Advertisement control reg */
4356 case MII_LPA: /* Link partner ability reg */
4357 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4358 LPA_100BASE4 | LPA_LPACK;
4360 case MII_EXPANSION: /* Expansion register */
4362 case MII_DCOUNTER: /* disconnect counter */
4364 case MII_FCSCOUNTER: /* false carrier counter */
4366 case MII_NWAYTEST: /* N-way auto-neg test register */
4368 case MII_RERRCOUNTER: /* rx error counter */
4369 rc = card->stats.rx_length_errors +
4370 card->stats.rx_frame_errors +
4371 card->stats.rx_fifo_errors;
4373 case MII_SREVISION: /* silicon revision */
4375 case MII_RESV1: /* reserved 1 */
4377 case MII_LBRERROR: /* loopback, rx, bypass error */
4379 case MII_PHYADDR: /* physical address */
4381 case MII_RESV2: /* reserved 2 */
4383 case MII_TPISTATUS: /* TPI status for 10mbps */
4385 case MII_NCONFIG: /* network interface config */
4393 static int qeth_snmp_command_cb(struct qeth_card *card,
4394 struct qeth_reply *reply, unsigned long data)
4396 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4397 struct qeth_arp_query_info *qinfo = reply->param;
4398 struct qeth_ipacmd_setadpparms *adp_cmd;
4399 unsigned int data_len;
4402 QETH_CARD_TEXT(card, 3, "snpcmdcb");
4404 if (cmd->hdr.return_code) {
4405 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4408 if (cmd->data.setadapterparms.hdr.return_code) {
4409 cmd->hdr.return_code =
4410 cmd->data.setadapterparms.hdr.return_code;
4411 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4415 adp_cmd = &cmd->data.setadapterparms;
4416 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
4417 if (adp_cmd->hdr.seq_no == 1) {
4418 snmp_data = &adp_cmd->data.snmp;
4420 snmp_data = &adp_cmd->data.snmp.request;
4421 data_len -= offsetof(struct qeth_snmp_cmd, request);
4424 /* check if there is enough room in userspace */
4425 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4426 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
4429 QETH_CARD_TEXT_(card, 4, "snore%i",
4430 cmd->data.setadapterparms.hdr.used_total);
4431 QETH_CARD_TEXT_(card, 4, "sseqn%i",
4432 cmd->data.setadapterparms.hdr.seq_no);
4433 /* copy entries to user buffer */
4434 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4435 qinfo->udata_offset += data_len;
4437 if (cmd->data.setadapterparms.hdr.seq_no <
4438 cmd->data.setadapterparms.hdr.used_total)
4443 static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4445 struct qeth_snmp_ureq __user *ureq;
4446 struct qeth_cmd_buffer *iob;
4447 unsigned int req_len;
4448 struct qeth_arp_query_info qinfo = {0, };
4451 QETH_CARD_TEXT(card, 3, "snmpcmd");
4453 if (IS_VM_NIC(card))
4456 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4460 ureq = (struct qeth_snmp_ureq __user *) udata;
4461 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
4462 get_user(req_len, &ureq->hdr.req_len))
4465 /* Sanitize user input, to avoid overflows in iob size calculation: */
4466 if (req_len > QETH_BUFSIZE)
4469 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4473 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
4474 &ureq->cmd, req_len)) {
4479 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4484 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4486 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
4488 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4489 CARD_DEVID(card), rc);
4491 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4499 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4500 struct qeth_reply *reply, unsigned long data)
4502 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4503 struct qeth_qoat_priv *priv;
4507 QETH_CARD_TEXT(card, 3, "qoatcb");
4508 if (qeth_setadpparms_inspect_rc(cmd))
4511 priv = (struct qeth_qoat_priv *)reply->param;
4512 resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4513 resdata = (char *)data + 28;
4515 if (resdatalen > (priv->buffer_len - priv->response_len))
4518 memcpy((priv->buffer + priv->response_len), resdata,
4520 priv->response_len += resdatalen;
4522 if (cmd->data.setadapterparms.hdr.seq_no <
4523 cmd->data.setadapterparms.hdr.used_total)
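/*
 * Query OAT (OSA Address Table) support: the callback above accumulates the
 * reply data of each command fragment into a vzalloc'ed buffer; the payload
 * starts behind the IPA and SETADP headers (the fixed offset of 28 bytes).
 * qeth_query_oat_command() below drives the exchange for the
 * SIOC_QETH_QUERY_OAT ioctl and copies the result back to user space,
 * honouring compat (31-bit) callers via compat_ptr().
 */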
4528 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4531 struct qeth_cmd_buffer *iob;
4532 struct qeth_ipa_cmd *cmd;
4533 struct qeth_query_oat *oat_req;
4534 struct qeth_query_oat_data oat_data;
4535 struct qeth_qoat_priv priv;
4538 QETH_CARD_TEXT(card, 3, "qoatcmd");
4540 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
4545 if (copy_from_user(&oat_data, udata,
4546 sizeof(struct qeth_query_oat_data))) {
4551 priv.buffer_len = oat_data.buffer_len;
4552 priv.response_len = 0;
4553 priv.buffer = vzalloc(oat_data.buffer_len);
4559 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4560 SETADP_DATA_SIZEOF(query_oat));
4565 cmd = __ipa_cmd(iob);
4566 oat_req = &cmd->data.setadapterparms.data.query_oat;
4567 oat_req->subcmd_code = oat_data.command;
4569 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
4572 if (is_compat_task())
4573 tmp = compat_ptr(oat_data.ptr);
4575 tmp = (void __user *)(unsigned long)oat_data.ptr;
4577 if (copy_to_user(tmp, priv.buffer,
4578 priv.response_len)) {
4583 oat_data.response_len = priv.response_len;
4585 if (copy_to_user(udata, &oat_data,
4586 sizeof(struct qeth_query_oat_data)))
4596 static int qeth_query_card_info_cb(struct qeth_card *card,
4597 struct qeth_reply *reply, unsigned long data)
4599 struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
4600 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4601 struct qeth_query_card_info *card_info;
4603 QETH_CARD_TEXT(card, 2, "qcrdincb");
4604 if (qeth_setadpparms_inspect_rc(cmd))
4607 card_info = &cmd->data.setadapterparms.data.card_info;
4608 carrier_info->card_type = card_info->card_type;
4609 carrier_info->port_mode = card_info->port_mode;
4610 carrier_info->port_speed = card_info->port_speed;
4614 int qeth_query_card_info(struct qeth_card *card,
4615 struct carrier_info *carrier_info)
4617 struct qeth_cmd_buffer *iob;
4619 QETH_CARD_TEXT(card, 2, "qcrdinfo");
4620 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4622 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
4625 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
4626 (void *)carrier_info);
4630 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
4631 * @card: pointer to a qeth_card
4634 * 0, if a MAC address has been set for the card's netdevice
4635 * a return code, for various error conditions
4637 int qeth_vm_request_mac(struct qeth_card *card)
4639 struct diag26c_mac_resp *response;
4640 struct diag26c_mac_req *request;
4641 struct ccw_dev_id id;
4644 QETH_CARD_TEXT(card, 2, "vmreqmac");
4646 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
4647 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
4648 if (!request || !response) {
4653 ccw_device_get_id(CARD_DDEV(card), &id);
4654 request->resp_buf_len = sizeof(*response);
4655 request->resp_version = DIAG26C_VERSION2;
4656 request->op_code = DIAG26C_GET_MAC;
4657 request->devno = id.devno;
4659 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4660 rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
4661 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4664 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
4666 if (request->resp_buf_len < sizeof(*response) ||
4667 response->version != request->resp_version) {
4669 QETH_CARD_TEXT(card, 2, "badresp");
4670 QETH_CARD_HEX(card, 2, &request->resp_buf_len,
4671 sizeof(request->resp_buf_len));
4672 } else if (!is_valid_ether_addr(response->mac)) {
4674 QETH_CARD_TEXT(card, 2, "badmac");
4675 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
4677 ether_addr_copy(card->dev->dev_addr, response->mac);
4685 EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
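/*
 * Read the device's configuration data and its QDIO subchannel description
 * (SSQD) to find out what the hardware can do. In particular, a HiperSockets
 * (IQD) device that can initiate input queues and offers format-2 completion
 * queues gets Completion Queueing announced; otherwise CQ is marked as not
 * available. The data channel is temporarily started if it was offline.
 */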
4687 static void qeth_determine_capabilities(struct qeth_card *card)
4689 struct qeth_channel *channel = &card->data;
4690 struct ccw_device *ddev = channel->ccwdev;
4692 int ddev_offline = 0;
4694 QETH_CARD_TEXT(card, 2, "detcapab");
4695 if (!ddev->online) {
4697 rc = qeth_start_channel(channel);
4699 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
4704 rc = qeth_read_conf_data(card);
4706 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
4707 CARD_DEVID(card), rc);
4708 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
4712 rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
4714 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
4716 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
4717 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
4718 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
4719 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
4720 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
4721 if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
4722 ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
4723 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
4724 dev_info(&card->gdev->dev,
4725 "Completion Queueing supported\n");
4727 card->options.cq = QETH_CQ_NOTAVAILABLE;
4732 if (ddev_offline == 1)
4733 qeth_stop_channel(channel);
4738 static void qeth_qdio_establish_cq(struct qeth_card *card,
4739 struct qdio_buffer **in_sbal_ptrs,
4740 void (**queue_start_poll)
4741 (struct ccw_device *, int,
4746 if (card->options.cq == QETH_CQ_ENABLED) {
4747 int offset = QDIO_MAX_BUFFERS_PER_Q *
4748 (card->qdio.no_in_queues - 1);
4749 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4750 in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
4751 virt_to_phys(card->qdio.c_q->bufs[i].buffer);
4754 queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
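/*
 * qeth_qdio_establish() collects the physical addresses of all input and
 * output SBALs into flat arrays (one slot per buffer and queue), fills a
 * struct qdio_initialize with them together with the QIB parameter field and
 * the input/output handlers, and then allocates and establishes the QDIO
 * queues for the device. The helper above appends the completion queue's
 * buffers when CQ mode is enabled.
 */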
4758 static int qeth_qdio_establish(struct qeth_card *card)
4760 struct qdio_initialize init_data;
4761 char *qib_param_field;
4762 struct qdio_buffer **in_sbal_ptrs;
4763 void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
4764 struct qdio_buffer **out_sbal_ptrs;
4768 QETH_CARD_TEXT(card, 2, "qdioest");
4770 qib_param_field = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
4771 if (!qib_param_field) {
4773 goto out_free_nothing;
4776 qeth_create_qib_param_field(card, qib_param_field);
4777 qeth_create_qib_param_field_blkt(card, qib_param_field);
4779 in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
4782 if (!in_sbal_ptrs) {
4784 goto out_free_qib_param;
4786 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4787 in_sbal_ptrs[i] = (struct qdio_buffer *)
4788 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
4791 queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
4793 if (!queue_start_poll) {
4795 goto out_free_in_sbals;
4797 for (i = 0; i < card->qdio.no_in_queues; ++i)
4798 queue_start_poll[i] = qeth_qdio_start_poll;
4800 qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
4803 kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
4806 if (!out_sbal_ptrs) {
4808 goto out_free_queue_start_poll;
4810 for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
4811 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
4812 out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
4813 card->qdio.out_qs[i]->bufs[j]->buffer);
4816 memset(&init_data, 0, sizeof(struct qdio_initialize));
4817 init_data.cdev = CARD_DDEV(card);
4818 init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
4820 init_data.qib_param_field_format = 0;
4821 init_data.qib_param_field = qib_param_field;
4822 init_data.no_input_qs = card->qdio.no_in_queues;
4823 init_data.no_output_qs = card->qdio.no_out_queues;
4824 init_data.input_handler = qeth_qdio_input_handler;
4825 init_data.output_handler = qeth_qdio_output_handler;
4826 init_data.queue_start_poll_array = queue_start_poll;
4827 init_data.int_parm = (unsigned long) card;
4828 init_data.input_sbal_addr_array = in_sbal_ptrs;
4829 init_data.output_sbal_addr_array = out_sbal_ptrs;
4830 init_data.output_sbal_state_array = card->qdio.out_bufstates;
4831 init_data.scan_threshold = IS_IQD(card) ? 0 : 32;
4833 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
4834 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
4835 rc = qdio_allocate(&init_data);
4837 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
4840 rc = qdio_establish(&init_data);
4842 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
4843 qdio_free(CARD_DDEV(card));
4847 switch (card->options.cq) {
4848 case QETH_CQ_ENABLED:
4849 dev_info(&card->gdev->dev, "Completion Queue support enabled");
4851 case QETH_CQ_DISABLED:
4852 dev_info(&card->gdev->dev, "Completion Queue support disabled");
4858 kfree(out_sbal_ptrs);
4859 out_free_queue_start_poll:
4860 kfree(queue_start_poll);
4862 kfree(in_sbal_ptrs);
4864 kfree(qib_param_field);
4869 static void qeth_core_free_card(struct qeth_card *card)
4871 QETH_CARD_TEXT(card, 2, "freecrd");
4872 qeth_put_cmd(card->read_cmd);
4873 destroy_workqueue(card->event_wq);
4874 unregister_service_level(&card->qeth_service_level);
4875 dev_set_drvdata(&card->gdev->dev, NULL);
4879 void qeth_trace_features(struct qeth_card *card)
4881 QETH_CARD_TEXT(card, 2, "features");
4882 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
4883 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
4884 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
4885 QETH_CARD_HEX(card, 2, &card->info.diagass_support,
4886 sizeof(card->info.diagass_support));
4888 EXPORT_SYMBOL_GPL(qeth_trace_features);
4890 static struct ccw_device_id qeth_ids[] = {
4891 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
4892 .driver_info = QETH_CARD_TYPE_OSD},
4893 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
4894 .driver_info = QETH_CARD_TYPE_IQD},
4895 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
4896 .driver_info = QETH_CARD_TYPE_OSN},
4897 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
4898 .driver_info = QETH_CARD_TYPE_OSM},
4899 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
4900 .driver_info = QETH_CARD_TYPE_OSX},
4903 MODULE_DEVICE_TABLE(ccw, qeth_ids);
4905 static struct ccw_driver qeth_ccw_driver = {
4907 .owner = THIS_MODULE,
4911 .probe = ccwgroup_probe_ccwdev,
4912 .remove = ccwgroup_remove_ccwdev,
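/*
 * Bring the card up to the point where a discipline can take over: clear any
 * stale QDIO state, (re)start the read/write/data channels, perform the IDX
 * activation handshake, initialize the MPC connection, issue STARTLAN, query
 * the available IPA assists and adapter/diag parameters, and finally set up
 * the QDIO queues. IDX activation is retried a limited number of times
 * before giving up.
 */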
4915 int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
4920 QETH_CARD_TEXT(card, 2, "hrdsetup");
4921 atomic_set(&card->force_alloc_skb, 0);
4922 rc = qeth_update_from_chp_desc(card);
4927 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
4929 rc = qeth_qdio_clear_card(card, !IS_IQD(card));
4930 qeth_stop_channel(&card->data);
4931 qeth_stop_channel(&card->write);
4932 qeth_stop_channel(&card->read);
4933 qdio_free(CARD_DDEV(card));
4935 rc = qeth_start_channel(&card->read);
4938 rc = qeth_start_channel(&card->write);
4941 rc = qeth_start_channel(&card->data);
4945 if (rc == -ERESTARTSYS) {
4946 QETH_CARD_TEXT(card, 2, "break1");
4949 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
4955 qeth_determine_capabilities(card);
4956 qeth_init_tokens(card);
4957 qeth_init_func_level(card);
4959 rc = qeth_idx_activate_read_channel(card);
4961 QETH_CARD_TEXT(card, 2, "break2");
4964 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
4971 rc = qeth_idx_activate_write_channel(card);
4973 QETH_CARD_TEXT(card, 2, "break3");
4976 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
4982 card->read_or_write_problem = 0;
4983 rc = qeth_mpc_initialize(card);
4985 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
4989 rc = qeth_send_startlan(card);
4991 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
4992 if (rc == -ENETDOWN) {
4993 dev_warn(&card->gdev->dev, "The LAN is offline\n");
4994 *carrier_ok = false;
5002 card->options.ipa4.supported = 0;
5003 card->options.ipa6.supported = 0;
5004 card->options.adp.supported = 0;
5005 card->options.sbp.supported_funcs = 0;
5006 card->info.diagass_support = 0;
5007 rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5010 if (qeth_is_supported(card, IPA_IPV6)) {
5011 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5015 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5016 rc = qeth_query_setadapterparms(card);
5018 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
5022 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5023 rc = qeth_query_setdiagass(card);
5025 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
5028 if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
5029 (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
5030 card->info.hwtrap = 0;
5032 rc = qeth_set_access_ctrl_online(card, 0);
5036 rc = qeth_init_qdio_queues(card);
5038 QETH_CARD_TEXT_(card, 2, "9err%d", rc);
5044 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5045 "an error on the device\n");
5046 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5047 CARD_DEVID(card), rc);
5050 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
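/*
 * Online/offline transitions are serialized per card: both paths take
 * discipline_mutex and then conf_mutex before calling into the discipline,
 * so sysfs-triggered reconfiguration cannot race with a recovery that is
 * taking the device offline and online again.
 */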
static int qeth_set_online(struct qeth_card *card)
{
	int rc;

	mutex_lock(&card->discipline_mutex);
	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 2, "setonlin");

	rc = card->discipline->set_online(card);

	mutex_unlock(&card->conf_mutex);
	mutex_unlock(&card->discipline_mutex);
	return rc;
}
5068 int qeth_set_offline(struct qeth_card *card, bool resetting)
5072 mutex_lock(&card->discipline_mutex);
5073 mutex_lock(&card->conf_mutex);
5074 QETH_CARD_TEXT(card, 3, "setoffl");
5076 if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
5077 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5078 card->info.hwtrap = 1;
5082 card->info.open_when_online = card->dev->flags & IFF_UP;
5083 dev_close(card->dev);
5084 netif_device_detach(card->dev);
5085 netif_carrier_off(card->dev);
5088 card->discipline->set_offline(card);
	rc  = qeth_stop_channel(&card->data);
	rc2 = qeth_stop_channel(&card->write);
	rc3 = qeth_stop_channel(&card->read);
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5097 qdio_free(CARD_DDEV(card));
	/* let user space know that the device is offline */
5100 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5102 mutex_unlock(&card->conf_mutex);
5103 mutex_unlock(&card->discipline_mutex);
5106 EXPORT_SYMBOL_GPL(qeth_set_offline);
5108 static int qeth_do_reset(void *data)
5110 struct qeth_card *card = data;
5113 QETH_CARD_TEXT(card, 2, "recover1");
5114 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
5116 QETH_CARD_TEXT(card, 2, "recover2");
5117 dev_warn(&card->gdev->dev,
5118 "A recovery process has been started for the device\n");
5120 qeth_set_offline(card, true);
5121 rc = qeth_set_online(card);
5123 dev_info(&card->gdev->dev,
5124 "Device successfully recovered!\n");
5126 ccwgroup_set_offline(card->gdev);
5127 dev_warn(&card->gdev->dev,
5128 "The qeth device driver failed to recover an error on the device\n");
5130 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
5131 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
5135 #if IS_ENABLED(CONFIG_QETH_L3)
5136 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
5137 struct qeth_hdr *hdr)
5139 struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
5140 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
5141 struct net_device *dev = skb->dev;
5143 if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
5144 dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
5145 "FAKELL", skb->len);
5149 if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
5150 u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
5152 unsigned char tg_addr[ETH_ALEN];
5154 skb_reset_network_header(skb);
5155 switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
5156 case QETH_CAST_MULTICAST:
5157 if (prot == ETH_P_IP)
5158 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
5160 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
5161 QETH_CARD_STAT_INC(card, rx_multicast);
5163 case QETH_CAST_BROADCAST:
5164 ether_addr_copy(tg_addr, dev->broadcast);
5165 QETH_CARD_STAT_INC(card, rx_multicast);
5168 if (card->options.sniffer)
5169 skb->pkt_type = PACKET_OTHERHOST;
5170 ether_addr_copy(tg_addr, dev->dev_addr);
5173 if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
5174 dev_hard_header(skb, dev, prot, tg_addr,
5175 &l3_hdr->next_hop.rx.src_mac, skb->len);
5177 dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
5181 /* copy VLAN tag from hdr into skb */
5182 if (!card->options.sniffer &&
5183 (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
5184 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
5185 u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
5187 l3_hdr->next_hop.rx.vlan_id;
5189 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
5194 static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
5195 struct qeth_hdr *hdr, bool uses_frags)
5197 struct napi_struct *napi = &card->napi;
5200 switch (hdr->hdr.l2.id) {
5201 case QETH_HEADER_TYPE_OSN:
5202 skb_push(skb, sizeof(*hdr));
5203 skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
5204 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5205 QETH_CARD_STAT_INC(card, rx_packets);
5207 card->osn_info.data_cb(skb);
5209 #if IS_ENABLED(CONFIG_QETH_L3)
5210 case QETH_HEADER_TYPE_LAYER3:
5211 qeth_l3_rebuild_skb(card, skb, hdr);
5212 is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5215 case QETH_HEADER_TYPE_LAYER2:
5216 is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5221 napi_free_frags(napi);
5223 dev_kfree_skb_any(skb);
5227 if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
5228 skb->ip_summed = CHECKSUM_UNNECESSARY;
5229 QETH_CARD_STAT_INC(card, rx_skb_csum);
5231 skb->ip_summed = CHECKSUM_NONE;
5234 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5235 QETH_CARD_STAT_INC(card, rx_packets);
5236 if (skb_is_nonlinear(skb)) {
5237 QETH_CARD_STAT_INC(card, rx_sg_skbs);
5238 QETH_CARD_STAT_ADD(card, rx_sg_frags,
5239 skb_shinfo(skb)->nr_frags);
5243 napi_gro_frags(napi);
5245 skb->protocol = eth_type_trans(skb, skb->dev);
5246 napi_gro_receive(napi, skb);
5250 static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
5252 struct page *page = virt_to_page(data);
5253 unsigned int next_frag;
5255 next_frag = skb_shinfo(skb)->nr_frags;
5257 skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}
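/*
 * Parse one received packet out of a QDIO buffer: the qeth_hdr must be fully
 * contained in a single buffer element, then the header type decides how much
 * data is copied into the skb's linear part (Ethernet header, IP/IPv6 header,
 * or the whole OSN PDU). Anything beyond the linear part is attached as page
 * fragments when scatter-gather RX is used, walking the buffer elements until
 * the packet is complete. Called from the qeth_poll() NAPI loop below.
 */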
5266 static int qeth_extract_skb(struct qeth_card *card,
5267 struct qeth_qdio_buffer *qethbuffer,
5268 struct qdio_buffer_element **__element,
5271 struct qdio_buffer_element *element = *__element;
5272 struct qdio_buffer *buffer = qethbuffer->buffer;
5273 struct napi_struct *napi = &card->napi;
5274 unsigned int linear_len = 0;
5275 bool uses_frags = false;
5276 int offset = *__offset;
5277 bool use_rx_sg = false;
5278 unsigned int headroom;
5279 struct qeth_hdr *hdr;
5280 struct sk_buff *skb;
5284 /* qeth_hdr must not cross element boundaries */
5285 while (element->length < offset + sizeof(struct qeth_hdr)) {
5286 if (qeth_is_last_sbale(element))
5292 hdr = element->addr + offset;
5293 offset += sizeof(*hdr);
5296 switch (hdr->hdr.l2.id) {
5297 case QETH_HEADER_TYPE_LAYER2:
5298 skb_len = hdr->hdr.l2.pkt_length;
5299 linear_len = ETH_HLEN;
5302 case QETH_HEADER_TYPE_LAYER3:
5303 skb_len = hdr->hdr.l3.length;
5304 if (!IS_LAYER3(card)) {
5305 QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5309 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
5310 linear_len = ETH_HLEN;
5315 if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
5316 linear_len = sizeof(struct ipv6hdr);
5318 linear_len = sizeof(struct iphdr);
5319 headroom = ETH_HLEN;
5321 case QETH_HEADER_TYPE_OSN:
5322 skb_len = hdr->hdr.osn.pdu_length;
5323 if (!IS_OSN(card)) {
5324 QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5328 linear_len = skb_len;
5329 headroom = sizeof(struct qeth_hdr);
5332 if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
5333 QETH_CARD_STAT_INC(card, rx_frame_errors);
5335 QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5337 /* Can't determine packet length, drop the whole buffer. */
5338 return -EPROTONOSUPPORT;
5341 if (skb_len < linear_len) {
5342 QETH_CARD_STAT_INC(card, rx_dropped_runt);
5346 use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
5347 (skb_len > card->options.rx_sg_cb &&
5348 !atomic_read(&card->force_alloc_skb) &&
5352 /* QETH_CQ_ENABLED only: */
5353 if (qethbuffer->rx_skb &&
5354 skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
5355 skb = qethbuffer->rx_skb;
5356 qethbuffer->rx_skb = NULL;
5360 skb = napi_get_frags(napi);
5362 /* -ENOMEM, no point in falling back further. */
5363 QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5367 if (skb_tailroom(skb) >= linear_len + headroom) {
5372 netdev_info_once(card->dev,
5373 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
5374 linear_len + headroom, skb_tailroom(skb));
5375 /* Shouldn't happen. Don't optimize, fall back to linear skb. */
5378 linear_len = skb_len;
5379 skb = napi_alloc_skb(napi, linear_len + headroom);
5381 QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5387 skb_reserve(skb, headroom);
5390 int data_len = min(skb_len, (int)(element->length - offset));
5391 char *data = element->addr + offset;
5393 skb_len -= data_len;
5396 /* Extract data from current element: */
5397 if (skb && data_len) {
5399 unsigned int copy_len;
5401 copy_len = min_t(unsigned int, linear_len,
5404 skb_put_data(skb, data, copy_len);
5405 linear_len -= copy_len;
5406 data_len -= copy_len;
5411 qeth_create_skb_frag(skb, data, data_len);
5414 /* Step forward to next element: */
5416 if (qeth_is_last_sbale(element)) {
5417 QETH_CARD_TEXT(card, 4, "unexeob");
5418 QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5421 napi_free_frags(napi);
5423 dev_kfree_skb_any(skb);
5424 QETH_CARD_STAT_INC(card,
5434 /* This packet was skipped, go get another one: */
5438 *__element = element;
5441 qeth_receive_skb(card, skb, hdr, uses_frags);
5445 static int qeth_extract_skbs(struct qeth_card *card, int budget,
5446 struct qeth_qdio_buffer *buf, bool *done)
5453 if (qeth_extract_skb(card, buf, &card->rx.b_element,
5454 &card->rx.e_offset)) {
5466 int qeth_poll(struct napi_struct *napi, int budget)
5468 struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5470 struct qeth_qdio_buffer *buffer;
5471 int new_budget = budget;
5475 if (!card->rx.b_count) {
5476 card->rx.qdio_err = 0;
5477 card->rx.b_count = qdio_get_next_buffers(
5478 card->data.ccwdev, 0, &card->rx.b_index,
5479 &card->rx.qdio_err);
5480 if (card->rx.b_count <= 0) {
5481 card->rx.b_count = 0;
5484 card->rx.b_element =
5485 &card->qdio.in_q->bufs[card->rx.b_index]
5486 .buffer->element[0];
5487 card->rx.e_offset = 0;
5490 while (card->rx.b_count) {
5491 buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5492 if (!(card->rx.qdio_err &&
5493 qeth_check_qdio_errors(card, buffer->buffer,
5494 card->rx.qdio_err, "qinerr")))
5495 work_done += qeth_extract_skbs(card, new_budget,
5501 QETH_CARD_STAT_INC(card, rx_bufs);
5502 qeth_put_buffer_pool_entry(card,
5503 buffer->pool_entry);
5504 qeth_queue_input_buffer(card, card->rx.b_index);
5506 if (card->rx.b_count) {
5508 QDIO_BUFNR(card->rx.b_index + 1);
5509 card->rx.b_element =
5511 ->bufs[card->rx.b_index]
5512 .buffer->element[0];
5513 card->rx.e_offset = 0;
5517 if (work_done >= budget)
5520 new_budget = budget - work_done;
5524 if (napi_complete_done(napi, work_done) &&
5525 qdio_start_irq(CARD_DDEV(card), 0))
5526 napi_schedule(napi);
5530 EXPORT_SYMBOL_GPL(qeth_poll);
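/*
 * TX completion for IQD devices: a buffer that QDIO reports as still pending
 * (QDIO_OUTBUF_STATE_FLAG_PENDING) is handed a TX_NOTIFY_PENDING notification
 * for its af_iucv skbs and its slot is re-armed with a fresh buffer, while
 * normally completed buffers are simply cleaned up and recycled.
 */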
5532 static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
5533 unsigned int bidx, bool error, int budget)
5535 struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
5536 u8 sflags = buffer->buffer->element[15].sflags;
5537 struct qeth_card *card = queue->card;
5539 if (queue->bufstates && (queue->bufstates[bidx].flags &
5540 QDIO_OUTBUF_STATE_FLAG_PENDING)) {
5541 WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
5543 if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
5544 QETH_QDIO_BUF_PENDING) ==
5545 QETH_QDIO_BUF_PRIMED)
5546 qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
5548 QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
5550 /* prepare the queue slot for re-use: */
5551 qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
5552 if (qeth_init_qdio_out_buf(queue, bidx)) {
5553 QETH_CARD_TEXT(card, 2, "outofbuf");
5554 qeth_schedule_recovery(card);
5560 if (card->options.cq == QETH_CQ_ENABLED)
5561 qeth_notify_skbs(queue, buffer,
5562 qeth_compute_cq_notification(sflags, 0));
5563 qeth_clear_output_buffer(queue, buffer, error, budget);
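/*
 * NAPI poll routine for IQD TX completion: inspect the output queue, process
 * the completed buffers in batches, report the freed packets/bytes via
 * netdev_tx_completed_queue(), and wake the txq if xmit had stopped it while
 * the queue was full. The poll either re-schedules itself (to give other
 * softirq work a chance) or completes and re-arms the completion timer.
 */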
5566 static int qeth_tx_poll(struct napi_struct *napi, int budget)
5568 struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
5569 unsigned int queue_no = queue->queue_no;
5570 struct qeth_card *card = queue->card;
5571 struct net_device *dev = card->dev;
5572 unsigned int work_done = 0;
5573 struct netdev_queue *txq;
5575 txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
5578 unsigned int start, error, i;
5579 unsigned int packets = 0;
5580 unsigned int bytes = 0;
5583 if (qeth_out_queue_is_empty(queue)) {
5584 napi_complete(napi);
5588 /* Give the CPU a breather: */
5589 if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
5590 QETH_TXQ_STAT_INC(queue, completion_yield);
5591 if (napi_complete_done(napi, 0))
5592 napi_schedule(napi);
5596 completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
5598 if (completed <= 0) {
5599 /* Ensure we see TX completion for pending work: */
5600 if (napi_complete_done(napi, 0))
5601 qeth_tx_arm_timer(queue);
5605 for (i = start; i < start + completed; i++) {
5606 struct qeth_qdio_out_buffer *buffer;
5607 unsigned int bidx = QDIO_BUFNR(i);
5609 buffer = queue->bufs[bidx];
5610 packets += skb_queue_len(&buffer->skb_list);
5611 bytes += buffer->bytes;
5613 qeth_handle_send_error(card, buffer, error);
5614 qeth_iqd_tx_complete(queue, bidx, error, budget);
5615 qeth_cleanup_handled_pending(queue, bidx, false);
5618 netdev_tx_completed_queue(txq, packets, bytes);
5619 atomic_sub(completed, &queue->used_buffers);
5620 work_done += completed;
5622 /* xmit may have observed the full-condition, but not yet
5623 * stopped the txq. In which case the code below won't trigger.
5624 * So before returning, xmit will re-check the txq's fill level
5625 * and wake it up if needed.
5627 if (netif_tx_queue_stopped(txq) &&
5628 !qeth_out_queue_is_full(queue))
5629 netif_tx_wake_queue(txq);
static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}
5640 static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
5641 struct qeth_reply *reply,
5644 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5645 struct qeth_ipa_caps *caps = reply->param;
5647 if (qeth_setassparms_inspect_rc(cmd))
5650 caps->supported = cmd->data.setassparms.data.caps.supported;
5651 caps->enabled = cmd->data.setassparms.data.caps.enabled;
5655 int qeth_setassparms_cb(struct qeth_card *card,
5656 struct qeth_reply *reply, unsigned long data)
5658 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5660 QETH_CARD_TEXT(card, 4, "defadpcb");
5662 if (cmd->hdr.return_code)
5665 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5666 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
5667 card->options.ipa4.enabled = cmd->hdr.assists.enabled;
5668 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
5669 card->options.ipa6.enabled = cmd->hdr.assists.enabled;
5672 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
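/*
 * Helpers for the IPA SETASSPARMS command family: build a command buffer for
 * a given assist (ipa_func), sub-command code and payload length, and send
 * simple on/off requests whose only payload is a 32-bit flags word. The
 * checksum and TSO setup code further down builds on these.
 */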
5674 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
5675 enum qeth_ipa_funcs ipa_func,
5677 unsigned int data_length,
5678 enum qeth_prot_versions prot)
5680 struct qeth_ipacmd_setassparms *setassparms;
5681 struct qeth_ipacmd_setassparms_hdr *hdr;
5682 struct qeth_cmd_buffer *iob;
5684 QETH_CARD_TEXT(card, 4, "getasscm");
5685 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
5687 offsetof(struct qeth_ipacmd_setassparms,
5692 setassparms = &__ipa_cmd(iob)->data.setassparms;
5693 setassparms->assist_no = ipa_func;
5695 hdr = &setassparms->hdr;
5696 hdr->length = sizeof(*hdr) + data_length;
5697 hdr->command_code = cmd_code;
5700 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
5702 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
5703 enum qeth_ipa_funcs ipa_func,
5704 u16 cmd_code, u32 *data,
5705 enum qeth_prot_versions prot)
5707 unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
5708 struct qeth_cmd_buffer *iob;
5710 QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
5711 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
5716 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
5717 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
5719 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
5721 static void qeth_unregister_dbf_views(void)
5724 for (x = 0; x < QETH_DBF_INFOS; x++) {
5725 debug_unregister(qeth_dbf[x].id);
5726 qeth_dbf[x].id = NULL;
5730 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
5732 char dbf_txt_buf[32];
5735 if (!debug_level_enabled(id, level))
5737 va_start(args, fmt);
5738 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
5740 debug_text_event(id, level, dbf_txt_buf);
5742 EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
5744 static int qeth_register_dbf_views(void)
5749 for (x = 0; x < QETH_DBF_INFOS; x++) {
5750 /* register the areas */
5751 qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
5755 if (qeth_dbf[x].id == NULL) {
5756 qeth_unregister_dbf_views();
5760 /* register a view */
5761 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
5763 qeth_unregister_dbf_views();
5767 /* set a passing level */
5768 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
5774 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */
5776 int qeth_core_load_discipline(struct qeth_card *card,
5777 enum qeth_discipline_id discipline)
5779 mutex_lock(&qeth_mod_mutex);
5780 switch (discipline) {
5781 case QETH_DISCIPLINE_LAYER3:
5782 card->discipline = try_then_request_module(
5783 symbol_get(qeth_l3_discipline), "qeth_l3");
5785 case QETH_DISCIPLINE_LAYER2:
5786 card->discipline = try_then_request_module(
5787 symbol_get(qeth_l2_discipline), "qeth_l2");
5792 mutex_unlock(&qeth_mod_mutex);
5794 if (!card->discipline) {
5795 dev_err(&card->gdev->dev, "There is no kernel module to "
5796 "support discipline %d\n", discipline);
5800 card->options.layer = discipline;
5804 void qeth_core_free_discipline(struct qeth_card *card)
5806 if (IS_LAYER2(card))
5807 symbol_put(qeth_l2_discipline);
5809 symbol_put(qeth_l3_discipline);
5810 card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
5811 card->discipline = NULL;
5814 const struct device_type qeth_generic_devtype = {
5815 .name = "qeth_generic",
5816 .groups = qeth_generic_attr_groups,
5818 EXPORT_SYMBOL_GPL(qeth_generic_devtype);
5820 static const struct device_type qeth_osn_devtype = {
5822 .groups = qeth_osn_attr_groups,
5825 #define DBF_NAME_LEN 20
5827 struct qeth_dbf_entry {
5828 char dbf_name[DBF_NAME_LEN];
5829 debug_info_t *dbf_info;
5830 struct list_head dbf_list;
5833 static LIST_HEAD(qeth_dbf_list);
5834 static DEFINE_MUTEX(qeth_dbf_list_mutex);
5836 static debug_info_t *qeth_get_dbf_entry(char *name)
5838 struct qeth_dbf_entry *entry;
5839 debug_info_t *rc = NULL;
5841 mutex_lock(&qeth_dbf_list_mutex);
5842 list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
5843 if (strcmp(entry->dbf_name, name) == 0) {
5844 rc = entry->dbf_info;
5848 mutex_unlock(&qeth_dbf_list_mutex);
5852 static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
5854 struct qeth_dbf_entry *new_entry;
5856 card->debug = debug_register(name, 2, 1, 8);
5858 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
5861 if (debug_register_view(card->debug, &debug_hex_ascii_view))
5863 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
5866 strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
5867 new_entry->dbf_info = card->debug;
5868 mutex_lock(&qeth_dbf_list_mutex);
5869 list_add(&new_entry->dbf_list, &qeth_dbf_list);
5870 mutex_unlock(&qeth_dbf_list_mutex);
5875 debug_unregister(card->debug);
5880 static void qeth_clear_dbf_list(void)
5882 struct qeth_dbf_entry *entry, *tmp;
5884 mutex_lock(&qeth_dbf_list_mutex);
5885 list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
5886 list_del(&entry->dbf_list);
5887 debug_unregister(entry->dbf_info);
5890 mutex_unlock(&qeth_dbf_list_mutex);
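/*
 * Allocate the net_device that matches the card type: HiperSockets (IQD)
 * devices get an "hsi%d" interface with multiple TX queues, OSN devices get a
 * bare "osn%d" interface, and everything else becomes a regular multiqueue
 * Ethernet device. Common settings (ml_priv back-pointer, watchdog timeout,
 * minimum MTU, ethtool ops, SG feature bits) are applied afterwards.
 */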
5893 static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
5895 struct net_device *dev;
5897 switch (card->info.type) {
5898 case QETH_CARD_TYPE_IQD:
5899 dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN,
5900 ether_setup, QETH_MAX_QUEUES, 1);
5902 case QETH_CARD_TYPE_OSM:
5903 dev = alloc_etherdev(0);
5905 case QETH_CARD_TYPE_OSN:
5906 dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
5909 dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1);
5915 dev->ml_priv = card;
5916 dev->watchdog_timeo = QETH_TX_TIMEOUT;
5917 dev->min_mtu = IS_OSN(card) ? 64 : 576;
5918 /* initialized when device first goes online: */
5921 SET_NETDEV_DEV(dev, &card->gdev->dev);
5922 netif_carrier_off(dev);
5925 dev->ethtool_ops = &qeth_osn_ethtool_ops;
5927 dev->ethtool_ops = &qeth_ethtool_ops;
5928 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
5929 dev->hw_features |= NETIF_F_SG;
5930 dev->vlan_features |= NETIF_F_SG;
5932 dev->features |= NETIF_F_SG;
5933 if (netif_set_real_num_tx_queues(dev,
5934 QETH_IQD_MIN_TXQ)) {
5944 struct net_device *qeth_clone_netdev(struct net_device *orig)
5946 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
5951 clone->dev_port = orig->dev_port;
5955 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
5957 struct qeth_card *card;
5960 enum qeth_discipline_id enforced_disc;
5961 char dbf_name[DBF_NAME_LEN];
5963 QETH_DBF_TEXT(SETUP, 2, "probedev");
5966 if (!get_device(dev))
5969 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
5971 card = qeth_alloc_card(gdev);
5973 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
5978 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
5979 dev_name(&gdev->dev));
5980 card->debug = qeth_get_dbf_entry(dbf_name);
5982 rc = qeth_add_dbf_entry(card, dbf_name);
5987 qeth_setup_card(card);
5988 card->dev = qeth_alloc_netdev(card);
5994 card->qdio.no_out_queues = card->dev->num_tx_queues;
5995 rc = qeth_update_from_chp_desc(card);
5998 qeth_determine_capabilities(card);
5999 qeth_set_blkt_defaults(card);
6001 enforced_disc = qeth_enforce_discipline(card);
6002 switch (enforced_disc) {
6003 case QETH_DISCIPLINE_UNDETERMINED:
6004 gdev->dev.type = &qeth_generic_devtype;
6007 card->info.layer_enforced = true;
6008 rc = qeth_core_load_discipline(card, enforced_disc);
6012 gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
6013 card->discipline->devtype;
6014 rc = card->discipline->setup(card->gdev);
6023 qeth_core_free_discipline(card);
6026 free_netdev(card->dev);
6028 qeth_core_free_card(card);
6034 static void qeth_core_remove_device(struct ccwgroup_device *gdev)
6036 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6038 QETH_CARD_TEXT(card, 2, "removedv");
6040 if (card->discipline) {
6041 card->discipline->remove(gdev);
6042 qeth_core_free_discipline(card);
6045 qeth_free_qdio_queues(card);
6047 free_netdev(card->dev);
6048 qeth_core_free_card(card);
6049 put_device(&gdev->dev);
6052 static int qeth_core_set_online(struct ccwgroup_device *gdev)
6054 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6056 enum qeth_discipline_id def_discipline;
6058 if (!card->discipline) {
6059 def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
6060 QETH_DISCIPLINE_LAYER2;
6061 rc = qeth_core_load_discipline(card, def_discipline);
6064 rc = card->discipline->setup(card->gdev);
6066 qeth_core_free_discipline(card);
6071 rc = qeth_set_online(card);
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	return qeth_set_offline(card, false);
}
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qdio_free(CARD_DDEV(card));
}
static int qeth_suspend(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
	if (gdev->state == CCWGROUP_OFFLINE)
		return 0;

	qeth_set_offline(card, false);
	return 0;
}
static int qeth_resume(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	rc = qeth_set_online(card);

	qeth_set_allowed_threads(card, 0xffffffff, 0);
	if (rc)
		dev_warn(&card->gdev->dev, "The qeth device driver failed to recover an error on the device\n");
	return rc;
}
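/*
 * The driver's "group" attribute lets user space combine the three CCW
 * subchannels (read/write/data) of an OSA/HiperSockets device into one
 * ccwgroup device, e.g. (device numbers are just an illustration):
 *
 *   echo 0.0.f500,0.0.f501,0.0.f502 > /sys/bus/ccwgroup/drivers/qeth/group
 */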
6120 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
6125 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
6128 return err ? err : count;
6130 static DRIVER_ATTR_WO(group);
6132 static struct attribute *qeth_drv_attrs[] = {
6133 &driver_attr_group.attr,
6136 static struct attribute_group qeth_drv_attr_group = {
6137 .attrs = qeth_drv_attrs,
6139 static const struct attribute_group *qeth_drv_attr_groups[] = {
6140 &qeth_drv_attr_group,
6144 static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
6146 .groups = qeth_drv_attr_groups,
6147 .owner = THIS_MODULE,
6150 .ccw_driver = &qeth_ccw_driver,
6151 .setup = qeth_core_probe_device,
6152 .remove = qeth_core_remove_device,
6153 .set_online = qeth_core_set_online,
6154 .set_offline = qeth_core_set_offline,
6155 .shutdown = qeth_core_shutdown,
6158 .freeze = qeth_suspend,
6159 .thaw = qeth_resume,
6160 .restore = qeth_resume,
struct qeth_card *qeth_get_card_by_busid(char *bus_id)
{
	struct ccwgroup_device *gdev;
	struct qeth_card *card;

	gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
	if (!gdev)
		return NULL;

	card = dev_get_drvdata(&gdev->dev);
	put_device(&gdev->dev);
	return card;
}
6178 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6180 struct qeth_card *card = dev->ml_priv;
6181 struct mii_ioctl_data *mii_data;
6188 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
6189 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
6191 case SIOC_QETH_GET_CARD_TYPE:
6192 if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
6197 mii_data = if_mii(rq);
6198 mii_data->phy_id = 0;
6201 mii_data = if_mii(rq);
6202 if (mii_data->phy_id != 0)
6205 mii_data->val_out = qeth_mdio_read(dev,
6206 mii_data->phy_id, mii_data->reg_num);
6208 case SIOC_QETH_QUERY_OAT:
6209 rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
6212 if (card->discipline->do_ioctl)
6213 rc = card->discipline->do_ioctl(dev, rq, cmd);
6218 QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
6221 EXPORT_SYMBOL_GPL(qeth_do_ioctl);
6223 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
6226 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6227 u32 *features = reply->param;
6229 if (qeth_setassparms_inspect_rc(cmd))
6232 *features = cmd->data.setassparms.data.flags_32bit;
static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
}
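/*
 * Enabling HW checksumming is a three-step IPA exchange, implemented below:
 * ASS_START queries the assist's supported checksum types, the driver checks
 * that TCP/UDP (and for L3 IPv4 TX also IP header) checksumming is offered,
 * then ASS_ENABLE requests exactly those types and the returned caps are
 * verified. Any failure rolls back via qeth_set_csum_off().
 */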
6243 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6244 enum qeth_prot_versions prot)
6246 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6247 struct qeth_cmd_buffer *iob;
6248 struct qeth_ipa_caps caps;
6252 /* some L3 HW requires combined L3+L4 csum offload: */
6253 if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
6254 cstype == IPA_OUTBOUND_CHECKSUM)
6255 required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6257 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
6262 rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
6266 if ((required_features & features) != required_features) {
6267 qeth_set_csum_off(card, cstype, prot);
6271 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
6272 SETASS_DATA_SIZEOF(flags_32bit),
6275 qeth_set_csum_off(card, cstype, prot);
6279 if (features & QETH_IPA_CHECKSUM_LP2LP)
6280 required_features |= QETH_IPA_CHECKSUM_LP2LP;
6281 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
6282 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6284 qeth_set_csum_off(card, cstype, prot);
6288 if (!qeth_ipa_caps_supported(&caps, required_features) ||
6289 !qeth_ipa_caps_enabled(&caps, required_features)) {
6290 qeth_set_csum_off(card, cstype, prot);
6294 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
6295 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6296 if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) &&
6297 cstype == IPA_OUTBOUND_CHECKSUM)
6298 dev_warn(&card->gdev->dev,
6299 "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
6300 QETH_CARD_IFNAME(card));
static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot)
{
	return on ? qeth_set_csum_on(card, cstype, prot) :
		    qeth_set_csum_off(card, cstype, prot);
}
6311 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
6314 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6315 struct qeth_tso_start_data *tso_data = reply->param;
6317 if (qeth_setassparms_inspect_rc(cmd))
6320 tso_data->mss = cmd->data.setassparms.data.tso.mss;
6321 tso_data->supported = cmd->data.setassparms.data.tso.supported;
static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}
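/*
 * Enabling TSO follows the same start/enable/verify pattern: ASS_START
 * reports the supported large-send types and the maximum MSS, the driver
 * insists on QETH_IPA_LARGE_SEND_TCP, enables it via the caps payload, and
 * double-checks the reply. On success the negotiated MSS is logged.
 */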
6332 static int qeth_set_tso_on(struct qeth_card *card,
6333 enum qeth_prot_versions prot)
6335 struct qeth_tso_start_data tso_data;
6336 struct qeth_cmd_buffer *iob;
6337 struct qeth_ipa_caps caps;
6340 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6341 IPA_CMD_ASS_START, 0, prot);
6345 rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6349 if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
6350 qeth_set_tso_off(card, prot);
6354 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6356 SETASS_DATA_SIZEOF(caps), prot);
6358 qeth_set_tso_off(card, prot);
6362 /* enable TSO capability */
6363 __ipa_cmd(iob)->data.setassparms.data.caps.enabled =
6364 QETH_IPA_LARGE_SEND_TCP;
6365 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6367 qeth_set_tso_off(card, prot);
6371 if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
6372 !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
6373 qeth_set_tso_off(card, prot);
6377 dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no IPv6 Offload Assist available, so rc_ipv4 alone decides */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}
6412 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6413 * @dev: a net_device
6415 void qeth_enable_hw_features(struct net_device *dev)
6417 struct qeth_card *card = dev->ml_priv;
6418 netdev_features_t features;
6420 features = dev->features;
6421 /* force-off any feature that might need an IPA sequence.
6422 * netdev_update_features() will restart them.
6424 dev->features &= ~dev->hw_features;
6425 /* toggle VLAN filter, so that VIDs are re-programmed: */
6426 if (IS_LAYER2(card) && IS_VM_NIC(card)) {
6427 dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
6428 dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6430 netdev_update_features(dev);
6431 if (features != dev->features)
6432 dev_warn(&card->gdev->dev,
6433 "Device recovery failed to restore all offload features\n");
6435 EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6437 int qeth_set_features(struct net_device *dev, netdev_features_t features)
6439 struct qeth_card *card = dev->ml_priv;
6440 netdev_features_t changed = dev->features ^ features;
6443 QETH_CARD_TEXT(card, 2, "setfeat");
6444 QETH_CARD_HEX(card, 2, &features, sizeof(features));
6446 if ((changed & NETIF_F_IP_CSUM)) {
6447 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6448 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
6450 changed ^= NETIF_F_IP_CSUM;
6452 if (changed & NETIF_F_IPV6_CSUM) {
6453 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6454 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
6456 changed ^= NETIF_F_IPV6_CSUM;
6458 if (changed & NETIF_F_RXCSUM) {
6459 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
6461 changed ^= NETIF_F_RXCSUM;
6463 if (changed & NETIF_F_TSO) {
6464 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
6467 changed ^= NETIF_F_TSO;
6469 if (changed & NETIF_F_TSO6) {
6470 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
6473 changed ^= NETIF_F_TSO6;
6476 /* everything changed successfully? */
6477 if ((dev->features ^ features) == changed)
6479 /* something went wrong. save changed features and return error */
6480 dev->features ^= changed;
6483 EXPORT_SYMBOL_GPL(qeth_set_features);
6485 netdev_features_t qeth_fix_features(struct net_device *dev,
6486 netdev_features_t features)
6488 struct qeth_card *card = dev->ml_priv;
6490 QETH_CARD_TEXT(card, 2, "fixfeat");
6491 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
6492 features &= ~NETIF_F_IP_CSUM;
6493 if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
6494 features &= ~NETIF_F_IPV6_CSUM;
6495 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
6496 !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6497 features &= ~NETIF_F_RXCSUM;
6498 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
6499 features &= ~NETIF_F_TSO;
6500 if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
6501 features &= ~NETIF_F_TSO6;
6503 QETH_CARD_HEX(card, 2, &features, sizeof(features));
6506 EXPORT_SYMBOL_GPL(qeth_fix_features);
6508 netdev_features_t qeth_features_check(struct sk_buff *skb,
6509 struct net_device *dev,
6510 netdev_features_t features)
6512 /* GSO segmentation builds skbs with
6513 * a (small) linear part for the headers, and
6514 * page frags for the data.
6515 * Compared to a linear skb, the header-only part consumes an
6516 * additional buffer element. This reduces buffer utilization, and
6517 * hurts throughput. So compress small segments into one element.
6519 if (netif_needs_gso(skb, features)) {
6520 /* match skb_segment(): */
6521 unsigned int doffset = skb->data - skb_mac_header(skb);
6522 unsigned int hsize = skb_shinfo(skb)->gso_size;
6523 unsigned int hroom = skb_headroom(skb);
6525 /* linearize only if resulting skb allocations are order-0: */
6526 if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
6527 features &= ~NETIF_F_SG;
6530 return vlan_features_check(skb, features);
6532 EXPORT_SYMBOL_GPL(qeth_features_check);
6534 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6536 struct qeth_card *card = dev->ml_priv;
6537 struct qeth_qdio_out_q *queue;
6540 QETH_CARD_TEXT(card, 5, "getstat");
6542 stats->rx_packets = card->stats.rx_packets;
6543 stats->rx_bytes = card->stats.rx_bytes;
6544 stats->rx_errors = card->stats.rx_length_errors +
6545 card->stats.rx_frame_errors +
6546 card->stats.rx_fifo_errors;
6547 stats->rx_dropped = card->stats.rx_dropped_nomem +
6548 card->stats.rx_dropped_notsupp +
6549 card->stats.rx_dropped_runt;
6550 stats->multicast = card->stats.rx_multicast;
6551 stats->rx_length_errors = card->stats.rx_length_errors;
6552 stats->rx_frame_errors = card->stats.rx_frame_errors;
6553 stats->rx_fifo_errors = card->stats.rx_fifo_errors;
6555 for (i = 0; i < card->qdio.no_out_queues; i++) {
6556 queue = card->qdio.out_qs[i];
6558 stats->tx_packets += queue->stats.tx_packets;
6559 stats->tx_bytes += queue->stats.tx_bytes;
6560 stats->tx_errors += queue->stats.tx_errors;
6561 stats->tx_dropped += queue->stats.tx_dropped;
6564 EXPORT_SYMBOL_GPL(qeth_get_stats64);
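/*
 * IQD devices reserve a dedicated TX queue for multicast/broadcast traffic;
 * all unicast frames start out on the first unicast queue.
 */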
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	return QETH_IQD_MIN_UCAST_TXQ;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
6575 int qeth_open(struct net_device *dev)
6577 struct qeth_card *card = dev->ml_priv;
6579 QETH_CARD_TEXT(card, 4, "qethopen");
6581 if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
6584 card->data.state = CH_STATE_UP;
6585 netif_tx_start_all_queues(dev);
6587 napi_enable(&card->napi);
6589 napi_schedule(&card->napi);
6591 struct qeth_qdio_out_q *queue;
6594 qeth_for_each_output_queue(card, queue, i) {
6595 netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
6597 napi_enable(&queue->napi);
6598 napi_schedule(&queue->napi);
6601 /* kick-start the NAPI softirq: */
6605 EXPORT_SYMBOL_GPL(qeth_open);
6607 int qeth_stop(struct net_device *dev)
6609 struct qeth_card *card = dev->ml_priv;
6611 QETH_CARD_TEXT(card, 4, "qethstop");
6613 struct qeth_qdio_out_q *queue;
6616 /* Quiesce the NAPI instances: */
6617 qeth_for_each_output_queue(card, queue, i) {
6618 napi_disable(&queue->napi);
6619 del_timer_sync(&queue->timer);
6622 /* Stop .ndo_start_xmit, might still access queue->napi. */
6623 netif_tx_disable(dev);
6625 /* Queues may get re-allocated, so remove the NAPIs here. */
6626 qeth_for_each_output_queue(card, queue, i)
6627 netif_napi_del(&queue->napi);
6629 netif_tx_disable(dev);
6632 napi_disable(&card->napi);
6635 EXPORT_SYMBOL_GPL(qeth_stop);
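/*
 * Module init/exit: register the debug views, root device and slab caches
 * before the CCW and ccwgroup drivers, and tear everything down in reverse
 * order on failure or unload.
 */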
6637 static int __init qeth_core_init(void)
6641 pr_info("loading core functions\n");
6643 rc = qeth_register_dbf_views();
6646 qeth_core_root_dev = root_device_register("qeth");
6647 rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
6650 qeth_core_header_cache =
6651 kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
6652 roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
6654 if (!qeth_core_header_cache) {
6658 qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
6659 sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
6660 if (!qeth_qdio_outbuf_cache) {
6664 rc = ccw_driver_register(&qeth_ccw_driver);
6667 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
6674 ccw_driver_unregister(&qeth_ccw_driver);
6676 kmem_cache_destroy(qeth_qdio_outbuf_cache);
6678 kmem_cache_destroy(qeth_core_header_cache);
6680 root_device_unregister(qeth_core_root_dev);
6682 qeth_unregister_dbf_views();
6684 pr_err("Initializing the qeth device driver failed\n");
6688 static void __exit qeth_core_exit(void)
6690 qeth_clear_dbf_list();
6691 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
6692 ccw_driver_unregister(&qeth_ccw_driver);
6693 kmem_cache_destroy(qeth_qdio_outbuf_cache);
6694 kmem_cache_destroy(qeth_core_header_cache);
6695 root_device_unregister(qeth_core_root_dev);
6696 qeth_unregister_dbf_views();
6697 pr_info("core functions removed\n");
6700 module_init(qeth_core_init);
6701 module_exit(qeth_core_exit);
6702 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
6703 MODULE_DESCRIPTION("qeth core functions");
6704 MODULE_LICENSE("GPL");