// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *	      Frank Pavlic <fpavlic@de.ibm.com>,
 *	      Thomas Spatzier <tspat@de.ibm.com>,
 *	      Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1, 8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]	 = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification);
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);

static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}

static const char *qeth_get_cardname(struct qeth_card *card)
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
		return " OSD Express";
	case QETH_CARD_TYPE_IQD:
		return " HiperSockets";
	case QETH_CARD_TYPE_OSN:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
		switch (card->info.link_type) {
		case QETH_LINK_TYPE_FAST_ETH:
		case QETH_LINK_TYPE_HSTR:
		case QETH_LINK_TYPE_GBIT_ETH:
		case QETH_LINK_TYPE_10GBIT_ETH:
		case QETH_LINK_TYPE_25GBIT_ETH:
		case QETH_LINK_TYPE_LANE_ETH100:
			return "OSD_FE_LANE";
		case QETH_LINK_TYPE_LANE_TR:
			return "OSD_TR_LANE";
		case QETH_LINK_TYPE_LANE_ETH1000:
			return "OSD_GbE_LANE";
		case QETH_LINK_TYPE_LANE:
			return "OSD_ATM_LANE";
		return "OSD_Express";
	case QETH_CARD_TYPE_IQD:
		return "HiperSockets";
	case QETH_CARD_TYPE_OSN:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list) {
		list_del(&pool_entry->list);
	}
}
EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);

static int qeth_alloc_buffer_pool(struct qeth_card *card)
	struct qeth_buffer_pool_entry *pool_entry;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
			qeth_free_buffer_pool(card);
		for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
			ptr = (void *) __get_free_page(GFP_KERNEL);
				free_page((unsigned long)
					  pool_entry->elements[--j]);
				qeth_free_buffer_pool(card);
			pool_entry->elements[j] = ptr;
		list_add(&pool_entry->init_list,
			 &card->qdio.init_pool.entry_list);

int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
	QETH_CARD_TEXT(card, 2, "realcbp");

	if (card->state != CARD_STATE_DOWN)

	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
	qeth_clear_working_pool_list(card);
	qeth_free_buffer_pool(card);
	card->qdio.in_buf_pool.buf_count = bufcnt;
	card->qdio.init_pool.buf_count = bufcnt;
	return qeth_alloc_buffer_pool(card);
EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));

static int qeth_cq_init(struct qeth_card *card)
	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0,
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);

static int qeth_alloc_cq(struct qeth_card *card)
	if (card->options.cq == QETH_CQ_ENABLED) {
		struct qdio_outbuf_state *outbuf_states;

		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
		card->qdio.no_in_queues = 2;
		card->qdio.out_bufstates =
			kcalloc(card->qdio.no_out_queues *
					QDIO_MAX_BUFFERS_PER_Q,
				sizeof(struct qdio_outbuf_state),
		outbuf_states = card->qdio.out_bufstates;
		if (outbuf_states == NULL) {
		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			card->qdio.out_qs[i]->bufstates = outbuf_states;
			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);

	qeth_free_qdio_queue(card->qdio.c_q);
	card->qdio.c_q = NULL;
	dev_err(&card->gdev->dev, "Failed to create completion queue\n");

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
	kfree(card->qdio.out_bufstates);
	card->qdio.out_bufstates = NULL;
}

static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
	enum iucv_tx_notify n;

		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
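
/* Walk the chain of TX buffers reported as pending for slot @bidx and
 * release those whose asynchronous completion has already been handled
 * (or all of them when @forced_cleanup is set, e.g. during recovery).
 */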
static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
	if (q->card->options.cq != QETH_CQ_ENABLED)

	if (q->bufs[bidx]->next_pending != NULL) {
		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;

			if (forced_cleanup ||
			    atomic_read(&c->state) ==
			      QETH_QDIO_BUF_HANDLED_DELAYED) {
				struct qeth_qdio_out_buffer *f = c;

				QETH_CARD_TEXT(f->q->card, 5, "fp");
				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
				/* release here to avoid interleaving between
				   outbound tasklet and inbound tasklet
				   regarding notifications and lifecycle */
				qeth_tx_complete_buf(c, forced_cleanup, 0);

				WARN_ON_ONCE(head->next_pending != f);
				head->next_pending = c;
				kmem_cache_free(qeth_qdio_outbuf_cache, f);

	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
					QETH_QDIO_BUF_HANDLED_DELAYED)) {
		/* for recovery situations */
		qeth_init_qdio_out_buf(q, bidx);
		QETH_CARD_TEXT(q->card, 2, "clprecov");
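
/* Handle delivery of an asynchronous completion (QAOB) for a pending TX
 * buffer: translate the result into an AF_IUCV TX notification, free any
 * header-cache elements still attached to the buffer, and mark the buffer
 * as handled.
 */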
static void qeth_qdio_handle_aob(struct qeth_card *card,
				 unsigned long phys_aob_addr)
	struct qeth_qdio_out_buffer *buffer;
	enum iucv_tx_notify notification;

	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
	QETH_CARD_TEXT(card, 5, "haob");
	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);

	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
		notification = TX_NOTIFY_OK;
		WARN_ON_ONCE(atomic_read(&buffer->state) !=
							QETH_QDIO_BUF_PENDING);
		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
		notification = TX_NOTIFY_DELAYED_OK;

	if (aob->aorc != 0) {
		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
		notification = qeth_compute_cq_notification(aob->aorc, 1);
	qeth_notify_skbs(buffer->q, buffer, notification);

	/* Free dangling allocations. The attached skbs are handled by
	 * qeth_cleanup_handled_pending().
	 */
	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
		if (aob->sba[i] && buffer->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
					(void *) aob->sba[i]);
	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);

	qdio_release_aob(aob);

static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
	return card->options.cq == QETH_CQ_ENABLED &&
	       card->qdio.c_q != NULL &&
	       queue == card->qdio.no_in_queues - 1;

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->cda = (__u32) __pa(data);

static int __qeth_issue_next_read(struct qeth_card *card)
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
		channel->active_cmd = iob;
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		atomic_set(&channel->irq_pending, 0);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
		wake_up(&card->wait_q);

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list);
	spin_unlock_irq(&card->lock);
}

void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
	complete(&iob->done);
EXPORT_SYMBOL_GPL(qeth_notify_cmd);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
			       struct qeth_card *card)
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));

static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SETCCID &&
		    cmd->hdr.command != IPA_CMD_DELCCID &&
		    cmd->hdr.command != IPA_CMD_MODCCID &&
		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				QETH_CARD_IFNAME(card));
			schedule_work(&card->close_dev_work);
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 QETH_CARD_IFNAME(card), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 QETH_CARD_IFNAME(card), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
	case IPA_CMD_MODCCID:
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		QETH_CARD_TEXT(card, 3, "irla");
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		QETH_CARD_TEXT(card, 3, "urla");
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");

void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list)
		qeth_notify_cmd(iob, -EIO);
	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);

static int qeth_check_idx_response(struct qeth_card *card,
				   unsigned char *buffer)
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;

void qeth_put_cmd(struct qeth_cmd_buffer *iob)
	if (refcount_dec_and_test(&iob->ref_count)) {
EXPORT_SYMBOL_GPL(qeth_put_cmd);

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
	qeth_notify_cmd(iob, rc);

struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
				       unsigned int length, unsigned int ccws,
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	INIT_LIST_HEAD(&iob->list);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
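
/* Completion callback for the long-running READ on the control channel:
 * sanity-check the IDX response, hand unsolicited IPA events to
 * qeth_check_ipa_data(), and match replies against the commands waiting on
 * card->cmd_waiter_list before re-arming the next read.
 */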
static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
		qeth_schedule_recovery(card);
		qeth_clear_ipacmd_list(card);

	if (IS_IPA(iob->data)) {
		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
		cmd = qeth_check_ipa_data(card, cmd);
		if (IS_OSN(card) && card->osn_info.assist_cb &&
		    cmd->hdr.command != IPA_CMD_STARTLAN) {
			card->osn_info.assist_cb(card->dev, cmd);
		/* non-IPA commands should only flow during initialization */
		if (card->state != CARD_STATE_DOWN)

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
		if (!IS_IPA(tmp->data) ||
		    __ipa_cmd(tmp)->hdr.seqno == cmd->hdr.seqno) {
			/* take the object outside the lock */
			qeth_get_cmd(request);
	spin_unlock_irqrestore(&card->lock, flags);

	reply = &request->reply;
	if (!reply->callback) {

	spin_lock_irqsave(&request->lock, flags);
	/* Bail out when the requestor has already left: */
	rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
	spin_unlock_irqrestore(&request->lock, flags);

	qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	__qeth_issue_next_read(card);

static int qeth_set_thread_start_bit(struct qeth_card *card,
				     unsigned long thread)
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread) ||
	    (card->thread_start_mask & thread)) {
		spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);

void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_run_thread);

void qeth_schedule_recovery(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "startrec");
	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
}
EXPORT_SYMBOL_GPL(qeth_schedule_recovery);

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
		QETH_CARD_TEXT(card, 2, "DGENCHK");

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
	switch (PTR_ERR(irb)) {
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
		dev_warn(&cdev->dev, "A hardware operation timed out"
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, "  rc???");

static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");

		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;

	channel->active_cmd = NULL;

	rc = qeth_check_irb_error(card, cdev, irb);
		/* IO was terminated, free its resources. */
			qeth_cancel_cmd(iob, rc);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);

	atomic_set(&channel->irq_pending, 0);

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
		channel->state = CH_STATE_STOPPED;

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
		channel->state = CH_STATE_HALTED;

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				 "The qeth device driver failed to recover "
				 "an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);

		rc = qeth_get_problem(card, cdev, irb);
			card->read_or_write_problem = 1;
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);

		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
		iob->callback(card, iob,
			      iob->length - irb->scsw.cmd.count);

	wake_up(&card->wait_q);

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
	struct qeth_qdio_out_q *queue = buf->q;
	struct sk_buff *skb;

	/* release may never happen from within CQ tasklet scope */
	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);

	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);

	if (buf->next_element_to_fill == 0)

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
			QETH_TXQ_STAT_ADD(queue, tx_errors, packets);
			QETH_TXQ_STAT_ADD(queue, tx_packets, packets);
			QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);

		napi_consume_skb(skb, budget);

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
		atomic_dec(&queue->set_pci_flags_count);

	qeth_tx_complete_buf(buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		if (buf->buffer->element[i].addr && buf->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
					buf->buffer->element[i].addr);
		buf->is_header[i] = 0;

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
			qeth_cleanup_handled_pending(q, j, 1);
		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);

void qeth_drain_output_queues(struct qeth_card *card)
	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
EXPORT_SYMBOL_GPL(qeth_drain_output_queues);

static void qeth_free_buffer_pool(struct qeth_card *card)
	struct qeth_buffer_pool_entry *pool_entry, *tmp;

	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.init_pool.entry_list, init_list) {
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
			free_page((unsigned long)pool_entry->elements[i]);
		list_del(&pool_entry->init_list);

static void qeth_clean_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;

	QETH_DBF_TEXT(SETUP, 2, "freech");

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
}

static void qeth_setup_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;

	QETH_DBF_TEXT(SETUP, 2, "setupch");

	channel->state = CH_STATE_DOWN;
	atomic_set(&channel->irq_pending, 0);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));
}

static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
	unsigned int count = single ? 1 : card->dev->num_tx_queues;

	rc = netif_set_real_num_tx_queues(card->dev, count);

	if (card->qdio.no_out_queues == count)

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
	card->qdio.no_out_queues = count;

static int qeth_update_from_chp_desc(struct qeth_card *card)
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);

static void qeth_init_qdio_info(struct qeth_card *card)
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.rx_sg_cb = QETH_RX_SG_CB;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static void qeth_start_kernel_thread(struct work_struct *work)
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					      kernel_thread_starter);

	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(card->discipline->recover, (void *)card,
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
						      QETH_RECOVER_THREAD);

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					      qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			   CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);

	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)

	qeth_setup_channel(&card->read);
	qeth_setup_channel(&card->write);
	qeth_setup_channel(&card->data);
	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);

	destroy_workqueue(card->event_wq);

	dev_set_drvdata(&gdev->dev, NULL);

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
	if (channel->state != CH_STATE_STOPPED)
	channel->state = CH_STATE_DOWN;

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
	if (channel->state != CH_STATE_HALTED)

int qeth_stop_channel(struct qeth_channel *channel)
	struct ccw_device *cdev = channel->ccwdev;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd) {
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);
		channel->active_cmd = NULL;
	}
	spin_unlock_irq(get_ccwdev_lock(cdev));
EXPORT_SYMBOL_GPL(qeth_stop_channel);

static int qeth_halt_channels(struct qeth_card *card)
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);

static int qeth_clear_channels(struct qeth_card *card)
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
	QETH_CARD_TEXT(card, 3, "clhacrd");

		rc = qeth_halt_channels(card);
	return qeth_clear_channels(card);

int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
			rc = qdio_shutdown(CARD_DDEV(card),
					   QDIO_FLAG_CLEANUP_USING_HALT);
			rc = qdio_shutdown(CARD_DDEV(card),
					   QDIO_FLAG_CLEANUP_USING_CLEAR);
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
	case QETH_QDIO_CLEANING:

	rc = qeth_clear_halt_card(card, use_halt);
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);

static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

		QETH_CARD_TEXT_(card, 2, "err%x", rc);

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card) || IS_OSN(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		QETH_CARD_TEXT(card, 3, "force no");

static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_init_tokens(struct qeth_card *card)
{
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;
}

static void qeth_init_func_level(struct qeth_card *card)
	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSN:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

static int qeth_peer_func_level(int level)
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  unsigned int data_length)
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
	iob->finalize = qeth_mpc_finalize_cmd;

/**
 * qeth_send_control_data() - send control command to the card
 * @card:	qeth_card structure pointer
 * @iob:	qeth_cmd_buffer pointer
 * @reply_cb:	callback function pointer
 *  @cb_card:	pointer to the qeth_card structure
 *  @cb_reply:	pointer to the qeth_reply structure
 *  @cb_cmd:	pointer to the original iob for non-IPA
 *		commands, or to the qeth_ipa_cmd structure
 *		for the IPA commands.
 * @reply_param:	private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *   0 if the last or only reply block is received, and
 *   < 0 if an error occurred.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */
static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel),
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
		channel->active_cmd = iob;
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		/* Zap any callback that's still pending: */
		spin_unlock_irq(&iob->lock);

struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

	qeth_notify_cmd(iob, rc);

static int qeth_read_conf_data(struct qeth_card *card)
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
	if (ciw->count < sizeof(struct qeth_node_desc))

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,

	return qeth_send_control_data(card, iob, NULL, NULL);

static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
	rc = qeth_check_idx_response(card, iob->data);

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))

	/* negative reply: */
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));

static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
	struct qeth_channel *channel = iob->channel;

	QETH_CARD_TEXT(card, 2, "idxrdcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

	qeth_notify_cmd(iob, rc);

static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
	struct qeth_channel *channel = iob->channel;

	QETH_CARD_TEXT(card, 2, "idxwrcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);

	qeth_notify_cmd(iob, rc);

static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	struct ccw_dev_id dev_id;

	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
	ccw_device_get_id(CARD_DDEV(card), &dev_id);
	iob->finalize = qeth_idx_finalize_cmd;

		port |= QETH_IDX_ACT_INVAL_FRAME;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);

static int qeth_idx_activate_read_channel(struct qeth_card *card)
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "idxread");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_read_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);

	channel->state = CH_STATE_UP;

static int qeth_idx_activate_write_channel(struct qeth_card *card)
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "idxwrite");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_write_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);

	channel->state = CH_STATE_UP;

static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenblcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);

static int qeth_cm_enable(struct qeth_card *card)
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenable");

	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);

	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);

static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
	       QETH_MPC_TOKEN_LENGTH);

static int qeth_cm_setup(struct qeth_card *card)
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetup");

	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);

	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);

static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
	struct net_device *dev = card->dev;
	unsigned int new_mtu;

		/* IQD needs accurate max MTU to set up its RX buffers: */
		/* tolerate quirky HW: */
			max_mtu = ETH_MAX_MTU;

		/* move any device with default MTU to new max MTU: */
		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;

		/* adjust RX buffer size to new max MTU: */
		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
		if (dev->max_mtu && dev->max_mtu != max_mtu)
			qeth_free_qdio_queues(card);

		/* default MTUs for first setup: */
		else if (IS_LAYER2(card))
			new_mtu = ETH_DATA_LEN;
			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */

	dev->max_mtu = max_mtu;
	dev->mtu = min(new_mtu, max_mtu);

static int qeth_get_mtu_outof_framesize(int framesize)
	switch (framesize) {

static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
	__u16 mtu, framesize;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
	*(u16 *)reply->param = mtu;

	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		card->info.link_type = link_type;
		card->info.link_type = 0;
	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);

static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
		return QETH_PROT_OSN2;
	return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;

static int qeth_ulp_enable(struct qeth_card *card)
	u8 prot_type = qeth_mpc_select_prot_type(card);
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpenabl");

	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);

	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);

	return qeth_update_max_mtu(card, max_mtu);

static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpstpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		QETH_CARD_TEXT(card, 2, "olmlimit");
		dev_err(&card->gdev->dev, "A connection could not be "
			"established because of an OLM limit\n");

static int qeth_ulp_setup(struct qeth_card *card)
	struct qeth_cmd_buffer *iob;
	struct ccw_dev_id dev_id;

	QETH_CARD_TEXT(card, 2, "ulpsetup");

	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

	ccw_device_get_id(CARD_DDEV(card), &dev_id);
	memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);

static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
	struct qeth_qdio_out_buffer *newbuf;

	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);

	newbuf->buffer = q->qdio_bufs[bidx];
	skb_queue_head_init(&newbuf->skb_list);
	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
	newbuf->next_pending = q->bufs[bidx];
	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
	q->bufs[bidx] = newbuf;

static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
	qeth_drain_output_queue(q, true);
	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);

static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {

static void qeth_tx_completion_timer(struct timer_list *timer)
{
	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);

	napi_schedule(&queue->napi);
	QETH_TXQ_STAT_INC(queue, completion_timer);
}

static int qeth_alloc_qdio_queues(struct qeth_card *card)
	QETH_CARD_TEXT(card, 2, "allcqdbf");

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)

	QETH_CARD_TEXT(card, 2, "inq");
	card->qdio.in_q = qeth_alloc_qdio_queue();
	if (!card->qdio.in_q)

	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card))

	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue;

		queue = qeth_alloc_output_queue();
		QETH_CARD_TEXT_(card, 2, "outq %i", i);
		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
		card->qdio.out_qs[i] = queue;
		queue->queue_no = i;
		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);

		/* give outbound qeth_qdio_buffers their qdio_buffers */
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
			WARN_ON(queue->bufs[j]);
			if (qeth_init_qdio_out_buf(queue, j))
				goto out_freeoutqbufs;

	if (qeth_alloc_cq(card))

		kmem_cache_free(qeth_qdio_outbuf_cache,
				card->qdio.out_qs[i]->bufs[j]);
		card->qdio.out_qs[i]->bufs[j] = NULL;
	qeth_free_output_queue(card->qdio.out_qs[--i]);
	card->qdio.out_qs[i] = NULL;
	qeth_free_buffer_pool(card);
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);

static void qeth_free_qdio_queues(struct qeth_card *card)
	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)

	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (card->qdio.in_q->bufs[j].rx_skb)
			dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	for (i = 0; i < card->qdio.no_out_queues; i++) {
		qeth_free_output_queue(card->qdio.out_qs[i]);
		card->qdio.out_qs[i] = NULL;
2422 static void qeth_create_qib_param_field(struct qeth_card *card,
2426 param_field[0] = _ascebc['P'];
2427 param_field[1] = _ascebc['C'];
2428 param_field[2] = _ascebc['I'];
2429 param_field[3] = _ascebc['T'];
2430 *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2431 *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2432 *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2435 static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2438 param_field[16] = _ascebc['B'];
2439 param_field[17] = _ascebc['L'];
2440 param_field[18] = _ascebc['K'];
2441 param_field[19] = _ascebc['T'];
2442 *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2443 *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2444 *((unsigned int *) (&param_field[28])) =
2445 card->info.blkt.inter_packet_jumbo;
2448 static int qeth_qdio_activate(struct qeth_card *card)
2450 QETH_CARD_TEXT(card, 3, "qdioact");
2451 return qdio_activate(CARD_DDEV(card));
2454 static int qeth_dm_act(struct qeth_card *card)
2456 struct qeth_cmd_buffer *iob;
2458 QETH_CARD_TEXT(card, 2, "dmact");
2460 iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2464 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2465 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2466 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2467 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2468 return qeth_send_control_data(card, iob, NULL, NULL);
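/* Bring up the MPC connection: start the control-channel read loop, run
 * CM ENABLE/SETUP and ULP ENABLE/SETUP, allocate and establish the QDIO
 * queues, activate them, and finally issue DM_ACT. */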
2471 static int qeth_mpc_initialize(struct qeth_card *card)
2475 QETH_CARD_TEXT(card, 2, "mpcinit");
2477 rc = qeth_issue_next_read(card);
2479 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2482 rc = qeth_cm_enable(card);
2484 QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2487 rc = qeth_cm_setup(card);
2489 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2492 rc = qeth_ulp_enable(card);
2494 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2497 rc = qeth_ulp_setup(card);
2499 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2502 rc = qeth_alloc_qdio_queues(card);
2504 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2507 rc = qeth_qdio_establish(card);
2509 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2510 qeth_free_qdio_queues(card);
2513 rc = qeth_qdio_activate(card);
2515 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2518 rc = qeth_dm_act(card);
2520 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2526 qeth_qdio_clear_card(card, !IS_IQD(card));
2527 qdio_free(CARD_DDEV(card));
2531 void qeth_print_status_message(struct qeth_card *card)
2533 switch (card->info.type) {
2534 case QETH_CARD_TYPE_OSD:
2535 case QETH_CARD_TYPE_OSM:
2536 case QETH_CARD_TYPE_OSX:
2537 /* VM will use a non-zero first character
2538 * to indicate a HiperSockets-like reporting
2539 * of the level; OSA sets the first character to zero
2541 if (!card->info.mcl_level[0]) {
2542 sprintf(card->info.mcl_level, "%02x%02x",
2543 card->info.mcl_level[2],
2544 card->info.mcl_level[3]);
2548 case QETH_CARD_TYPE_IQD:
2549 if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
2550 card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2551 card->info.mcl_level[0]];
2552 card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2553 card->info.mcl_level[1]];
2554 card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2555 card->info.mcl_level[2]];
2556 card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2557 card->info.mcl_level[3]];
2558 card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2562 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2564 dev_info(&card->gdev->dev,
2565 "Device is a%s card%s%s%s\nwith link type %s.\n",
2566 qeth_get_cardname(card),
2567 (card->info.mcl_level[0]) ? " (level: " : "",
2568 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2569 (card->info.mcl_level[0]) ? ")" : "",
2570 qeth_get_cardname_short(card));
2572 EXPORT_SYMBOL_GPL(qeth_print_status_message);
2574 static void qeth_initialize_working_pool_list(struct qeth_card *card)
2576 struct qeth_buffer_pool_entry *entry;
2578 QETH_CARD_TEXT(card, 5, "inwrklst");
2580 list_for_each_entry(entry,
2581 &card->qdio.init_pool.entry_list, init_list) {
2582 qeth_put_buffer_pool_entry(card, entry);
2586 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2587 struct qeth_card *card)
2589 struct list_head *plh;
2590 struct qeth_buffer_pool_entry *entry;
2594 if (list_empty(&card->qdio.in_buf_pool.entry_list))
2597 list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
2598 entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
2600 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
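/* an element whose page is still referenced elsewhere (e.g. by an
 * skb fragment that the stack has not freed yet) cannot be reused */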
2601 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2607 list_del_init(&entry->list);
2612 /* no free buffer in pool so take first one and swap pages */
2613 entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2614 struct qeth_buffer_pool_entry, list);
2615 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2616 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2617 page = alloc_page(GFP_ATOMIC);
2621 free_page((unsigned long)entry->elements[i]);
2622 entry->elements[i] = page_address(page);
2623 QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2627 list_del_init(&entry->list);
2631 static int qeth_init_input_buffer(struct qeth_card *card,
2632 struct qeth_qdio_buffer *buf)
2634 struct qeth_buffer_pool_entry *pool_entry;
2637 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2638 buf->rx_skb = netdev_alloc_skb(card->dev,
2639 QETH_RX_PULL_LEN + ETH_HLEN);
2644 pool_entry = qeth_find_free_buffer_pool_entry(card);
2649 * since the buffer is accessed only from the input_tasklet
2650 * there shouldn't be a need to synchronize; also, since we use
2651 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2655 buf->pool_entry = pool_entry;
2656 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2657 buf->buffer->element[i].length = PAGE_SIZE;
2658 buf->buffer->element[i].addr = pool_entry->elements[i];
2659 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2660 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2662 buf->buffer->element[i].eflags = 0;
2663 buf->buffer->element[i].sflags = 0;
2668 static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
2669 struct qeth_qdio_out_q *queue)
2671 if (!IS_IQD(card) ||
2672 qeth_iqd_is_mcast_queue(card, queue) ||
2673 card->options.cq == QETH_CQ_ENABLED ||
2674 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
2677 return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
2680 int qeth_init_qdio_queues(struct qeth_card *card)
2685 QETH_CARD_TEXT(card, 2, "initqdqs");
2688 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2689 memset(&card->rx, 0, sizeof(struct qeth_rx));
2690 qeth_initialize_working_pool_list(card);
2691 /* give only as many buffers to hardware as we have buffer pool entries */
2692 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
2693 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2694 card->qdio.in_q->next_buf_to_init =
2695 card->qdio.in_buf_pool.buf_count - 1;
2696 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
2697 card->qdio.in_buf_pool.buf_count - 1);
2699 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2704 rc = qeth_cq_init(card);
2709 /* outbound queue */
2710 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2711 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
2713 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2714 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
2715 queue->next_buf_to_fill = 0;
2717 queue->prev_hdr = NULL;
2718 queue->bulk_start = 0;
2719 queue->bulk_count = 0;
2720 queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
2721 atomic_set(&queue->used_buffers, 0);
2722 atomic_set(&queue->set_pci_flags_count, 0);
2723 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
2724 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
2728 EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
2730 static void qeth_ipa_finalize_cmd(struct qeth_card *card,
2731 struct qeth_cmd_buffer *iob)
2733 qeth_mpc_finalize_cmd(card, iob);
2735 /* override with IPA-specific values: */
2736 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
2739 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2742 u8 prot_type = qeth_mpc_select_prot_type(card);
2743 u16 total_length = iob->length;
2745 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
2747 iob->finalize = qeth_ipa_finalize_cmd;
2749 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
2750 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
2751 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
2752 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
2753 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
2754 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
2755 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2756 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
2758 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
2760 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
2761 enum qeth_ipa_cmds cmd_code,
2762 enum qeth_prot_versions prot,
2763 unsigned int data_length)
2765 enum qeth_link_types link_type = card->info.link_type;
2766 struct qeth_cmd_buffer *iob;
2767 struct qeth_ipacmd_hdr *hdr;
2769 data_length += offsetof(struct qeth_ipa_cmd, data);
2770 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
2775 qeth_prepare_ipa_cmd(card, iob, data_length);
2777 hdr = &__ipa_cmd(iob)->hdr;
2778 hdr->command = cmd_code;
2779 hdr->initiator = IPA_CMD_INITIATOR_HOST;
2780 /* hdr->seqno is set by qeth_send_control_data() */
2781 hdr->adapter_type = (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1;
2782 hdr->rel_adapter_no = (u8) card->dev->dev_port;
2783 hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
2784 hdr->param_count = 1;
2785 hdr->prot_version = prot;
2788 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
2790 static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
2791 struct qeth_reply *reply, unsigned long data)
2793 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2795 return (cmd->hdr.return_code) ? -EIO : 0;
2799 * qeth_send_ipa_cmd() - send an IPA command
2801 * See qeth_send_control_data() for explanation of the arguments.
2804 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2805 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
2811 QETH_CARD_TEXT(card, 4, "sendipa");
2813 if (card->read_or_write_problem) {
2818 if (reply_cb == NULL)
2819 reply_cb = qeth_send_ipa_cmd_cb;
2820 rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
2822 qeth_clear_ipacmd_list(card);
2823 qeth_schedule_recovery(card);
2827 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
2829 static int qeth_send_startlan_cb(struct qeth_card *card,
2830 struct qeth_reply *reply, unsigned long data)
2832 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2834 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
2837 return (cmd->hdr.return_code) ? -EIO : 0;
2840 static int qeth_send_startlan(struct qeth_card *card)
2842 struct qeth_cmd_buffer *iob;
2844 QETH_CARD_TEXT(card, 2, "strtlan");
2846 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
2849 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
2852 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
2854 if (!cmd->hdr.return_code)
2855 cmd->hdr.return_code =
2856 cmd->data.setadapterparms.hdr.return_code;
2857 return cmd->hdr.return_code;
2860 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
2861 struct qeth_reply *reply, unsigned long data)
2863 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2865 QETH_CARD_TEXT(card, 3, "quyadpcb");
2866 if (qeth_setadpparms_inspect_rc(cmd))
2869 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
2870 card->info.link_type =
2871 cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
2872 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
2874 card->options.adp.supported_funcs =
2875 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
2879 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
2880 enum qeth_ipa_setadp_cmd adp_cmd,
2881 unsigned int data_length)
2883 struct qeth_ipacmd_setadpparms_hdr *hdr;
2884 struct qeth_cmd_buffer *iob;
2886 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
2888 offsetof(struct qeth_ipacmd_setadpparms,
2893 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
2894 hdr->cmdlength = sizeof(*hdr) + data_length;
2895 hdr->command_code = adp_cmd;
2896 hdr->used_total = 1;
2901 static int qeth_query_setadapterparms(struct qeth_card *card)
2904 struct qeth_cmd_buffer *iob;
2906 QETH_CARD_TEXT(card, 3, "queryadp");
2907 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
2908 SETADP_DATA_SIZEOF(query_cmds_supp));
2911 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
2915 static int qeth_query_ipassists_cb(struct qeth_card *card,
2916 struct qeth_reply *reply, unsigned long data)
2918 struct qeth_ipa_cmd *cmd;
2920 QETH_CARD_TEXT(card, 2, "qipasscb");
2922 cmd = (struct qeth_ipa_cmd *) data;
2924 switch (cmd->hdr.return_code) {
2925 case IPA_RC_SUCCESS:
2927 case IPA_RC_NOTSUPP:
2928 case IPA_RC_L2_UNSUPPORTED_CMD:
2929 QETH_CARD_TEXT(card, 2, "ipaunsup");
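/* QIPASSIST is not supported, but assume that at least the
 * SETADAPTERPARMS command is available */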
2930 card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
2931 card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
2934 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
2935 CARD_DEVID(card), cmd->hdr.return_code);
2939 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
2940 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
2941 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
2942 } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
2943 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
2944 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
2946 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
2951 static int qeth_query_ipassists(struct qeth_card *card,
2952 enum qeth_prot_versions prot)
2955 struct qeth_cmd_buffer *iob;
2957 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
2958 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
2961 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
2965 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
2966 struct qeth_reply *reply, unsigned long data)
2968 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2969 struct qeth_query_switch_attributes *attrs;
2970 struct qeth_switch_info *sw_info;
2972 QETH_CARD_TEXT(card, 2, "qswiatcb");
2973 if (qeth_setadpparms_inspect_rc(cmd))
2976 sw_info = (struct qeth_switch_info *)reply->param;
2977 attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
2978 sw_info->capabilities = attrs->capabilities;
2979 sw_info->settings = attrs->settings;
2980 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
2985 int qeth_query_switch_attributes(struct qeth_card *card,
2986 struct qeth_switch_info *sw_info)
2988 struct qeth_cmd_buffer *iob;
2990 QETH_CARD_TEXT(card, 2, "qswiattr");
2991 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
2993 if (!netif_carrier_ok(card->dev))
2995 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
2998 return qeth_send_ipa_cmd(card, iob,
2999 qeth_query_switch_attributes_cb, sw_info);
3002 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
3003 enum qeth_diags_cmds sub_cmd,
3004 unsigned int data_length)
3006 struct qeth_ipacmd_diagass *cmd;
3007 struct qeth_cmd_buffer *iob;
3009 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
3010 DIAG_HDR_LEN + data_length);
3014 cmd = &__ipa_cmd(iob)->data.diagass;
3015 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
3016 cmd->subcmd = sub_cmd;
3019 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
3021 static int qeth_query_setdiagass_cb(struct qeth_card *card,
3022 struct qeth_reply *reply, unsigned long data)
3024 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3025 u16 rc = cmd->hdr.return_code;
3028 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3032 card->info.diagass_support = cmd->data.diagass.ext;
3036 static int qeth_query_setdiagass(struct qeth_card *card)
3038 struct qeth_cmd_buffer *iob;
3040 QETH_CARD_TEXT(card, 2, "qdiagass");
3041 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3044 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
3047 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3049 unsigned long info = get_zeroed_page(GFP_KERNEL);
3050 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3051 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3052 struct ccw_dev_id ccwid;
3055 tid->chpid = card->info.chpid;
3056 ccw_device_get_id(CARD_RDEV(card), &ccwid);
3057 tid->ssid = ccwid.ssid;
3058 tid->devno = ccwid.devno;
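/* stsi() reports the machine's configuration level: level 2 provides
 * LPAR information, level 3 adds z/VM guest data */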
3061 level = stsi(NULL, 0, 0, 0);
3062 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3063 tid->lparnr = info222->lpar_number;
3064 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3065 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3066 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3072 static int qeth_hw_trap_cb(struct qeth_card *card,
3073 struct qeth_reply *reply, unsigned long data)
3075 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3076 u16 rc = cmd->hdr.return_code;
3079 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3085 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3087 struct qeth_cmd_buffer *iob;
3088 struct qeth_ipa_cmd *cmd;
3090 QETH_CARD_TEXT(card, 2, "diagtrap");
3091 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3094 cmd = __ipa_cmd(iob);
3095 cmd->data.diagass.type = 1;
3096 cmd->data.diagass.action = action;
3098 case QETH_DIAGS_TRAP_ARM:
3099 cmd->data.diagass.options = 0x0003;
3100 cmd->data.diagass.ext = 0x00010000 +
3101 sizeof(struct qeth_trap_id);
3102 qeth_get_trap_id(card,
3103 (struct qeth_trap_id *)cmd->data.diagass.cdata);
3105 case QETH_DIAGS_TRAP_DISARM:
3106 cmd->data.diagass.options = 0x0001;
3108 case QETH_DIAGS_TRAP_CAPTURE:
3111 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3113 EXPORT_SYMBOL_GPL(qeth_hw_trap);
3115 static int qeth_check_qdio_errors(struct qeth_card *card,
3116 struct qdio_buffer *buf,
3117 unsigned int qdio_error,
3118 const char *dbftext)
3121 QETH_CARD_TEXT(card, 2, dbftext);
3122 QETH_CARD_TEXT_(card, 2, " F15=%02X",
3123 buf->element[15].sflags);
3124 QETH_CARD_TEXT_(card, 2, " F14=%02X",
3125 buf->element[14].sflags);
3126 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3127 if ((buf->element[15].sflags) == 0x12) {
3128 QETH_CARD_STAT_INC(card, rx_fifo_errors);
3136 static void qeth_queue_input_buffer(struct qeth_card *card, int index)
3138 struct qeth_qdio_q *queue = card->qdio.in_q;
3139 struct list_head *lh;
3145 count = (index < queue->next_buf_to_init)?
3146 card->qdio.in_buf_pool.buf_count -
3147 (queue->next_buf_to_init - index) :
3148 card->qdio.in_buf_pool.buf_count -
3149 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
3150 /* only requeue at a certain threshold to avoid SIGAs */
3151 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3152 for (i = queue->next_buf_to_init;
3153 i < queue->next_buf_to_init + count; ++i) {
3154 if (qeth_init_input_buffer(card,
3155 &queue->bufs[QDIO_BUFNR(i)])) {
3162 if (newcount < count) {
3163 /* we are in memory shortage so we switch back to
3164 traditional skb allocation and drop packets */
3165 atomic_set(&card->force_alloc_skb, 3);
3168 atomic_add_unless(&card->force_alloc_skb, -1, 0);
3173 list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3175 if (i == card->qdio.in_buf_pool.buf_count) {
3176 QETH_CARD_TEXT(card, 2, "qsarbw");
3177 card->reclaim_index = index;
3178 schedule_delayed_work(
3179 &card->buffer_reclaim_work,
3180 QETH_RECLAIM_WORK_TIME);
3186 * according to old code, requeueing all 128 buffers should be
3187 * avoided in order to benefit from PCI avoidance.
3188 * This function keeps at least one buffer (the buffer at
3189 * 'index') un-requeued -> this buffer is the first buffer that
3190 * will be requeued the next time.
3192 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3193 queue->next_buf_to_init, count);
3195 QETH_CARD_TEXT(card, 2, "qinberr");
3197 queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
3202 static void qeth_buffer_reclaim_work(struct work_struct *work)
3204 struct qeth_card *card = container_of(work, struct qeth_card,
3205 buffer_reclaim_work.work);
3207 QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
3208 qeth_queue_input_buffer(card, card->reclaim_index);
3211 static void qeth_handle_send_error(struct qeth_card *card,
3212 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3214 int sbalf15 = buffer->buffer->element[15].sflags;
3216 QETH_CARD_TEXT(card, 6, "hdsnderr");
3217 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
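/* SBALF 15 values in the range 15..31 indicate retriable errors; any
 * other error is logged as a link failure */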
3222 if ((sbalf15 >= 15) && (sbalf15 <= 31))
3225 QETH_CARD_TEXT(card, 1, "lnkfail");
3226 QETH_CARD_TEXT_(card, 1, "%04x %02x",
3227 (u16)qdio_err, (u8)sbalf15);
3231 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3232 * @queue: queue to check for packing buffer
3234 * Returns number of buffers that were prepared for flush.
3236 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3238 struct qeth_qdio_out_buffer *buffer;
3240 buffer = queue->bufs[queue->next_buf_to_fill];
3241 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3242 (buffer->next_element_to_fill > 0)) {
3243 /* it's a packing buffer */
3244 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3245 queue->next_buf_to_fill =
3246 QDIO_BUFNR(queue->next_buf_to_fill + 1);
3253 * Switches to packing state if the number of used buffers on a queue
3254 * reaches a certain limit.
3256 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3258 if (!queue->do_pack) {
3259 if (atomic_read(&queue->used_buffers)
3260 >= QETH_HIGH_WATERMARK_PACK){
3261 /* switch non-PACKING -> PACKING */
3262 QETH_CARD_TEXT(queue->card, 6, "np->pack");
3263 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3270 * Switches from packing to non-packing mode. If there is a packing
3271 * buffer on the queue this buffer will be prepared to be flushed.
3272 * In that case 1 is returned to inform the caller. If no buffer
3273 * has to be flushed, zero is returned.
3275 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3277 if (queue->do_pack) {
3278 if (atomic_read(&queue->used_buffers)
3279 <= QETH_LOW_WATERMARK_PACK) {
3280 /* switch PACKING -> non-PACKING */
3281 QETH_CARD_TEXT(queue->card, 6, "pack->np");
3282 QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3284 return qeth_prep_flush_pack_buffer(queue);
3290 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3293 struct qeth_card *card = queue->card;
3294 struct qeth_qdio_out_buffer *buf;
3297 unsigned int qdio_flags;
3299 for (i = index; i < index + count; ++i) {
3300 unsigned int bidx = QDIO_BUFNR(i);
3302 buf = queue->bufs[bidx];
3303 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3304 SBAL_EFLAGS_LAST_ENTRY;
3306 if (queue->bufstates)
3307 queue->bufstates[bidx].user = buf;
3309 if (IS_IQD(queue->card))
3312 if (!queue->do_pack) {
3313 if ((atomic_read(&queue->used_buffers) >=
3314 (QETH_HIGH_WATERMARK_PACK -
3315 QETH_WATERMARK_PACK_FUZZ)) &&
3316 !atomic_read(&queue->set_pci_flags_count)) {
3317 /* it's likely that we'll go to packing
3319 atomic_inc(&queue->set_pci_flags_count);
3320 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3323 if (!atomic_read(&queue->set_pci_flags_count)) {
3325 * there's no outstanding PCI any more, so we
3326 * have to request a PCI to be sure that the PCI
3327 * will wake at some time in the future. Then we
3328 * can flush packed buffers that might still be
3329 * hanging around, which can happen if no
3330 * further send was requested by the stack
3332 atomic_inc(&queue->set_pci_flags_count);
3333 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3338 qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3339 if (atomic_read(&queue->set_pci_flags_count))
3340 qdio_flags |= QDIO_FLAG_PCI_OUT;
3341 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
3342 queue->queue_no, index, count);
3344 /* Fake the TX completion interrupt: */
3346 napi_schedule(&queue->napi);
3349 /* ignore temporary SIGA errors without busy condition */
3352 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3353 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3354 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3355 QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3356 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3358 /* this must not happen under normal circumstances. If it
3359 * happens, something is really wrong -> recover */
3360 qeth_schedule_recovery(queue->card);
3365 static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3367 qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3369 queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3370 queue->prev_hdr = NULL;
3371 queue->bulk_count = 0;
3374 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3378 int q_was_packing = 0;
3381 * check if we have to switch to non-packing mode or if
3382 * we have to get a pci flag out on the queue
3384 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3385 !atomic_read(&queue->set_pci_flags_count)) {
3386 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
3387 QETH_OUT_Q_UNLOCKED) {
3389 * If we get in here, there was no action in
3390 * do_send_packet. So, we check if there is a
3391 * packing buffer to be flushed here.
3393 index = queue->next_buf_to_fill;
3394 q_was_packing = queue->do_pack;
3395 /* queue->do_pack may change */
3397 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
3399 !atomic_read(&queue->set_pci_flags_count))
3400 flush_cnt += qeth_prep_flush_pack_buffer(queue);
3402 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3404 qeth_flush_buffers(queue, index, flush_cnt);
3405 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3410 static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
3411 unsigned long card_ptr)
3413 struct qeth_card *card = (struct qeth_card *)card_ptr;
3415 if (card->dev->flags & IFF_UP)
3416 napi_schedule(&card->napi);
3419 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3423 if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
3427 if (card->options.cq == cq) {
3432 if (card->state != CARD_STATE_DOWN) {
3437 qeth_free_qdio_queues(card);
3438 card->options.cq = cq;
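/* the new setting takes effect when the QDIO queues are allocated
 * again, typically on the next set-online of the card */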
3445 EXPORT_SYMBOL_GPL(qeth_configure_cq);
3447 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3448 unsigned int queue, int first_element,
3451 struct qeth_qdio_q *cq = card->qdio.c_q;
3455 if (!qeth_is_cq(card, queue))
3458 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3459 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3460 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3463 netif_tx_stop_all_queues(card->dev);
3464 qeth_schedule_recovery(card);
3468 for (i = first_element; i < first_element + count; ++i) {
3469 struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
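/* each used element of a CQ buffer carries the address of an
 * asynchronous operation block (AOB) for a completed TX buffer */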
3472 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3473 buffer->element[e].addr) {
3474 unsigned long phys_aob_addr;
3476 phys_aob_addr = (unsigned long) buffer->element[e].addr;
3477 qeth_qdio_handle_aob(card, phys_aob_addr);
3480 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3482 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3483 card->qdio.c_q->next_buf_to_init,
3486 dev_warn(&card->gdev->dev,
3487 "QDIO reported an error, rc=%i\n", rc);
3488 QETH_CARD_TEXT(card, 2, "qcqherr");
3491 cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
3494 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3495 unsigned int qdio_err, int queue,
3496 int first_elem, int count,
3497 unsigned long card_ptr)
3499 struct qeth_card *card = (struct qeth_card *)card_ptr;
3501 QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3502 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3504 if (qeth_is_cq(card, queue))
3505 qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
3507 qeth_schedule_recovery(card);
3510 static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3511 unsigned int qdio_error, int __queue,
3512 int first_element, int count,
3513 unsigned long card_ptr)
3515 struct qeth_card *card = (struct qeth_card *) card_ptr;
3516 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3517 struct net_device *dev = card->dev;
3518 struct netdev_queue *txq;
3521 QETH_CARD_TEXT(card, 6, "qdouhdl");
3522 if (qdio_error & QDIO_ERROR_FATAL) {
3523 QETH_CARD_TEXT(card, 2, "achkcond");
3524 netif_tx_stop_all_queues(dev);
3525 qeth_schedule_recovery(card);
3529 for (i = first_element; i < (first_element + count); ++i) {
3530 struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
3532 qeth_handle_send_error(card, buf, qdio_error);
3533 qeth_clear_output_buffer(queue, buf, qdio_error, 0);
3536 atomic_sub(count, &queue->used_buffers);
3537 qeth_check_outbound_queue(queue);
3539 txq = netdev_get_tx_queue(dev, __queue);
3540 /* xmit may have observed the full-condition, but not yet stopped the
3541 * txq. In which case the code below won't trigger. So before returning,
3542 * xmit will re-check the txq's fill level and wake it up if needed.
3544 if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
3545 netif_tx_wake_queue(txq);
3549 * Note: Function assumes that we have 4 outbound queues.
3551 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
3553 struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
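/* map the packet's priority information (IP precedence/TOS bits,
 * skb->priority or VLAN PCP) to one of the four outbound queues;
 * queue 0 carries the highest-priority traffic */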
3556 switch (card->qdio.do_prio_queueing) {
3557 case QETH_PRIO_Q_ING_TOS:
3558 case QETH_PRIO_Q_ING_PREC:
3559 switch (qeth_get_ip_version(skb)) {
3561 tos = ipv4_get_dsfield(ip_hdr(skb));
3564 tos = ipv6_get_dsfield(ipv6_hdr(skb));
3567 return card->qdio.default_out_queue;
3569 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3570 return ~tos >> 6 & 3;
3571 if (tos & IPTOS_MINCOST)
3573 if (tos & IPTOS_RELIABILITY)
3575 if (tos & IPTOS_THROUGHPUT)
3577 if (tos & IPTOS_LOWDELAY)
3580 case QETH_PRIO_Q_ING_SKB:
3581 if (skb->priority > 5)
3583 return ~skb->priority >> 1 & 3;
3584 case QETH_PRIO_Q_ING_VLAN:
3585 if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3586 return ~ntohs(veth->h_vlan_TCI) >>
3587 (VLAN_PRIO_SHIFT + 1) & 3;
3592 return card->qdio.default_out_queue;
3594 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3597 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
3600 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3601 * fragmented part of the SKB. Returns zero for linear SKB.
3603 static int qeth_get_elements_for_frags(struct sk_buff *skb)
3605 int cnt, elements = 0;
3607 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3608 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3610 elements += qeth_get_elements_for_range(
3611 (addr_t)skb_frag_address(frag),
3612 (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3618 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
3619 * to transmit an skb.
3620 * @skb: the skb to operate on.
3621 * @data_offset: skip this part of the skb's linear data
3623 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3624 * skb's data (both its linear part and paged fragments).
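 * For example, a linear skb whose data crosses a page boundary needs two
 * elements even if it is smaller than PAGE_SIZE.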
3626 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
3628 unsigned int elements = qeth_get_elements_for_frags(skb);
3629 addr_t end = (addr_t)skb->data + skb_headlen(skb);
3630 addr_t start = (addr_t)skb->data + data_offset;
3633 elements += qeth_get_elements_for_range(start, end);
3636 EXPORT_SYMBOL_GPL(qeth_count_elements);
3638 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \
3642 * qeth_add_hw_header() - add a HW header to an skb.
3643 * @skb: skb that the HW header should be added to.
3644 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3645 * it contains a valid pointer to a qeth_hdr.
3646 * @hdr_len: length of the HW header.
3647 * @proto_len: length of protocol headers that need to be in same page as the
3650 * Returns the pushed length. If the header can't be pushed on
3651 * (eg. because it would cross a page boundary), it is allocated from
3652 * the cache instead and 0 is returned.
3653 * The number of needed buffer elements is returned in @elements.
3654 * Errors in creating the hdr are indicated by a return value < 0.
3656 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
3657 struct sk_buff *skb, struct qeth_hdr **hdr,
3658 unsigned int hdr_len, unsigned int proto_len,
3659 unsigned int *elements)
3661 const unsigned int contiguous = proto_len ? proto_len : 1;
3662 const unsigned int max_elements = queue->max_elements;
3663 unsigned int __elements;
3669 start = (addr_t)skb->data - hdr_len;
3670 end = (addr_t)skb->data;
3672 if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3673 /* Push HW header into same page as first protocol header. */
3675 /* ... but TSO always needs a separate element for headers: */
3676 if (skb_is_gso(skb))
3677 __elements = 1 + qeth_count_elements(skb, proto_len);
3679 __elements = qeth_count_elements(skb, 0);
3680 } else if (!proto_len && PAGE_ALIGNED(skb->data)) {
3681 /* Push HW header into preceding page, flush with skb->data. */
3683 __elements = 1 + qeth_count_elements(skb, 0);
3685 /* Use header cache, copy protocol headers up. */
3687 __elements = 1 + qeth_count_elements(skb, proto_len);
3690 /* Compress skb to fit into one IO buffer: */
3691 if (__elements > max_elements) {
3692 if (!skb_is_nonlinear(skb)) {
3693 /* Drop it, no easy way of shrinking it further. */
3694 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
3695 max_elements, __elements, skb->len);
3699 rc = skb_linearize(skb);
3701 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
3705 QETH_TXQ_STAT_INC(queue, skbs_linearized);
3706 /* Linearization changed the layout, re-evaluate: */
3710 *elements = __elements;
3711 /* Add the header: */
3713 *hdr = skb_push(skb, hdr_len);
3717 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
3719 *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
3722 /* Copy protocol headers behind HW header: */
3723 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
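/* Packets may share one IQD buffer only if they use the same qeth header
 * type and are addressed to the same destination (layer 2: destination MAC
 * and VLAN, layer 3: next hop and VLAN). */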
3727 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
3728 struct sk_buff *curr_skb,
3729 struct qeth_hdr *curr_hdr)
3731 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
3732 struct qeth_hdr *prev_hdr = queue->prev_hdr;
3737 /* All packets must have the same target: */
3738 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
3739 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
3741 return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
3742 eth_hdr(curr_skb)->h_dest) &&
3743 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
3746 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
3747 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
3750 static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
3751 struct qeth_qdio_out_buffer *buf,
3752 bool is_first_elem, unsigned int offset)
3754 struct qdio_buffer *buffer = buf->buffer;
3755 int element = buf->next_element_to_fill;
3756 int length = skb_headlen(skb) - offset;
3757 char *data = skb->data + offset;
3758 unsigned int elem_length, cnt;
3760 /* map linear part into buffer element(s) */
3761 while (length > 0) {
3762 elem_length = min_t(unsigned int, length,
3763 PAGE_SIZE - offset_in_page(data));
3765 buffer->element[element].addr = data;
3766 buffer->element[element].length = elem_length;
3767 length -= elem_length;
3768 if (is_first_elem) {
3769 is_first_elem = false;
3770 if (length || skb_is_nonlinear(skb))
3771 /* skb needs additional elements */
3772 buffer->element[element].eflags =
3773 SBAL_EFLAGS_FIRST_FRAG;
3775 buffer->element[element].eflags = 0;
3777 buffer->element[element].eflags =
3778 SBAL_EFLAGS_MIDDLE_FRAG;
3781 data += elem_length;
3785 /* map page frags into buffer element(s) */
3786 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3787 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3789 data = skb_frag_address(frag);
3790 length = skb_frag_size(frag);
3791 while (length > 0) {
3792 elem_length = min_t(unsigned int, length,
3793 PAGE_SIZE - offset_in_page(data));
3795 buffer->element[element].addr = data;
3796 buffer->element[element].length = elem_length;
3797 buffer->element[element].eflags =
3798 SBAL_EFLAGS_MIDDLE_FRAG;
3800 length -= elem_length;
3801 data += elem_length;
3806 if (buffer->element[element - 1].eflags)
3807 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
3808 buf->next_element_to_fill = element;
3813 * qeth_fill_buffer() - map skb into an output buffer
3814 * @buf: buffer to transport the skb
3815 * @skb: skb to map into the buffer
3816 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
3817 * from qeth_core_header_cache.
3818 * @offset: when mapping the skb, start at skb->data + offset
3819 * @hd_len: if > 0, build a dedicated header element of this size
3821 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
3822 struct sk_buff *skb, struct qeth_hdr *hdr,
3823 unsigned int offset, unsigned int hd_len)
3825 struct qdio_buffer *buffer = buf->buffer;
3826 bool is_first_elem = true;
3828 __skb_queue_tail(&buf->skb_list, skb);
3830 /* build dedicated header element */
3832 int element = buf->next_element_to_fill;
3833 is_first_elem = false;
3835 buffer->element[element].addr = hdr;
3836 buffer->element[element].length = hd_len;
3837 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3838 /* remember to free cache-allocated qeth_hdr: */
3839 buf->is_header[element] = ((void *)hdr != skb->data);
3840 buf->next_element_to_fill++;
3843 return __qeth_fill_buffer(skb, buf, is_first_elem, offset);
3846 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3847 struct sk_buff *skb, unsigned int elements,
3848 struct qeth_hdr *hdr, unsigned int offset,
3849 unsigned int hd_len)
3851 unsigned int bytes = qdisc_pkt_len(skb);
3852 struct qeth_qdio_out_buffer *buffer;
3853 unsigned int next_element;
3854 struct netdev_queue *txq;
3855 bool stopped = false;
3858 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
3859 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
3861 /* Just a sanity check, the wake/stop logic should ensure that we always
3862 * get a free buffer.
3864 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
3867 flush = !qeth_iqd_may_bulk(queue, skb, hdr);
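/* close the current buffer if this skb can't be bulked into it or would
 * no longer fit, and flush once the bulk limit is reached */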
3870 (buffer->next_element_to_fill + elements > queue->max_elements)) {
3871 if (buffer->next_element_to_fill > 0) {
3872 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3873 queue->bulk_count++;
3876 if (queue->bulk_count >= queue->bulk_max)
3880 qeth_flush_queue(queue);
3882 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
3883 queue->bulk_count)];
3885 /* Sanity-check again: */
3886 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
3890 if (buffer->next_element_to_fill == 0 &&
3891 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
3892 /* If a TX completion happens right _here_ and fails to wake
3893 * the txq, then our re-check below will catch the race.
3895 QETH_TXQ_STAT_INC(queue, stopped);
3896 netif_tx_stop_queue(txq);
3900 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
3901 buffer->bytes += bytes;
3902 queue->prev_hdr = hdr;
3904 flush = __netdev_tx_sent_queue(txq, bytes,
3905 !stopped && netdev_xmit_more());
3907 if (flush || next_element >= queue->max_elements) {
3908 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3909 queue->bulk_count++;
3911 if (queue->bulk_count >= queue->bulk_max)
3915 qeth_flush_queue(queue);
3918 if (stopped && !qeth_out_queue_is_full(queue))
3919 netif_tx_start_queue(txq);
3923 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3924 struct sk_buff *skb, struct qeth_hdr *hdr,
3925 unsigned int offset, unsigned int hd_len,
3926 int elements_needed)
3928 struct qeth_qdio_out_buffer *buffer;
3929 unsigned int next_element;
3930 struct netdev_queue *txq;
3931 bool stopped = false;
3933 int flush_count = 0;
3938 /* spin until we get the queue ... */
3939 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
3940 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
3941 start_index = queue->next_buf_to_fill;
3942 buffer = queue->bufs[queue->next_buf_to_fill];
3944 /* Just a sanity check, the wake/stop logic should ensure that we always
3945 * get a free buffer.
3947 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
3948 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3952 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
3954 /* check if we need to switch packing state of this queue */
3955 qeth_switch_to_packing_if_needed(queue);
3956 if (queue->do_pack) {
3958 /* does packet fit in current buffer? */
3959 if (buffer->next_element_to_fill + elements_needed >
3960 queue->max_elements) {
3961 /* ... no -> set state PRIMED */
3962 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3964 queue->next_buf_to_fill =
3965 QDIO_BUFNR(queue->next_buf_to_fill + 1);
3966 buffer = queue->bufs[queue->next_buf_to_fill];
3968 /* We stepped forward, so sanity-check again: */
3969 if (atomic_read(&buffer->state) !=
3970 QETH_QDIO_BUF_EMPTY) {
3971 qeth_flush_buffers(queue, start_index,
3973 atomic_set(&queue->state,
3974 QETH_OUT_Q_UNLOCKED);
3981 if (buffer->next_element_to_fill == 0 &&
3982 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
3983 /* If a TX completion happens right _here_ and fails to wake
3984 * the txq, then our re-check below will catch the race.
3986 QETH_TXQ_STAT_INC(queue, stopped);
3987 netif_tx_stop_queue(txq);
3991 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
3994 QETH_TXQ_STAT_INC(queue, skbs_pack);
3995 if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
3997 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3998 queue->next_buf_to_fill =
3999 QDIO_BUFNR(queue->next_buf_to_fill + 1);
4003 qeth_flush_buffers(queue, start_index, flush_count);
4004 else if (!atomic_read(&queue->set_pci_flags_count))
4005 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
4007 * queue->state will go from LOCKED -> UNLOCKED or from
4008 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
4009 * (switch packing state or flush buffer to get another pci flag out).
4010 * In that case we will enter this loop
4012 while (atomic_dec_return(&queue->state)) {
4013 start_index = queue->next_buf_to_fill;
4014 /* check if we can go back to non-packing state */
4015 tmp = qeth_switch_to_nonpacking_if_needed(queue);
4017 * check if we need to flush a packing buffer to get a pci
4018 * flag out on the queue
4020 if (!tmp && !atomic_read(&queue->set_pci_flags_count))
4021 tmp = qeth_prep_flush_pack_buffer(queue);
4023 qeth_flush_buffers(queue, start_index, tmp);
4028 /* at this point the queue is UNLOCKED again */
4030 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
4032 if (stopped && !qeth_out_queue_is_full(queue))
4033 netif_tx_start_queue(txq);
4036 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
4038 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4039 unsigned int payload_len, struct sk_buff *skb,
4040 unsigned int proto_len)
4042 struct qeth_hdr_ext_tso *ext = &hdr->ext;
4044 ext->hdr_tot_len = sizeof(*ext);
4045 ext->imb_hdr_no = 1;
4047 ext->hdr_version = 1;
4049 ext->payload_len = payload_len;
4050 ext->mss = skb_shinfo(skb)->gso_size;
4051 ext->dg_hdr_len = proto_len;
4054 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4055 struct qeth_qdio_out_q *queue, int ipv,
4056 void (*fill_header)(struct qeth_qdio_out_q *queue,
4057 struct qeth_hdr *hdr, struct sk_buff *skb,
4058 int ipv, unsigned int data_len))
4060 unsigned int proto_len, hw_hdr_len;
4061 unsigned int frame_len = skb->len;
4062 bool is_tso = skb_is_gso(skb);
4063 unsigned int data_offset = 0;
4064 struct qeth_hdr *hdr = NULL;
4065 unsigned int hd_len = 0;
4066 unsigned int elements;
4070 hw_hdr_len = sizeof(struct qeth_hdr_tso);
4071 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4073 hw_hdr_len = sizeof(struct qeth_hdr);
4074 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4077 rc = skb_cow_head(skb, hw_hdr_len);
4081 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4085 if (is_tso || !push_len) {
4086 /* HW header needs its own buffer element. */
4087 hd_len = hw_hdr_len + proto_len;
4088 data_offset = push_len + proto_len;
4090 memset(hdr, 0, hw_hdr_len);
4091 fill_header(queue, hdr, skb, ipv, frame_len);
4093 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4094 frame_len - proto_len, skb, proto_len);
4097 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
4100 /* TODO: drop skb_orphan() once TX completion is fast enough */
4102 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4106 if (rc && !push_len)
4107 kmem_cache_free(qeth_core_header_cache, hdr);
4111 EXPORT_SYMBOL_GPL(qeth_xmit);
4113 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4114 struct qeth_reply *reply, unsigned long data)
4116 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4117 struct qeth_ipacmd_setadpparms *setparms;
4119 QETH_CARD_TEXT(card, 4, "prmadpcb");
4121 setparms = &(cmd->data.setadapterparms);
4122 if (qeth_setadpparms_inspect_rc(cmd)) {
4123 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4124 setparms->data.mode = SET_PROMISC_MODE_OFF;
4126 card->info.promisc_mode = setparms->data.mode;
4127 return (cmd->hdr.return_code) ? -EIO : 0;
4130 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
4132 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
4133 SET_PROMISC_MODE_OFF;
4134 struct qeth_cmd_buffer *iob;
4135 struct qeth_ipa_cmd *cmd;
4137 QETH_CARD_TEXT(card, 4, "setprom");
4138 QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4140 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4141 SETADP_DATA_SIZEOF(mode));
4144 cmd = __ipa_cmd(iob);
4145 cmd->data.setadapterparms.data.mode = mode;
4146 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4148 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4150 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4151 struct qeth_reply *reply, unsigned long data)
4153 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4154 struct qeth_ipacmd_setadpparms *adp_cmd;
4156 QETH_CARD_TEXT(card, 4, "chgmaccb");
4157 if (qeth_setadpparms_inspect_rc(cmd))
4160 adp_cmd = &cmd->data.setadapterparms;
4161 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4162 return -EADDRNOTAVAIL;
4164 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4165 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4166 return -EADDRNOTAVAIL;
4168 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4172 int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4175 struct qeth_cmd_buffer *iob;
4176 struct qeth_ipa_cmd *cmd;
4178 QETH_CARD_TEXT(card, 4, "chgmac");
4180 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4181 SETADP_DATA_SIZEOF(change_addr));
4184 cmd = __ipa_cmd(iob);
4185 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4186 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4187 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4188 card->dev->dev_addr);
4189 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4193 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4195 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4196 struct qeth_reply *reply, unsigned long data)
4198 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4199 struct qeth_set_access_ctrl *access_ctrl_req;
4200 int fallback = *(int *)reply->param;
4202 QETH_CARD_TEXT(card, 4, "setaccb");
4203 if (cmd->hdr.return_code)
4205 qeth_setadpparms_inspect_rc(cmd);
4207 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4208 QETH_CARD_TEXT_(card, 2, "rc=%d",
4209 cmd->data.setadapterparms.hdr.return_code);
4210 if (cmd->data.setadapterparms.hdr.return_code !=
4211 SET_ACCESS_CTRL_RC_SUCCESS)
4212 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4213 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4214 cmd->data.setadapterparms.hdr.return_code);
4215 switch (cmd->data.setadapterparms.hdr.return_code) {
4216 case SET_ACCESS_CTRL_RC_SUCCESS:
4217 if (card->options.isolation == ISOLATION_MODE_NONE) {
4218 dev_info(&card->gdev->dev,
4219 "QDIO data connection isolation is deactivated\n");
4221 dev_info(&card->gdev->dev,
4222 "QDIO data connection isolation is activated\n");
4225 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4226 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4229 card->options.isolation = card->options.prev_isolation;
4231 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4232 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4235 card->options.isolation = card->options.prev_isolation;
4237 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4238 dev_err(&card->gdev->dev, "Adapter does not "
4239 "support QDIO data connection isolation\n");
4241 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4242 dev_err(&card->gdev->dev,
4243 "Adapter is dedicated. "
4244 "QDIO data connection isolation not supported\n");
4246 card->options.isolation = card->options.prev_isolation;
4248 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4249 dev_err(&card->gdev->dev,
4250 "TSO does not permit QDIO data connection isolation\n");
4252 card->options.isolation = card->options.prev_isolation;
4254 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4255 dev_err(&card->gdev->dev, "The adjacent switch port does not "
4256 "support reflective relay mode\n");
4258 card->options.isolation = card->options.prev_isolation;
4260 case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4261 dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4262 "enabled at the adjacent switch port");
4264 card->options.isolation = card->options.prev_isolation;
4266 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4267 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4268 "at the adjacent switch failed\n");
4271 /* this should never happen */
4273 card->options.isolation = card->options.prev_isolation;
4276 return (cmd->hdr.return_code) ? -EIO : 0;
4279 static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4280 enum qeth_ipa_isolation_modes isolation, int fallback)
4283 struct qeth_cmd_buffer *iob;
4284 struct qeth_ipa_cmd *cmd;
4285 struct qeth_set_access_ctrl *access_ctrl_req;
4287 QETH_CARD_TEXT(card, 4, "setacctl");
4289 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4290 SETADP_DATA_SIZEOF(set_access_ctrl));
4293 cmd = __ipa_cmd(iob);
4294 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4295 access_ctrl_req->subcmd_code = isolation;
4297 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4299 QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4303 int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
4307 QETH_CARD_TEXT(card, 4, "setactlo");
4309 if ((IS_OSD(card) || IS_OSX(card)) &&
4310 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4311 rc = qeth_setadpparms_set_access_ctrl(card,
4312 card->options.isolation, fallback);
4314 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d)) on device %x: sending failed\n",
4315 rc, CARD_DEVID(card));
4318 } else if (card->options.isolation != ISOLATION_MODE_NONE) {
4319 card->options.isolation = ISOLATION_MODE_NONE;
4321 dev_err(&card->gdev->dev, "Adapter does not "
4322 "support QDIO data connection isolation\n");
4328 void qeth_tx_timeout(struct net_device *dev)
4330 struct qeth_card *card;
4332 card = dev->ml_priv;
4333 QETH_CARD_TEXT(card, 4, "txtimeo");
4334 qeth_schedule_recovery(card);
4336 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
4338 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4340 struct qeth_card *card = dev->ml_priv;
4344 case MII_BMCR: /* Basic mode control register */
4346 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4347 (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4348 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4349 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4350 rc |= BMCR_SPEED100;
4352 case MII_BMSR: /* Basic mode status register */
4353 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4354 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4357 case MII_PHYSID1: /* PHYS ID 1 */
4358 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4360 rc = (rc >> 5) & 0xFFFF;
4362 case MII_PHYSID2: /* PHYS ID 2 */
4363 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4365 case MII_ADVERTISE: /* Advertisement control reg */
4368 case MII_LPA: /* Link partner ability reg */
4369 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4370 LPA_100BASE4 | LPA_LPACK;
4372 case MII_EXPANSION: /* Expansion register */
4374 case MII_DCOUNTER: /* disconnect counter */
4376 case MII_FCSCOUNTER: /* false carrier counter */
4378 case MII_NWAYTEST: /* N-way auto-neg test register */
4380 case MII_RERRCOUNTER: /* rx error counter */
4381 rc = card->stats.rx_length_errors +
4382 card->stats.rx_frame_errors +
4383 card->stats.rx_fifo_errors;
4385 case MII_SREVISION: /* silicon revision */
4387 case MII_RESV1: /* reserved 1 */
4389 case MII_LBRERROR: /* loopback, rx, bypass error */
4391 case MII_PHYADDR: /* physical address */
4393 case MII_RESV2: /* reserved 2 */
4395 case MII_TPISTATUS: /* TPI status for 10mbps */
4397 case MII_NCONFIG: /* network interface config */
4405 static int qeth_snmp_command_cb(struct qeth_card *card,
4406 struct qeth_reply *reply, unsigned long data)
4408 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4409 struct qeth_arp_query_info *qinfo = reply->param;
4410 struct qeth_ipacmd_setadpparms *adp_cmd;
4411 unsigned int data_len;
4414 QETH_CARD_TEXT(card, 3, "snpcmdcb");
4416 if (cmd->hdr.return_code) {
4417 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4420 if (cmd->data.setadapterparms.hdr.return_code) {
4421 cmd->hdr.return_code =
4422 cmd->data.setadapterparms.hdr.return_code;
4423 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4427 adp_cmd = &cmd->data.setadapterparms;
4428 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
4429 if (adp_cmd->hdr.seq_no == 1) {
4430 snmp_data = &adp_cmd->data.snmp;
4432 snmp_data = &adp_cmd->data.snmp.request;
4433 data_len -= offsetof(struct qeth_snmp_cmd, request);
4436 /* check if there is enough room in userspace */
4437 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4438 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
4441 QETH_CARD_TEXT_(card, 4, "snore%i",
4442 cmd->data.setadapterparms.hdr.used_total);
4443 QETH_CARD_TEXT_(card, 4, "sseqn%i",
4444 cmd->data.setadapterparms.hdr.seq_no);
4445 /*copy entries to user buffer*/
4446 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4447 qinfo->udata_offset += data_len;
4449 if (cmd->data.setadapterparms.hdr.seq_no <
4450 cmd->data.setadapterparms.hdr.used_total)
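/* Backend for the SIOC_QETH_ADP_SET_SNMP_CONTROL ioctl: wrap the user's SNMP
 * request into a SET_SNMP_CONTROL adapter command and stream the (possibly
 * multi-fragment) response back to user space via qeth_snmp_command_cb().
 */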
4455 static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4457 struct qeth_snmp_ureq __user *ureq;
4458 struct qeth_cmd_buffer *iob;
4459 unsigned int req_len;
4460 struct qeth_arp_query_info qinfo = {0, };
4463 QETH_CARD_TEXT(card, 3, "snmpcmd");
4465 if (IS_VM_NIC(card))
4468 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4472 ureq = (struct qeth_snmp_ureq __user *) udata;
4473 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
4474 get_user(req_len, &ureq->hdr.req_len))
4477 /* Sanitize user input, to avoid overflows in iob size calculation: */
4478 if (req_len > QETH_BUFSIZE)
4481 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4485 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
4486 &ureq->cmd, req_len)) {
4491 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4496 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4498 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
4500 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4501 CARD_DEVID(card), rc);
4503 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4511 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4512 struct qeth_reply *reply, unsigned long data)
4514 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4515 struct qeth_qoat_priv *priv;
4519 QETH_CARD_TEXT(card, 3, "qoatcb");
4520 if (qeth_setadpparms_inspect_rc(cmd))
4523 priv = (struct qeth_qoat_priv *)reply->param;
4524 resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4525 resdata = (char *)data + 28;
4527 if (resdatalen > (priv->buffer_len - priv->response_len))
4530 memcpy((priv->buffer + priv->response_len), resdata,
4532 priv->response_len += resdatalen;
4534 if (cmd->data.setadapterparms.hdr.seq_no <
4535 cmd->data.setadapterparms.hdr.used_total)
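/* Backend for the SIOC_QETH_QUERY_OAT ioctl: issue a QUERY_OAT adapter
 * command, collect the multi-fragment reply into a bounce buffer via
 * qeth_setadpparms_query_oat_cb(), and copy result and length back to
 * user space.
 */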
4540 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4543 struct qeth_cmd_buffer *iob;
4544 struct qeth_ipa_cmd *cmd;
4545 struct qeth_query_oat *oat_req;
4546 struct qeth_query_oat_data oat_data;
4547 struct qeth_qoat_priv priv;
4550 QETH_CARD_TEXT(card, 3, "qoatcmd");
4552 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
4557 if (copy_from_user(&oat_data, udata,
4558 sizeof(struct qeth_query_oat_data))) {
4563 priv.buffer_len = oat_data.buffer_len;
4564 priv.response_len = 0;
4565 priv.buffer = vzalloc(oat_data.buffer_len);
4571 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4572 SETADP_DATA_SIZEOF(query_oat));
4577 cmd = __ipa_cmd(iob);
4578 oat_req = &cmd->data.setadapterparms.data.query_oat;
4579 oat_req->subcmd_code = oat_data.command;
4581 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
4584 if (is_compat_task())
4585 tmp = compat_ptr(oat_data.ptr);
4587 tmp = (void __user *)(unsigned long)oat_data.ptr;
4589 if (copy_to_user(tmp, priv.buffer,
4590 priv.response_len)) {
4595 oat_data.response_len = priv.response_len;
4597 if (copy_to_user(udata, &oat_data,
4598 sizeof(struct qeth_query_oat_data)))
4608 static int qeth_query_card_info_cb(struct qeth_card *card,
4609 struct qeth_reply *reply, unsigned long data)
4611 struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
4612 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4613 struct qeth_query_card_info *card_info;
4615 QETH_CARD_TEXT(card, 2, "qcrdincb");
4616 if (qeth_setadpparms_inspect_rc(cmd))
4619 card_info = &cmd->data.setadapterparms.data.card_info;
4620 carrier_info->card_type = card_info->card_type;
4621 carrier_info->port_mode = card_info->port_mode;
4622 carrier_info->port_speed = card_info->port_speed;
4626 int qeth_query_card_info(struct qeth_card *card,
4627 struct carrier_info *carrier_info)
4629 struct qeth_cmd_buffer *iob;
4631 QETH_CARD_TEXT(card, 2, "qcrdinfo");
4632 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4634 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
4637 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
4638 (void *)carrier_info);
4642 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
4643 * @card: pointer to a qeth_card
 * Returns
 *	0, if a MAC address has been set for the card's netdevice
 *	a return code, for various error conditions
4649 int qeth_vm_request_mac(struct qeth_card *card)
4651 struct diag26c_mac_resp *response;
4652 struct diag26c_mac_req *request;
4653 struct ccw_dev_id id;
4656 QETH_CARD_TEXT(card, 2, "vmreqmac");
4658 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
4659 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
4660 if (!request || !response) {
4665 ccw_device_get_id(CARD_DDEV(card), &id);
4666 request->resp_buf_len = sizeof(*response);
4667 request->resp_version = DIAG26C_VERSION2;
4668 request->op_code = DIAG26C_GET_MAC;
4669 request->devno = id.devno;
4671 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4672 rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
4673 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4676 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
4678 if (request->resp_buf_len < sizeof(*response) ||
4679 response->version != request->resp_version) {
4681 QETH_CARD_TEXT(card, 2, "badresp");
4682 QETH_CARD_HEX(card, 2, &request->resp_buf_len,
4683 sizeof(request->resp_buf_len));
4684 } else if (!is_valid_ether_addr(response->mac)) {
4686 QETH_CARD_TEXT(card, 2, "badmac");
4687 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
4689 ether_addr_copy(card->dev->dev_addr, response->mac);
4697 EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
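/* Probe device capabilities: temporarily set the data channel online if
 * needed, read the configuration data and the QDIO SSQD block, and decide
 * whether Completion Queueing (CQ) can be offered.
 */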
4699 static void qeth_determine_capabilities(struct qeth_card *card)
4701 struct qeth_channel *channel = &card->data;
4702 struct ccw_device *ddev = channel->ccwdev;
4704 int ddev_offline = 0;
4706 QETH_CARD_TEXT(card, 2, "detcapab");
4707 if (!ddev->online) {
4709 rc = ccw_device_set_online(ddev);
4711 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
4716 rc = qeth_read_conf_data(card);
4718 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
4719 CARD_DEVID(card), rc);
4720 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
4724 rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
4726 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
4728 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
4729 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
4730 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
4731 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
4732 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
4733 if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
4734 ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
4735 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
4736 dev_info(&card->gdev->dev,
4737 "Completion Queueing supported\n");
4739 card->options.cq = QETH_CQ_NOTAVAILABLE;
4744 if (ddev_offline == 1)
4745 qeth_stop_channel(channel);
4750 static void qeth_qdio_establish_cq(struct qeth_card *card,
4751 struct qdio_buffer **in_sbal_ptrs,
4752 void (**queue_start_poll)
4753 (struct ccw_device *, int,
4758 if (card->options.cq == QETH_CQ_ENABLED) {
4759 int offset = QDIO_MAX_BUFFERS_PER_Q *
4760 (card->qdio.no_in_queues - 1);
4761 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4762 in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
4763 virt_to_phys(card->qdio.c_q->bufs[i].buffer);
4766 queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
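/* Build the qdio_initialize descriptor (QIB parameter field, SBAL address
 * arrays for the input, completion and output queues, handlers) and hand it
 * to qdio_allocate()/qdio_establish(); on failure the card's QDIO state is
 * rolled back to QETH_QDIO_ALLOCATED.
 */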
4770 static int qeth_qdio_establish(struct qeth_card *card)
4772 struct qdio_initialize init_data;
4773 char *qib_param_field;
4774 struct qdio_buffer **in_sbal_ptrs;
4775 void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
4776 struct qdio_buffer **out_sbal_ptrs;
4780 QETH_CARD_TEXT(card, 2, "qdioest");
4782 qib_param_field = kzalloc(FIELD_SIZEOF(struct qib, parm), GFP_KERNEL);
4783 if (!qib_param_field) {
4785 goto out_free_nothing;
4788 qeth_create_qib_param_field(card, qib_param_field);
4789 qeth_create_qib_param_field_blkt(card, qib_param_field);
4791 in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
4794 if (!in_sbal_ptrs) {
4796 goto out_free_qib_param;
4798 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
4799 in_sbal_ptrs[i] = (struct qdio_buffer *)
4800 virt_to_phys(card->qdio.in_q->bufs[i].buffer);
4803 queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
4805 if (!queue_start_poll) {
4807 goto out_free_in_sbals;
4809 for (i = 0; i < card->qdio.no_in_queues; ++i)
4810 queue_start_poll[i] = qeth_qdio_start_poll;
4812 qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
4815 kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
4818 if (!out_sbal_ptrs) {
4820 goto out_free_queue_start_poll;
4822 for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
4823 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
4824 out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
4825 card->qdio.out_qs[i]->bufs[j]->buffer);
4828 memset(&init_data, 0, sizeof(struct qdio_initialize));
4829 init_data.cdev = CARD_DDEV(card);
4830 init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
4832 init_data.qib_param_field_format = 0;
4833 init_data.qib_param_field = qib_param_field;
4834 init_data.no_input_qs = card->qdio.no_in_queues;
4835 init_data.no_output_qs = card->qdio.no_out_queues;
4836 init_data.input_handler = qeth_qdio_input_handler;
4837 init_data.output_handler = qeth_qdio_output_handler;
4838 init_data.queue_start_poll_array = queue_start_poll;
4839 init_data.int_parm = (unsigned long) card;
4840 init_data.input_sbal_addr_array = in_sbal_ptrs;
4841 init_data.output_sbal_addr_array = out_sbal_ptrs;
4842 init_data.output_sbal_state_array = card->qdio.out_bufstates;
4843 init_data.scan_threshold = IS_IQD(card) ? 0 : 32;
4845 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
4846 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
4847 rc = qdio_allocate(&init_data);
4849 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
4852 rc = qdio_establish(&init_data);
4854 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
4855 qdio_free(CARD_DDEV(card));
4859 switch (card->options.cq) {
4860 case QETH_CQ_ENABLED:
4861 dev_info(&card->gdev->dev, "Completion Queue support enabled");
4863 case QETH_CQ_DISABLED:
4864 dev_info(&card->gdev->dev, "Completion Queue support disabled");
4870 kfree(out_sbal_ptrs);
4871 out_free_queue_start_poll:
4872 kfree(queue_start_poll);
4874 kfree(in_sbal_ptrs);
4876 kfree(qib_param_field);
4881 static void qeth_core_free_card(struct qeth_card *card)
4883 QETH_CARD_TEXT(card, 2, "freecrd");
4884 qeth_clean_channel(&card->read);
4885 qeth_clean_channel(&card->write);
4886 qeth_clean_channel(&card->data);
4887 qeth_put_cmd(card->read_cmd);
4888 destroy_workqueue(card->event_wq);
4889 unregister_service_level(&card->qeth_service_level);
4890 dev_set_drvdata(&card->gdev->dev, NULL);
4894 void qeth_trace_features(struct qeth_card *card)
4896 QETH_CARD_TEXT(card, 2, "features");
4897 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
4898 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
4899 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
4900 QETH_CARD_HEX(card, 2, &card->info.diagass_support,
4901 sizeof(card->info.diagass_support));
4903 EXPORT_SYMBOL_GPL(qeth_trace_features);
4905 static struct ccw_device_id qeth_ids[] = {
4906 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
4907 .driver_info = QETH_CARD_TYPE_OSD},
4908 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
4909 .driver_info = QETH_CARD_TYPE_IQD},
4910 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
4911 .driver_info = QETH_CARD_TYPE_OSN},
4912 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
4913 .driver_info = QETH_CARD_TYPE_OSM},
4914 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
4915 .driver_info = QETH_CARD_TYPE_OSX},
4918 MODULE_DEVICE_TABLE(ccw, qeth_ids);
4920 static struct ccw_driver qeth_ccw_driver = {
4922 .owner = THIS_MODULE,
4926 .probe = ccwgroup_probe_ccwdev,
4927 .remove = ccwgroup_remove_ccwdev,
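/* Common hardsetup path shared by both disciplines: reset QDIO, set the CCW
 * devices online, IDX-activate the read/write channels, run the MPC
 * initialization handshake, issue STARTLAN, query the IPA assists and
 * adapter parameters, then arm the HW trap and the isolation mode.
 */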
4930 int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
4935 QETH_CARD_TEXT(card, 2, "hrdsetup");
4936 atomic_set(&card->force_alloc_skb, 0);
4937 rc = qeth_update_from_chp_desc(card);
4942 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
4944 rc = qeth_qdio_clear_card(card, !IS_IQD(card));
4945 qeth_stop_channel(&card->data);
4946 qeth_stop_channel(&card->write);
4947 qeth_stop_channel(&card->read);
4948 qdio_free(CARD_DDEV(card));
4949 rc = ccw_device_set_online(CARD_RDEV(card));
4952 rc = ccw_device_set_online(CARD_WDEV(card));
4955 rc = ccw_device_set_online(CARD_DDEV(card));
4959 if (rc == -ERESTARTSYS) {
4960 QETH_CARD_TEXT(card, 2, "break1");
4963 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
4969 qeth_determine_capabilities(card);
4970 qeth_init_tokens(card);
4971 qeth_init_func_level(card);
4973 rc = qeth_idx_activate_read_channel(card);
4975 QETH_CARD_TEXT(card, 2, "break2");
4978 QETH_CARD_TEXT_(card, 2, "3err%d", rc);
4985 rc = qeth_idx_activate_write_channel(card);
4987 QETH_CARD_TEXT(card, 2, "break3");
4990 QETH_CARD_TEXT_(card, 2, "4err%d", rc);
4996 card->read_or_write_problem = 0;
4997 rc = qeth_mpc_initialize(card);
4999 QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5003 rc = qeth_send_startlan(card);
5005 QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5006 if (rc == -ENETDOWN) {
5007 dev_warn(&card->gdev->dev, "The LAN is offline\n");
5008 *carrier_ok = false;
5016 card->options.ipa4.supported_funcs = 0;
5017 card->options.ipa6.supported_funcs = 0;
5018 card->options.adp.supported_funcs = 0;
5019 card->options.sbp.supported_funcs = 0;
5020 card->info.diagass_support = 0;
5021 rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5024 if (qeth_is_supported(card, IPA_IPV6)) {
5025 rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5029 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5030 rc = qeth_query_setadapterparms(card);
5032 QETH_CARD_TEXT_(card, 2, "7err%d", rc);
5036 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5037 rc = qeth_query_setdiagass(card);
5039 QETH_CARD_TEXT_(card, 2, "8err%d", rc);
5044 if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
5045 (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
5046 card->info.hwtrap = 0;
5048 rc = qeth_set_access_ctrl_online(card, 0);
5054 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5055 "an error on the device\n");
5056 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5057 CARD_DEVID(card), rc);
5060 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
5062 static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
5064 struct page *page = virt_to_page(data);
5065 unsigned int next_frag;
5067 next_frag = skb_shinfo(skb)->nr_frags;
5069 skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
5073 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
5075 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
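/* Extract the next packet from an inbound QDIO buffer: locate the qeth_hdr
 * (which must not cross an SBAL element), validate header type and length,
 * then either copy the data into a freshly allocated linear skb or, when RX
 * scatter-gather is in use, attach the elements as page fragments.
 */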
5078 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
5079 struct qeth_qdio_buffer *qethbuffer,
5080 struct qdio_buffer_element **__element, int *__offset,
5081 struct qeth_hdr **hdr)
5083 struct qdio_buffer_element *element = *__element;
5084 struct qdio_buffer *buffer = qethbuffer->buffer;
5085 unsigned int linear_len = 0;
5086 int offset = *__offset;
5087 bool use_rx_sg = false;
5088 unsigned int headroom;
5089 struct sk_buff *skb;
5093 /* qeth_hdr must not cross element boundaries */
5094 while (element->length < offset + sizeof(struct qeth_hdr)) {
5095 if (qeth_is_last_sbale(element))
5100 *hdr = element->addr + offset;
5102 offset += sizeof(struct qeth_hdr);
5105 switch ((*hdr)->hdr.l2.id) {
5106 case QETH_HEADER_TYPE_LAYER2:
5107 skb_len = (*hdr)->hdr.l2.pkt_length;
5108 linear_len = ETH_HLEN;
5111 case QETH_HEADER_TYPE_LAYER3:
5112 skb_len = (*hdr)->hdr.l3.length;
5113 if (!IS_LAYER3(card)) {
5114 QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5118 if ((*hdr)->hdr.l3.flags & QETH_HDR_PASSTHRU) {
5119 linear_len = ETH_HLEN;
5124 if ((*hdr)->hdr.l3.flags & QETH_HDR_IPV6)
5125 linear_len = sizeof(struct ipv6hdr);
5127 linear_len = sizeof(struct iphdr);
5128 headroom = ETH_HLEN;
5130 case QETH_HEADER_TYPE_OSN:
5131 skb_len = (*hdr)->hdr.osn.pdu_length;
5132 if (!IS_OSN(card)) {
5133 QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5137 linear_len = skb_len;
5138 headroom = sizeof(struct qeth_hdr);
5141 if ((*hdr)->hdr.l2.id & QETH_HEADER_MASK_INVAL)
5142 QETH_CARD_STAT_INC(card, rx_frame_errors);
5144 QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5146 /* Can't determine packet length, drop the whole buffer. */
5150 if (skb_len < linear_len) {
5151 QETH_CARD_STAT_INC(card, rx_dropped_runt);
5155 use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
5156 ((skb_len >= card->options.rx_sg_cb) &&
5157 !atomic_read(&card->force_alloc_skb) &&
5160 if (use_rx_sg && qethbuffer->rx_skb) {
5161 /* QETH_CQ_ENABLED only: */
5162 skb = qethbuffer->rx_skb;
5163 qethbuffer->rx_skb = NULL;
5166 linear_len = skb_len;
5167 skb = napi_alloc_skb(&card->napi, linear_len + headroom);
5171 QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5173 skb_reserve(skb, headroom);
5177 int data_len = min(skb_len, (int)(element->length - offset));
5178 char *data = element->addr + offset;
5180 skb_len -= data_len;
5183 /* Extract data from current element: */
5184 if (skb && data_len) {
5186 unsigned int copy_len;
5188 copy_len = min_t(unsigned int, linear_len,
5191 skb_put_data(skb, data, copy_len);
5192 linear_len -= copy_len;
5193 data_len -= copy_len;
5198 qeth_create_skb_frag(skb, data, data_len);
5201 /* Step forward to next element: */
5203 if (qeth_is_last_sbale(element)) {
5204 QETH_CARD_TEXT(card, 4, "unexeob");
5205 QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5207 dev_kfree_skb_any(skb);
5208 QETH_CARD_STAT_INC(card,
5218 /* This packet was skipped, go get another one: */
5222 *__element = element;
5225 QETH_CARD_STAT_INC(card, rx_sg_skbs);
5226 QETH_CARD_STAT_ADD(card, rx_sg_frags,
5227 skb_shinfo(skb)->nr_frags);
5231 EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
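/* RX NAPI poll handler: fetch completed input buffers from QDIO, pass them
 * to the discipline's process_rx_buffer(), recycle the buffer pool entries,
 * and re-enable the QDIO interrupt once the budget is no longer exhausted.
 */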
5233 int qeth_poll(struct napi_struct *napi, int budget)
5235 struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5237 struct qeth_qdio_buffer *buffer;
5239 int new_budget = budget;
5242 if (!card->rx.b_count) {
5243 card->rx.qdio_err = 0;
5244 card->rx.b_count = qdio_get_next_buffers(
5245 card->data.ccwdev, 0, &card->rx.b_index,
5246 &card->rx.qdio_err);
5247 if (card->rx.b_count <= 0) {
5248 card->rx.b_count = 0;
5251 card->rx.b_element =
5252 &card->qdio.in_q->bufs[card->rx.b_index]
5253 .buffer->element[0];
5254 card->rx.e_offset = 0;
5257 while (card->rx.b_count) {
5258 buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5259 if (!(card->rx.qdio_err &&
5260 qeth_check_qdio_errors(card, buffer->buffer,
5261 card->rx.qdio_err, "qinerr")))
5263 card->discipline->process_rx_buffer(
5264 card, new_budget, &done);
5269 QETH_CARD_STAT_INC(card, rx_bufs);
5270 qeth_put_buffer_pool_entry(card,
5271 buffer->pool_entry);
5272 qeth_queue_input_buffer(card, card->rx.b_index);
5274 if (card->rx.b_count) {
5276 QDIO_BUFNR(card->rx.b_index + 1);
5277 card->rx.b_element =
5279 ->bufs[card->rx.b_index]
5280 .buffer->element[0];
5281 card->rx.e_offset = 0;
5285 if (work_done >= budget)
5288 new_budget = budget - work_done;
5292 if (napi_complete_done(napi, work_done) &&
5293 qdio_start_irq(CARD_DDEV(card), 0))
5294 napi_schedule(napi);
5298 EXPORT_SYMBOL_GPL(qeth_poll);
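/* TX completion for IQD: if QDIO flags the buffer as PENDING (only possible
 * with CQ enabled), signal the pending state through the iucv TX
 * notification path and give the queue slot a fresh buffer instead of
 * recycling it; final completion arrives later via the completion queue.
 * Otherwise the buffer is cleared for immediate reuse.
 */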
5300 static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
5301 unsigned int bidx, bool error, int budget)
5303 struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
5304 u8 sflags = buffer->buffer->element[15].sflags;
5305 struct qeth_card *card = queue->card;
5307 if (queue->bufstates && (queue->bufstates[bidx].flags &
5308 QDIO_OUTBUF_STATE_FLAG_PENDING)) {
5309 WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
5311 if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
5312 QETH_QDIO_BUF_PENDING) ==
5313 QETH_QDIO_BUF_PRIMED)
5314 qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
5316 QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
5318 /* prepare the queue slot for re-use: */
5319 qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
5320 if (qeth_init_qdio_out_buf(queue, bidx)) {
5321 QETH_CARD_TEXT(card, 2, "outofbuf");
5322 qeth_schedule_recovery(card);
5328 if (card->options.cq == QETH_CQ_ENABLED)
5329 qeth_notify_skbs(queue, buffer,
5330 qeth_compute_cq_notification(sflags, 0));
5331 qeth_clear_output_buffer(queue, buffer, error, budget);
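/* Per-queue TX NAPI poll (IQD): inspect the output queue for completed
 * buffers, account packets/bytes towards BQL, handle send errors and
 * PENDING buffers, and either re-arm the completion timer or yield when
 * the budget is used up.
 */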
5334 static int qeth_tx_poll(struct napi_struct *napi, int budget)
5336 struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
5337 unsigned int queue_no = queue->queue_no;
5338 struct qeth_card *card = queue->card;
5339 struct net_device *dev = card->dev;
5340 unsigned int work_done = 0;
5341 struct netdev_queue *txq;
5343 txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
5346 unsigned int start, error, i;
5347 unsigned int packets = 0;
5348 unsigned int bytes = 0;
5351 if (qeth_out_queue_is_empty(queue)) {
5352 napi_complete(napi);
5356 /* Give the CPU a breather: */
5357 if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
5358 QETH_TXQ_STAT_INC(queue, completion_yield);
5359 if (napi_complete_done(napi, 0))
5360 napi_schedule(napi);
5364 completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
5366 if (completed <= 0) {
5367 /* Ensure we see TX completion for pending work: */
5368 if (napi_complete_done(napi, 0))
5369 qeth_tx_arm_timer(queue);
5373 for (i = start; i < start + completed; i++) {
5374 struct qeth_qdio_out_buffer *buffer;
5375 unsigned int bidx = QDIO_BUFNR(i);
5377 buffer = queue->bufs[bidx];
5378 packets += skb_queue_len(&buffer->skb_list);
5379 bytes += buffer->bytes;
5381 qeth_handle_send_error(card, buffer, error);
5382 qeth_iqd_tx_complete(queue, bidx, error, budget);
5383 qeth_cleanup_handled_pending(queue, bidx, false);
5386 netdev_tx_completed_queue(txq, packets, bytes);
5387 atomic_sub(completed, &queue->used_buffers);
5388 work_done += completed;
5390 /* xmit may have observed the full-condition, but not yet
5391 * stopped the txq. In which case the code below won't trigger.
5392 * So before returning, xmit will re-check the txq's fill level
5393 * and wake it up if needed.
5395 if (netif_tx_queue_stopped(txq) &&
5396 !qeth_out_queue_is_full(queue))
5397 netif_tx_wake_queue(txq);
5401 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
5403 if (!cmd->hdr.return_code)
5404 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5405 return cmd->hdr.return_code;
5408 static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
5409 struct qeth_reply *reply,
5412 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5413 struct qeth_ipa_caps *caps = reply->param;
5415 if (qeth_setassparms_inspect_rc(cmd))
5418 caps->supported = cmd->data.setassparms.data.caps.supported;
5419 caps->enabled = cmd->data.setassparms.data.caps.enabled;
5423 int qeth_setassparms_cb(struct qeth_card *card,
5424 struct qeth_reply *reply, unsigned long data)
5426 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5428 QETH_CARD_TEXT(card, 4, "defadpcb");
5430 if (cmd->hdr.return_code)
5433 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5434 if (cmd->hdr.prot_version == QETH_PROT_IPV4)
5435 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
5436 if (cmd->hdr.prot_version == QETH_PROT_IPV6)
5437 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
5440 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
5442 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
5443 enum qeth_ipa_funcs ipa_func,
5445 unsigned int data_length,
5446 enum qeth_prot_versions prot)
5448 struct qeth_ipacmd_setassparms *setassparms;
5449 struct qeth_ipacmd_setassparms_hdr *hdr;
5450 struct qeth_cmd_buffer *iob;
5452 QETH_CARD_TEXT(card, 4, "getasscm");
5453 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
5455 offsetof(struct qeth_ipacmd_setassparms,
5460 setassparms = &__ipa_cmd(iob)->data.setassparms;
5461 setassparms->assist_no = ipa_func;
5463 hdr = &setassparms->hdr;
5464 hdr->length = sizeof(*hdr) + data_length;
5465 hdr->command_code = cmd_code;
5468 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
5470 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
5471 enum qeth_ipa_funcs ipa_func,
5472 u16 cmd_code, u32 *data,
5473 enum qeth_prot_versions prot)
5475 unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
5476 struct qeth_cmd_buffer *iob;
5478 QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
5479 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
5484 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
5485 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
5487 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
5489 static void qeth_unregister_dbf_views(void)
5492 for (x = 0; x < QETH_DBF_INFOS; x++) {
5493 debug_unregister(qeth_dbf[x].id);
5494 qeth_dbf[x].id = NULL;
5498 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
5500 char dbf_txt_buf[32];
5503 if (!debug_level_enabled(id, level))
5505 va_start(args, fmt);
5506 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
5508 debug_text_event(id, level, dbf_txt_buf);
5510 EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
5512 static int qeth_register_dbf_views(void)
5517 for (x = 0; x < QETH_DBF_INFOS; x++) {
5518 /* register the areas */
5519 qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
5523 if (qeth_dbf[x].id == NULL) {
5524 qeth_unregister_dbf_views();
5528 /* register a view */
5529 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
5531 qeth_unregister_dbf_views();
5535 /* set a passing level */
5536 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
5542 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */
5544 int qeth_core_load_discipline(struct qeth_card *card,
5545 enum qeth_discipline_id discipline)
5547 mutex_lock(&qeth_mod_mutex);
5548 switch (discipline) {
5549 case QETH_DISCIPLINE_LAYER3:
5550 card->discipline = try_then_request_module(
5551 symbol_get(qeth_l3_discipline), "qeth_l3");
5553 case QETH_DISCIPLINE_LAYER2:
5554 card->discipline = try_then_request_module(
5555 symbol_get(qeth_l2_discipline), "qeth_l2");
5560 mutex_unlock(&qeth_mod_mutex);
5562 if (!card->discipline) {
5563 dev_err(&card->gdev->dev, "There is no kernel module to "
5564 "support discipline %d\n", discipline);
5568 card->options.layer = discipline;
5572 void qeth_core_free_discipline(struct qeth_card *card)
5574 if (IS_LAYER2(card))
5575 symbol_put(qeth_l2_discipline);
5577 symbol_put(qeth_l3_discipline);
5578 card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
5579 card->discipline = NULL;
5582 const struct device_type qeth_generic_devtype = {
5583 .name = "qeth_generic",
5584 .groups = qeth_generic_attr_groups,
5586 EXPORT_SYMBOL_GPL(qeth_generic_devtype);
5588 static const struct device_type qeth_osn_devtype = {
5590 .groups = qeth_osn_attr_groups,
5593 #define DBF_NAME_LEN 20
5595 struct qeth_dbf_entry {
5596 char dbf_name[DBF_NAME_LEN];
5597 debug_info_t *dbf_info;
5598 struct list_head dbf_list;
5601 static LIST_HEAD(qeth_dbf_list);
5602 static DEFINE_MUTEX(qeth_dbf_list_mutex);
5604 static debug_info_t *qeth_get_dbf_entry(char *name)
5606 struct qeth_dbf_entry *entry;
5607 debug_info_t *rc = NULL;
5609 mutex_lock(&qeth_dbf_list_mutex);
5610 list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
5611 if (strcmp(entry->dbf_name, name) == 0) {
5612 rc = entry->dbf_info;
5616 mutex_unlock(&qeth_dbf_list_mutex);
5620 static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
5622 struct qeth_dbf_entry *new_entry;
5624 card->debug = debug_register(name, 2, 1, 8);
5626 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
5629 if (debug_register_view(card->debug, &debug_hex_ascii_view))
5631 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
5634 strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
5635 new_entry->dbf_info = card->debug;
5636 mutex_lock(&qeth_dbf_list_mutex);
5637 list_add(&new_entry->dbf_list, &qeth_dbf_list);
5638 mutex_unlock(&qeth_dbf_list_mutex);
5643 debug_unregister(card->debug);
5648 static void qeth_clear_dbf_list(void)
5650 struct qeth_dbf_entry *entry, *tmp;
5652 mutex_lock(&qeth_dbf_list_mutex);
5653 list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
5654 list_del(&entry->dbf_list);
5655 debug_unregister(entry->dbf_info);
5658 mutex_unlock(&qeth_dbf_list_mutex);
5661 static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
5663 struct net_device *dev;
5665 switch (card->info.type) {
5666 case QETH_CARD_TYPE_IQD:
5667 dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN,
5668 ether_setup, QETH_MAX_QUEUES, 1);
5670 case QETH_CARD_TYPE_OSM:
5671 dev = alloc_etherdev(0);
5673 case QETH_CARD_TYPE_OSN:
5674 dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
5677 dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1);
5683 dev->ml_priv = card;
5684 dev->watchdog_timeo = QETH_TX_TIMEOUT;
5685 dev->min_mtu = IS_OSN(card) ? 64 : 576;
5686 /* initialized when device first goes online: */
5689 SET_NETDEV_DEV(dev, &card->gdev->dev);
5690 netif_carrier_off(dev);
5693 dev->ethtool_ops = &qeth_osn_ethtool_ops;
5695 dev->ethtool_ops = &qeth_ethtool_ops;
5696 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
5697 dev->hw_features |= NETIF_F_SG;
5698 dev->vlan_features |= NETIF_F_SG;
5700 dev->features |= NETIF_F_SG;
5701 if (netif_set_real_num_tx_queues(dev,
5702 QETH_IQD_MIN_TXQ)) {
5712 struct net_device *qeth_clone_netdev(struct net_device *orig)
5714 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
5719 clone->dev_port = orig->dev_port;
5723 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
5725 struct qeth_card *card;
5728 enum qeth_discipline_id enforced_disc;
5729 char dbf_name[DBF_NAME_LEN];
5731 QETH_DBF_TEXT(SETUP, 2, "probedev");
5734 if (!get_device(dev))
5737 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
5739 card = qeth_alloc_card(gdev);
5741 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
5746 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
5747 dev_name(&gdev->dev));
5748 card->debug = qeth_get_dbf_entry(dbf_name);
5750 rc = qeth_add_dbf_entry(card, dbf_name);
5755 qeth_setup_card(card);
5756 card->dev = qeth_alloc_netdev(card);
5762 card->qdio.no_out_queues = card->dev->num_tx_queues;
5763 rc = qeth_update_from_chp_desc(card);
5766 qeth_determine_capabilities(card);
5767 qeth_set_blkt_defaults(card);
5769 enforced_disc = qeth_enforce_discipline(card);
5770 switch (enforced_disc) {
5771 case QETH_DISCIPLINE_UNDETERMINED:
5772 gdev->dev.type = &qeth_generic_devtype;
5775 card->info.layer_enforced = true;
5776 rc = qeth_core_load_discipline(card, enforced_disc);
5780 gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
5781 card->discipline->devtype;
5782 rc = card->discipline->setup(card->gdev);
5791 qeth_core_free_discipline(card);
5794 free_netdev(card->dev);
5796 qeth_core_free_card(card);
5802 static void qeth_core_remove_device(struct ccwgroup_device *gdev)
5804 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5806 QETH_CARD_TEXT(card, 2, "removedv");
5808 if (card->discipline) {
5809 card->discipline->remove(gdev);
5810 qeth_core_free_discipline(card);
5813 qeth_free_qdio_queues(card);
5815 free_netdev(card->dev);
5816 qeth_core_free_card(card);
5817 put_device(&gdev->dev);
5820 static int qeth_core_set_online(struct ccwgroup_device *gdev)
5822 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5824 enum qeth_discipline_id def_discipline;
5826 if (!card->discipline) {
5827 def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
5828 QETH_DISCIPLINE_LAYER2;
5829 rc = qeth_core_load_discipline(card, def_discipline);
5832 rc = card->discipline->setup(card->gdev);
5834 qeth_core_free_discipline(card);
5838 rc = card->discipline->set_online(gdev);
5843 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
5845 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5846 return card->discipline->set_offline(gdev);
5849 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
5851 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5852 qeth_set_allowed_threads(card, 0, 1);
5853 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
5854 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5855 qeth_qdio_clear_card(card, 0);
5856 qeth_drain_output_queues(card);
5857 qdio_free(CARD_DDEV(card));
5860 static int qeth_suspend(struct ccwgroup_device *gdev)
5862 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5864 qeth_set_allowed_threads(card, 0, 1);
5865 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
5866 if (gdev->state == CCWGROUP_OFFLINE)
5869 card->discipline->set_offline(gdev);
5873 static int qeth_resume(struct ccwgroup_device *gdev)
5875 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5878 rc = card->discipline->set_online(gdev);
5880 qeth_set_allowed_threads(card, 0xffffffff, 0);
5882 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover an error on the device\n");
5886 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
5891 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
5894 return err ? err : count;
5896 static DRIVER_ATTR_WO(group);
5898 static struct attribute *qeth_drv_attrs[] = {
5899 &driver_attr_group.attr,
5902 static struct attribute_group qeth_drv_attr_group = {
5903 .attrs = qeth_drv_attrs,
5905 static const struct attribute_group *qeth_drv_attr_groups[] = {
5906 &qeth_drv_attr_group,
5910 static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
5912 .groups = qeth_drv_attr_groups,
5913 .owner = THIS_MODULE,
5916 .ccw_driver = &qeth_ccw_driver,
5917 .setup = qeth_core_probe_device,
5918 .remove = qeth_core_remove_device,
5919 .set_online = qeth_core_set_online,
5920 .set_offline = qeth_core_set_offline,
5921 .shutdown = qeth_core_shutdown,
5924 .freeze = qeth_suspend,
5925 .thaw = qeth_resume,
5926 .restore = qeth_resume,
5929 struct qeth_card *qeth_get_card_by_busid(char *bus_id)
5931 struct ccwgroup_device *gdev;
5932 struct qeth_card *card;
5934 gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
5938 card = dev_get_drvdata(&gdev->dev);
5939 put_device(&gdev->dev);
5942 EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
5944 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5946 struct qeth_card *card = dev->ml_priv;
5947 struct mii_ioctl_data *mii_data;
5954 case SIOC_QETH_ADP_SET_SNMP_CONTROL:
5955 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
5957 case SIOC_QETH_GET_CARD_TYPE:
5958 if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
5963 mii_data = if_mii(rq);
5964 mii_data->phy_id = 0;
5967 mii_data = if_mii(rq);
5968 if (mii_data->phy_id != 0)
5971 mii_data->val_out = qeth_mdio_read(dev,
5972 mii_data->phy_id, mii_data->reg_num);
5974 case SIOC_QETH_QUERY_OAT:
5975 rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
5978 if (card->discipline->do_ioctl)
5979 rc = card->discipline->do_ioctl(dev, rq, cmd);
5984 QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
5987 EXPORT_SYMBOL_GPL(qeth_do_ioctl);
5989 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
5992 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5993 u32 *features = reply->param;
5995 if (qeth_setassparms_inspect_rc(cmd))
5998 *features = cmd->data.setassparms.data.flags_32bit;
6002 static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6003 enum qeth_prot_versions prot)
6005 return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
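/* Enable HW checksum offload: ASS_START to learn the supported flags,
 * ASS_ENABLE with the required TCP/UDP (and, for L3 IPv4 TX, IP header)
 * bits, then verify the returned capabilities; any mismatch rolls the
 * assist back via qeth_set_csum_off().
 */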
6009 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6010 enum qeth_prot_versions prot)
6012 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6013 struct qeth_cmd_buffer *iob;
6014 struct qeth_ipa_caps caps;
6018 /* some L3 HW requires combined L3+L4 csum offload: */
6019 if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
6020 cstype == IPA_OUTBOUND_CHECKSUM)
6021 required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6023 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
6028 rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
6032 if ((required_features & features) != required_features) {
6033 qeth_set_csum_off(card, cstype, prot);
6037 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
6038 SETASS_DATA_SIZEOF(flags_32bit),
6041 qeth_set_csum_off(card, cstype, prot);
6045 if (features & QETH_IPA_CHECKSUM_LP2LP)
6046 required_features |= QETH_IPA_CHECKSUM_LP2LP;
6047 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
6048 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6050 qeth_set_csum_off(card, cstype, prot);
6054 if (!qeth_ipa_caps_supported(&caps, required_features) ||
6055 !qeth_ipa_caps_enabled(&caps, required_features)) {
6056 qeth_set_csum_off(card, cstype, prot);
6060 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
6061 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6062 if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) &&
6063 cstype == IPA_OUTBOUND_CHECKSUM)
6064 dev_warn(&card->gdev->dev,
6065 "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
6066 QETH_CARD_IFNAME(card));
6070 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6071 enum qeth_prot_versions prot)
6073 return on ? qeth_set_csum_on(card, cstype, prot) :
6074 qeth_set_csum_off(card, cstype, prot);
6077 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
6080 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6081 struct qeth_tso_start_data *tso_data = reply->param;
6083 if (qeth_setassparms_inspect_rc(cmd))
6086 tso_data->mss = cmd->data.setassparms.data.tso.mss;
6087 tso_data->supported = cmd->data.setassparms.data.tso.supported;
6091 static int qeth_set_tso_off(struct qeth_card *card,
6092 enum qeth_prot_versions prot)
6094 return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
6095 IPA_CMD_ASS_STOP, NULL, prot);
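/* Enable TSO: ASS_START to query the adapter's MSS limit and supported
 * types, ASS_ENABLE for QETH_IPA_LARGE_SEND_TCP, then verify the caps;
 * on any failure the assist is stopped again and the feature stays off.
 */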
6098 static int qeth_set_tso_on(struct qeth_card *card,
6099 enum qeth_prot_versions prot)
6101 struct qeth_tso_start_data tso_data;
6102 struct qeth_cmd_buffer *iob;
6103 struct qeth_ipa_caps caps;
6106 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6107 IPA_CMD_ASS_START, 0, prot);
6111 rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6115 if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
6116 qeth_set_tso_off(card, prot);
6120 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6122 SETASS_DATA_SIZEOF(caps), prot);
6124 qeth_set_tso_off(card, prot);
6128 /* enable TSO capability */
6129 __ipa_cmd(iob)->data.setassparms.data.caps.enabled =
6130 QETH_IPA_LARGE_SEND_TCP;
6131 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6133 qeth_set_tso_off(card, prot);
6137 if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
6138 !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
6139 qeth_set_tso_off(card, prot);
6143 dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
6148 static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
6149 enum qeth_prot_versions prot)
6151 return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
6154 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
6156 int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
6159 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
6160 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6162 if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6163 /* no/one Offload Assist available, so the rc is trivial */
6166 rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6170 /* enable: success if any Assist is active */
6171 return (rc_ipv6) ? rc_ipv4 : 0;
6173 /* disable: failure if any Assist is still active */
6174 return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
6178 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6179 * @dev: a net_device
6181 void qeth_enable_hw_features(struct net_device *dev)
6183 struct qeth_card *card = dev->ml_priv;
6184 netdev_features_t features;
6186 features = dev->features;
6187 /* force-off any feature that might need an IPA sequence.
6188 * netdev_update_features() will restart them.
6190 dev->features &= ~dev->hw_features;
6191 /* toggle VLAN filter, so that VIDs are re-programmed: */
6192 if (IS_LAYER2(card) && IS_VM_NIC(card)) {
6193 dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
6194 dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6196 netdev_update_features(dev);
6197 if (features != dev->features)
6198 dev_warn(&card->gdev->dev,
6199 "Device recovery failed to restore all offload features\n");
6201 EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
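/* ndo_set_features hook: toggle the corresponding IPA assist for each
 * changed offload bit; bits that could not be switched are masked back out
 * of dev->features and the last error is returned.
 */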
6203 int qeth_set_features(struct net_device *dev, netdev_features_t features)
6205 struct qeth_card *card = dev->ml_priv;
6206 netdev_features_t changed = dev->features ^ features;
6209 QETH_CARD_TEXT(card, 2, "setfeat");
6210 QETH_CARD_HEX(card, 2, &features, sizeof(features));
6212 if ((changed & NETIF_F_IP_CSUM)) {
6213 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6214 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
6216 changed ^= NETIF_F_IP_CSUM;
6218 if (changed & NETIF_F_IPV6_CSUM) {
6219 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6220 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
6222 changed ^= NETIF_F_IPV6_CSUM;
6224 if (changed & NETIF_F_RXCSUM) {
6225 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
6227 changed ^= NETIF_F_RXCSUM;
6229 if (changed & NETIF_F_TSO) {
6230 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
6233 changed ^= NETIF_F_TSO;
6235 if (changed & NETIF_F_TSO6) {
6236 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
6239 changed ^= NETIF_F_TSO6;
6242 /* everything changed successfully? */
6243 if ((dev->features ^ features) == changed)
6245 /* something went wrong. save changed features and return error */
6246 dev->features ^= changed;
6249 EXPORT_SYMBOL_GPL(qeth_set_features);
6251 netdev_features_t qeth_fix_features(struct net_device *dev,
6252 netdev_features_t features)
6254 struct qeth_card *card = dev->ml_priv;
6256 QETH_CARD_TEXT(card, 2, "fixfeat");
6257 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
6258 features &= ~NETIF_F_IP_CSUM;
6259 if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
6260 features &= ~NETIF_F_IPV6_CSUM;
6261 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
6262 !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6263 features &= ~NETIF_F_RXCSUM;
6264 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
6265 features &= ~NETIF_F_TSO;
6266 if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
6267 features &= ~NETIF_F_TSO6;
6269 QETH_CARD_HEX(card, 2, &features, sizeof(features));
6272 EXPORT_SYMBOL_GPL(qeth_fix_features);
6274 netdev_features_t qeth_features_check(struct sk_buff *skb,
6275 struct net_device *dev,
6276 netdev_features_t features)
6278 /* GSO segmentation builds skbs with
6279 * a (small) linear part for the headers, and
6280 * page frags for the data.
6281 * Compared to a linear skb, the header-only part consumes an
6282 * additional buffer element. This reduces buffer utilization, and
6283 * hurts throughput. So compress small segments into one element.
6285 if (netif_needs_gso(skb, features)) {
6286 /* match skb_segment(): */
6287 unsigned int doffset = skb->data - skb_mac_header(skb);
6288 unsigned int hsize = skb_shinfo(skb)->gso_size;
6289 unsigned int hroom = skb_headroom(skb);
6291 /* linearize only if resulting skb allocations are order-0: */
6292 if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
6293 features &= ~NETIF_F_SG;
6296 return vlan_features_check(skb, features);
6298 EXPORT_SYMBOL_GPL(qeth_features_check);
6300 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6302 struct qeth_card *card = dev->ml_priv;
6303 struct qeth_qdio_out_q *queue;
6306 QETH_CARD_TEXT(card, 5, "getstat");
6308 stats->rx_packets = card->stats.rx_packets;
6309 stats->rx_bytes = card->stats.rx_bytes;
6310 stats->rx_errors = card->stats.rx_length_errors +
6311 card->stats.rx_frame_errors +
6312 card->stats.rx_fifo_errors;
6313 stats->rx_dropped = card->stats.rx_dropped_nomem +
6314 card->stats.rx_dropped_notsupp +
6315 card->stats.rx_dropped_runt;
6316 stats->multicast = card->stats.rx_multicast;
6317 stats->rx_length_errors = card->stats.rx_length_errors;
6318 stats->rx_frame_errors = card->stats.rx_frame_errors;
6319 stats->rx_fifo_errors = card->stats.rx_fifo_errors;
6321 for (i = 0; i < card->qdio.no_out_queues; i++) {
6322 queue = card->qdio.out_qs[i];
6324 stats->tx_packets += queue->stats.tx_packets;
6325 stats->tx_bytes += queue->stats.tx_bytes;
6326 stats->tx_errors += queue->stats.tx_errors;
6327 stats->tx_dropped += queue->stats.tx_dropped;
6330 EXPORT_SYMBOL_GPL(qeth_get_stats64);
6332 u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
6333 u8 cast_type, struct net_device *sb_dev)
6335 if (cast_type != RTN_UNICAST)
6336 return QETH_IQD_MCAST_TXQ;
6337 return QETH_IQD_MIN_UCAST_TXQ;
6339 EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
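/* ndo_open: disable the QDIO input interrupt (NAPI polling takes over),
 * start the TX queues and enable the RX NAPI instance; IQD devices with
 * per-queue TX completion additionally register and kick one NAPI instance
 * per output queue.
 */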
6341 int qeth_open(struct net_device *dev)
6343 struct qeth_card *card = dev->ml_priv;
6345 QETH_CARD_TEXT(card, 4, "qethopen");
6347 if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
6350 card->data.state = CH_STATE_UP;
6351 netif_tx_start_all_queues(dev);
6353 napi_enable(&card->napi);
6355 napi_schedule(&card->napi);
6357 struct qeth_qdio_out_q *queue;
6360 qeth_for_each_output_queue(card, queue, i) {
6361 netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
6363 napi_enable(&queue->napi);
6364 napi_schedule(&queue->napi);
6367 /* kick-start the NAPI softirq: */
6371 EXPORT_SYMBOL_GPL(qeth_open);
6373 int qeth_stop(struct net_device *dev)
6375 struct qeth_card *card = dev->ml_priv;
6377 QETH_CARD_TEXT(card, 4, "qethstop");
6379 struct qeth_qdio_out_q *queue;
6382 /* Quiesce the NAPI instances: */
6383 qeth_for_each_output_queue(card, queue, i) {
6384 napi_disable(&queue->napi);
6385 del_timer_sync(&queue->timer);
6388 /* Stop .ndo_start_xmit, might still access queue->napi. */
6389 netif_tx_disable(dev);
6391 /* Queues may get re-allocated, so remove the NAPIs here. */
6392 qeth_for_each_output_queue(card, queue, i)
6393 netif_napi_del(&queue->napi);
6395 netif_tx_disable(dev);
6398 napi_disable(&card->napi);
6401 EXPORT_SYMBOL_GPL(qeth_stop);
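/* Module init: register the s390dbf views, the "qeth" root device, the
 * kmem caches for TX headers and outbound buffer descriptors, and finally
 * the ccw and ccwgroup drivers. Unwind in reverse order on any failure.
 */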
6403 static int __init qeth_core_init(void)
6407 pr_info("loading core functions\n");
6409 rc = qeth_register_dbf_views();
6412 qeth_core_root_dev = root_device_register("qeth");
6413 rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
6416 qeth_core_header_cache =
6417 kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
6418 roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
6420 if (!qeth_core_header_cache) {
6424 qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
6425 sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
6426 if (!qeth_qdio_outbuf_cache) {
6430 rc = ccw_driver_register(&qeth_ccw_driver);
6433 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
6440 ccw_driver_unregister(&qeth_ccw_driver);
6442 kmem_cache_destroy(qeth_qdio_outbuf_cache);
6444 kmem_cache_destroy(qeth_core_header_cache);
6446 root_device_unregister(qeth_core_root_dev);
6448 qeth_unregister_dbf_views();
6450 pr_err("Initializing the qeth device driver failed\n");
6454 static void __exit qeth_core_exit(void)
6456 qeth_clear_dbf_list();
6457 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
6458 ccw_driver_unregister(&qeth_ccw_driver);
6459 kmem_cache_destroy(qeth_qdio_outbuf_cache);
6460 kmem_cache_destroy(qeth_core_header_cache);
6461 root_device_unregister(qeth_core_root_dev);
6462 qeth_unregister_dbf_views();
6463 pr_info("core functions removed\n");
6466 module_init(qeth_core_init);
6467 module_exit(qeth_core_exit);
6468 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
6469 MODULE_DESCRIPTION("qeth core functions");
6470 MODULE_LICENSE("GPL");