2 * PPP async serial channel driver for Linux.
4 * Copyright 1999 Paul Mackerras.
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
11 * This driver provides the encapsulation and framing for sending
12 * and receiving PPP frames over async serial lines. It relies on
13 * the generic PPP layer to give it frames to send and to process
14 * received frames. It implements the PPP line discipline.
16 * Part of the code in this driver was inspired by the old async-only
17 * PPP driver, written by Michael Callahan and Al Longyear, and
18 * subsequently hacked by Paul Mackerras.
20 * ==FILEVERSION 20020125==
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/skbuff.h>
26 #include <linux/tty.h>
27 #include <linux/netdevice.h>
28 #include <linux/poll.h>
29 #include <linux/ppp_defs.h>
30 #include <linux/if_ppp.h>
31 #include <linux/ppp_channel.h>
32 #include <linux/spinlock.h>
33 #include <linux/init.h>
34 #include <asm/uaccess.h>
36 #define PPP_VERSION "2.4.2"
40 /* Structure for storing local state. */
42 struct tty_struct *tty;
49 unsigned long xmit_flags;
52 unsigned int bytes_sent;
53 unsigned int bytes_rcvd;
60 unsigned long last_xmit;
66 struct semaphore dead_sem;
67 struct ppp_channel chan; /* interface to generic ppp layer */
68 unsigned char obuf[OBUFSIZE];
71 /* Bit numbers in xmit_flags */
77 #define SC_TOSS 0x20000000
78 #define SC_ESCAPE 0x40000000
81 #define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
83 static int flag_time = HZ;
84 MODULE_PARM(flag_time, "i");
85 MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
86 MODULE_LICENSE("GPL");
92 static int ppp_async_encode(struct asyncppp *ap);
93 static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
94 static int ppp_async_push(struct asyncppp *ap);
95 static void ppp_async_flush_output(struct asyncppp *ap);
96 static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
97 char *flags, int count);
98 static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
100 static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
101 int len, int inbound);
103 static struct ppp_channel_ops async_ops = {
109 * Routines implementing the PPP line discipline.
113 * We have a potential race on dereferencing tty->disc_data,
114 * because the tty layer provides no locking at all - thus one
115 * cpu could be running ppp_asynctty_receive while another
116 * calls ppp_asynctty_close, which zeroes tty->disc_data and
117 * frees the memory that ppp_asynctty_receive is using. The best
118 * way to fix this is to use a rwlock in the tty struct, but for now
119 * we use a single global rwlock for all ttys in ppp line discipline.
121 * FIXME: this is no longer true. The _close path for the ldisc is
122 * now guaranteed to be sane.
124 static rwlock_t disc_data_lock = RW_LOCK_UNLOCKED;
/*
 * Fetch the asyncppp state attached to a tty, taking a reference so a
 * concurrent ldisc close cannot free it while the caller is using it.
 * Must be paired with ap_put().
 * NOTE(review): intermediate source lines are elided in this view; the
 * body shown is a partial excerpt.
 */
126 static struct asyncppp *ap_get(struct tty_struct *tty)
130 read_lock(&disc_data_lock);
/* bump refcount while still under disc_data_lock, so the struct
 * stays valid after the lock is dropped */
133 atomic_inc(&ap->refcnt);
134 read_unlock(&disc_data_lock);
/*
 * Drop a reference taken by ap_get(). When the last reference goes
 * away, the close path is signalled — presumably via ap->dead_sem;
 * TODO confirm, the action on the final put is elided in this view.
 */
138 static void ap_put(struct asyncppp *ap)
140 if (atomic_dec_and_test(&ap->refcnt))
145 * Called when a tty is put into PPP line discipline. Called in process
/*
 * Line-discipline open: allocate and zero the per-tty asyncppp state,
 * initialise its locks and refcounting, then register it as a channel
 * with the generic PPP layer. Runs in process context.
 * NOTE(review): many source lines are elided in this view; the body is
 * a partial excerpt.
 */
149 ppp_asynctty_open(struct tty_struct *tty)
156 ap = kmalloc(sizeof(*ap), GFP_KERNEL);
160 /* initialize the asyncppp structure */
161 memset(ap, 0, sizeof(*ap));
164 spin_lock_init(&ap->xmit_lock);
165 spin_lock_init(&ap->recv_lock);
/* always escape 0x7d and 0x7e on transmit (bits 29/30 of word 3
 * of the async control-character map) */
167 ap->xaccm[3] = 0x60000000U;
/* one reference owned by the ldisc itself; dead_sem starts locked
 * so close can sleep until the last user is gone */
173 atomic_set(&ap->refcnt, 1);
174 init_MUTEX_LOCKED(&ap->dead_sem);
176 ap->chan.private = ap;
177 ap->chan.ops = &async_ops;
178 ap->chan.mtu = PPP_MRU;
179 err = ppp_register_channel(&ap->chan);
195 * Called when the tty is put into another line discipline
196 * or it hangs up. We have to wait for any cpu currently
197 * executing in any of the other ppp_asynctty_* routines to
198 * finish before we can call ppp_unregister_channel and free
199 * the asyncppp struct. This routine must be called from
200 * process context, not interrupt or softirq context.
/*
 * Line-discipline close: detach the asyncppp state from the tty under
 * the writer side of disc_data_lock, wait for all current users to
 * drop their references, then unregister the PPP channel. Process
 * context only.
 * NOTE(review): several source lines (including the wait on dead_sem
 * implied by the refcount drop) are elided in this view.
 */
203 ppp_asynctty_close(struct tty_struct *tty)
207 write_lock(&disc_data_lock);
210 write_unlock(&disc_data_lock);
215 * We have now ensured that nobody can start using ap from now
216 * on, but we have to wait for all existing users to finish.
217 * Note that ppp_unregister_channel ensures that no calls to
218 * our channel ops (i.e. ppp_async_send/ioctl) are in progress
219 * by the time it returns.
/* if other users still hold references, sleep until the last one
 * is released — the sleep itself is elided in this view */
221 if (!atomic_dec_and_test(&ap->refcnt))
224 ppp_unregister_channel(&ap->chan);
234 * Called on tty hangup in process context.
236 * Wait for I/O to driver to complete and unregister PPP channel.
237 * This is already done by the close routine, so just call that.
/*
 * Tty hangup handler (process context): teardown is identical to the
 * close path, so just delegate to ppp_asynctty_close().
 */
239 static int ppp_asynctty_hangup(struct tty_struct *tty)
241 ppp_asynctty_close(tty);
246 * Read does nothing - no data is ever available this way.
247 * Pppd reads and writes packets via /dev/ppp instead.
/*
 * ldisc read: intentionally a stub — pppd exchanges packets through
 * /dev/ppp, never through the tty itself. Return value elided in this
 * view; presumably an error code — TODO confirm.
 */
250 ppp_asynctty_read(struct tty_struct *tty, struct file *file,
251 unsigned char *buf, size_t count)
257 * Write on the tty does nothing, the packets all come in
258 * from the ppp generic stuff.
/*
 * ldisc write: intentionally a stub — outbound frames arrive via the
 * generic PPP layer (ppp_async_send), not via tty writes. Return value
 * elided in this view; presumably an error code — TODO confirm.
 */
261 ppp_asynctty_write(struct tty_struct *tty, struct file *file,
262 const unsigned char *buf, size_t count)
268 * Called in process context only. May be re-entered by multiple
269 * ioctl calling threads.
/*
 * ldisc ioctl handler (process context, may be re-entered): serves
 * PPP channel/unit queries, flushes our buffers on TCFLSH, and hands
 * everything else to n_tty_ioctl(). The reference from ap_get() is
 * presumably dropped via ap_put() on exit — elided in this view.
 * NOTE(review): the switch framing and several cases are elided; only
 * representative lines are shown.
 */
273 ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
274 unsigned int cmd, unsigned long arg)
276 struct asyncppp *ap = ap_get(tty);
/* PPPIOCGCHAN: report channel index to user space */
288 if (put_user(ppp_channel_index(&ap->chan), (int *) arg))
/* PPPIOCGUNIT: report attached ppp unit number */
298 if (put_user(ppp_unit_number(&ap->chan), (int *) arg))
305 err = n_tty_ioctl(tty, file, cmd, arg);
309 /* flush our buffers and the serial port's buffer */
310 if (arg == TCIOFLUSH || arg == TCOFLUSH)
311 ppp_async_flush_output(ap);
312 err = n_tty_ioctl(tty, file, cmd, arg);
317 if (put_user(val, (int *) arg))
330 /* No kernel lock - fine */
332 ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
338 ppp_asynctty_room(struct tty_struct *tty)
/*
 * Called by the tty driver with received characters. Feeds them to the
 * HDLC deframer under recv_lock, then unthrottles the tty if it had
 * been throttled. The ap_get() reference is presumably released on
 * exit — elided in this view.
 */
344 ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
345 char *flags, int count)
347 struct asyncppp *ap = ap_get(tty);
/* recv_lock serialises against the SC_RCV_BITS update in
 * ppp_async_ioctl's PPPIOCSFLAGS path */
351 spin_lock_bh(&ap->recv_lock);
352 ppp_async_input(ap, buf, flags, count);
353 spin_unlock_bh(&ap->recv_lock);
355 if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
356 && tty->driver.unthrottle)
357 tty->driver.unthrottle(tty);
/*
 * Tty write-wakeup callback: the driver can accept more output, so
 * clear the wakeup request, push any pending data, and — if a whole
 * packet was completed — tell the generic layer it may send more.
 */
361 ppp_asynctty_wakeup(struct tty_struct *tty)
363 struct asyncppp *ap = ap_get(tty);
365 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
368 if (ppp_async_push(ap))
369 ppp_output_wakeup(&ap->chan);
/*
 * The N_PPP line-discipline operations table (old GNU-style labelled
 * initialisers, as used throughout 2.4-era kernels).
 */
374 static struct tty_ldisc ppp_ldisc = {
375 magic: TTY_LDISC_MAGIC,
377 open: ppp_asynctty_open,
378 close: ppp_asynctty_close,
379 read: ppp_asynctty_read,
380 write: ppp_asynctty_write,
381 ioctl: ppp_asynctty_ioctl,
382 poll: ppp_asynctty_poll,
383 hangup: ppp_asynctty_hangup,
384 receive_room: ppp_asynctty_room,
385 receive_buf: ppp_asynctty_receive,
386 write_wakeup: ppp_asynctty_wakeup,
394 err = tty_register_ldisc(N_PPP, &ppp_ldisc);
396 printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
402 * The following routines provide the PPP channel interface.
/*
 * Channel-ops ioctl, called from the generic PPP layer: get/set the
 * SC_* flag word, the transmit and receive async control-character
 * maps (asyncmaps), and the MRU.
 * NOTE(review): the switch framing and several cases are elided in
 * this view; only representative lines are shown.
 */
405 ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
407 struct asyncppp *ap = chan->private;
/* PPPIOCGFLAGS: flags plus the receive-quality bits */
414 val = ap->flags | ap->rbits;
415 if (put_user(val, (int *) arg))
420 if (get_user(val, (int *) arg))
/* SC_RCV_BITS are owned by the receive path, so update them
 * under recv_lock, separately from the rest of the flags */
422 ap->flags = val & ~SC_RCV_BITS;
423 spin_lock_bh(&ap->recv_lock);
424 ap->rbits = val & SC_RCV_BITS;
425 spin_unlock_bh(&ap->recv_lock);
429 case PPPIOCGASYNCMAP:
430 if (put_user(ap->xaccm[0], (u32 *) arg))
/* note: get_user writes straight into ap->xaccm[0]/ap->raccm */
434 case PPPIOCSASYNCMAP:
435 if (get_user(ap->xaccm[0], (u32 *) arg))
440 case PPPIOCGRASYNCMAP:
441 if (put_user(ap->raccm, (u32 *) arg))
445 case PPPIOCSRASYNCMAP:
446 if (get_user(ap->raccm, (u32 *) arg))
451 case PPPIOCGXASYNCMAP:
452 if (copy_to_user((void *) arg, ap->xaccm, sizeof(ap->xaccm)))
456 case PPPIOCSXASYNCMAP:
457 if (copy_from_user(accm, (void *) arg, sizeof(accm)))
/* sanitise the extended map before accepting it */
459 accm[2] &= ~0x40000000U; /* can't escape 0x5e */
460 accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */
461 memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
466 if (put_user(ap->mru, (int *) arg))
471 if (get_user(val, (int *) arg))
487 * Procedures for encapsulation and framing.
/*
 * Lookup table for the PPP FCS-16 (CRC-16/CCITT, bit-reflected,
 * polynomial 0x8408) — identical to the table published in RFC 1662.
 * Exported so other PPP channel drivers can share it; the PPP_FCS
 * macro expects it under the name "fcstab" (aliased below).
 */
490 u16 ppp_crc16_table[256] = {
491 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
492 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
493 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
494 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
495 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
496 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
497 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
498 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
499 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
500 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
501 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
502 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
503 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
504 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
505 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
506 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
507 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
508 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
509 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
510 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
511 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
512 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
513 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
514 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
515 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
516 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
517 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
518 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
519 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
520 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
521 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
522 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
524 EXPORT_SYMBOL(ppp_crc16_table);
525 #define fcstab ppp_crc16_table /* for PPP_FCS macro */
528 * Procedure to encode the data for async serial transmission.
529 * Does octet stuffing (escaping), puts the address/control bytes
530 * on if A/C compression is disabled, and does protocol compression.
531 * Assumes ap->tpkt != 0 on entry.
532 * Returns 1 if we finished the current frame, 0 otherwise.
535 #define PUT_BYTE(ap, buf, c, islcp) do { \
536 if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
537 *buf++ = PPP_ESCAPE; \
/*
 * Encode (as much as fits of) the pending packet ap->tpkt into the
 * output buffer ap->obuf: HDLC byte-stuffing per the transmit accm,
 * optional address/control and protocol-field compression, FCS-16 and
 * flag bytes. Returns 1 when the packet is finished, 0 otherwise.
 * NOTE(review): many source lines are elided in this view; the body is
 * a partial excerpt.
 */
544 ppp_async_encode(struct asyncppp *ap)
546 int fcs, i, count, c, proto;
547 unsigned char *buf, *buflim;
555 data = ap->tpkt->data;
556 count = ap->tpkt->len;
558 proto = (data[0] << 8) + data[1];
561 * LCP packets with code values between 1 (configure-request)
562 * and 7 (code-reject) must be sent as though no options
563 * had been negotiated.
565 islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
/* watch outbound LCP so we can react to the peer's confack later */
569 async_lcp_peek(ap, data, count, 0);
572 * Start of a new packet - insert the leading FLAG
573 * character if necessary.
575 if (islcp || flag_time == 0
576 || jiffies - ap->last_xmit >= flag_time)
578 ap->last_xmit = jiffies;
582 * Put in the address/control bytes if necessary
/* LCP frames always carry FF 03, regardless of SC_COMP_AC */
584 if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
585 PUT_BYTE(ap, buf, 0xff, islcp);
586 fcs = PPP_FCS(fcs, 0xff);
587 PUT_BYTE(ap, buf, 0x03, islcp);
588 fcs = PPP_FCS(fcs, 0x03);
593 * Once we put in the last byte, we need to put in the FCS
594 * and closing flag, so make sure there is at least 7 bytes
595 * of free space in the output buffer.
597 buflim = ap->obuf + OBUFSIZE - 6;
598 while (i < count && buf < buflim) {
600 if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
601 continue; /* compress protocol field */
602 fcs = PPP_FCS(fcs, c);
603 PUT_BYTE(ap, buf, c, islcp);
608 * Remember where we are up to in this packet.
617 * We have finished the packet. Add the FCS and flag.
/* FCS is transmitted complemented, low byte first */
621 PUT_BYTE(ap, buf, c, islcp);
622 c = (fcs >> 8) & 0xff;
623 PUT_BYTE(ap, buf, c, islcp);
627 dev_kfree_skb_any(ap->tpkt);
633 * Transmit-side routines.
637 * Send a packet to the peer over an async tty line.
638 * Returns 1 iff the packet was accepted.
639 * If the packet was not accepted, we will call ppp_output_wakeup
640 * at some later time.
/*
 * Channel-ops transmit entry: accept one packet from the generic PPP
 * layer. Returns 1 iff accepted; if we are already holding a packet
 * (XMIT_FULL set) we return 0 and ppp_output_wakeup() is called later.
 * NOTE(review): the tail of the function (queuing the skb and pushing
 * output) is elided in this view.
 */
643 ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
645 struct asyncppp *ap = chan->private;
/* XMIT_FULL doubles as "a packet is pending": test-and-set makes
 * acceptance atomic */
649 if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
650 return 0; /* already full */
659 * Push as much data as possible out to the tty.
/*
 * Push as much encoded data as the tty driver will take. Returns
 * nonzero (via 'done') when a whole packet was completed, so the
 * caller can wake the generic layer. Re-entrancy (e.g. a pty wired to
 * itself) is handled with the XMIT_BUSY/XMIT_WAKEUP bit protocol
 * described inline.
 * NOTE(review): several source lines are elided in this view; the body
 * is a partial excerpt.
 */
662 ppp_async_push(struct asyncppp *ap)
664 int avail, sent, done = 0;
665 struct tty_struct *tty = ap->tty;
668 set_bit(XMIT_WAKEUP, &ap->xmit_flags);
670 * We can get called recursively here if the tty write
671 * function calls our wakeup function. This can happen
672 * for example on a pty with both the master and slave
673 * set to PPP line discipline.
674 * We use the XMIT_BUSY bit to detect this and get out,
675 * leaving the XMIT_WAKEUP bit set to tell the other
676 * instance that it may now be able to write more.
678 if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
680 spin_lock_bh(&ap->xmit_lock);
682 if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
684 if (!tty_stuffed && ap->optr < ap->olim) {
685 avail = ap->olim - ap->optr;
/* ask the driver to wake us when it can take more data */
686 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
687 sent = tty->driver.write(tty, 0, ap->optr, avail);
689 goto flush; /* error, e.g. loss of CD */
/* output buffer drained: encode more of the pending packet */
695 if (ap->optr >= ap->olim && ap->tpkt != 0) {
696 if (ppp_async_encode(ap)) {
697 /* finished processing ap->tpkt */
698 clear_bit(XMIT_FULL, &ap->xmit_flags);
704 * We haven't made any progress this time around.
705 * Clear XMIT_BUSY to let other callers in, but
706 * after doing so we have to check if anyone set
707 * XMIT_WAKEUP since we last checked it. If they
708 * did, we should try again to set XMIT_BUSY and go
709 * around again in case XMIT_BUSY was still set when
710 * the other caller tried.
712 clear_bit(XMIT_BUSY, &ap->xmit_flags);
713 /* any more work to do? if not, exit the loop */
714 if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags)
715 || (!tty_stuffed && ap->tpkt != 0)))
717 /* more work to do, see if we can do it now */
718 if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
721 spin_unlock_bh(&ap->xmit_lock);
/* flush: tty write failed — drop buffered output and the packet */
725 clear_bit(XMIT_BUSY, &ap->xmit_flags);
729 clear_bit(XMIT_FULL, &ap->xmit_flags);
733 spin_unlock_bh(&ap->xmit_lock);
738 * Flush output from our internal buffers.
739 * Called for the TCFLSH ioctl. Can be entered in parallel
740 * but this is covered by the xmit_lock.
/*
 * Discard all buffered transmit data (for the TCFLSH ioctl). Safe to
 * enter in parallel — serialised by xmit_lock. If a packet was pending
 * we wake the generic layer since the channel can accept again.
 * NOTE(review): the actual freeing of ap->tpkt and resetting of the
 * buffer pointers is elided in this view.
 */
743 ppp_async_flush_output(struct asyncppp *ap)
747 spin_lock_bh(&ap->xmit_lock);
749 if (ap->tpkt != NULL) {
752 clear_bit(XMIT_FULL, &ap->xmit_flags);
755 spin_unlock_bh(&ap->xmit_lock);
/* presumably only reached when a packet was dropped — confirm
 * against the elided conditional */
757 ppp_output_wakeup(&ap->chan);
761 * Receive-side routines.
764 /* see how many ordinary chars there are at the start of buf */
/* see how many ordinary chars there are at the start of buf */
/*
 * "Ordinary" means: not FLAG, not ESCAPE, and not a control character
 * that the receive accm says the peer escapes (and whose unescaped
 * appearance is therefore noise to be dropped).
 */
766 scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
770 for (i = 0; i < count; ++i) {
772 if (c == PPP_ESCAPE || c == PPP_FLAG
773 || (c < 0x20 && (ap->raccm & (1 << c)) != 0))
779 /* called when a flag is seen - do end-of-packet processing */
/* called when a flag is seen - do end-of-packet processing */
/*
 * Validate the accumulated frame (minimum length, FCS), strip the FCS
 * and any address/control bytes, undo protocol-field compression, let
 * async_lcp_peek() inspect inbound LCP, then pass the skb up via
 * ppp_input(). On any error the frame is reported with
 * ppp_input_error().
 * NOTE(review): several source lines are elided in this view.
 */
781 process_input_packet(struct asyncppp *ap)
785 unsigned int len, fcs, proto;
/* frame was marked bad, or ended mid-escape, or we have no skb:
 * toss it */
790 if ((ap->state & (SC_TOSS | SC_ESCAPE)) || skb == 0) {
791 ap->state &= ~(SC_TOSS | SC_ESCAPE);
801 goto err; /* too short */
/* FCS over the whole frame including the transmitted FCS must
 * equal the residue PPP_GOODFCS */
803 for (; len > 0; --len)
804 fcs = PPP_FCS(fcs, *p++);
805 if (fcs != PPP_GOODFCS)
806 goto err; /* bad FCS */
807 skb_trim(skb, skb->len - 2);
809 /* check for address/control and protocol compression */
811 if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
812 /* chop off address/control */
815 p = skb_pull(skb, 2);
819 /* protocol is compressed */
820 skb_push(skb, 1)[0] = 0;
824 proto = (proto << 8) + p[1];
825 if (proto == PPP_LCP)
826 async_lcp_peek(ap, p, skb->len, 1);
829 /* all OK, give it to the generic layer */
830 ppp_input(&ap->chan, skb);
835 ppp_input_error(&ap->chan, code);
/*
 * Record a receive error: mark the current frame for discard and
 * notify the generic layer.
 */
839 input_error(struct asyncppp *ap, int code)
841 ap->state |= SC_TOSS;
842 ppp_input_error(&ap->chan, code);
845 /* Called when the tty driver has data for us. Runs parallel with the
846 other ldisc functions but will not be re-entered */
/* Called when the tty driver has data for us. Runs parallel with the
 other ldisc functions but will not be re-entered */
/*
 * HDLC deframer: updates the 8-bit-cleanness detection bits, copies
 * runs of ordinary characters into the receive skb in bulk, and
 * handles FLAG (end of packet), ESCAPE, and receive-accm characters
 * one at a time.
 * NOTE(review): many source lines, including the outer loop structure,
 * are elided in this view; the body is a partial excerpt.
 */
849 ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
850 char *flags, int count)
853 int c, i, j, n, s, f;
856 /* update bits used for 8-bit cleanness detection */
857 if (~ap->rbits & SC_RCV_BITS) {
859 for (i = 0; i < count; ++i) {
/* skip chars the driver flagged as errored */
861 if (flags != 0 && flags[i] != 0)
863 s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
/* parity of c via the 0x6996 nibble-parity trick */
864 c = ((c >> 4) ^ c) & 0xf;
865 s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
871 /* scan through and see how many chars we can do in bulk */
872 if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
875 n = scan_ordinary(ap, buf, count);
878 if (flags != 0 && (ap->state & SC_TOSS) == 0) {
879 /* check the flags to see if any char had an error */
880 for (j = 0; j < n; ++j)
881 if ((f = flags[j]) != 0)
888 } else if (n > 0 && (ap->state & SC_TOSS) == 0) {
889 /* stuff the chars in the skb */
892 skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
895 /* Try to get the payload 4-byte aligned */
896 if (buf[0] != PPP_ALLSTATIONS)
897 skb_reserve(skb, 2 + (buf[0] & 1));
900 if (n > skb_tailroom(skb)) {
901 /* packet overflowed MRU */
904 sp = skb_put(skb, n);
/* pending escape from the previous buffer applies to the
 * first char of this run */
906 if (ap->state & SC_ESCAPE) {
908 ap->state &= ~SC_ESCAPE;
918 process_input_packet(ap);
919 } else if (c == PPP_ESCAPE) {
920 ap->state |= SC_ESCAPE;
922 /* otherwise it's a char in the recv ACCM */
933 printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
938 * We look at LCP frames going past so that we can notice
939 * and react to the LCP configure-ack from the peer.
940 * In the situation where the peer has been sent a configure-ack
941 * already, LCP is up once it has sent its configure-ack
942 * so the immediately following packet can be sent with the
943 * configured LCP options. This allows us to process the following
944 * packet correctly without pppd needing to respond quickly.
946 * We only respond to the received configure-ack if we have just
947 * sent a configure-request, and the configure-ack contains the
948 * same data (this is checked using a 16-bit crc of the data).
950 #define CONFREQ 1 /* LCP code field values */
952 #define LCP_MRU 1 /* LCP option numbers */
953 #define LCP_ASYNCMAP 2
/*
 * Inspect LCP frames in both directions. Remembers a CRC of each
 * outbound configure-request; when the matching configure-ack arrives
 * (same CRC of the data from the ID field on), applies the negotiated
 * MRU/asyncmap options immediately so the next packet is framed
 * correctly without waiting for pppd.
 * NOTE(review): many source lines are elided in this view; the body is
 * a partial excerpt.
 */
955 static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
956 int len, int inbound)
958 int dlen, fcs, i, code;
961 data += 2; /* skip protocol bytes */
963 if (len < 4) /* 4 = code, ID, length */
966 if (code != CONFACK && code != CONFREQ)
968 dlen = (data[2] << 8) + data[3];
970 return; /* packet got truncated or length is bogus */
972 if (code == (inbound? CONFACK: CONFREQ)) {
974 * sent confreq or received confack:
975 * calculate the crc of the data from the ID field on.
978 for (i = 1; i < dlen; ++i)
979 fcs = PPP_FCS(fcs, data[i]);
982 /* outbound confreq - remember the crc for later */
987 /* received confack, check the crc */
993 return; /* not interested in received confreq */
995 /* process the options in the confack */
998 /* data[0] is code, data[1] is length */
999 while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
/* LCP_MRU: 16-bit value */
1002 val = (data[2] << 8) + data[3];
/* LCP_ASYNCMAP: 32-bit value */
1009 val = (data[2] << 24) + (data[3] << 16)
1010 + (data[4] << 8) + data[5];
/*
 * Module exit: unregister the N_PPP line discipline (2.4-era API:
 * registering a NULL ldisc unregisters it).
 */
1022 static void __exit ppp_async_cleanup(void)
1024 if (tty_register_ldisc(N_PPP, NULL) != 0)
1025 printk(KERN_ERR "failed to unregister PPP line discipline\n");
1028 module_init(ppp_async_init);
1029 module_exit(ppp_async_cleanup);