drivers/s390/net/netiucv.c
1 /*
2  * IUCV network driver
3  *
4  * Copyright IBM Corp. 2001, 2009
5  *
6  * Author(s):
7  *      Original netiucv driver:
8  *              Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
9  *      Sysfs integration and all bugs therein:
10  *              Cornelia Huck (cornelia.huck@de.ibm.com)
11  *      PM functions:
12  *              Ursula Braun (ursula.braun@de.ibm.com)
13  *
14  * Documentation used:
15  *  the source of the original IUCV driver by:
16  *    Stefan Hegewald <hegewald@de.ibm.com>
17  *    Hartmut Penner <hpenner@de.ibm.com>
18  *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
19  *    Martin Schwidefsky (schwidefsky@de.ibm.com)
20  *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
21  *
22  * This program is free software; you can redistribute it and/or modify
23  * it under the terms of the GNU General Public License as published by
24  * the Free Software Foundation; either version 2, or (at your option)
25  * any later version.
26  *
27  * This program is distributed in the hope that it will be useful,
28  * but WITHOUT ANY WARRANTY; without even the implied warranty of
29  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
30  * GNU General Public License for more details.
31  *
32  * You should have received a copy of the GNU General Public License
33  * along with this program; if not, write to the Free Software
34  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
35  *
36  */
37
38 #define KMSG_COMPONENT "netiucv"
39 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
40
41 #undef DEBUG
42
43 #include <linux/module.h>
44 #include <linux/init.h>
45 #include <linux/kernel.h>
46 #include <linux/slab.h>
47 #include <linux/errno.h>
48 #include <linux/types.h>
49 #include <linux/interrupt.h>
50 #include <linux/timer.h>
51 #include <linux/bitops.h>
52
53 #include <linux/signal.h>
54 #include <linux/string.h>
55 #include <linux/device.h>
56
57 #include <linux/ip.h>
58 #include <linux/if_arp.h>
59 #include <linux/tcp.h>
60 #include <linux/skbuff.h>
61 #include <linux/ctype.h>
62 #include <net/dst.h>
63
64 #include <asm/io.h>
65 #include <asm/uaccess.h>
66 #include <asm/ebcdic.h>
67
68 #include <net/iucv/iucv.h>
69 #include "fsm.h"
70
71 MODULE_AUTHOR
72     ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
73 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
74
75 /**
76  * Debug Facility stuff
77  */
78 #define IUCV_DBF_SETUP_NAME "iucv_setup"
79 #define IUCV_DBF_SETUP_LEN 64
80 #define IUCV_DBF_SETUP_PAGES 2
81 #define IUCV_DBF_SETUP_NR_AREAS 1
82 #define IUCV_DBF_SETUP_LEVEL 3
83
84 #define IUCV_DBF_DATA_NAME "iucv_data"
85 #define IUCV_DBF_DATA_LEN 128
86 #define IUCV_DBF_DATA_PAGES 2
87 #define IUCV_DBF_DATA_NR_AREAS 1
88 #define IUCV_DBF_DATA_LEVEL 2
89
90 #define IUCV_DBF_TRACE_NAME "iucv_trace"
91 #define IUCV_DBF_TRACE_LEN 16
92 #define IUCV_DBF_TRACE_PAGES 4
93 #define IUCV_DBF_TRACE_NR_AREAS 1
94 #define IUCV_DBF_TRACE_LEVEL 3
95
96 #define IUCV_DBF_TEXT(name,level,text) \
97         do { \
98                 debug_text_event(iucv_dbf_##name,level,text); \
99         } while (0)
100
101 #define IUCV_DBF_HEX(name,level,addr,len) \
102         do { \
103                 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
104         } while (0)
105
106 DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
107
108 /* Allows sorting out low debug levels early to avoid wasted sprintfs */
109 static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
110 {
111         return (level <= dbf_grp->level);
112 }
113
114 #define IUCV_DBF_TEXT_(name, level, text...) \
115         do { \
116                 if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
117                         char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
118                         sprintf(__buf, text); \
119                         debug_text_event(iucv_dbf_##name, level, __buf); \
120                         put_cpu_var(iucv_dbf_txt_buf); \
121                 } \
122         } while (0)
123
124 #define IUCV_DBF_SPRINTF(name,level,text...) \
125         do { \
126                 debug_sprintf_event(iucv_dbf_trace, level, text ); \
128         } while (0)
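/*
 * Illustrative usage only (mirroring calls made further down in this
 * driver): IUCV_DBF_TEXT logs a fixed string, IUCV_DBF_TEXT_ formats
 * into the per-cpu buffer first, and IUCV_DBF_HEX dumps raw bytes.
 *
 *	IUCV_DBF_TEXT(trace, 3, __func__);
 *	IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
 *	IUCV_DBF_HEX(data, 2, skb->data, 16);
 */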
129
130 /**
131  * some more debug stuff
132  */
133 #define IUCV_HEXDUMP16(importance,header,ptr) \
134 PRINT_##importance(header "%02x %02x %02x %02x  %02x %02x %02x %02x  " \
135                    "%02x %02x %02x %02x  %02x %02x %02x %02x\n", \
136                    *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
137                    *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
138                    *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
139                    *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
140                    *(((char*)ptr)+12),*(((char*)ptr)+13), \
141                    *(((char*)ptr)+14),*(((char*)ptr)+15)); \
142 PRINT_##importance(header "%02x %02x %02x %02x  %02x %02x %02x %02x  " \
143                    "%02x %02x %02x %02x  %02x %02x %02x %02x\n", \
144                    *(((char*)ptr)+16),*(((char*)ptr)+17), \
145                    *(((char*)ptr)+18),*(((char*)ptr)+19), \
146                    *(((char*)ptr)+20),*(((char*)ptr)+21), \
147                    *(((char*)ptr)+22),*(((char*)ptr)+23), \
148                    *(((char*)ptr)+24),*(((char*)ptr)+25), \
149                    *(((char*)ptr)+26),*(((char*)ptr)+27), \
150                    *(((char*)ptr)+28),*(((char*)ptr)+29), \
151                    *(((char*)ptr)+30),*(((char*)ptr)+31));
152
153 #define PRINTK_HEADER " iucv: "       /* for debugging */
154
155 /* dummy device to make sure netiucv_pm functions are called */
156 static struct device *netiucv_dev;
157
158 static int netiucv_pm_prepare(struct device *);
159 static void netiucv_pm_complete(struct device *);
160 static int netiucv_pm_freeze(struct device *);
161 static int netiucv_pm_restore_thaw(struct device *);
162
163 static const struct dev_pm_ops netiucv_pm_ops = {
164         .prepare = netiucv_pm_prepare,
165         .complete = netiucv_pm_complete,
166         .freeze = netiucv_pm_freeze,
167         .thaw = netiucv_pm_restore_thaw,
168         .restore = netiucv_pm_restore_thaw,
169 };
170
171 static struct device_driver netiucv_driver = {
172         .owner = THIS_MODULE,
173         .name = "netiucv",
174         .bus  = &iucv_bus,
175         .pm = &netiucv_pm_ops,
176 };
177
178 static int netiucv_callback_connreq(struct iucv_path *,
179                                     u8 ipvmid[8], u8 ipuser[16]);
180 static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
181 static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
182 static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
183 static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
184 static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
185 static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
186
187 static struct iucv_handler netiucv_handler = {
188         .path_pending     = netiucv_callback_connreq,
189         .path_complete    = netiucv_callback_connack,
190         .path_severed     = netiucv_callback_connrej,
191         .path_quiesced    = netiucv_callback_connsusp,
192         .path_resumed     = netiucv_callback_connres,
193         .message_pending  = netiucv_callback_rx,
194         .message_complete = netiucv_callback_txdone
195 };
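/*
 * The handler is bound to an IUCV path either when this side initiates
 * a connection or when it accepts a pending one; both calls appear
 * further down in this file and are repeated here only as a sketch:
 *
 *	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
 *			       NULL, conn->userdata, conn);
 *	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata, conn);
 *
 * The final argument ends up in path->private, which the callback
 * wrappers below cast back to the owning struct iucv_connection.
 */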
196
197 /**
198  * Per connection profiling data
199  */
200 struct connection_profile {
201         unsigned long maxmulti;
202         unsigned long maxcqueue;
203         unsigned long doios_single;
204         unsigned long doios_multi;
205         unsigned long txlen;
206         unsigned long tx_time;
207         struct timespec send_stamp;
208         unsigned long tx_pending;
209         unsigned long tx_max_pending;
210 };
211
212 /**
213  * Representation of one iucv connection
214  */
215 struct iucv_connection {
216         struct list_head          list;
217         struct iucv_path          *path;
218         struct sk_buff            *rx_buff;
219         struct sk_buff            *tx_buff;
220         struct sk_buff_head       collect_queue;
221         struct sk_buff_head       commit_queue;
222         spinlock_t                collect_lock;
223         int                       collect_len;
224         int                       max_buffsize;
225         fsm_timer                 timer;
226         fsm_instance              *fsm;
227         struct net_device         *netdev;
228         struct connection_profile prof;
229         char                      userid[9];
230         char                      userdata[17];
231 };
232
233 /**
234  * Linked list of all connection structs.
235  */
236 static LIST_HEAD(iucv_connection_list);
237 static DEFINE_RWLOCK(iucv_connection_rwlock);
238
239 /**
240  * Representation of event-data for the
241  * connection state machine.
242  */
243 struct iucv_event {
244         struct iucv_connection *conn;
245         void                   *data;
246 };
247
248 /**
249  * Private part of the network device structure
250  */
251 struct netiucv_priv {
252         struct net_device_stats stats;
253         unsigned long           tbusy;
254         fsm_instance            *fsm;
255         struct iucv_connection  *conn;
256         struct device           *dev;
257         int                      pm_state;
258 };
259
260 /**
261  * Link level header for a packet.
262  */
263 struct ll_header {
264         u16 next;
265 };
266
267 #define NETIUCV_HDRLEN           (sizeof(struct ll_header))
268 #define NETIUCV_BUFSIZE_MAX      65537
269 #define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
270 #define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
271 #define NETIUCV_MTU_DEFAULT      9216
272 #define NETIUCV_QUEUELEN_DEFAULT 50
273 #define NETIUCV_TIMEOUT_5SEC     5000
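/*
 * Buffer framing sketch (derived from netiucv_transmit_skb() and
 * conn_action_txdone() below): every packet in a send buffer is
 * preceded by an ll_header whose 'next' field holds the offset, from
 * the start of the buffer, of the following header; a trailing header
 * with next == 0 terminates the buffer.
 *
 *	+--------+-----------+--------+-----------+--------+
 *	| hdr(a) | packet #1 | hdr(b) | packet #2 | hdr(0) |
 *	+--------+-----------+--------+-----------+--------+
 *	a = NETIUCV_HDRLEN + len(#1), b = a + NETIUCV_HDRLEN + len(#2)
 */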
274
275 /**
276  * Compatibility helpers for busy handling
277  * of network devices.
278  */
279 static inline void netiucv_clear_busy(struct net_device *dev)
280 {
281         struct netiucv_priv *priv = netdev_priv(dev);
282         clear_bit(0, &priv->tbusy);
283         netif_wake_queue(dev);
284 }
285
286 static inline int netiucv_test_and_set_busy(struct net_device *dev)
287 {
288         struct netiucv_priv *priv = netdev_priv(dev);
289         netif_stop_queue(dev);
290         return test_and_set_bit(0, &priv->tbusy);
291 }
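/*
 * Typical use in the transmit path (see netiucv_tx() below): the queue
 * is stopped while a frame is handed to the connection and woken again
 * once the transmit attempt has finished.
 *
 *	if (netiucv_test_and_set_busy(dev))
 *		return NETDEV_TX_BUSY;
 *	rc = netiucv_transmit_skb(privptr->conn, skb);
 *	netiucv_clear_busy(dev);
 */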
292
293 static u8 iucvMagic_ascii[16] = {
294         0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
295         0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
296 };
297
298 static u8 iucvMagic_ebcdic[16] = {
299         0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
300         0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
301 };
302
303 /**
304  * Convert an iucv userId to its printable
305  * form (strip whitespace at end).
306  *
307  * @param name An iucv userId
308  * @param len  Length of the userId
309  * @returns The printable string (static data!!)
310  */
311 static char *netiucv_printname(char *name, int len)
312 {
313         static char tmp[17];
314         char *p = tmp;
315         memcpy(tmp, name, len);
316         tmp[len] = '\0';
317         while (*p && ((p - tmp) < len) && (!isspace(*p)))
318                 p++;
319         *p = '\0';
320         return tmp;
321 }
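/*
 * Example with illustrative values: a blank-padded VM user ID such as
 * "VMUSER1 " is reduced to its printable form, so
 *
 *	netiucv_printname("VMUSER1 ", 8)
 *
 * returns "VMUSER1".  The result lives in static storage and is
 * overwritten by the next call.
 */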
322
323 static char *netiucv_printuser(struct iucv_connection *conn)
324 {
325         static char tmp_uid[9];
326         static char tmp_udat[17];
327         static char buf[100];
328
329         if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
330                 tmp_uid[8] = '\0';
331                 tmp_udat[16] = '\0';
332                 memcpy(tmp_uid, conn->userid, 8);
333                 memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
334                 memcpy(tmp_udat, conn->userdata, 16);
335                 EBCASC(tmp_udat, 16);
336                 memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
337                 sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
338                 return buf;
339         } else
340                 return netiucv_printname(conn->userid, 8);
341 }
342
343 /**
344  * States of the interface statemachine.
345  */
346 enum dev_states {
347         DEV_STATE_STOPPED,
348         DEV_STATE_STARTWAIT,
349         DEV_STATE_STOPWAIT,
350         DEV_STATE_RUNNING,
351         /**
352          * MUST always be the last element!
353          */
354         NR_DEV_STATES
355 };
356
357 static const char *dev_state_names[] = {
358         "Stopped",
359         "StartWait",
360         "StopWait",
361         "Running",
362 };
363
364 /**
365  * Events of the interface statemachine.
366  */
367 enum dev_events {
368         DEV_EVENT_START,
369         DEV_EVENT_STOP,
370         DEV_EVENT_CONUP,
371         DEV_EVENT_CONDOWN,
372         /**
373          * MUST always be the last element!
374          */
375         NR_DEV_EVENTS
376 };
377
378 static const char *dev_event_names[] = {
379         "Start",
380         "Stop",
381         "Connection up",
382         "Connection down",
383 };
384
385 /**
386  * Events of the connection statemachine
387  */
388 enum conn_events {
389         /**
390          * Events representing callbacks from the
391          * lowlevel iucv layer
392          */
393         CONN_EVENT_CONN_REQ,
394         CONN_EVENT_CONN_ACK,
395         CONN_EVENT_CONN_REJ,
396         CONN_EVENT_CONN_SUS,
397         CONN_EVENT_CONN_RES,
398         CONN_EVENT_RX,
399         CONN_EVENT_TXDONE,
400
401         /**
402          * Events representing error return codes from
403          * calls to the lowlevel iucv layer.
404          */
405
406         /**
407          * Event, representing timer expiry.
408          */
409         CONN_EVENT_TIMER,
410
411         /**
412          * Events representing commands from upper levels.
413          */
414         CONN_EVENT_START,
415         CONN_EVENT_STOP,
416
417         /**
418          * MUST always be the last element!
419          */
420         NR_CONN_EVENTS,
421 };
422
423 static const char *conn_event_names[] = {
424         "Remote connection request",
425         "Remote connection acknowledge",
426         "Remote connection reject",
427         "Connection suspended",
428         "Connection resumed",
429         "Data received",
430         "Data sent",
431
432         "Timer",
433
434         "Start",
435         "Stop",
436 };
437
438 /**
439  * States of the connection statemachine.
440  */
441 enum conn_states {
442         /**
443          * Connection not assigned to any device,
444          * initial state, invalid
445          */
446         CONN_STATE_INVALID,
447
448         /**
449          * Userid assigned but not operating
450          */
451         CONN_STATE_STOPPED,
452
453         /**
454          * Connection registered,
455          * no connection request sent yet,
456          * no connection request received
457          */
458         CONN_STATE_STARTWAIT,
459
460         /**
461          * Connection registered and connection request sent,
462          * no acknowledge and no connection request received yet.
463          */
464         CONN_STATE_SETUPWAIT,
465
466         /**
467          * Connection up and running idle
468          */
469         CONN_STATE_IDLE,
470
471         /**
472          * Data sent, awaiting CONN_EVENT_TXDONE
473          */
474         CONN_STATE_TX,
475
476         /**
477          * Error during registration.
478          */
479         CONN_STATE_REGERR,
480
481         /**
482          * Error during connection setup.
483          */
484         CONN_STATE_CONNERR,
485
486         /**
487          * MUST always be the last element!
488          */
489         NR_CONN_STATES,
490 };
491
492 static const char *conn_state_names[] = {
493         "Invalid",
494         "Stopped",
495         "StartWait",
496         "SetupWait",
497         "Idle",
498         "TX",
500         "Registration error",
501         "Connect error",
502 };
503
504
505 /**
506  * Debug Facility Stuff
507  */
508 static debug_info_t *iucv_dbf_setup = NULL;
509 static debug_info_t *iucv_dbf_data = NULL;
510 static debug_info_t *iucv_dbf_trace = NULL;
511
512 DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
513
514 static void iucv_unregister_dbf_views(void)
515 {
516         if (iucv_dbf_setup)
517                 debug_unregister(iucv_dbf_setup);
518         if (iucv_dbf_data)
519                 debug_unregister(iucv_dbf_data);
520         if (iucv_dbf_trace)
521                 debug_unregister(iucv_dbf_trace);
522 }
523 static int iucv_register_dbf_views(void)
524 {
525         iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
526                                         IUCV_DBF_SETUP_PAGES,
527                                         IUCV_DBF_SETUP_NR_AREAS,
528                                         IUCV_DBF_SETUP_LEN);
529         iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
530                                        IUCV_DBF_DATA_PAGES,
531                                        IUCV_DBF_DATA_NR_AREAS,
532                                        IUCV_DBF_DATA_LEN);
533         iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
534                                         IUCV_DBF_TRACE_PAGES,
535                                         IUCV_DBF_TRACE_NR_AREAS,
536                                         IUCV_DBF_TRACE_LEN);
537
538         if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
539             (iucv_dbf_trace == NULL)) {
540                 iucv_unregister_dbf_views();
541                 return -ENOMEM;
542         }
543         debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
544         debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
545
546         debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
547         debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
548
549         debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
550         debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
551
552         return 0;
553 }
554
555 /*
556  * Callback-wrappers, called from lowlevel iucv layer.
557  */
558
559 static void netiucv_callback_rx(struct iucv_path *path,
560                                 struct iucv_message *msg)
561 {
562         struct iucv_connection *conn = path->private;
563         struct iucv_event ev;
564
565         ev.conn = conn;
566         ev.data = msg;
567         fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
568 }
569
570 static void netiucv_callback_txdone(struct iucv_path *path,
571                                     struct iucv_message *msg)
572 {
573         struct iucv_connection *conn = path->private;
574         struct iucv_event ev;
575
576         ev.conn = conn;
577         ev.data = msg;
578         fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
579 }
580
581 static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
582 {
583         struct iucv_connection *conn = path->private;
584
585         fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
586 }
587
588 static int netiucv_callback_connreq(struct iucv_path *path,
589                                     u8 ipvmid[8], u8 ipuser[16])
590 {
591         struct iucv_connection *conn = path->private;
592         struct iucv_event ev;
593         static char tmp_user[9];
594         static char tmp_udat[17];
595         int rc;
596
597         rc = -EINVAL;
598         memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
599         memcpy(tmp_udat, ipuser, 16);
600         EBCASC(tmp_udat, 16);
601         read_lock_bh(&iucv_connection_rwlock);
602         list_for_each_entry(conn, &iucv_connection_list, list) {
603                 if (strncmp(ipvmid, conn->userid, 8) ||
604                     strncmp(ipuser, conn->userdata, 16))
605                         continue;
606                 /* Found a matching connection for this path. */
607                 conn->path = path;
608                 ev.conn = conn;
609                 ev.data = path;
610                 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
611                 rc = 0;
612         }
613         IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
614                        tmp_user, netiucv_printname(tmp_udat, 16));
615         read_unlock_bh(&iucv_connection_rwlock);
616         return rc;
617 }
618
619 static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
620 {
621         struct iucv_connection *conn = path->private;
622
623         fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
624 }
625
626 static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
627 {
628         struct iucv_connection *conn = path->private;
629
630         fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
631 }
632
633 static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
634 {
635         struct iucv_connection *conn = path->private;
636
637         fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
638 }
639
640 /**
641  * NOP action for statemachines
642  */
643 static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
644 {
645 }
646
647 /*
648  * Actions of the connection statemachine
649  */
650
651 /**
652  * netiucv_unpack_skb
653  * @conn: The connection where this skb has been received.
654  * @pskb: The received skb.
655  *
656  * Unpack a just received skb and hand it over to upper layers.
657  * Helper function for conn_action_rx.
658  */
659 static void netiucv_unpack_skb(struct iucv_connection *conn,
660                                struct sk_buff *pskb)
661 {
662         struct net_device     *dev = conn->netdev;
663         struct netiucv_priv   *privptr = netdev_priv(dev);
664         u16 offset = 0;
665
666         skb_put(pskb, NETIUCV_HDRLEN);
667         pskb->dev = dev;
668         pskb->ip_summed = CHECKSUM_NONE;
669         pskb->protocol = ntohs(ETH_P_IP);
670
671         while (1) {
672                 struct sk_buff *skb;
673                 struct ll_header *header = (struct ll_header *) pskb->data;
674
675                 if (!header->next)
676                         break;
677
678                 skb_pull(pskb, NETIUCV_HDRLEN);
679                 header->next -= offset;
680                 offset += header->next;
681                 header->next -= NETIUCV_HDRLEN;
682                 if (skb_tailroom(pskb) < header->next) {
683                         IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
684                                 header->next, skb_tailroom(pskb));
685                         return;
686                 }
687                 skb_put(pskb, header->next);
688                 skb_reset_mac_header(pskb);
689                 skb = dev_alloc_skb(pskb->len);
690                 if (!skb) {
691                         IUCV_DBF_TEXT(data, 2,
692                                 "Out of memory in netiucv_unpack_skb\n");
693                         privptr->stats.rx_dropped++;
694                         return;
695                 }
696                 skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
697                                           pskb->len);
698                 skb_reset_mac_header(skb);
699                 skb->dev = pskb->dev;
700                 skb->protocol = pskb->protocol;
701                 pskb->ip_summed = CHECKSUM_UNNECESSARY;
702                 privptr->stats.rx_packets++;
703                 privptr->stats.rx_bytes += skb->len;
704                 /*
705                  * Since receiving is always initiated from a tasklet (in iucv.c),
706                  * we must use netif_rx_ni() instead of netif_rx()
707                  */
708                 netif_rx_ni(skb);
709                 skb_pull(pskb, header->next);
710                 skb_put(pskb, NETIUCV_HDRLEN);
711         }
712 }
713
714 static void conn_action_rx(fsm_instance *fi, int event, void *arg)
715 {
716         struct iucv_event *ev = arg;
717         struct iucv_connection *conn = ev->conn;
718         struct iucv_message *msg = ev->data;
719         struct netiucv_priv *privptr = netdev_priv(conn->netdev);
720         int rc;
721
722         IUCV_DBF_TEXT(trace, 4, __func__);
723
724         if (!conn->netdev) {
725                 iucv_message_reject(conn->path, msg);
726                 IUCV_DBF_TEXT(data, 2,
727                               "Received data for unlinked connection\n");
728                 return;
729         }
730         if (msg->length > conn->max_buffsize) {
731                 iucv_message_reject(conn->path, msg);
732                 privptr->stats.rx_dropped++;
733                 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
734                                msg->length, conn->max_buffsize);
735                 return;
736         }
737         conn->rx_buff->data = conn->rx_buff->head;
738         skb_reset_tail_pointer(conn->rx_buff);
739         conn->rx_buff->len = 0;
740         rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
741                                   msg->length, NULL);
742         if (rc || msg->length < 5) {
743                 privptr->stats.rx_errors++;
744                 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
745                 return;
746         }
747         netiucv_unpack_skb(conn, conn->rx_buff);
748 }
749
750 static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
751 {
752         struct iucv_event *ev = arg;
753         struct iucv_connection *conn = ev->conn;
754         struct iucv_message *msg = ev->data;
755         struct iucv_message txmsg;
756         struct netiucv_priv *privptr = NULL;
757         u32 single_flag = msg->tag;
758         u32 txbytes = 0;
759         u32 txpackets = 0;
760         u32 stat_maxcq = 0;
761         struct sk_buff *skb;
762         unsigned long saveflags;
763         struct ll_header header;
764         int rc;
765
766         IUCV_DBF_TEXT(trace, 4, __func__);
767
768         if (conn && conn->netdev)
769                 privptr = netdev_priv(conn->netdev);
770         conn->prof.tx_pending--;
771         if (single_flag) {
772                 if ((skb = skb_dequeue(&conn->commit_queue))) {
773                         atomic_dec(&skb->users);
774                         if (privptr) {
775                                 privptr->stats.tx_packets++;
776                                 privptr->stats.tx_bytes +=
777                                         (skb->len - NETIUCV_HDRLEN
778                                                   - NETIUCV_HDRLEN);
779                         }
780                         dev_kfree_skb_any(skb);
781                 }
782         }
783         conn->tx_buff->data = conn->tx_buff->head;
784         skb_reset_tail_pointer(conn->tx_buff);
785         conn->tx_buff->len = 0;
786         spin_lock_irqsave(&conn->collect_lock, saveflags);
787         while ((skb = skb_dequeue(&conn->collect_queue))) {
788                 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
789                 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
790                        NETIUCV_HDRLEN);
791                 skb_copy_from_linear_data(skb,
792                                           skb_put(conn->tx_buff, skb->len),
793                                           skb->len);
794                 txbytes += skb->len;
795                 txpackets++;
796                 stat_maxcq++;
797                 atomic_dec(&skb->users);
798                 dev_kfree_skb_any(skb);
799         }
800         if (conn->collect_len > conn->prof.maxmulti)
801                 conn->prof.maxmulti = conn->collect_len;
802         conn->collect_len = 0;
803         spin_unlock_irqrestore(&conn->collect_lock, saveflags);
804         if (conn->tx_buff->len == 0) {
805                 fsm_newstate(fi, CONN_STATE_IDLE);
806                 return;
807         }
808
809         header.next = 0;
810         memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
811         conn->prof.send_stamp = current_kernel_time();
812         txmsg.class = 0;
813         txmsg.tag = 0;
814         rc = iucv_message_send(conn->path, &txmsg, 0, 0,
815                                conn->tx_buff->data, conn->tx_buff->len);
816         conn->prof.doios_multi++;
817         conn->prof.txlen += conn->tx_buff->len;
818         conn->prof.tx_pending++;
819         if (conn->prof.tx_pending > conn->prof.tx_max_pending)
820                 conn->prof.tx_max_pending = conn->prof.tx_pending;
821         if (rc) {
822                 conn->prof.tx_pending--;
823                 fsm_newstate(fi, CONN_STATE_IDLE);
824                 if (privptr)
825                         privptr->stats.tx_errors += txpackets;
826                 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
827         } else {
828                 if (privptr) {
829                         privptr->stats.tx_packets += txpackets;
830                         privptr->stats.tx_bytes += txbytes;
831                 }
832                 if (stat_maxcq > conn->prof.maxcqueue)
833                         conn->prof.maxcqueue = stat_maxcq;
834         }
835 }
836
837 static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
838 {
839         struct iucv_event *ev = arg;
840         struct iucv_connection *conn = ev->conn;
841         struct iucv_path *path = ev->data;
842         struct net_device *netdev = conn->netdev;
843         struct netiucv_priv *privptr = netdev_priv(netdev);
844         int rc;
845
846         IUCV_DBF_TEXT(trace, 3, __func__);
847
848         conn->path = path;
849         path->msglim = NETIUCV_QUEUELEN_DEFAULT;
850         path->flags = 0;
851         rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
852         if (rc) {
853                 IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
854                 return;
855         }
856         fsm_newstate(fi, CONN_STATE_IDLE);
857         netdev->tx_queue_len = conn->path->msglim;
858         fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
859 }
860
861 static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
862 {
863         struct iucv_event *ev = arg;
864         struct iucv_path *path = ev->data;
865
866         IUCV_DBF_TEXT(trace, 3, __func__);
867         iucv_path_sever(path, NULL);
868 }
869
870 static void conn_action_connack(fsm_instance *fi, int event, void *arg)
871 {
872         struct iucv_connection *conn = arg;
873         struct net_device *netdev = conn->netdev;
874         struct netiucv_priv *privptr = netdev_priv(netdev);
875
876         IUCV_DBF_TEXT(trace, 3, __func__);
877         fsm_deltimer(&conn->timer);
878         fsm_newstate(fi, CONN_STATE_IDLE);
879         netdev->tx_queue_len = conn->path->msglim;
880         fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
881 }
882
883 static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
884 {
885         struct iucv_connection *conn = arg;
886
887         IUCV_DBF_TEXT(trace, 3, __func__);
888         fsm_deltimer(&conn->timer);
889         iucv_path_sever(conn->path, conn->userdata);
890         fsm_newstate(fi, CONN_STATE_STARTWAIT);
891 }
892
893 static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
894 {
895         struct iucv_connection *conn = arg;
896         struct net_device *netdev = conn->netdev;
897         struct netiucv_priv *privptr = netdev_priv(netdev);
898
899         IUCV_DBF_TEXT(trace, 3, __func__);
900
901         fsm_deltimer(&conn->timer);
902         iucv_path_sever(conn->path, conn->userdata);
903         dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
904                                "connection\n", netiucv_printuser(conn));
905         IUCV_DBF_TEXT(data, 2,
906                       "conn_action_connsever: Remote dropped connection\n");
907         fsm_newstate(fi, CONN_STATE_STARTWAIT);
908         fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
909 }
910
911 static void conn_action_start(fsm_instance *fi, int event, void *arg)
912 {
913         struct iucv_connection *conn = arg;
914         struct net_device *netdev = conn->netdev;
915         struct netiucv_priv *privptr = netdev_priv(netdev);
916         int rc;
917
918         IUCV_DBF_TEXT(trace, 3, __func__);
919
920         fsm_newstate(fi, CONN_STATE_STARTWAIT);
921
922         /*
923          * We must set the state before calling iucv_connect because the
924          * callback handler could be called at any point after the connection
925          * request is sent
926          */
927
928         fsm_newstate(fi, CONN_STATE_SETUPWAIT);
929         conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
930         IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
931                 netdev->name, netiucv_printuser(conn));
932
933         rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
934                                NULL, conn->userdata, conn);
935         switch (rc) {
936         case 0:
937                 netdev->tx_queue_len = conn->path->msglim;
938                 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
939                              CONN_EVENT_TIMER, conn);
940                 return;
941         case 11:
942                 dev_warn(privptr->dev,
943                         "The IUCV device failed to connect to z/VM guest %s\n",
944                         netiucv_printname(conn->userid, 8));
945                 fsm_newstate(fi, CONN_STATE_STARTWAIT);
946                 break;
947         case 12:
948                 dev_warn(privptr->dev,
949                         "The IUCV device failed to connect to the peer on z/VM"
950                         " guest %s\n", netiucv_printname(conn->userid, 8));
951                 fsm_newstate(fi, CONN_STATE_STARTWAIT);
952                 break;
953         case 13:
954                 dev_err(privptr->dev,
955                         "Connecting the IUCV device would exceed the maximum"
956                         " number of IUCV connections\n");
957                 fsm_newstate(fi, CONN_STATE_CONNERR);
958                 break;
959         case 14:
960                 dev_err(privptr->dev,
961                         "z/VM guest %s has too many IUCV connections"
962                         " to connect with the IUCV device\n",
963                         netiucv_printname(conn->userid, 8));
964                 fsm_newstate(fi, CONN_STATE_CONNERR);
965                 break;
966         case 15:
967                 dev_err(privptr->dev,
968                         "The IUCV device cannot connect to a z/VM guest with no"
969                         " IUCV authorization\n");
970                 fsm_newstate(fi, CONN_STATE_CONNERR);
971                 break;
972         default:
973                 dev_err(privptr->dev,
974                         "Connecting the IUCV device failed with error %d\n",
975                         rc);
976                 fsm_newstate(fi, CONN_STATE_CONNERR);
977                 break;
978         }
979         IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
980         kfree(conn->path);
981         conn->path = NULL;
982 }
983
984 static void netiucv_purge_skb_queue(struct sk_buff_head *q)
985 {
986         struct sk_buff *skb;
987
988         while ((skb = skb_dequeue(q))) {
989                 atomic_dec(&skb->users);
990                 dev_kfree_skb_any(skb);
991         }
992 }
993
994 static void conn_action_stop(fsm_instance *fi, int event, void *arg)
995 {
996         struct iucv_event *ev = arg;
997         struct iucv_connection *conn = ev->conn;
998         struct net_device *netdev = conn->netdev;
999         struct netiucv_priv *privptr = netdev_priv(netdev);
1000
1001         IUCV_DBF_TEXT(trace, 3, __func__);
1002
1003         fsm_deltimer(&conn->timer);
1004         fsm_newstate(fi, CONN_STATE_STOPPED);
1005         netiucv_purge_skb_queue(&conn->collect_queue);
1006         if (conn->path) {
1007                 IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
1008                 iucv_path_sever(conn->path, conn->userdata);
1009                 kfree(conn->path);
1010                 conn->path = NULL;
1011         }
1012         netiucv_purge_skb_queue(&conn->commit_queue);
1013         fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
1014 }
1015
1016 static void conn_action_inval(fsm_instance *fi, int event, void *arg)
1017 {
1018         struct iucv_connection *conn = arg;
1019         struct net_device *netdev = conn->netdev;
1020
1021         IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
1022                 netdev->name, conn->userid);
1023 }
1024
1025 static const fsm_node conn_fsm[] = {
1026         { CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
1027         { CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
1028
1029         { CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
1030         { CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
1031         { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
1032         { CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
1033         { CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
1034         { CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
1035         { CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
1036
1037         { CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
1038         { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
1039         { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
1040         { CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
1041         { CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
1042
1043         { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
1044         { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
1045
1046         { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
1047         { CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
1048         { CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
1049
1050         { CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
1051         { CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
1052
1053         { CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
1054         { CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
1055 };
1056
1057 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
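/*
 * Dispatch is a (state, event) lookup in the table above; for example,
 * a CONN_EVENT_TXDONE raised while the connection is in CONN_STATE_TX
 * runs conn_action_txdone():
 *
 *	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
 *
 * (state, event) pairs without a row in the table have no action
 * registered for them.
 */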
1058
1059
1060 /*
1061  * Actions for interface - statemachine.
1062  */
1063
1064 /**
1065  * dev_action_start
1066  * @fi: An instance of an interface statemachine.
1067  * @event: The event that just happened.
1068  * @arg: Generic pointer, cast from struct net_device * upon call.
1069  *
1070  * Start up the connection by sending CONN_EVENT_START to it.
1071  */
1072 static void dev_action_start(fsm_instance *fi, int event, void *arg)
1073 {
1074         struct net_device   *dev = arg;
1075         struct netiucv_priv *privptr = netdev_priv(dev);
1076
1077         IUCV_DBF_TEXT(trace, 3, __func__);
1078
1079         fsm_newstate(fi, DEV_STATE_STARTWAIT);
1080         fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1081 }
1082
1083 /**
1084  * Shut down the connection by sending CONN_EVENT_STOP to it.
1085  *
1086  * @param fi    An instance of an interface statemachine.
1087  * @param event The event that just happened.
1088  * @param arg   Generic pointer, cast from struct net_device * upon call.
1089  */
1090 static void
1091 dev_action_stop(fsm_instance *fi, int event, void *arg)
1092 {
1093         struct net_device   *dev = arg;
1094         struct netiucv_priv *privptr = netdev_priv(dev);
1095         struct iucv_event   ev;
1096
1097         IUCV_DBF_TEXT(trace, 3, __func__);
1098
1099         ev.conn = privptr->conn;
1100
1101         fsm_newstate(fi, DEV_STATE_STOPWAIT);
1102         fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1103 }
1104
1105 /**
1106  * Called from connection statemachine
1107  * when a connection is up and running.
1108  *
1109  * @param fi    An instance of an interface statemachine.
1110  * @param event The event that just happened.
1111  * @param arg   Generic pointer, cast from struct net_device * upon call.
1112  */
1113 static void
1114 dev_action_connup(fsm_instance *fi, int event, void *arg)
1115 {
1116         struct net_device   *dev = arg;
1117         struct netiucv_priv *privptr = netdev_priv(dev);
1118
1119         IUCV_DBF_TEXT(trace, 3, __func__);
1120
1121         switch (fsm_getstate(fi)) {
1122                 case DEV_STATE_STARTWAIT:
1123                         fsm_newstate(fi, DEV_STATE_RUNNING);
1124                         dev_info(privptr->dev,
1125                                 "The IUCV device has been connected"
1126                                 " successfully to %s\n",
1127                                 netiucv_printuser(privptr->conn));
1128                         IUCV_DBF_TEXT(setup, 3,
1129                                 "connection is up and running\n");
1130                         break;
1131                 case DEV_STATE_STOPWAIT:
1132                         IUCV_DBF_TEXT(data, 2,
1133                                 "dev_action_connup: in DEV_STATE_STOPWAIT\n");
1134                         break;
1135         }
1136 }
1137
1138 /**
1139  * Called from connection statemachine
1140  * when a connection has been shutdown.
1141  *
1142  * @param fi    An instance of an interface statemachine.
1143  * @param event The event that just happened.
1144  * @param arg   Generic pointer, cast from struct net_device * upon call.
1145  */
1146 static void
1147 dev_action_conndown(fsm_instance *fi, int event, void *arg)
1148 {
1149         IUCV_DBF_TEXT(trace, 3, __func__);
1150
1151         switch (fsm_getstate(fi)) {
1152                 case DEV_STATE_RUNNING:
1153                         fsm_newstate(fi, DEV_STATE_STARTWAIT);
1154                         break;
1155                 case DEV_STATE_STOPWAIT:
1156                         fsm_newstate(fi, DEV_STATE_STOPPED);
1157                         IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1158                         break;
1159         }
1160 }
1161
1162 static const fsm_node dev_fsm[] = {
1163         { DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },
1164
1165         { DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
1166         { DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },
1167
1168         { DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
1169         { DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },
1170
1171         { DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
1172         { DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
1173         { DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   netiucv_action_nop  },
1174 };
1175
1176 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1177
1178 /**
1179  * Transmit a packet.
1180  * This is a helper function for netiucv_tx().
1181  *
1182  * @param conn Connection to be used for sending.
1183  * @param skb Pointer to struct sk_buff of packet to send.
1184  *            The linklevel header has already been set up
1185  *            by netiucv_tx().
1186  *
1187  * @return 0 on success, -ERRNO on failure.
1188  */
1189 static int netiucv_transmit_skb(struct iucv_connection *conn,
1190                                 struct sk_buff *skb)
1191 {
1192         struct iucv_message msg;
1193         unsigned long saveflags;
1194         struct ll_header header;
1195         int rc;
1196
1197         if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1198                 int l = skb->len + NETIUCV_HDRLEN;
1199
1200                 spin_lock_irqsave(&conn->collect_lock, saveflags);
1201                 if (conn->collect_len + l >
1202                     (conn->max_buffsize - NETIUCV_HDRLEN)) {
1203                         rc = -EBUSY;
1204                         IUCV_DBF_TEXT(data, 2,
1205                                       "EBUSY from netiucv_transmit_skb\n");
1206                 } else {
1207                         atomic_inc(&skb->users);
1208                         skb_queue_tail(&conn->collect_queue, skb);
1209                         conn->collect_len += l;
1210                         rc = 0;
1211                 }
1212                 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1213         } else {
1214                 struct sk_buff *nskb = skb;
1215                 /**
1216                  * Copy the skb to a newly allocated skb in lowmem only if the
1217                  * data is located above 2G in memory or tailroom is < 2.
1218                  */
1219                 unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1220                                     NETIUCV_HDRLEN)) >> 31;
1221                 int copied = 0;
1222                 if (hi || (skb_tailroom(skb) < 2)) {
1223                         nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1224                                          NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1225                         if (!nskb) {
1226                                 IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1227                                 rc = -ENOMEM;
1228                                 return rc;
1229                         } else {
1230                                 skb_reserve(nskb, NETIUCV_HDRLEN);
1231                                 memcpy(skb_put(nskb, skb->len),
1232                                        skb->data, skb->len);
1233                         }
1234                         copied = 1;
1235                 }
1236                 /**
1237                  * skb now is below 2G and has enough room. Add headers.
1238                  */
1239                 header.next = nskb->len + NETIUCV_HDRLEN;
1240                 memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1241                 header.next = 0;
1242                 memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header,  NETIUCV_HDRLEN);
1243
1244                 fsm_newstate(conn->fsm, CONN_STATE_TX);
1245                 conn->prof.send_stamp = current_kernel_time();
1246
1247                 msg.tag = 1;
1248                 msg.class = 0;
1249                 rc = iucv_message_send(conn->path, &msg, 0, 0,
1250                                        nskb->data, nskb->len);
1251                 conn->prof.doios_single++;
1252                 conn->prof.txlen += skb->len;
1253                 conn->prof.tx_pending++;
1254                 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1255                         conn->prof.tx_max_pending = conn->prof.tx_pending;
1256                 if (rc) {
1257                         struct netiucv_priv *privptr;
1258                         fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1259                         conn->prof.tx_pending--;
1260                         privptr = netdev_priv(conn->netdev);
1261                         if (privptr)
1262                                 privptr->stats.tx_errors++;
1263                         if (copied)
1264                                 dev_kfree_skb(nskb);
1265                         else {
1266                                 /**
1267                                  * Remove our headers. They get added
1268                                  * again on retransmit.
1269                                  */
1270                                 skb_pull(skb, NETIUCV_HDRLEN);
1271                                 skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1272                         }
1273                         IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1274                 } else {
1275                         if (copied)
1276                                 dev_kfree_skb(skb);
1277                         atomic_inc(&nskb->users);
1278                         skb_queue_tail(&conn->commit_queue, nskb);
1279                 }
1280         }
1281
1282         return rc;
1283 }
1284
1285 /*
1286  * Interface API for upper network layers
1287  */
1288
1289 /**
1290  * Open an interface.
1291  * Called from generic network layer when ifconfig up is run.
1292  *
1293  * @param dev Pointer to interface struct.
1294  *
1295  * @return 0 on success, -ERRNO on failure. (Never fails.)
1296  */
1297 static int netiucv_open(struct net_device *dev)
1298 {
1299         struct netiucv_priv *priv = netdev_priv(dev);
1300
1301         fsm_event(priv->fsm, DEV_EVENT_START, dev);
1302         return 0;
1303 }
1304
1305 /**
1306  * Close an interface.
1307  * Called from generic network layer when ifconfig down is run.
1308  *
1309  * @param dev Pointer to interface struct.
1310  *
1311  * @return 0 on success, -ERRNO on failure. (Never fails.)
1312  */
1313 static int netiucv_close(struct net_device *dev)
1314 {
1315         struct netiucv_priv *priv = netdev_priv(dev);
1316
1317         fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1318         return 0;
1319 }
1320
1321 static int netiucv_pm_prepare(struct device *dev)
1322 {
1323         IUCV_DBF_TEXT(trace, 3, __func__);
1324         return 0;
1325 }
1326
1327 static void netiucv_pm_complete(struct device *dev)
1328 {
1329         IUCV_DBF_TEXT(trace, 3, __func__);
1330         return;
1331 }
1332
1333 /**
1334  * netiucv_pm_freeze() - Freeze PM callback
1335  * @dev:        netiucv device
1336  *
1337  * close open netiucv interfaces
1338  */
1339 static int netiucv_pm_freeze(struct device *dev)
1340 {
1341         struct netiucv_priv *priv = dev_get_drvdata(dev);
1342         struct net_device *ndev = NULL;
1343         int rc = 0;
1344
1345         IUCV_DBF_TEXT(trace, 3, __func__);
1346         if (priv && priv->conn)
1347                 ndev = priv->conn->netdev;
1348         if (!ndev)
1349                 goto out;
1350         netif_device_detach(ndev);
1351         priv->pm_state = fsm_getstate(priv->fsm);
1352         rc = netiucv_close(ndev);
1353 out:
1354         return rc;
1355 }
1356
1357 /**
1358  * netiucv_pm_restore_thaw() - Thaw and restore PM callback
1359  * @dev:        netiucv device
1360  *
1361  * re-open netiucv interfaces closed during freeze
1362  */
1363 static int netiucv_pm_restore_thaw(struct device *dev)
1364 {
1365         struct netiucv_priv *priv = dev_get_drvdata(dev);
1366         struct net_device *ndev = NULL;
1367         int rc = 0;
1368
1369         IUCV_DBF_TEXT(trace, 3, __func__);
1370         if (priv && priv->conn)
1371                 ndev = priv->conn->netdev;
1372         if (!ndev)
1373                 goto out;
1374         switch (priv->pm_state) {
1375         case DEV_STATE_RUNNING:
1376         case DEV_STATE_STARTWAIT:
1377                 rc = netiucv_open(ndev);
1378                 break;
1379         default:
1380                 break;
1381         }
1382         netif_device_attach(ndev);
1383 out:
1384         return rc;
1385 }
1386
1387 /**
1388  * Start transmission of a packet.
1389  * Called from generic network device layer.
1390  *
1391  * @param skb Pointer to buffer containing the packet.
1392  * @param dev Pointer to interface struct.
1393  *
1394  * @return 0 if packet consumed, !0 if packet rejected.
1395  *         Note: If we return !0, then the packet is free'd by
1396  *               the generic network layer.
1397  */
1398 static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1399 {
1400         struct netiucv_priv *privptr = netdev_priv(dev);
1401         int rc;
1402
1403         IUCV_DBF_TEXT(trace, 4, __func__);
1404         /**
1405          * Some sanity checks ...
1406          */
1407         if (skb == NULL) {
1408                 IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1409                 privptr->stats.tx_dropped++;
1410                 return NETDEV_TX_OK;
1411         }
1412         if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1413                 IUCV_DBF_TEXT(data, 2,
1414                         "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1415                 dev_kfree_skb(skb);
1416                 privptr->stats.tx_dropped++;
1417                 return NETDEV_TX_OK;
1418         }
1419
1420         /**
1421          * If the connection is not running, drop the packet
1422          * and account it as a transmit error.
1423          */
1424         if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1425                 dev_kfree_skb(skb);
1426                 privptr->stats.tx_dropped++;
1427                 privptr->stats.tx_errors++;
1428                 privptr->stats.tx_carrier_errors++;
1429                 return NETDEV_TX_OK;
1430         }
1431
1432         if (netiucv_test_and_set_busy(dev)) {
1433                 IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1434                 return NETDEV_TX_BUSY;
1435         }
1436         dev->trans_start = jiffies;
1437         rc = netiucv_transmit_skb(privptr->conn, skb);
1438         netiucv_clear_busy(dev);
1439         return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1440 }
1441
1442 /**
1443  * netiucv_stats
1444  * @dev: Pointer to interface struct.
1445  *
1446  * Returns interface statistics of a device.
1447  *
1448  * Returns pointer to stats struct of this interface.
1449  */
1450 static struct net_device_stats *netiucv_stats (struct net_device * dev)
1451 {
1452         struct netiucv_priv *priv = netdev_priv(dev);
1453
1454         IUCV_DBF_TEXT(trace, 5, __func__);
1455         return &priv->stats;
1456 }
1457
1458 /**
1459  * netiucv_change_mtu
1460  * @dev: Pointer to interface struct.
1461  * @new_mtu: The new MTU to use for this interface.
1462  *
1463  * Sets MTU of an interface.
1464  *
1465  * Returns 0 on success, -EINVAL if MTU is out of valid range.
1466  *         (valid range is 576 .. NETIUCV_MTU_MAX).
1467  */
1468 static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1469 {
1470         IUCV_DBF_TEXT(trace, 3, __func__);
1471         if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1472                 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1473                 return -EINVAL;
1474         }
1475         dev->mtu = new_mtu;
1476         return 0;
1477 }
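/*
 * Example (illustrative): with the definitions above the accepted range
 * is 576..NETIUCV_MTU_MAX (65535), so
 *
 *	netiucv_change_mtu(dev, 9216);	sets dev->mtu and returns 0
 *	netiucv_change_mtu(dev, 500);	returns -EINVAL
 */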
1478
1479 /*
1480  * attributes in sysfs
1481  */
1482
1483 static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1484                          char *buf)
1485 {
1486         struct netiucv_priv *priv = dev_get_drvdata(dev);
1487
1488         IUCV_DBF_TEXT(trace, 5, __func__);
1489         return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
1490 }
1491
1492 static int netiucv_check_user(const char *buf, size_t count, char *username,
1493                               char *userdata)
1494 {
1495         const char *p;
1496         int i;
1497
1498         p = strchr(buf, '.');
1499         if ((p && ((count > 26) ||
1500                    ((p - buf) > 8) ||
1501                    (buf + count - p > 18))) ||
1502             (!p && (count > 9))) {
1503                 IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1504                 return -EINVAL;
1505         }
1506
1507         for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
1508                 if (isalnum(*p) || *p == '$') {
1509                         username[i] = toupper(*p);
1510                         continue;
1511                 }
1512                 if (*p == '\n')
1513                         /* trailing lf, grr */
1514                         break;
1515                 IUCV_DBF_TEXT_(setup, 2,
1516                                "conn_write: invalid character %02x\n", *p);
1517                 return -EINVAL;
1518         }
1519         while (i < 8)
1520                 username[i++] = ' ';
1521         username[8] = '\0';
1522
1523         if (*p == '.') {
1524                 p++;
1525                 for (i = 0; i < 16 && *p; i++, p++) {
1526                         if (*p == '\n')
1527                                 break;
1528                         userdata[i] = toupper(*p);
1529                 }
1530                 while (i > 0 && i < 16)
1531                         userdata[i++] = ' ';
1532         } else
1533                 memcpy(userdata, iucvMagic_ascii, 16);
1534         userdata[16] = '\0';
1535         ASCEBC(userdata, 16);
1536
1537         return 0;
1538 }
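/*
 * Input format accepted above: "USERID" or "USERID.userdata", where
 * USERID is at most 8 alphanumeric characters (or '$') and the optional
 * userdata part is at most 16 characters; a trailing newline is
 * tolerated.  Both parts are upper-cased and blank-padded, a missing
 * userdata part defaults to iucvMagic_ascii, and userdata is converted
 * to EBCDIC.  A hypothetical example (peer name made up for
 * illustration):
 *
 *   "lnxpeer1.router01\n" -> username "LNXPEER1",
 *                            userdata "ROUTER01        " (EBCDIC)
 */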
1539
1540 static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1541                           const char *buf, size_t count)
1542 {
1543         struct netiucv_priv *priv = dev_get_drvdata(dev);
1544         struct net_device *ndev = priv->conn->netdev;
1545         char    username[9];
1546         char    userdata[17];
1547         int     rc;
1548         struct iucv_connection *cp;
1549
1550         IUCV_DBF_TEXT(trace, 3, __func__);
1551         rc = netiucv_check_user(buf, count, username, userdata);
1552         if (rc)
1553                 return rc;
1554
1555         if (memcmp(username, priv->conn->userid, 9) &&
1556             (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1557                 /* username changed while the interface is active. */
1558                 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1559                 return -EPERM;
1560         }
1561         read_lock_bh(&iucv_connection_rwlock);
1562         list_for_each_entry(cp, &iucv_connection_list, list) {
1563                 if (!strncmp(username, cp->userid, 9) &&
1564                    !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
1565                         read_unlock_bh(&iucv_connection_rwlock);
1566                         IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
1567                                 "already exists\n", netiucv_printuser(cp));
1568                         return -EEXIST;
1569                 }
1570         }
1571         read_unlock_bh(&iucv_connection_rwlock);
1572         memcpy(priv->conn->userid, username, 9);
1573         memcpy(priv->conn->userdata, userdata, 17);
1574         return count;
1575 }
1576
1577 static DEVICE_ATTR(user, 0644, user_show, user_write);
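/*
 * Usage sketch: the peer of a connection can be inspected and, while the
 * interface is down, changed via the "user" attribute of the
 * per-connection device created in netiucv_register_device() below
 * (sysfs paths are illustrative, assuming the usual bus layout):
 *
 *   cat /sys/bus/iucv/devices/netiucv0/user
 *   echo newpeer > /sys/bus/iucv/devices/netiucv0/user
 *
 * Writing returns -EPERM while the interface is active and -EEXIST if
 * another netiucv connection to the same peer already exists.
 */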
1578
1579 static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1580                             char *buf)
1581 {
1582         struct netiucv_priv *priv = dev_get_drvdata(dev);
1583
1584         IUCV_DBF_TEXT(trace, 5, __func__);
1585         return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1586 }
1587
1588 static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1589                              const char *buf, size_t count)
1590 {
1591         struct netiucv_priv *priv = dev_get_drvdata(dev);
1592         struct net_device *ndev = priv->conn->netdev;
1593         char         *e;
1594         int          bs1;
1595
1596         IUCV_DBF_TEXT(trace, 3, __func__);
1597         if (count >= 39)
1598                 return -EINVAL;
1599
1600         bs1 = simple_strtoul(buf, &e, 0);
1601
1602         if (e && (!isspace(*e))) {
1603                 IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
1604                         *e);
1605                 return -EINVAL;
1606         }
1607         if (bs1 > NETIUCV_BUFSIZE_MAX) {
1608                 IUCV_DBF_TEXT_(setup, 2,
1609                         "buffer_write: buffer size %d too large\n",
1610                         bs1);
1611                 return -EINVAL;
1612         }
1613         if ((ndev->flags & IFF_RUNNING) &&
1614             (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1615                 IUCV_DBF_TEXT_(setup, 2,
1616                         "buffer_write: buffer size %d too small\n",
1617                         bs1);
1618                 return -EINVAL;
1619         }
1620         if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1621                 IUCV_DBF_TEXT_(setup, 2,
1622                         "buffer_write: buffer size %d too small\n",
1623                         bs1);
1624                 return -EINVAL;
1625         }
1626
1627         priv->conn->max_buffsize = bs1;
1628         if (!(ndev->flags & IFF_RUNNING))
1629                 ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1630
1631         return count;
1632
1633 }
1634
1635 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
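/*
 * The "buffer" attribute controls conn->max_buffsize.  The checks above
 * enforce, in terms of the constants defined earlier in this file:
 *
 *   576 + 2 * NETIUCV_HDRLEN <= bs1 <= NETIUCV_BUFSIZE_MAX
 *
 * and, while the interface is running, bs1 >= ndev->mtu + NETIUCV_HDRLEN + 2.
 * If the interface is not running, the MTU is adjusted to
 * bs1 - 2 * NETIUCV_HDRLEN so that a maximum-sized packet still fits
 * into a single transmit buffer.
 */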
1636
1637 static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1638                              char *buf)
1639 {
1640         struct netiucv_priv *priv = dev_get_drvdata(dev);
1641
1642         IUCV_DBF_TEXT(trace, 5, __func__);
1643         return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1644 }
1645
1646 static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1647
1648 static ssize_t conn_fsm_show (struct device *dev,
1649                               struct device_attribute *attr, char *buf)
1650 {
1651         struct netiucv_priv *priv = dev_get_drvdata(dev);
1652
1653         IUCV_DBF_TEXT(trace, 5, __func__);
1654         return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1655 }
1656
1657 static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1658
1659 static ssize_t maxmulti_show (struct device *dev,
1660                               struct device_attribute *attr, char *buf)
1661 {
1662         struct netiucv_priv *priv = dev_get_drvdata(dev);
1663
1664         IUCV_DBF_TEXT(trace, 5, __func__);
1665         return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1666 }
1667
1668 static ssize_t maxmulti_write (struct device *dev,
1669                                struct device_attribute *attr,
1670                                const char *buf, size_t count)
1671 {
1672         struct netiucv_priv *priv = dev_get_drvdata(dev);
1673
1674         IUCV_DBF_TEXT(trace, 4, __func__);
1675         priv->conn->prof.maxmulti = 0;
1676         return count;
1677 }
1678
1679 static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1680
1681 static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1682                            char *buf)
1683 {
1684         struct netiucv_priv *priv = dev_get_drvdata(dev);
1685
1686         IUCV_DBF_TEXT(trace, 5, __func__);
1687         return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1688 }
1689
1690 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1691                             const char *buf, size_t count)
1692 {
1693         struct netiucv_priv *priv = dev_get_drvdata(dev);
1694
1695         IUCV_DBF_TEXT(trace, 4, __func__);
1696         priv->conn->prof.maxcqueue = 0;
1697         return count;
1698 }
1699
1700 static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1701
1702 static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1703                            char *buf)
1704 {
1705         struct netiucv_priv *priv = dev_get_drvdata(dev);
1706
1707         IUCV_DBF_TEXT(trace, 5, __func__);
1708         return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1709 }
1710
1711 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1712                             const char *buf, size_t count)
1713 {
1714         struct netiucv_priv *priv = dev_get_drvdata(dev);
1715
1716         IUCV_DBF_TEXT(trace, 4, __func__);
1717         priv->conn->prof.doios_single = 0;
1718         return count;
1719 }
1720
1721 static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1722
1723 static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1724                            char *buf)
1725 {
1726         struct netiucv_priv *priv = dev_get_drvdata(dev);
1727
1728         IUCV_DBF_TEXT(trace, 5, __func__);
1729         return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1730 }
1731
1732 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1733                             const char *buf, size_t count)
1734 {
1735         struct netiucv_priv *priv = dev_get_drvdata(dev);
1736
1737         IUCV_DBF_TEXT(trace, 5, __func__);
1738         priv->conn->prof.doios_multi = 0;
1739         return count;
1740 }
1741
1742 static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1743
1744 static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1745                            char *buf)
1746 {
1747         struct netiucv_priv *priv = dev_get_drvdata(dev);
1748
1749         IUCV_DBF_TEXT(trace, 5, __func__);
1750         return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1751 }
1752
1753 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1754                             const char *buf, size_t count)
1755 {
1756         struct netiucv_priv *priv = dev_get_drvdata(dev);
1757
1758         IUCV_DBF_TEXT(trace, 4, __func__);
1759         priv->conn->prof.txlen = 0;
1760         return count;
1761 }
1762
1763 static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1764
1765 static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1766                             char *buf)
1767 {
1768         struct netiucv_priv *priv = dev_get_drvdata(dev);
1769
1770         IUCV_DBF_TEXT(trace, 5, __func__);
1771         return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1772 }
1773
1774 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1775                              const char *buf, size_t count)
1776 {
1777         struct netiucv_priv *priv = dev_get_drvdata(dev);
1778
1779         IUCV_DBF_TEXT(trace, 4, __func__);
1780         priv->conn->prof.tx_time = 0;
1781         return count;
1782 }
1783
1784 static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1785
1786 static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1787                             char *buf)
1788 {
1789         struct netiucv_priv *priv = dev_get_drvdata(dev);
1790
1791         IUCV_DBF_TEXT(trace, 5, __func__);
1792         return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1793 }
1794
1795 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1796                              const char *buf, size_t count)
1797 {
1798         struct netiucv_priv *priv = dev_get_drvdata(dev);
1799
1800         IUCV_DBF_TEXT(trace, 4, __func__);
1801         priv->conn->prof.tx_pending = 0;
1802         return count;
1803 }
1804
1805 static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1806
1807 static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1808                             char *buf)
1809 {
1810         struct netiucv_priv *priv = dev_get_drvdata(dev);
1811
1812         IUCV_DBF_TEXT(trace, 5, __func__);
1813         return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1814 }
1815
1816 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1817                              const char *buf, size_t count)
1818 {
1819         struct netiucv_priv *priv = dev_get_drvdata(dev);
1820
1821         IUCV_DBF_TEXT(trace, 4, __func__);
1822         priv->conn->prof.tx_max_pending = 0;
1823         return count;
1824 }
1825
1826 static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1827
1828 static struct attribute *netiucv_attrs[] = {
1829         &dev_attr_buffer.attr,
1830         &dev_attr_user.attr,
1831         NULL,
1832 };
1833
1834 static struct attribute_group netiucv_attr_group = {
1835         .attrs = netiucv_attrs,
1836 };
1837
1838 static struct attribute *netiucv_stat_attrs[] = {
1839         &dev_attr_device_fsm_state.attr,
1840         &dev_attr_connection_fsm_state.attr,
1841         &dev_attr_max_tx_buffer_used.attr,
1842         &dev_attr_max_chained_skbs.attr,
1843         &dev_attr_tx_single_write_ops.attr,
1844         &dev_attr_tx_multi_write_ops.attr,
1845         &dev_attr_netto_bytes.attr,
1846         &dev_attr_max_tx_io_time.attr,
1847         &dev_attr_tx_pending.attr,
1848         &dev_attr_tx_max_pending.attr,
1849         NULL,
1850 };
1851
1852 static struct attribute_group netiucv_stat_attr_group = {
1853         .name  = "stats",
1854         .attrs = netiucv_stat_attrs,
1855 };
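/*
 * The attributes above are grouped in a "stats" subdirectory of the
 * per-connection device.  They expose the transmit-path counters kept in
 * conn->prof; writing anything to one of the writable files resets the
 * corresponding counter to zero (the written value itself is ignored),
 * e.g. (path illustrative):
 *
 *   echo 0 > /sys/bus/iucv/devices/netiucv0/stats/tx_pending
 */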
1856
1857 static const struct attribute_group *netiucv_attr_groups[] = {
1858         &netiucv_stat_attr_group,
1859         &netiucv_attr_group,
1860         NULL,
1861 };
1862
1863 static int netiucv_register_device(struct net_device *ndev)
1864 {
1865         struct netiucv_priv *priv = netdev_priv(ndev);
1866         struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1867         int ret;
1868
1869         IUCV_DBF_TEXT(trace, 3, __func__);
1870
1871         if (dev) {
1872                 dev_set_name(dev, "net%s", ndev->name);
1873                 dev->bus = &iucv_bus;
1874                 dev->parent = iucv_root;
1875                 dev->groups = netiucv_attr_groups;
1876                 /*
1877                  * The release function could be called after the
1878                  * module has been unloaded. Its only task is to
1879                  * free the struct, so we specify kfree()
1880                  * directly here. (Probably a little obscure,
1881                  * but legitimate.)
1882                  */
1883                 dev->release = (void (*)(struct device *))kfree;
1884                 dev->driver = &netiucv_driver;
1885         } else
1886                 return -ENOMEM;
1887
1888         ret = device_register(dev);
1889         if (ret) {
1890                 put_device(dev);
1891                 return ret;
1892         }
1893         priv->dev = dev;
1894         dev_set_drvdata(dev, priv);
1895         return 0;
1896 }
1897
1898 static void netiucv_unregister_device(struct device *dev)
1899 {
1900         IUCV_DBF_TEXT(trace, 3, __func__);
1901         device_unregister(dev);
1902 }
1903
1904 /**
1905  * Allocate and initialize a new connection structure.
1906  * Add it to the list of netiucv connections.
1907  */
1908 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1909                                                       char *username,
1910                                                       char *userdata)
1911 {
1912         struct iucv_connection *conn;
1913
1914         conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1915         if (!conn)
1916                 goto out;
1917         skb_queue_head_init(&conn->collect_queue);
1918         skb_queue_head_init(&conn->commit_queue);
1919         spin_lock_init(&conn->collect_lock);
1920         conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1921         conn->netdev = dev;
1922
1923         conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1924         if (!conn->rx_buff)
1925                 goto out_conn;
1926         conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1927         if (!conn->tx_buff)
1928                 goto out_rx;
1929         conn->fsm = init_fsm("netiucvconn", conn_state_names,
1930                              conn_event_names, NR_CONN_STATES,
1931                              NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1932                              GFP_KERNEL);
1933         if (!conn->fsm)
1934                 goto out_tx;
1935
1936         fsm_settimer(conn->fsm, &conn->timer);
1937         fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1938
1939         if (userdata)
1940                 memcpy(conn->userdata, userdata, 17);
1941         if (username) {
1942                 memcpy(conn->userid, username, 9);
1943                 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1944         }
1945
1946         write_lock_bh(&iucv_connection_rwlock);
1947         list_add_tail(&conn->list, &iucv_connection_list);
1948         write_unlock_bh(&iucv_connection_rwlock);
1949         return conn;
1950
1951 out_tx:
1952         kfree_skb(conn->tx_buff);
1953 out_rx:
1954         kfree_skb(conn->rx_buff);
1955 out_conn:
1956         kfree(conn);
1957 out:
1958         return NULL;
1959 }
1960
1961 /**
1962  * Release a connection structure and remove it from the
1963  * list of netiucv connections.
1964  */
1965 static void netiucv_remove_connection(struct iucv_connection *conn)
1966 {
1967
1968         IUCV_DBF_TEXT(trace, 3, __func__);
1969         write_lock_bh(&iucv_connection_rwlock);
1970         list_del_init(&conn->list);
1971         write_unlock_bh(&iucv_connection_rwlock);
1972         fsm_deltimer(&conn->timer);
1973         netiucv_purge_skb_queue(&conn->collect_queue);
1974         if (conn->path) {
1975                 iucv_path_sever(conn->path, conn->userdata);
1976                 kfree(conn->path);
1977                 conn->path = NULL;
1978         }
1979         netiucv_purge_skb_queue(&conn->commit_queue);
1980         kfree_fsm(conn->fsm);
1981         kfree_skb(conn->rx_buff);
1982         kfree_skb(conn->tx_buff);
1983 }
1984
1985 /**
1986  * Release everything of a net device.
1987  */
1988 static void netiucv_free_netdevice(struct net_device *dev)
1989 {
1990         struct netiucv_priv *privptr;
1991
1992         IUCV_DBF_TEXT(trace, 3, __func__);
1993
1994         if (!dev)
1995                 return;
1996         privptr = netdev_priv(dev);
1997         if (privptr) {
1998                 if (privptr->conn)
1999                         netiucv_remove_connection(privptr->conn);
2000                 if (privptr->fsm)
2001                         kfree_fsm(privptr->fsm);
2002                 privptr->conn = NULL; privptr->fsm = NULL;
2003                 /* privptr gets freed by free_netdev() */
2004         }
2005         free_netdev(dev);
2006 }
2007
2008 /**
2009  * Initialize a net device. (Called from kernel in alloc_netdev())
2010  */
2011 static const struct net_device_ops netiucv_netdev_ops = {
2012         .ndo_open               = netiucv_open,
2013         .ndo_stop               = netiucv_close,
2014         .ndo_get_stats          = netiucv_stats,
2015         .ndo_start_xmit         = netiucv_tx,
2016         .ndo_change_mtu         = netiucv_change_mtu,
2017 };
2018
2019 static void netiucv_setup_netdevice(struct net_device *dev)
2020 {
2021         dev->mtu                 = NETIUCV_MTU_DEFAULT;
2022         dev->destructor          = netiucv_free_netdevice;
2023         dev->hard_header_len     = NETIUCV_HDRLEN;
2024         dev->addr_len            = 0;
2025         dev->type                = ARPHRD_SLIP;
2026         dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
2027         dev->flags               = IFF_POINTOPOINT | IFF_NOARP;
2028         dev->netdev_ops          = &netiucv_netdev_ops;
2029 }
2030
2031 /**
2032  * Allocate and initialize everything of a net device.
2033  */
2034 static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
2035 {
2036         struct netiucv_priv *privptr;
2037         struct net_device *dev;
2038
2039         dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
2040                            netiucv_setup_netdevice);
2041         if (!dev)
2042                 return NULL;
2043         if (dev_alloc_name(dev, dev->name) < 0)
2044                 goto out_netdev;
2045
2046         privptr = netdev_priv(dev);
2047         privptr->fsm = init_fsm("netiucvdev", dev_state_names,
2048                                 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
2049                                 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2050         if (!privptr->fsm)
2051                 goto out_netdev;
2052
2053         privptr->conn = netiucv_new_connection(dev, username, userdata);
2054         if (!privptr->conn) {
2055                 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
2056                 goto out_fsm;
2057         }
2058         fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2059         return dev;
2060
2061 out_fsm:
2062         kfree_fsm(privptr->fsm);
2063 out_netdev:
2064         free_netdev(dev);
2065         return NULL;
2066 }
2067
2068 static ssize_t conn_write(struct device_driver *drv,
2069                           const char *buf, size_t count)
2070 {
2071         char username[9];
2072         char userdata[17];
2073         int rc;
2074         struct net_device *dev;
2075         struct netiucv_priv *priv;
2076         struct iucv_connection *cp;
2077
2078         IUCV_DBF_TEXT(trace, 3, __func__);
2079         rc = netiucv_check_user(buf, count, username, userdata);
2080         if (rc)
2081                 return rc;
2082
2083         read_lock_bh(&iucv_connection_rwlock);
2084         list_for_each_entry(cp, &iucv_connection_list, list) {
2085                 if (!strncmp(username, cp->userid, 9) &&
2086                     !strncmp(userdata, cp->userdata, 17)) {
2087                         read_unlock_bh(&iucv_connection_rwlock);
2088                         IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
2089                                 "already exists\n", netiucv_printuser(cp));
2090                         return -EEXIST;
2091                 }
2092         }
2093         read_unlock_bh(&iucv_connection_rwlock);
2094
2095         dev = netiucv_init_netdevice(username, userdata);
2096         if (!dev) {
2097                 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2098                 return -ENODEV;
2099         }
2100
2101         rc = netiucv_register_device(dev);
2102         if (rc) {
2103                 IUCV_DBF_TEXT_(setup, 2,
2104                         "ret %d from netiucv_register_device\n", rc);
2105                 goto out_free_ndev;
2106         }
2107
2108         /* sysfs magic */
2109         priv = netdev_priv(dev);
2110         SET_NETDEV_DEV(dev, priv->dev);
2111
2112         rc = register_netdev(dev);
2113         if (rc)
2114                 goto out_unreg;
2115
2116         dev_info(priv->dev, "The IUCV interface to %s has been established "
2117                             "successfully\n",
2118                 netiucv_printuser(priv->conn));
2119
2120         return count;
2121
2122 out_unreg:
2123         netiucv_unregister_device(priv->dev);
2124 out_free_ndev:
2125         netiucv_free_netdevice(dev);
2126         return rc;
2127 }
2128
2129 static DRIVER_ATTR(connection, 0200, NULL, conn_write);
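/*
 * A new IUCV interface is created by writing the peer's VM user ID
 * (optionally followed by ".userdata", see netiucv_check_user()) to the
 * driver's "connection" attribute, e.g. (peer name and path are
 * illustrative only):
 *
 *   echo lnxpeer1 > /sys/bus/iucv/drivers/netiucv/connection
 *
 * On success an iucv%d net_device is allocated and registered together
 * with a matching device on the IUCV bus, and the number of bytes
 * written is returned; -EEXIST indicates that a connection to that peer
 * already exists.
 */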
2130
2131 static ssize_t remove_write (struct device_driver *drv,
2132                              const char *buf, size_t count)
2133 {
2134         struct iucv_connection *cp;
2135         struct net_device *ndev;
2136         struct netiucv_priv *priv;
2137         struct device *dev;
2138         char name[IFNAMSIZ];
2139         const char *p;
2140         int i;
2141
2142         IUCV_DBF_TEXT(trace, 3, __func__);
2143
2144         if (count >= IFNAMSIZ)
2145                 count = IFNAMSIZ - 1;
2146
2147         for (i = 0, p = buf; i < count && *p; i++, p++) {
2148                 if (*p == '\n' || *p == ' ')
2149                         /* trailing lf, grr */
2150                         break;
2151                 name[i] = *p;
2152         }
2153         name[i] = '\0';
2154
2155         read_lock_bh(&iucv_connection_rwlock);
2156         list_for_each_entry(cp, &iucv_connection_list, list) {
2157                 ndev = cp->netdev;
2158                 priv = netdev_priv(ndev);
2159                 dev = priv->dev;
2160                 if (strncmp(name, ndev->name, count))
2161                         continue;
2162                 read_unlock_bh(&iucv_connection_rwlock);
2163                 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2164                         dev_warn(dev, "The IUCV device is connected"
2165                                 " to %s and cannot be removed\n",
2166                                 priv->conn->userid);
2167                         IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2168                         return -EPERM;
2169                 }
2170                 unregister_netdev(ndev);
2171                 netiucv_unregister_device(dev);
2172                 return count;
2173         }
2174         read_unlock_bh(&iucv_connection_rwlock);
2175         IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2176         return -EINVAL;
2177 }
2178
2179 static DRIVER_ATTR(remove, 0200, NULL, remove_write);
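/*
 * An existing interface is removed by writing its name to the driver's
 * "remove" attribute, e.g. (path illustrative only):
 *
 *   echo iucv0 > /sys/bus/iucv/drivers/netiucv/remove
 *
 * Removal is refused with -EPERM while the interface is still active and
 * with -EINVAL if no interface of that name exists.
 */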
2180
2181 static struct attribute * netiucv_drv_attrs[] = {
2182         &driver_attr_connection.attr,
2183         &driver_attr_remove.attr,
2184         NULL,
2185 };
2186
2187 static struct attribute_group netiucv_drv_attr_group = {
2188         .attrs = netiucv_drv_attrs,
2189 };
2190
2191 static const struct attribute_group *netiucv_drv_attr_groups[] = {
2192         &netiucv_drv_attr_group,
2193         NULL,
2194 };
2195
2196 static void netiucv_banner(void)
2197 {
2198         pr_info("driver initialized\n");
2199 }
2200
2201 static void __exit netiucv_exit(void)
2202 {
2203         struct iucv_connection *cp;
2204         struct net_device *ndev;
2205         struct netiucv_priv *priv;
2206         struct device *dev;
2207
2208         IUCV_DBF_TEXT(trace, 3, __func__);
2209         while (!list_empty(&iucv_connection_list)) {
2210                 cp = list_entry(iucv_connection_list.next,
2211                                 struct iucv_connection, list);
2212                 ndev = cp->netdev;
2213                 priv = netdev_priv(ndev);
2214                 dev = priv->dev;
2215
2216                 unregister_netdev(ndev);
2217                 netiucv_unregister_device(dev);
2218         }
2219
2220         device_unregister(netiucv_dev);
2221         driver_unregister(&netiucv_driver);
2222         iucv_unregister(&netiucv_handler, 1);
2223         iucv_unregister_dbf_views();
2224
2225         pr_info("driver unloaded\n");
2226         return;
2227 }
2228
2229 static int __init netiucv_init(void)
2230 {
2231         int rc;
2232
2233         rc = iucv_register_dbf_views();
2234         if (rc)
2235                 goto out;
2236         rc = iucv_register(&netiucv_handler, 1);
2237         if (rc)
2238                 goto out_dbf;
2239         IUCV_DBF_TEXT(trace, 3, __func__);
2240         netiucv_driver.groups = netiucv_drv_attr_groups;
2241         rc = driver_register(&netiucv_driver);
2242         if (rc) {
2243                 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2244                 goto out_iucv;
2245         }
2246         /* establish dummy device */
2247         netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2248         if (!netiucv_dev) {
2249                 rc = -ENOMEM;
2250                 goto out_driver;
2251         }
2252         dev_set_name(netiucv_dev, "netiucv");
2253         netiucv_dev->bus = &iucv_bus;
2254         netiucv_dev->parent = iucv_root;
2255         netiucv_dev->release = (void (*)(struct device *))kfree;
2256         netiucv_dev->driver = &netiucv_driver;
2257         rc = device_register(netiucv_dev);
2258         if (rc) {
2259                 put_device(netiucv_dev);
2260                 goto out_driver;
2261         }
2262         netiucv_banner();
2263         return rc;
2264
2265 out_driver:
2266         driver_unregister(&netiucv_driver);
2267 out_iucv:
2268         iucv_unregister(&netiucv_handler, 1);
2269 out_dbf:
2270         iucv_unregister_dbf_views();
2271 out:
2272         return rc;
2273 }
2274
2275 module_init(netiucv_init);
2276 module_exit(netiucv_exit);
2277 MODULE_LICENSE("GPL");