 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *	Michal Ostrowski	:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *		Johann Baudy	:	Added TX RING.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <net/inet_common.h>
   - If a device has no dev->hard_header routine, it adds and removes the ll header
     inside itself. In this case the ll header is invisible outside of the device,
     but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnels); others are not.
   - A packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

Incoming, dev->hard_header != NULL
   mac_header -> ll header

Outgoing, dev->hard_header != NULL
   mac_header -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
		 header. PPP does this, which is wrong, because it introduces
		 asymmetry between the rx and tx paths.

Outgoing, dev->hard_header == NULL
   mac_header -> data. The ll header is still not built!

If dev->hard_header == NULL we are unlikely to restore a sensible ll header.

dev->hard_header != NULL
   mac_header -> ll header

dev->hard_header == NULL (the ll header is added by the device, we cannot control it)

We should set nh.raw on output to the correct position;
the packet classifier depends on it.
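
/*
 * Illustrative userspace sketch (not part of this file): the asymmetry
 * described above is visible from user space as SOCK_RAW delivering the
 * link-layer header while SOCK_DGRAM strips it. A minimal sniffer,
 * assuming a Linux host and CAP_NET_RAW:
 *
 *	#include <arpa/inet.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *
 *	int main(void)
 *	{
 *		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *		unsigned char buf[2048];
 *		struct sockaddr_ll from;
 *		socklen_t flen = sizeof(from);
 *		ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *				     (struct sockaddr *)&from, &flen);
 *		printf("%zd bytes on ifindex %d\n", n, from.sll_ifindex);
 *		return 0;
 *	}
 *
 * With SOCK_RAW, buf starts at the ll header that the receive path pushed
 * back for us; error handling is omitted for brevity.
 */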
/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	unsigned char		addr[MAX_ADDR_LEN];

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];

static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
		int closing, int tx_ring);

struct packet_ring_buffer {
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);

static void packet_flush_mclist(struct sock *sk);

	/* struct sock has to be the first member of packet_sock */
	struct tpacket_stats	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
	int			ifindex;	/* bound device */
	struct packet_mclist	*mclist;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;

struct packet_skb_cb {
	unsigned int origlen;
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
static void __packet_set_status(struct packet_sock *po, void *frame, int status)
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;

	switch (po->tp_version) {
		h.h1->tp_status = status;
		flush_dcache_page(virt_to_page(&h.h1->tp_status));
		h.h2->tp_status = status;
		flush_dcache_page(virt_to_page(&h.h2->tp_status));
		pr_err("TPACKET version not supported\n");

static int __packet_get_status(struct packet_sock *po, void *frame)
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;

	switch (po->tp_version) {
		flush_dcache_page(virt_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
		flush_dcache_page(virt_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
		pr_err("TPACKET version not supported\n");
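
/*
 * Userspace counterpart (illustrative sketch): tp_status is the only
 * synchronization word shared with the consumer of a mapped RX ring.
 * The reader waits until the kernel flips a frame to TP_STATUS_USER
 * and then hands the slot back; "frame" and "pfd" below are assumptions
 * of the example and point into the mmap'ed ring:
 *
 *	struct tpacket_hdr *hdr = (struct tpacket_hdr *)frame;
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 *	... consume hdr->tp_len bytes at frame + hdr->tp_mac ...
 *	hdr->tp_status = TP_STATUS_KERNEL;
 *	__sync_synchronize();
 */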
static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
	unsigned int pg_vec_pos, frame_offset;
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos] + (frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))

static inline void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
	return packet_lookup_frame(po, rb, rb->head, status);

static inline void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);

static inline void packet_increment_head(struct packet_ring_buffer *buff)
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
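
/*
 * Worked example of the lookup arithmetic above (illustrative values):
 * with tp_block_size 4096 and tp_frame_size 2048, frames_per_block is 2,
 * so frame number 5 lives in block 5 / 2 = 2, at byte offset
 * (5 % 2) * 2048 = 2048 within that block:
 *
 *	pg_vec_pos   = 5 / 2;	(block 2)
 *	frame_offset = 5 % 2;	(second frame in the block)
 *	h.raw = pg_vec[2] + 2048;
 */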
static inline struct packet_sock *pkt_sk(struct sock *sk)
	return (struct packet_sock *)sk;

static void packet_sock_destruct(struct sock *sk)
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);

	sk_refcnt_debug_dec(sk);

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
	struct sockaddr_pkt *spkt;

	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.

	sk = pt->af_packet_priv;

	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]

	 *	Incoming packets have the ll header pulled,

	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is a no-op.

	if (skb->pkt_type == PACKET_LOOPBACK)

	if (!net_eq(dev_net(dev), sock_net(sk)))

	skb = skb_share_check(skb, GFP_ATOMIC);

	/* drop any routing info */

	/* drop conntrack reference */

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	 *	The SOCK_PACKET socket receives _all_ frames.

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.

	if (sock_queue_rcv_skb(sk, skb) == 0)
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct net_device *dev;

	 *	Get and verify the address.

	if (msg->msg_namelen < sizeof(struct sockaddr))
	if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
		proto = saddr->spkt_protocol;
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	 *	Find the device first to size check it

	saddr->spkt_device[13] = 0;
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);

	if (!(dev->flags & IFF_UP))

	 *	You may not queue a frame bigger than the mtu. This is the lowest level
	 *	raw protocol and you must do your own fragmentation at this level.

	if (len > dev->mtu + dev->hard_header_len)

		size_t reserved = LL_RESERVED_SPACE(dev);
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);

		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		skb_reset_network_header(skb);

	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);

	skb->protocol = proto;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	err = sock_tx_timestamp(msg, sk, skb_tx(skb));
static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
	struct sk_filter *filter;

	filter = rcu_dereference_bh(sk->sk_filter);
		res = sk_run_filter(skb, filter->insns, filter->len);
	rcu_read_unlock_bh();
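
/*
 * The filter run here is whatever user space attached with
 * SO_ATTACH_FILTER. Illustrative sketch of attaching a classic BPF
 * program that accepts only ARP frames (ethertype 0x0806 at offset 12):
 *
 *	#include <linux/filter.h>
 *
 *	struct sock_filter code[] = {
 *		{ 0x28, 0, 0, 12 },		(BPF_LD  | BPF_H | BPF_ABS)
 *		{ 0x15, 0, 1, 0x0806 },		(BPF_JMP | BPF_JEQ, ETH_P_ARP)
 *		{ 0x06, 0, 0, 0xffffffff },	(BPF_RET: keep whole packet)
 *		{ 0x06, 0, 0, 0 },		(BPF_RET: drop)
 *	};
 *	struct sock_fprog prog = { .len = 4, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * A nonzero return from the filter keeps the packet, truncated to the
 * returned length; zero drops it.
 */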
   This function makes lazy skb cloning in the hope that most packets
   are discarded by BPF.

   Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
   and skb->cb are mangled. It works because (and until) packets
   falling here are owned by the current CPU. Output packets are cloned
   by dev_queue_xmit_nit(), input packets are processed by net_bh
   sequentially, so that if we return the skb to its original state on exit,
   we will not harm anyone.
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)

	sk = pt->af_packet_priv;

	if (!net_eq(dev_net(dev), sock_net(sk)))

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		   exported to higher levels.

		   Otherwise, the device hides the details of its frame
		   structure, so that the corresponding packet head is
		   never delivered to the user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));

	res = run_filter(skb, sk, snaplen);

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

		if (skb_head != skb->data) {
			skb->data = skb_head;

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))

	skb_set_owner_r(skb, sk);

	/* drop conntrack reference */

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);

	po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);

	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
	struct packet_sock *po;
	struct sockaddr_ll *sll;
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;

	if (skb->pkt_type == PACKET_LOOPBACK)

	sk = pt->af_packet_priv;

	if (!net_eq(dev_net(dev), sock_net(sk)))

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;

	res = run_filter(skb, sk, snaplen);

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
		unsigned maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
		macoff = netoff - maclen;

	if (macoff + snaplen > po->rx_ring.frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
		    (unsigned)sk->sk_rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
				copy_skb = skb_get(skb);
				skb_head = skb->data;
				skb_set_owner_r(copy_skb, sk);
		snaplen = po->rx_ring.frame_size - macoff;
		if ((int)snaplen < 0)

	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
	packet_increment_head(&po->rx_ring);
	po->stats.tp_packets++;
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	switch (po->tp_version) {
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		if (skb->tstamp.tv64)
			tv = ktime_to_timeval(skb->tstamp);
			do_gettimeofday(&tv);
		h.h1->tp_sec = tv.tv_sec;
		h.h1->tp_usec = tv.tv_usec;
		hdrlen = sizeof(*h.h1);
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		if (skb->tstamp.tv64)
			ts = ktime_to_timespec(skb->tstamp);
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
		h.h2->tp_padding = 0;
		hdrlen = sizeof(*h.h2);

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
		sll->sll_ifindex = dev->ifindex;

	__packet_set_status(po, h.raw, status);

		struct page *p_start, *p_end;
		u8 *h_end = h.raw + macoff + snaplen - 1;

		p_start = virt_to_page(h.raw);
		p_end = virt_to_page(h_end);
		while (p_start <= p_end) {
			flush_dcache_page(p_start);

	sk->sk_data_ready(sk, 0);

	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;

	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
static void tpacket_destruct_skb(struct sk_buff *skb)
	struct packet_sock *po = pkt_sk(skb->sk);

	if (likely(po->tx_ring.pg_vec)) {
		ph = skb_shinfo(skb)->destructor_arg;
		BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
		atomic_dec(&po->tx_ring.pending);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);

static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, int size_max,
		__be16 proto, unsigned char *addr)
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
	int to_write, offset, len, tp_len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;

	skb->protocol = proto;
	skb->priority = po->sk.sk_priority;
	skb->mark = po->sk.sk_mark;
	skb_shinfo(skb)->destructor_arg = ph.raw;

	switch (po->tp_version) {
		tp_len = ph.h2->tp_len;
		tp_len = ph.h1->tp_len;
	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
		if (unlikely(err < 0))
	} else if (dev->hard_header_len) {
		/* net device doesn't like empty head */
		if (unlikely(tp_len <= dev->hard_header_len)) {
			pr_err("packet size is too short (%d < %d)\n",
			       tp_len, dev->hard_header_len);

		skb_push(skb, dev->hard_header_len);
		err = skb_store_bits(skb, 0, data,
				     dev->hard_header_len);

		data += dev->hard_header_len;
		to_write -= dev->hard_header_len;

	page = virt_to_page(data);
	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	atomic_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceeds the number of skb frags (%lu)\n",

		flush_dcache_page(page);
		skb_fill_page_desc(skb,
				   page++, offset, len);

		len = ((to_write > len_max) ? len_max : to_write);
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
	struct net_device *dev;
	int ifindex, err, reserve = 0;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	int tp_len, size_max;

	sock = po->sk.sk_socket;

	mutex_lock(&po->pg_vec_lock);

	ifindex = po->ifindex;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
		if (msg->msg_namelen < (saddr->sll_halen
					+ offsetof(struct sockaddr_ll,
		ifindex = saddr->sll_ifindex;
		proto = saddr->sll_protocol;
		addr = saddr->sll_addr;

	dev = dev_get_by_index(sock_net(&po->sk), ifindex);

	if (unlikely(dev == NULL))

	reserve = dev->hard_header_len;

	if (unlikely(!(dev->flags & IFF_UP)))

	size_max = po->tx_ring.frame_size
		 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));

	if (size_max > dev->mtu + reserve)
		size_max = dev->mtu + reserve;

		ph = packet_current_frame(po, &po->tx_ring,
					  TP_STATUS_SEND_REQUEST);

		if (unlikely(ph == NULL)) {

		status = TP_STATUS_SEND_REQUEST;
		skb = sock_alloc_send_skb(&po->sk,
					  LL_ALLOCATED_SPACE(dev)
					  + sizeof(struct sockaddr_ll),

		if (unlikely(skb == NULL))

		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,

		if (unlikely(tp_len < 0)) {
				__packet_set_status(po, ph,
						    TP_STATUS_AVAILABLE);
				packet_increment_head(&po->tx_ring);

				status = TP_STATUS_WRONG_FORMAT;

		skb->destructor = tpacket_destruct_skb;
		__packet_set_status(po, ph, TP_STATUS_SENDING);
		atomic_inc(&po->tx_ring.pending);

		status = TP_STATUS_SEND_REQUEST;
		err = dev_queue_xmit(skb);
		if (unlikely(err > 0)) {
			err = net_xmit_errno(err);
			if (err && __packet_get_status(po, ph) ==
				   TP_STATUS_AVAILABLE) {
				/* skb was destructed already */

			 * skb was dropped but not destructed yet;
			 * let's treat it like congestion or err < 0

		packet_increment_head(&po->tx_ring);
	} while (likely((ph != NULL) ||
			((!(msg->msg_flags & MSG_DONTWAIT)) &&
			 (atomic_read(&po->tx_ring.pending))))

	__packet_set_status(po, ph, status);

	mutex_unlock(&po->pg_vec_lock);
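
/*
 * Userspace counterpart of tpacket_snd() (illustrative sketch; "ring",
 * "slot" and "frame_size" are assumptions of the example): fill a frame
 * in the mmap'ed TX ring, mark it TP_STATUS_SEND_REQUEST and kick the
 * kernel with an empty send():
 *
 *	struct tpacket_hdr *hdr = (void *)(ring + slot * frame_size);
 *	char *data = (char *)hdr + TPACKET_HDRLEN - sizeof(struct sockaddr_ll);
 *
 *	if (hdr->tp_status != TP_STATUS_AVAILABLE)
 *		return;				(kernel still owns the slot)
 *	memcpy(data, pkt, pkt_len);		(see tpacket_fill_skb())
 *	hdr->tp_len = pkt_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);			(enters tpacket_snd())
 */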
static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
					       size_t reserve, size_t len,
					       size_t linear, int noblock,
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,

	skb_reserve(skb, reserve);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;
static int packet_snd(struct socket *sock,
		      struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	unsigned char *addr;
	int ifindex, err, reserve = 0;
	struct virtio_net_hdr vnet_hdr = { 0 };
	struct packet_sock *po = pkt_sk(sk);
	unsigned short gso_type = 0;

	 *	Get and verify the address.

	if (saddr == NULL) {
		ifindex = po->ifindex;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
		ifindex = saddr->sll_ifindex;
		proto = saddr->sll_protocol;
		addr = saddr->sll_addr;

	dev = dev_get_by_index(sock_net(sk), ifindex);

	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	if (!(dev->flags & IFF_UP))

	if (po->has_vnet_hdr) {
		vnet_hdr_len = sizeof(vnet_hdr);

		if (len < vnet_hdr_len)
		len -= vnet_hdr_len;

		err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,

		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
					   vnet_hdr.csum_offset + 2;

		if (vnet_hdr.hdr_len > len)

		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
			case VIRTIO_NET_HDR_GSO_TCPV4:
				gso_type = SKB_GSO_TCPV4;
			case VIRTIO_NET_HDR_GSO_TCPV6:
				gso_type = SKB_GSO_TCPV6;
			case VIRTIO_NET_HDR_GSO_UDP:
				gso_type = SKB_GSO_UDP;

			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
				gso_type |= SKB_GSO_TCP_ECN;

			if (vnet_hdr.gso_size == 0)

	if (!gso_type && (len > dev->mtu+reserve))

	skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
			       LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
			       msg->msg_flags & MSG_DONTWAIT, &err);

	skb_set_network_header(skb, reserve);

	if (sock->type == SOCK_DGRAM &&
	    (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)

	/* Returns -EFAULT on error */
	err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);

	err = sock_tx_timestamp(msg, sk, skb_tx(skb));

	skb->protocol = proto;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	if (po->has_vnet_hdr) {
		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
						  vnet_hdr.csum_offset)) {

		skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;

		len += vnet_hdr_len;

	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);
	return packet_snd(sock, msg, len);
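
/*
 * The non-ring packet_snd() path above is what a plain sendto() exercises.
 * Illustrative sketch of injecting one frame on a SOCK_RAW socket (the
 * caller supplies the complete frame, Ethernet header included; "ifindex"
 * and "dest_mac" are assumptions of the example):
 *
 *	struct sockaddr_ll dst = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_IP),
 *		.sll_ifindex  = ifindex,
 *		.sll_halen    = ETH_ALEN,
 *	};
 *	memcpy(dst.sll_addr, dest_mac, ETH_ALEN);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 */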
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.

static int packet_release(struct socket *sock)
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct tpacket_req req;

	spin_lock_bh(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	spin_unlock_bh(&net->packet.sklist_lock);

	spin_lock(&po->bind_lock);

	 *	Remove from protocol table

	__dev_remove_pack(&po->prot_hook);
	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	memset(&req, 0, sizeof(req));

	if (po->rx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 0);

	if (po->tx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 1);

	 *	Now the socket is dead. No more input will appear.

	skb_queue_purge(&sk->sk_receive_queue);
	sk_refcnt_debug_release(sk);
 *	Attach a packet hook.

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
	struct packet_sock *po = pkt_sk(sk);

	 *	Detach an existing hook if present.

	spin_lock(&po->bind_lock);
		spin_unlock(&po->bind_lock);
		dev_remove_pack(&po->prot_hook);
		spin_lock(&po->bind_lock);

	po->prot_hook.type = protocol;
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (!dev || (dev->flags & IFF_UP)) {
		dev_add_pack(&po->prot_hook);
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);

	spin_unlock(&po->bind_lock);

 *	Bind a packet socket to a device

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
	struct sock *sk = sock->sk;
	struct net_device *dev;

	if (addr_len != sizeof(struct sockaddr))

	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(sock_net(sk), name);
	err = packet_do_bind(sk, dev, pkt_sk(sk)->num);

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;

	if (addr_len < sizeof(struct sockaddr_ll))
	if (sll->sll_family != AF_PACKET)

	if (sll->sll_ifindex) {
		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);

	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
static struct proto packet_proto = {
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),

 *	Create a packet of type SOCK_PACKET.

static int packet_create(struct net *net, struct socket *sock, int protocol,
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol;	/* weird, but documented */

	if (!capable(CAP_NET_RAW))
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	sk->sk_family = PF_PACKET;
	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);

	 *	Attach a protocol block

	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	po->prot_hook.type = proto;
	dev_add_pack(&po->prot_hook);

	spin_lock_bh(&net->packet.sklist_lock);
	sk_add_node_rcu(sk, &net->packet.sklist);
	sock_prot_inuse_add(net, &packet_proto, 1);
	spin_unlock_bh(&net->packet.sklist_lock);
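
/*
 * From user space, packet_create() and packet_bind() correspond to
 * (illustrative sketch; "eth0" is an assumption of the example):
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *
 * An sll_ifindex of 0 leaves the socket bound to all devices.
 */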
static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;

	skb = skb_dequeue(&sk->sk_error_queue);

		msg->msg_flags |= MSG_TRUNC;
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
		 sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;

	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
		spin_unlock_bh(&sk->sk_error_queue.lock);
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct sockaddr_ll *sll;
	int vnet_hdr_len = 0;

	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))

	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)

	if (flags & MSG_ERRQUEUE) {
		err = packet_recv_error(sk, msg, len);

	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	Now it will return ENETDOWN if the device has just gone down,
	 *	but then it will block.

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	 *	An error occurred so return it. Because skb_recv_datagram()
	 *	handles the blocking we don't see and worry about blocking
	 *	retries.

	if (pkt_sk(sk)->has_vnet_hdr) {
		struct virtio_net_hdr vnet_hdr = { 0 };

		vnet_hdr_len = sizeof(vnet_hdr);
		if (len < vnet_hdr_len)

		len -= vnet_hdr_len;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			vnet_hdr.hdr_len = skb_headlen(skb);
			vnet_hdr.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else if (sinfo->gso_type & SKB_GSO_FCOE)

			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			vnet_hdr.csum_start = skb->csum_start -
			vnet_hdr.csum_offset = skb->csum_offset;
		} /* else everything is zero */

		err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,

	 *	If the address length field is there to be filled in, we fill
	 *	it in now.

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);

	 *	You lose any data beyond the buffer you gave. If it worries
	 *	a user program, it can ask the device for its MTU anyway.

		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	sock_recv_ts_and_drops(msg, sk, skb);

	memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_net = skb_network_offset(skb);
		aux.tp_vlan_tci = vlan_tx_tag_get(skb);

		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);

	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.

	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);

	skb_free_datagram(sk, skb);
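
/*
 * Consuming the PACKET_AUXDATA control message emitted above requires
 * recvmsg() with ancillary-data room (illustrative sketch, assuming the
 * option was enabled with setsockopt(PACKET_AUXDATA) and "iov" is set up
 * by the caller):
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = cbuf,
 *			    .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *c;
 *
 *	recvmsg(fd, &m, 0);
 *	for (c = CMSG_FIRSTHDR(&m); c; c = CMSG_NXTHDR(&m, c))
 *		if (c->cmsg_level == SOL_PACKET &&
 *		    c->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(c);
 *			... aux->tp_len, aux->tp_vlan_tci ...
 *		}
 */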
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
	struct net_device *dev;
	struct sock *sk = sock->sk;

	uaddr->sa_family = AF_PACKET;
	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
		strncpy(uaddr->sa_data, dev->name, 14);
		memset(uaddr->sa_data, 0, 14);
	*uaddr_len = sizeof(*uaddr);

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	sll->sll_pkttype = 0;
	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */

	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
	case PACKET_MR_MULTICAST:
		if (i->alen != dev->addr_len)
			return dev_mc_add(dev, i->addr);
			return dev_mc_del(dev, i->addr);
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
	case PACKET_MR_UNICAST:
		if (i->alen != dev->addr_len)
			return dev_uc_add(dev, i->addr);
			return dev_uc_del(dev, i->addr);

static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);

static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;

	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);

	if (mreq->mr_alen > dev->addr_len)

	i = kmalloc(sizeof(*i), GFP_KERNEL);

	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			/* Free the new element ... */

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->next = po->mclist;
	err = packet_dev_mc(dev, i, 1);
		po->mclist = i->next;

static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
	struct packet_mclist *ml, **mlp;

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;

				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
					packet_dev_mc(dev, ml, -1);

	return -EADDRNOTAVAIL;

static void packet_flush_mclist(struct sock *sk)
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
			packet_dev_mc(dev, ml, -1);
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
		struct packet_mreq_max mreq;

		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
		if (len > sizeof(mreq))
		if (copy_from_user(&mreq, optval, len))
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
			ret = packet_mc_drop(sk, &mreq);

	case PACKET_RX_RING:
	case PACKET_TX_RING:
		struct tpacket_req req;

		if (optlen < sizeof(req))
		if (pkt_sk(sk)->has_vnet_hdr)
		if (copy_from_user(&req, optval, sizeof(req)))
		return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);

	case PACKET_COPY_THRESH:
		if (optlen != sizeof(val))
		if (copy_from_user(&val, optval, sizeof(val)))

		pkt_sk(sk)->copy_thresh = val;

	case PACKET_VERSION:
		if (optlen != sizeof(val))
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
		if (copy_from_user(&val, optval, sizeof(val)))
			po->tp_version = val;

	case PACKET_RESERVE:
		if (optlen != sizeof(val))
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
		if (copy_from_user(&val, optval, sizeof(val)))
		po->tp_reserve = val;

		if (optlen != sizeof(val))
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
		if (copy_from_user(&val, optval, sizeof(val)))
		po->tp_loss = !!val;

	case PACKET_AUXDATA:
		if (optlen < sizeof(val))
		if (copy_from_user(&val, optval, sizeof(val)))

		po->auxdata = !!val;

	case PACKET_ORIGDEV:
		if (optlen < sizeof(val))
		if (copy_from_user(&val, optval, sizeof(val)))

		po->origdev = !!val;

	case PACKET_VNET_HDR:
		if (sock->type != SOCK_RAW)
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
		if (optlen < sizeof(val))
		if (copy_from_user(&val, optval, sizeof(val)))

		po->has_vnet_hdr = !!val;

		return -ENOPROTOOPT;
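
/*
 * Illustrative sketch of the membership options handled above: putting
 * the interface into promiscuous mode via the refcounted
 * PACKET_MR_PROMISC path instead of toggling IFF_PROMISC by hand
 * ("ifindex" is an assumption of the example):
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */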
static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	struct tpacket_stats st;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))

	case PACKET_STATISTICS:
		if (len > sizeof(struct tpacket_stats))
			len = sizeof(struct tpacket_stats);
		spin_lock_bh(&sk->sk_receive_queue.lock);
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		st.tp_packets += st.tp_drops;

	case PACKET_AUXDATA:
		if (len > sizeof(int))

	case PACKET_ORIGDEV:
		if (len > sizeof(int))

	case PACKET_VNET_HDR:
		if (len > sizeof(int))
		val = po->has_vnet_hdr;

	case PACKET_VERSION:
		if (len > sizeof(int))
		val = po->tp_version;

		if (len > sizeof(int))
		if (copy_from_user(&val, optval, len))
			val = sizeof(struct tpacket_hdr);
			val = sizeof(struct tpacket2_hdr);

	case PACKET_RESERVE:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_reserve;

		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);

		return -ENOPROTOOPT;

	if (put_user(len, optlen))
	if (copy_to_user(optval, data, len))
static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
	struct hlist_node *node;
	struct net_device *dev = data;
	struct net *net = dev_net(dev);

	sk_for_each_rcu(sk, node, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);

		case NETDEV_UNREGISTER:
			packet_dev_mclist(dev, po->mclist, -1);

			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				__dev_remove_pack(&po->prot_hook);
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);
				if (msg == NETDEV_UNREGISTER) {
					po->prot_hook.dev = NULL;
				spin_unlock(&po->bind_lock);

			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num && !po->running) {
					dev_add_pack(&po->prot_hook);
				spin_unlock(&po->bind_lock);
static int packet_ioctl(struct socket *sock, unsigned int cmd,
	struct sock *sk = sock->sk;

		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);

		struct sk_buff *skb;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);

		return sock_get_timestamp(sk, (struct timeval __user *)arg);
		return sock_get_timestampns(sk, (struct timespec __user *)arg);

	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
		return inet_dgram_ops.ioctl(sock, cmd, arg);

		return -ENOIOCTLCMD;
static unsigned int packet_poll(struct file *file, struct socket *sock,
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
			mask |= POLLIN | POLLRDNORM;
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= POLLOUT | POLLWRNORM;
	spin_unlock_bh(&sk->sk_write_queue.lock);
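
/*
 * packet_poll() is what lets a ring consumer block instead of spinning
 * on tp_status; an illustrative userspace loop:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLERR };
 *
 *	for (;;) {
 *		poll(&pfd, 1, -1);
 *		... walk the ring, consume TP_STATUS_USER frames,
 *		    write TP_STATUS_KERNEL back ...
 *	}
 */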
/* Dirty? Well, I still have not learned a better way to account

static void packet_mm_open(struct vm_area_struct *vma)
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

		atomic_inc(&pkt_sk(sk)->mapped);

static void packet_mm_close(struct vm_area_struct *vma)
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

		atomic_dec(&pkt_sk(sk)->mapped);

static const struct vm_operations_struct packet_mmap_ops = {
	.open	= packet_mm_open,
	.close	= packet_mm_close,
static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i]))
			free_pages((unsigned long) pg_vec[i], order);

static inline char *alloc_one_pg_vec_page(unsigned long order)
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN;

	return (char *) __get_free_pages(gfp_flags, order);

static char **alloc_pg_vec(struct tpacket_req *req, int order)
	unsigned int block_nr = req->tp_block_nr;

	pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
	if (unlikely(!pg_vec))

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i]))
			goto out_free_pgvec;

	free_pg_vec(pg_vec, order, block_nr);
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
		int closing, int tx_ring)
	char **pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	if (atomic_read(&po->mapped))
	if (atomic_read(&rb->pending))

	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		if (unlikely(rb->pg_vec))

		switch (po->tp_version) {
			po->tp_hdrlen = TPACKET_HDRLEN;
			po->tp_hdrlen = TPACKET2_HDRLEN;

		if (unlikely((int)req->tp_block_size <= 0))
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))

		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
		if (unlikely(rb->frames_per_block <= 0))
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=

		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))

		if (unlikely(req->tp_frame_nr))

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	__dev_remove_pack(&po->prot_hook);
	spin_unlock(&po->bind_lock);

	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
		spin_lock_bh(&rb_queue->lock);
		pg_vec = XC(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		order = XC(rb->pg_vec_order, order);
		req->tp_block_nr = XC(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running && !po->running) {
		dev_add_pack(&po->prot_hook);
	spin_unlock(&po->bind_lock);

	free_pg_vec(pg_vec, order, req->tp_block_nr);
static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;

	mutex_lock(&po->pg_vec_lock);

	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
			expected_size += rb->pg_vec_len

	if (expected_size == 0)

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page = virt_to_page(rb->pg_vec[i]);

			for (pg_num = 0; pg_num < rb->pg_vec_pages;
				err = vm_insert_page(vma, start, page);

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;

	mutex_unlock(&po->pg_vec_lock);
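
/*
 * Putting packet_set_ring() and packet_mmap() together from user space
 * (illustrative sketch; the sizes are example values that satisfy the
 * sanity tests above: block size a multiple of PAGE_SIZE, frame size a
 * multiple of TPACKET_ALIGNMENT that divides the block evenly, and
 * tp_frame_nr equal to frames_per_block * tp_block_nr):
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 64 * (4096 / 2048),
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */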
static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner =	THIS_MODULE,

static struct notifier_block packet_netdev_notifier = {
	.notifier_call =	packet_notifier,
#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	struct net *net = seq_file_net(seq);

	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);

static void packet_seq_stop(struct seq_file *seq, void *v)

static int packet_seq_show(struct seq_file *seq, void *v)
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

			   "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
			   atomic_read(&s->sk_refcnt),
			   atomic_read(&s->sk_rmem_alloc),

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,

static int packet_seq_open(struct inode *inode, struct file *file)
	return seq_open_net(inode, file, &packet_seq_ops,
			    sizeof(struct seq_net_private));

static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
static int __net_init packet_net_init(struct net *net)
	spin_lock_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))

static void __net_exit packet_net_exit(struct net *net)
	proc_net_remove(net, "packet");

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,

static void __exit packet_exit(void)
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);

static int __init packet_init(void)
	int rc = proto_register(&packet_proto, 0);

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);