#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
-#include <linux/can.h>
-#include <linux/can/core.h>
+#include <socketcan/can.h>
+#include <socketcan/can/core.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
#include <net/net_namespace.h>
#endif
#include "compat.h"
#endif
-#include <linux/can/version.h> /* for RCSID. Removed by mkpatch script */
+#include <socketcan/can/version.h> /* for RCSID. Removed by mkpatch script */
RCSID("$Id$");
static __initdata const char banner[] = KERN_INFO
#endif
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)
+static int can_create(struct net *net, struct socket *sock, int protocol, int kern)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
static int can_create(struct net *net, struct socket *sock, int protocol)
#else
static int can_create(struct socket *sock, int protocol)
return -EAFNOSUPPORT;
#endif
-#ifdef CONFIG_KMOD
- /* try to load protocol module, when CONFIG_KMOD is defined */
+#ifdef CONFIG_MODULES
+	/* try to load protocol module when the kernel is modular */
if (!proto_tab[protocol]) {
err = request_module("can-proto-%d", protocol);
* @skb: pointer to socket buffer with CAN frame in data section
* @loop: loopback for listeners on local CAN sockets (recommended default!)
*
+ * Due to the loopback this routine must not be called from hardirq context.
+ *
* Return:
* 0 on success
* -ENETDOWN when the selected interface is down
err = net_xmit_errno(err);
if (err) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
+ /* kfree_skb() does not check for !NULL on older kernels */
if (newskb)
kfree_skb(newskb);
+#else
+ kfree_skb(newskb);
+#endif
return err;
}
if (newskb)
- netif_rx(newskb);
+ netif_rx_ni(newskb);
/* update statistics */
can_stats.tx_frames++;
* af_can rx path
*/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
+{
+ /*
+ * find receive list for this device
+ *
+ * Since 2.6.26 a new "midlevel private" ml_priv pointer has been
+ * introduced in struct net_device. We use this pointer to omit the
+ * linear walk through the can_rx_dev_list. A similar speedup has been
+ * queued for 2.6.34 mainline but using the new netdev_rcu lists.
+ * Therefore the can_rx_dev_list is still needed (e.g. in proc.c)
+ */
+
+ /* dev == NULL is the indicator for the 'all' filterlist */
+ if (!dev)
+ return &can_rx_alldev_list;
+ else
+ return (struct dev_rcv_lists *)dev->ml_priv;
+}
+#else
static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
{
struct dev_rcv_lists *d = NULL;
return n ? d : NULL;
}
+#endif
/**
* find_rcv_list - determine optimal filterlist inside device filter struct
* The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
* filter for error frames (CAN_ERR_FLAG bit set in mask).
*
+ * The provided pointer to the sk_buff is guaranteed to be valid as long as
+ * the callback function is running. The callback function must *not* free
+ * the given sk_buff while processing its task. When the given sk_buff is
+ * needed after the end of the callback function it must be cloned inside
+ * the callback function with skb_clone().
+ *
* Return:
* 0 on success
* -ENOMEM on missing cache mem to create subscription entry
/* insert new receiver (dev,canid,mask) -> (func,data) */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ if (dev && dev->type != ARPHRD_CAN)
+ return -ENODEV;
+#endif
+
r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
if (!r)
return -ENOMEM;
struct hlist_node *next;
struct dev_rcv_lists *d;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ if (dev && dev->type != ARPHRD_CAN)
+ return;
+#endif
+
spin_lock(&can_rcvlists_lock);
d = find_dev_rcv_lists(dev);
can_pstats.rcv_entries--;
/* remove device structure requested by NETDEV_UNREGISTER */
- if (d->remove_on_zero_entries && !d->entries)
+ if (d->remove_on_zero_entries && !d->entries) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ dev->ml_priv = NULL;
+#endif
hlist_del_rcu(&d->list);
- else
+ } else
d = NULL;
out:
static inline void deliver(struct sk_buff *skb, struct receiver *r)
{
- struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
-
- if (clone) {
- clone->sk = skb->sk;
- r->func(clone, r->data);
- r->matches++;
- }
+ r->func(skb, r->data);
+ r->matches++;
}
static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
int matches;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
- if (dev->type != ARPHRD_CAN || !net_eq(dev_net(dev), &init_net)) {
+ if (!net_eq(dev_net(dev), &init_net))
+ goto drop;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
- if (dev->type != ARPHRD_CAN || dev->nd_net != &init_net) {
-#else
- if (dev->type != ARPHRD_CAN) {
+ if (dev->nd_net != &init_net)
+ goto drop;
#endif
- kfree_skb(skb);
- return 0;
- }
- BUG_ON(skb->len != sizeof(struct can_frame) || cf->can_dlc > 8);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+ if (WARN_ONCE(dev->type != ARPHRD_CAN ||
+ skb->len != sizeof(struct can_frame) ||
+ cf->can_dlc > 8,
+ "PF_CAN: dropped non conform skbuf: "
+ "dev type %d, len %d, can_dlc %d\n",
+ dev->type, skb->len, cf->can_dlc))
+ goto drop;
+#else
+ BUG_ON(dev->type != ARPHRD_CAN ||
+ skb->len != sizeof(struct can_frame) ||
+ cf->can_dlc > 8);
+#endif
/* update statistics */
can_stats.rx_frames++;
rcu_read_unlock();
- /* free the skbuff allocated by the netdevice driver */
+ /* consume the skbuff allocated by the netdevice driver */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
+ consume_skb(skb);
+#else
kfree_skb(skb);
+#endif
if (matches > 0) {
can_stats.matches++;
can_stats.matches_delta++;
}
- return 0;
+ return NET_RX_SUCCESS;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+#endif
}
/*
d->dev = dev;
spin_lock(&can_rcvlists_lock);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ BUG_ON(dev->ml_priv);
+ dev->ml_priv = d;
+#endif
hlist_add_head_rcu(&d->list, &can_rx_dev_list);
spin_unlock(&can_rcvlists_lock);
if (d->entries) {
d->remove_on_zero_entries = 1;
d = NULL;
- } else
+ } else {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ dev->ml_priv = NULL;
+#endif
hlist_del_rcu(&d->list);
+ }
} else
printk(KERN_ERR "can: notifier: receive list not "
"found for dev %s\n", dev->name);
*/
static struct packet_type can_packet __read_mostly = {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
+ .type = cpu_to_be16(ETH_P_CAN),
+#else
.type = __constant_htons(ETH_P_CAN),
+#endif
.dev = NULL,
.func = can_rcv,
};
hlist_del(&can_rx_alldev_list.list);
hlist_for_each_entry_safe(d, n, next, &can_rx_dev_list, list) {
hlist_del(&d->list);
+ BUG_ON(d->entries);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ d->dev->ml_priv = NULL;
+#endif
kfree(d);
}
spin_unlock(&can_rcvlists_lock);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
+#endif
+
kmem_cache_destroy(rcv_cache);
}