5 * CAN bus driver for an MSCAN controller, kept as generic as possible.
8 * Andrey Volkov <avolkov@varma-el.com>
11 * 2005-2006, Varma Electronics Oy
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/interrupt.h>
33 #include <linux/delay.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/if_ether.h>
37 #include <linux/can.h>
38 #include <linux/list.h>
41 #include <linux/can/dev.h>
42 #include <linux/can/error.h>
45 #include <linux/can/version.h> /* for RCSID. Removed by mkpatch script */
/* Special MSCAN operating modes, expressed as CANCTL0 request bits. */
48 #define MSCAN_NORMAL_MODE 0
49 #define MSCAN_SLEEP_MODE MSCAN_SLPRQ
50 #define MSCAN_INIT_MODE (MSCAN_INITRQ | MSCAN_SLPRQ)
51 #define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ)
/* Bit-timing register 0: baud-rate prescaler (BRP) and sync jump width (SJW). */
53 #define BTR0_BRP_MASK 0x3f
54 #define BTR0_SJW_SHIFT 6
55 #define BTR0_SJW_MASK (0x3 << BTR0_SJW_SHIFT)
/* Bit-timing register 1: time segments TSEG1/TSEG2 and sampling mode (SAM). */
57 #define BTR1_TSEG1_MASK 0xf
58 #define BTR1_TSEG2_SHIFT 4
59 #define BTR1_TSEG2_MASK (0x7 << BTR1_TSEG2_SHIFT)
60 #define BTR1_SAM_SHIFT 7
/*
 * Encoders converting 1-based timing parameters to the 0-based register
 * fields. NOTE(review): the backslash-continued lines of BTR0_SET_SJW and
 * BTR1_SET_TSEG2 (presumably the trailing "..._MASK)" part) are missing
 * from this excerpt -- confirm against the full file.
 */
62 #define BTR0_SET_BRP(brp) (((brp) - 1) & BTR0_BRP_MASK)
63 #define BTR0_SET_SJW(sjw) ((((sjw) - 1) << BTR0_SJW_SHIFT) & \
66 #define BTR1_SET_TSEG1(tseg1) (((tseg1) - 1) & BTR1_TSEG1_MASK)
67 #define BTR1_SET_TSEG2(tseg2) ((((tseg2) - 1) << BTR1_TSEG2_SHIFT) & \
69 #define BTR1_SET_SAM(sam) (((sam) & 1) << BTR1_SAM_SHIFT)
/* Number of hardware transmit buffers managed by the driver. */
77 #define TX_QUEUE_SIZE 3
/*
 * NOTE(review): the following member lines belong to type definitions
 * (apparently tx_queue_entry_t and struct mscan_priv) whose opening and
 * closing lines are not visible in this excerpt.
 */
80 struct list_head list;
86 volatile unsigned long flags;
92 struct list_head tx_head;
93 tx_queue_entry_t tx_queue[TX_QUEUE_SIZE];
/* Bit numbers in priv->flags, used with set_bit()/clear_bit()/test_bit(). */
96 #define F_RX_PROGRESS 0
97 #define F_TX_PROGRESS 1
98 #define F_TX_WAIT_ALL 2
/*
 * mscan_set_mode - move the controller into the requested operating mode.
 *
 * @dev:  net device whose base_addr points at the MSCAN register block
 * @mode: MSCAN_NORMAL_MODE or a combination of MSCAN_SLPRQ/MSCAN_INITRQ/
 *        MSCAN_CSWAI request bits (see the *_MODE defines)
 *
 * Each mode request is written to CANCTL0 and then CANCTL1 is polled
 * (bounded loops of up to 255 reads) for the corresponding acknowledge
 * bit (SLPAK/INITAK). Returns 0 on success; the error path lines are not
 * visible in this excerpt -- presumably a timeout error code.
 *
 * NOTE(review): throughout this excerpt "®s" is a mis-encoded "&regs"
 * (HTML-entity corruption of "&reg"); several original lines are missing,
 * so declarations (canctl1, i, ret) and loop bodies are not visible.
 */
100 static int mscan_set_mode(struct net_device *dev, u8 mode)
102 struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
/* Entering a special mode: request sleep and/or init, then wait for acks. */
107 if (mode != MSCAN_NORMAL_MODE) {
108 canctl1 = in_8(®s->canctl1);
/* Request sleep mode only if not already acknowledged. */
109 if ((mode & MSCAN_SLPRQ) && (canctl1 & MSCAN_SLPAK) == 0) {
110 out_8(®s->canctl0,
111 in_8(®s->canctl0) | MSCAN_SLPRQ);
112 for (i = 0; i < 255; i++) {
113 if (in_8(®s->canctl1) & MSCAN_SLPAK)
/* Request init mode only if not already acknowledged. */
121 if (!ret && (mode & MSCAN_INITRQ)
122 && (canctl1 & MSCAN_INITAK) == 0) {
123 out_8(®s->canctl0,
124 in_8(®s->canctl0) | MSCAN_INITRQ);
125 for (i = 0; i < 255; i++) {
126 if (in_8(®s->canctl1) & MSCAN_INITAK)
/* CSWAI (CPU-wait power save) needs no acknowledge polling. */
133 if (!ret && (mode & MSCAN_CSWAI))
134 out_8(®s->canctl0,
135 in_8(®s->canctl0) | MSCAN_CSWAI);
/* Returning to normal mode: clear requests and wait for acks to drop. */
138 canctl1 = in_8(®s->canctl1);
139 if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) {
140 out_8(®s->canctl0, in_8(®s->canctl0) &
141 ~(MSCAN_SLPRQ | MSCAN_INITRQ));
142 for (i = 0; i < 255; i++) {
143 canctl1 = in_8(®s->canctl1);
144 if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK)))
154 static void mscan_push_state(struct net_device *dev, struct mscan_state *state)
156 struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
158 state->mode = in_8(®s->canctl0) & (MSCAN_SLPRQ | MSCAN_INITRQ |
160 state->canrier = in_8(®s->canrier);
161 state->cantier = in_8(®s->cantier);
164 static int mscan_pop_state(struct net_device *dev, struct mscan_state *state)
166 struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
169 ret = mscan_set_mode(dev, state->mode);
171 out_8(®s->canrier, state->canrier);
172 out_8(®s->cantier, state->cantier);
/*
 * mscan_hard_start_xmit - queue one CAN frame into a free hardware TX buffer.
 *
 * Picks a free transmit buffer from the inverse of priv->tx_active,
 * programs the identifier registers (extended or standard format), copies
 * the payload 16 bits at a time into the data segment registers, sets the
 * length and internal priority, and kicks off transmission via CANTFLG.
 * The buffer is tracked on priv->tx_head and its TX-empty interrupt is
 * enabled so mscan_isr() can complete the skb.
 *
 * NOTE(review): several original lines are missing here (the dlc>8 early
 * return body, the switch-case labels for hweight8(i), buf_id/cur_pri
 * computation, the final return) -- "®s" is a mis-encoded "&regs".
 */
177 static int mscan_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
179 struct can_frame *frame = (struct can_frame *)skb->data;
180 struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
181 struct mscan_priv *priv = netdev_priv(dev);
/* Reject frames with an out-of-range data length code. */
186 if (frame->can_dlc > 8)
189 dev_dbg(ND2D(dev), "%s\n", __FUNCTION__);
/* Mask TX interrupts while manipulating the buffer bookkeeping. */
190 out_8(®s->cantier, 0);
/* Bitmask of hardware TX buffers not currently in flight. */
192 i = ~priv->tx_active & MSCAN_TXE;
/* Dispatch on the number of free buffers (case labels not visible here). */
194 switch (hweight8(i)) {
196 netif_stop_queue(dev);
197 dev_err(ND2D(dev), "BUG! Tx Ring full when queue awake!\n");
198 return NETDEV_TX_BUSY;
/* If buf_id < 3, the current frame would be sent out of order, since
   buffers with a lower id have higher priority (sigh). */
204 if (priv->cur_pri == 0xff)
205 set_bit(F_TX_WAIT_ALL, &priv->flags)
206 netif_stop_queue(dev);
208 set_bit(F_TX_PROGRESS, &priv->flags);
/* Select the chosen buffer for the register window writes below. */
210 out_8(®s->cantbsel, i);
212 rtr = frame->can_id & CAN_RTR_FLAG;
214 if (frame->can_id & CAN_EFF_FLAG) {
215 dev_dbg(ND2D(dev), "sending extended frame\n");
/* 29-bit id split across idr1_0/idr3_2 with SRR/IDE marker bits. */
217 can_id = (frame->can_id & CAN_EFF_MASK) << 1;
220 out_be16(®s->tx.idr3_2, can_id);
223 can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0) | (3 << 3);
225 dev_dbg(ND2D(dev), "sending standard frame\n");
/* 11-bit id occupies the top bits of idr1_0. */
226 can_id = (frame->can_id & CAN_SFF_MASK) << 5;
230 out_be16(®s->tx.idr1_0, can_id);
233 volatile void __iomem *data = ®s->tx.dsr1_0;
234 u16 *payload = (u16 *) frame->data;
/* It is safe to write into dsr[dlc+1] (one byte beyond an odd dlc). */
236 for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
237 out_be16(data, *payload++);
/* Data segment registers are not contiguous; skip the reserved gap. */
238 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
242 out_8(®s->tx.dlr, frame->can_dlc);
243 out_8(®s->tx.tbpr, priv->cur_pri);
/* Start transmission. */
246 out_8(®s->cantflg, 1 << buf_id);
248 if (!test_bit(F_TX_PROGRESS, &priv->flags))
249 dev->trans_start = jiffies;
251 list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);
/* Re-enable TX-empty interrupts for all in-flight buffers. */
256 priv->tx_active |= 1 << buf_id;
257 out_8(®s->cantier, priv->tx_active);
/*
 * mscan_tx_timeout - netdev watchdog callback: abort the oldest TX buffer.
 *
 * Aborts the transmission at the head of priv->tx_head via CANTARQ and
 * builds a CAN error frame (CAN_ERR_TX_TIMEOUT) to report the event
 * upstream. The tail of the function (stats update / netif_rx of the
 * error skb) is not visible in this excerpt.
 *
 * NOTE(review): "®s" is a mis-encoded "&regs"; printk() here lacks a
 * KERN_* level and skb/mask declarations are on missing lines.
 */
262 static void mscan_tx_timeout(struct net_device *dev)
265 struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
266 struct mscan_priv *priv = netdev_priv(dev);
267 struct can_frame *frame;
270 printk("%s\n", __FUNCTION__);
/* Mask TX interrupts while aborting the head-of-queue buffer. */
272 out_8(®s->cantier, 0);
274 mask = list_entry(priv->tx_head.next, tx_queue_entry_t, list)->mask;
275 dev->trans_start = jiffies;
/* Request hardware abort of that buffer, then restore the TX irq mask. */
276 out_8(®s->cantarq, mask);
277 out_8(®s->cantier, priv->tx_active);
/* Build an error frame to notify userspace about the timeout. */
279 skb = dev_alloc_skb(sizeof(struct can_frame));
281 if (printk_ratelimit())
282 dev_notice(ND2D(dev), "TIMEOUT packet dropped\n");
285 frame = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
287 frame->can_id = CAN_ERR_FLAG | CAN_ERR_TX_TIMEOUT;
288 frame->can_dlc = CAN_ERR_DLC;
291 skb->protocol = __constant_htons(ETH_P_CAN);
292 skb->pkt_type = PACKET_BROADCAST;
293 skb->ip_summed = CHECKSUM_UNNECESSARY;
299 static can_state_t state_map[] = {
301 CAN_STATE_BUS_WARNING,
302 CAN_STATE_BUS_PASSIVE,
/*
 * check_set_state - derive the new CAN bus state from CANRFLG and apply it.
 *
 * Ignores the call unless a status-change interrupt (CSCIF) is flagged
 * and the current state is still valid. The new state is the worse of
 * the RX and TX status fields mapped through state_map[]; carrier is
 * dropped on bus-off and restored when leaving bus-off. Return value
 * lines are missing here -- presumably nonzero when the state changed
 * (that is how mscan_rx_poll()/mscan_isr() use it).
 */
306 static inline int check_set_state(struct net_device *dev, u8 canrflg)
308 struct mscan_priv *priv = netdev_priv(dev);
312 if (!(canrflg & MSCAN_CSCIF) || priv->can.state > CAN_STATE_BUS_OFF)
316 state_map[max(MSCAN_STATE_RX(canrflg), MSCAN_STATE_TX(canrflg))];
317 if (priv->can.state < state)
/* Carrier tracks bus-off transitions in both directions. */
319 if (state == CAN_STATE_BUS_OFF)
320 netif_carrier_off(dev);
321 else if (priv->can.state == CAN_STATE_BUS_OFF
322 && state != CAN_STATE_BUS_OFF)
323 netif_carrier_on(dev);
324 priv->can.state = state;
/*
 * mscan_rx_poll - old-style NAPI poll: drain received frames and errors.
 *
 * Loops while RX frames (MSCAN_RXF) or error conditions (MSCAN_ERR_IF)
 * are pending and the quota allows, building one skb per event: either a
 * data/RTR frame decoded from the identifier registers, or a CAN error
 * frame describing overflow / state-change conditions. When the hardware
 * is drained, polling is completed and RX interrupts are re-enabled.
 *
 * NOTE(review): many original lines are missing (several declarations,
 * some branch bodies, the trailing return) and "®s" is a mis-encoded
 * "&regs". This uses the pre-2.6.24 ->poll(dev, *budget) NAPI API.
 */
328 static int mscan_rx_poll(struct net_device *dev, int *budget)
330 struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
331 struct mscan_priv *priv = netdev_priv(dev);
332 int npackets = 0, quota = min(dev->quota, *budget);
335 struct can_frame *frame;
340 while (npackets < quota && ((canrflg = in_8(®s->canrflg)) &
341 (MSCAN_RXF | MSCAN_ERR_IF))) {
343 skb = dev_alloc_skb(sizeof(struct can_frame));
/* Allocation failure: count the drop and ack the flags to move on. */
345 if (printk_ratelimit())
346 dev_notice(ND2D(dev), "packet dropped\n");
347 priv->can.net_stats.rx_dropped++;
348 out_8(®s->canrflg, canrflg);
352 frame = (struct can_frame *)skb_put(skb,
353 sizeof(struct can_frame));
/* Data or remote frame received. */
355 if (canrflg & MSCAN_RXF) {
356 can_id = in_be16(®s->rx.idr1_0);
/* Bit 3 of idr1_0 is the IDE flag: extended (29-bit) identifier. */
357 if (can_id & (1 << 3)) {
358 frame->can_id = CAN_EFF_FLAG;
359 can_id = ((can_id << 16) |
360 in_be16(®s->rx.idr3_2));
/* Collapse the hardware id layout (SRR/IDE gap) into a plain 29-bit id. */
361 can_id = ((can_id & 0xffe00000) |
362 ((can_id & 0x7ffff) << 2)) >> 2;
368 frame->can_id |= can_id >> 1;
370 frame->can_id |= CAN_RTR_FLAG;
371 frame->can_dlc = in_8(®s->rx.dlr) & 0xf;
/* RTR frames carry no payload; otherwise copy 16 bits at a time. */
373 if (!(frame->can_id & CAN_RTR_FLAG)) {
374 volatile void __iomem *data = ®s->rx.dsr1_0;
375 u16 *payload = (u16 *) frame->data;
376 for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
377 *payload++ = in_be16(data);
/* Data segment registers are not contiguous; skip the reserved gap. */
378 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
383 "received pkt: id: %u dlc: %u data: ",
384 frame->can_id, frame->can_dlc);
387 i < frame->can_dlc && !(frame->can_id &
389 printk("%2x ", frame->data[i]);
/* Acknowledge the RX buffer so the hardware can advance its FIFO. */
393 out_8(®s->canrflg, MSCAN_RXF);
394 dev->last_rx = jiffies;
395 priv->can.net_stats.rx_packets++;
396 priv->can.net_stats.rx_bytes += frame->can_dlc;
/* Error condition: synthesize a CAN error frame instead. */
397 } else if (canrflg & MSCAN_ERR_IF) {
398 frame->can_id = CAN_ERR_FLAG;
400 if (canrflg & MSCAN_OVRIF) {
401 frame->can_id |= CAN_ERR_CRTL;
402 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
403 priv->can.net_stats.rx_over_errors++;
/* Bus state changed: encode warning/passive/bus-off details. */
407 if (check_set_state(dev, canrflg)) {
408 frame->can_id |= CAN_ERR_CRTL;
409 switch (priv->can.state) {
410 case CAN_STATE_BUS_WARNING:
/* Compare against shadow_statflg to tell which side (RX/TX) worsened. */
411 if ((priv->shadow_statflg &
413 (canrflg & MSCAN_RSTAT_MSK))
415 CAN_ERR_CRTL_RX_WARNING;
417 if ((priv->shadow_statflg &
419 (canrflg & MSCAN_TSTAT_MSK))
421 CAN_ERR_CRTL_TX_WARNING;
423 case CAN_STATE_BUS_PASSIVE:
425 CAN_ERR_CRTL_RX_PASSIVE;
427 case CAN_STATE_BUS_OFF:
428 frame->can_id |= CAN_ERR_BUSOFF;
429 frame->can_id &= ~CAN_ERR_CRTL;
433 priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
434 frame->can_dlc = CAN_ERR_DLC;
/* Acknowledge the error interrupt. */
435 out_8(®s->canrflg, MSCAN_ERR_IF);
440 skb->protocol = __constant_htons(ETH_P_CAN);
441 skb->ip_summed = CHECKSUM_UNNECESSARY;
442 netif_receive_skb(skb);
446 dev->quota -= npackets;
/* Hardware drained: leave polling mode and re-enable RX interrupts. */
448 if (!(in_8(®s->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) {
449 netif_rx_complete(dev);
450 clear_bit(F_RX_PROGRESS, &priv->flags);
451 out_8(®s->canrier,
452 in_8(®s->canrier) | MSCAN_ERR_IF | MSCAN_RXFIE);
/*
 * mscan_isr - shared interrupt handler for TX completion and RX/error events.
 *
 * TX side: for each buffer whose TXE flag is set, checks CANTAAK for an
 * aborted transmission (counted as dropped/aborted) versus a successful
 * one (tx_bytes/tx_packets), clears the buffer from tx_active and the
 * queue list, and wakes the netdev queue when appropriate. RX side:
 * applies state changes, then masks RX interrupts (saving the previous
 * mask in shadow_canrier) and schedules the NAPI poll.
 *
 * NOTE(review): "®s" is a mis-encoded "&regs"; declarations (cantflg,
 * canrflg), the #else/#endif of the signature conditional, list removal
 * and the final return are on missing lines.
 */
459 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
/* Pre-2.6.19 ISR signature still carried a pt_regs pointer. */
460 static irqreturn_t mscan_isr(int irq, void *dev_id, struct pt_regs *r)
462 static irqreturn_t mscan_isr(int irq, void *dev_id)
465 struct net_device *dev = (struct net_device *)dev_id;
466 struct mscan_priv *priv = netdev_priv(dev);
467 struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
469 irqreturn_t ret = IRQ_NONE;
/* TX-empty interrupts enabled? Walk the in-flight buffer list. */
471 if (in_8(®s->cantier) & MSCAN_TXE) {
472 struct list_head *tmp, *pos;
474 cantflg = in_8(®s->cantflg) & MSCAN_TXE;
476 list_for_each_safe(pos, tmp, &priv->tx_head) {
477 tx_queue_entry_t *entry =
478 list_entry(pos, tx_queue_entry_t, list);
479 u8 mask = entry->mask;
/* Skip buffers that have not completed yet. */
481 if (!(cantflg & mask))
/* CANTAAK set means the transmission was aborted, not sent. */
484 if (in_8(®s->cantaak) & mask) {
485 priv->can.net_stats.tx_dropped++;
486 priv->can.net_stats.tx_aborted_errors++;
488 out_8(®s->cantbsel, mask);
489 priv->can.net_stats.tx_bytes +=
491 priv->can.net_stats.tx_packets++;
493 priv->tx_active &= ~mask;
/* All buffers completed: clear progress flags, wake the queue. */
497 if (list_empty(&priv->tx_head)) {
498 clear_bit(F_TX_WAIT_ALL, &priv->flags);
499 clear_bit(F_TX_PROGRESS, &priv->flags);
502 dev->trans_start = jiffies;
504 if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
505 netif_wake_queue(dev);
507 out_8(®s->cantier, priv->tx_active);
/* RX or status-change work pending and no poll already in progress? */
511 if ((((canrflg = in_8(®s->canrflg)) & ~MSCAN_STAT_MSK)) &&
512 !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) {
513 if (check_set_state(dev, canrflg)) {
514 out_8(®s->canrflg, MSCAN_CSCIF);
/* Mask RX interrupts (mask saved in shadow_canrier) and hand off to poll. */
517 if (canrflg & ~MSCAN_STAT_MSK) {
518 priv->shadow_canrier = in_8(®s->canrier);
519 out_8(®s->canrier, 0);
520 netif_rx_schedule(dev);
523 clear_bit(F_RX_PROGRESS, &priv->flags);
/*
 * mscan_do_set_mode - generic CAN layer callback to change the bus mode.
 *
 * Visible cases: stopping/sleeping (stop the queue, then enter
 * MSCAN_INIT_MODE for CAN_MODE_STOP, presumably MSCAN_SLEEP_MODE
 * otherwise) and CAN_MODE_START (back to normal mode and wake the
 * queue). The switch skeleton, default case and return are on missing
 * lines. NOTE(review): printk() here lacks a KERN_* level.
 */
528 static int mscan_do_set_mode(struct net_device *dev, can_mode_t mode)
533 netif_stop_queue(dev);
536 CAN_MODE_STOP) ? MSCAN_INIT_MODE :
540 printk("%s: CAN_MODE_START requested\n", __FUNCTION__);
541 mscan_set_mode(dev, MSCAN_NORMAL_MODE);
542 netif_wake_queue(dev);
/*
 * mscan_do_set_bit_time - program CANBTR0/CANBTR1 from generic bit timing.
 *
 * Only CAN_BITTIME_STD parameters are accepted. Under the irq_lock, the
 * current controller state is pushed, the chip is forced into init mode
 * (bit-timing registers are writable only there), the BTR registers are
 * written via the BTR*_SET_* encoders, and the previous state is popped.
 *
 * NOTE(review): "®s" is a mis-encoded "&regs"; the error-return for a
 * non-STD type, ret/reg declarations and the final return are on
 * missing lines.
 */
551 static int mscan_do_set_bit_time(struct net_device *dev,
552 struct can_bittime *bt)
554 struct mscan_priv *priv = netdev_priv(dev);
555 struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
558 struct mscan_state state;
560 if (bt->type != CAN_BITTIME_STD)
563 spin_lock_irq(&priv->can.irq_lock);
/* Save mode + interrupt masks, then drop into init mode to write BTRs. */
565 mscan_push_state(dev, &state);
566 ret = mscan_set_mode(dev, MSCAN_INIT_MODE);
568 reg = BTR0_SET_BRP(bt->std.brp) | BTR0_SET_SJW(bt->std.sjw);
569 out_8(®s->canbtr0, reg);
/* TSEG1 covers propagation + phase segment 1 in the MSCAN encoding. */
571 reg = (BTR1_SET_TSEG1(bt->std.prop_seg + bt->std.phase_seg1) |
572 BTR1_SET_TSEG2(bt->std.phase_seg2) |
573 BTR1_SET_SAM(bt->std.sam));
574 out_8(®s->canbtr1, reg);
576 ret = mscan_pop_state(dev, &state);
579 spin_unlock_irq(&priv->can.irq_lock);
/*
 * mscan_open - netdev open callback: claim the IRQ and bring the chip up.
 *
 * Requests a shared interrupt, initializes the TX bookkeeping list,
 * programs the acceptance filters to accept everything (codes 0, masks
 * all-ones, two 32-bit filters), leaves listen-only mode, enters normal
 * mode, snapshots the status flags, enables RX/error interrupts and
 * starts the queue.
 *
 * NOTE(review): "®s" is a mis-encoded "&regs"; the request_irq error
 * handling body and the final return are on missing lines.
 */
583 static int mscan_open(struct net_device *dev)
586 struct mscan_priv *priv = netdev_priv(dev);
587 struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
590 request_irq(dev->irq, mscan_isr, IRQF_SHARED, dev->name,
592 printk(KERN_ERR "%s - failed to attach interrupt\n",
597 INIT_LIST_HEAD(&priv->tx_head);
/* Acceptance mask/acceptance code (accept everything). */
599 out_be16(®s->canidar1_0, 0);
600 out_be16(®s->canidar3_2, 0);
601 out_be16(®s->canidar5_4, 0);
602 out_be16(®s->canidar7_6, 0);
/* All-ones masks mean every identifier bit is "don't care". */
604 out_be16(®s->canidmr1_0, 0xffff);
605 out_be16(®s->canidmr3_2, 0xffff);
606 out_be16(®s->canidmr5_4, 0xffff);
607 out_be16(®s->canidmr7_6, 0xffff);
/* Two 32 bit Acceptance Filters. */
609 out_8(®s->canidac, MSCAN_AF_32BIT);
/* Make sure listen-only mode is off, then enter normal operation. */
611 out_8(®s->canctl1, in_8(®s->canctl1) & ~MSCAN_LISTEN);
612 mscan_set_mode(dev, MSCAN_NORMAL_MODE);
/* Baseline status snapshot used by the warning-state reporting. */
614 priv->shadow_statflg = in_8(®s->canrflg) & MSCAN_STAT_MSK;
618 out_8(®s->cantier, 0);
/* Enable receive and status-change interrupts. */
620 out_8(®s->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE |
621 MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0);
623 netif_start_queue(dev);
628 static int mscan_close(struct net_device *dev)
630 struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
632 netif_stop_queue(dev);
634 /* disable interrupts */
635 out_8(®s->cantier, 0);
636 out_8(®s->canrier, 0);
637 free_irq(dev->irq, dev);
639 mscan_set_mode(dev, MSCAN_INIT_MODE);
643 int register_mscandev(struct net_device *dev, int clock_src)
645 struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
648 ctl1 = in_8(®s->canctl1);
650 ctl1 |= MSCAN_CLKSRC;
652 ctl1 &= ~MSCAN_CLKSRC;
655 out_8(®s->canctl1, ctl1);
658 mscan_set_mode(dev, MSCAN_INIT_MODE);
660 return register_netdev(dev);
663 EXPORT_SYMBOL(register_mscandev);
665 void unregister_mscandev(struct net_device *dev)
667 struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
668 mscan_set_mode(dev, MSCAN_INIT_MODE);
669 out_8(®s->canctl1, in_8(®s->canctl1) & ~MSCAN_CANE);
670 unregister_netdev(dev);
673 EXPORT_SYMBOL(unregister_mscandev);
/*
 * alloc_mscandev - allocate and pre-wire an MSCAN net device.
 *
 * Allocates a CAN net device with room for struct mscan_priv, installs
 * the netdev callbacks (open/stop/xmit/timeout/poll), the generic CAN
 * layer callbacks (bit timing, mode), and assigns each TX queue entry
 * its hardware buffer bitmask (bit i for buffer i).
 *
 * NOTE(review): the allocation-failure check, the loop variable
 * declaration and the final "return dev;" are on missing lines.
 */
675 struct net_device *alloc_mscandev(void)
677 struct net_device *dev;
678 struct mscan_priv *priv;
681 dev = alloc_candev(sizeof(struct mscan_priv));
684 priv = netdev_priv(dev);
686 dev->watchdog_timeo = MSCAN_WATCHDOG_TIMEOUT;
687 dev->open = mscan_open;
688 dev->stop = mscan_close;
689 dev->hard_start_xmit = mscan_hard_start_xmit;
690 dev->tx_timeout = mscan_tx_timeout;
692 dev->poll = mscan_rx_poll;
/* Generic CAN layer hooks. */
695 priv->can.do_set_bit_time = mscan_do_set_bit_time;
696 priv->can.do_set_mode = mscan_do_set_mode;
/* One bit per hardware TX buffer, matching CANTFLG/CANTIER layout. */
698 for (i = 0; i < TX_QUEUE_SIZE; i++)
699 priv->tx_queue[i].mask = 1 << i;
704 EXPORT_SYMBOL(alloc_mscandev);
706 MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
707 MODULE_LICENSE("GPL v2");
708 MODULE_DESCRIPTION("CAN port driver for a mscan based chips");