struct ktermios old_termios, termios;
int cflag;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
mutex_lock(&tty->termios_mutex);
+#else
+ down_write(&tty->termios_rwsem);
+#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
old_termios = termios = *(tty->termios);
if (tty->ops->set_termios)
tty->ops->set_termios(tty, &old_termios);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
mutex_unlock(&tty->termios_mutex);
+#else
+ up_write(&tty->termios_rwsem);
+#endif
return 0;
}
return; /* ongoing concurrent processing */
clear_bit(SLF_TXBUFF_RQ, &sl->flags);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
smp_mb__after_clear_bit();
+#else
+ smp_mb__after_atomic();
+#endif
if (sl->lin_state != SLSTATE_BREAK_SENT)
remains = sl->tx_lim - sl->tx_cnt;
remains -= actual;
}
clear_bit(SLF_TXBUFF_INPR, &sl->flags);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
smp_mb__after_clear_bit();
+#else
+ smp_mb__after_atomic();
+#endif
} while (unlikely(test_bit(SLF_TXBUFF_RQ, &sl->flags)));
*/
static void sllin_report_error(struct sllin *sl, int err)
{
+ unsigned char *lin_buff;
+ int lin_id;
+
switch (err) {
case LIN_ERR_CHECKSUM:
sl->dev->stats.rx_crc_errors++;
break;
}
- sllin_send_canfr(sl, 0 | CAN_EFF_FLAG |
+ lin_buff = (sl->lin_master) ? sl->tx_buff : sl->rx_buff;
+ lin_id = lin_buff[SLLIN_BUFF_ID] & LIN_ID_MASK;
+ sllin_send_canfr(sl, lin_id | CAN_EFF_FLAG |
(err & ~LIN_ID_MASK), NULL, 0);
}
if (fp && *fp++) {
/*
* If we don't know the length of the current message
- * we received the break of the next message.
- * Evaluate the previous one before continuing
+ * and received at least the LIN ID, we received here
+ * the break of the next message.
+ * Evaluate the previous one before continuing.
*/
- if (sl->rx_len_unknown == true)
+ if ((sl->rx_len_unknown == true) &&
+ (sl->rx_cnt >= SLLIN_BUFF_ID))
{
hrtimer_cancel(&sl->rx_timer);
sllin_slave_finish_rx_msg(sl);
if (sce->dlc > 0) {
sl->rx_expect += sce->dlc + 1; /* + checksum */
sl->rx_len_unknown = false;
- set_bit(SLF_MSGEVENT, &sl->flags);
wake_up(&sl->kwt_wq);
} else {
sl->rx_expect += SLLIN_DATA_MAX + 1; /* + checksum */
return 0; /* ongoing concurrent processing */
clear_bit(SLF_TXBUFF_RQ, &sl->flags);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
smp_mb__after_clear_bit();
+#else
+ smp_mb__after_atomic();
+#endif
#ifdef BREAK_BY_BAUD
if (sl->lin_state != SLSTATE_BREAK_SENT)
sl->tx_cnt, remains);
clear_bit(SLF_TXBUFF_INPR, &sl->flags);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
smp_mb__after_clear_bit();
+#else
+ smp_mb__after_atomic();
+#endif
} while (unlikely(test_bit(SLF_TXBUFF_RQ, &sl->flags)));
{
struct sllin *sl = container_of(hrtimer, struct sllin, rx_timer);
- if (sl->lin_master) {
+ /*
+ * Signal timeout when:
+ * master: We did not receive as much characters as expected
+ * slave: * we did not receive any data bytes at all
+ * * we know the length and didn't receive enough
+ */
+ if ((sl->lin_master) ||
+ (sl->rx_cnt <= SLLIN_BUFF_DATA) ||
+ ((!sl->rx_len_unknown) &&
+ (sl->rx_cnt < sl->rx_expect))) {
sllin_report_error(sl, LIN_ERR_RX_TIMEOUT);
set_bit(SLF_TMOUTEVENT, &sl->flags);
} else {
struct sched_param schparam = { .sched_priority = 40 };
int tx_bytes = 0; /* Used for Network statistics */
unsigned long flags;
+ int mode;
int lin_id;
struct sllin_conf_entry *sce;
test_bit(SLF_TXEVENT, &sl->flags) ||
test_bit(SLF_TMOUTEVENT, &sl->flags) ||
test_bit(SLF_ERROR, &sl->flags) ||
+ (sl->lin_state == SLSTATE_ID_RECEIVED) ||
(((sl->lin_state == SLSTATE_IDLE) ||
- (sl->lin_state == SLSTATE_RESPONSE_WAIT) ||
- (sl->lin_state == SLSTATE_ID_RECEIVED))
+ (sl->lin_state == SLSTATE_RESPONSE_WAIT))
&& test_bit(SLF_MSGEVENT, &sl->flags)));
if (test_and_clear_bit(SLF_RXEVENT, &sl->flags)) {
if (!test_bit(SLF_MSGEVENT, &sl->flags))
break;
+ mode = 0;
cf = (struct can_frame *)sl->tx_req_skb->data;
+ if (cf->can_id & LIN_CHECKSUM_EXTENDED)
+ mode |= SLLIN_STPMSG_CHCKSUM_ENH;
+
/* SFF RTR CAN frame -> LIN header */
if (cf->can_id & CAN_RTR_FLAG) {
struct sllin_conf_entry *sce;
sce = &sl->linfr_cache[cf->can_id & LIN_ID_MASK];
spin_lock_irqsave(&sl->linfr_lock, flags);
+ if (sce->frame_fl & LIN_CHECKSUM_EXTENDED)
+ mode |= SLLIN_STPMSG_CHCKSUM_ENH;
/* Is there Slave response in linfr_cache to be sent? */
if ((sce->frame_fl & LIN_CACHE_RESPONSE)
spin_unlock_irqrestore(&sl->linfr_lock, flags);
} else { /* SFF NON-RTR CAN frame -> LIN header + LIN response */
+ struct sllin_conf_entry *sce;
+
netdev_dbg(sl->dev, "%s: NON-RTR SFF CAN frame, ID = %x\n",
__func__, (int)cf->can_id & LIN_ID_MASK);
+ sce = &sl->linfr_cache[cf->can_id & LIN_ID_MASK];
+ if (sce->frame_fl & LIN_CHECKSUM_EXTENDED)
+ mode |= SLLIN_STPMSG_CHCKSUM_ENH;
+
lin_data = cf->data;
lin_dlc = cf->can_dlc;
if (lin_dlc > SLLIN_DATA_MAX)
tx_bytes = lin_dlc;
}
- if (sllin_setup_msg(sl, 0, cf->can_id & LIN_ID_MASK,
+ if (sllin_setup_msg(sl, mode, cf->can_id & LIN_ID_MASK,
lin_data, lin_dlc) != -1) {
sl->id_to_send = true;
spin_lock_irqsave(&sl->linfr_lock, flags);
if ((sce->frame_fl & LIN_CACHE_RESPONSE)
- && (sce->dlc > 0)
- && (test_bit(SLF_MSGEVENT, &sl->flags))) {
- int mode;
+ && (sce->dlc > 0)) {
netdev_dbg(sl->dev, "Sending LIN response from linfr_cache\n");
sllin_send_tx_buff(sl);
}
- clear_bit(SLF_MSGEVENT, &sl->flags);
- kfree_skb(sl->tx_req_skb);
- netif_wake_queue(sl->dev);
hrtimer_start(&sl->rx_timer,
ktime_add(ktime_get(), sl->rx_timer_timeout),
HRTIMER_MODE_ABS);
char name[IFNAMSIZ];
sprintf(name, "sllin%d", i);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
dev = alloc_netdev(sizeof(*sl), name, sll_setup);
+#else
+ dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, sll_setup);
+#endif
+
if (!dev)
return NULL;
dev->base_addr = i;