#define DEVICE_NAME "mcp251x"
-static int enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
-module_param(enable_dma, int, S_IRUGO);
-MODULE_PARM_DESC(enable_dma, "Enable SPI DMA. Default: 0 (Off)");
+static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
+module_param(mcp251x_enable_dma, int, S_IRUGO);
+MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. Default: 0 (Off)");
static struct can_bittiming_const mcp251x_bittiming_const = {
.tseg1_min = 3,
spi_message_init(&m);
- if (enable_dma) {
+ if (mcp251x_enable_dma) {
t.tx_dma = priv->spi_tx_dma;
t.rx_dma = priv->spi_rx_dma;
m.is_dma_mapped = 1;
spi_message_init(&m);
- if (enable_dma) {
+ if (mcp251x_enable_dma) {
t.tx_dma = priv->spi_tx_dma;
t.rx_dma = priv->spi_rx_dma;
m.is_dma_mapped = 1;
spi_message_init(&m);
- if (enable_dma) {
+ if (mcp251x_enable_dma) {
t.tx_dma = priv->spi_tx_dma;
t.rx_dma = priv->spi_rx_dma;
m.is_dma_mapped = 1;
spi_message_init(&m);
- if (enable_dma) {
+ if (mcp251x_enable_dma) {
t.tx_dma = priv->spi_tx_dma;
t.rx_dma = priv->spi_rx_dma;
m.is_dma_mapped = 1;
spi_message_init(&m);
- if (enable_dma) {
+ if (mcp251x_enable_dma) {
t.tx_dma = priv->spi_tx_dma;
t.rx_dma = priv->spi_rx_dma;
m.is_dma_mapped = 1;
struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
tx_work);
struct spi_device *spi = priv->spi;
- struct can_frame *frame = (struct can_frame *)priv->tx_skb->data;
+ struct can_frame *frame;
dev_dbg(&spi->dev, "%s\n", __func__);
- if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
- frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
-
- mcp251x_hw_tx(spi, frame, 0);
+ if (priv->tx_skb) {
+ frame = (struct can_frame *)priv->tx_skb->data;
+ if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
+ frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
+ mcp251x_hw_tx(spi, frame, 0);
+ }
}
static void mcp251x_irq_work_handler(struct work_struct *ws)
priv->can.bittiming.clock = pdata->oscillator_frequency / 2;
/* If requested, allocate DMA buffers */
- if (enable_dma) {
+ if (mcp251x_enable_dma) {
spi->dev.coherent_dma_mask = DMA_32BIT_MASK;
/* Minimum coherent DMA allocation is PAGE_SIZE, so allocate
(PAGE_SIZE / 2));
} else {
/* Fall back to non-DMA */
- enable_dma = 0;
+ mcp251x_enable_dma = 0;
}
}
/* Allocate non-DMA buffers */
- if (!enable_dma) {
+ if (!mcp251x_enable_dma) {
priv->spi_tx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
if (!priv->spi_tx_buf) {
ret = -ENOMEM;
mcp251x_hw_reset(spi);
mcp251x_hw_sleep(spi);
+ if (pdata->transceiver_enable)
+ pdata->transceiver_enable(0);
+
ret = register_netdev(net);
if (ret >= 0) {
dev_info(&spi->dev, "probed\n");
free_irq(spi->irq, net);
error_irq:
- if (!enable_dma)
+ if (!mcp251x_enable_dma)
kfree(priv->spi_rx_buf);
error_rx_buf:
- if (!enable_dma)
+ if (!mcp251x_enable_dma)
kfree(priv->spi_tx_buf);
error_tx_buf:
free_candev(net);
- if (enable_dma) {
+ if (mcp251x_enable_dma) {
dma_free_coherent(&spi->dev, PAGE_SIZE,
priv->spi_tx_buf, priv->spi_tx_dma);
}
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
- if (enable_dma) {
+ if (mcp251x_enable_dma) {
dma_free_coherent(&spi->dev, PAGE_SIZE,
priv->spi_tx_buf, priv->spi_tx_dma);
} else {