dma: tegra: do not abort dma if next configuration skipped.
author Laxman Dewangan <ldewangan@nvidia.com>
Fri, 21 Feb 2014 09:41:33 +0000 (15:11 +0530)
committer Laxman Dewangan <ldewangan@nvidia.com>
Tue, 25 Feb 2014 14:26:02 +0000 (06:26 -0800)
There is a case in continuous mode where the dma transfer completes
and the interrupt is pending before the next transfer is programmed.
In this case, the next dma configuration is skipped until the
interrupt is served, so do not abort the dma transfers; instead,
configure the next transfer once the pending interrupt has been
handled.

bug 1464356

Change-Id: Ia4966e062afac613232b5bf5f4d1c9452037d985
Signed-off-by: Laxman Dewangan <ldewangan@nvidia.com>
Reviewed-on: http://git-master/r/374167
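
To illustrate the handshake this patch introduces, below is a minimal,
self-contained C sketch, not driver code: the simulated status flag and
helper names other than configured/skipped are hypothetical stand-ins for
the driver's TEGRA_APBDMA_CHAN_STATUS register and its handlers. It models
why the head-request check must tolerate a request that was deliberately
skipped (EOC interrupt pending) rather than mis-programmed: the skipped
request is re-programmed from the interrupt path instead of triggering an
abort.

#include <stdbool.h>
#include <stdio.h>

/* Simplified model of a sg request, mirroring the two flags in the patch. */
struct sg_req {
	bool configured;  /* hardware was programmed with this request */
	bool skipped;     /* programming deferred: EOC interrupt pending */
};

/* Simulated channel status; stands in for TEGRA_APBDMA_STATUS_ISE_EOC. */
static bool eoc_interrupt_pending;

/* Mirrors tegra_dma_configure_for_next(): skip programming if EOC pending. */
static void configure_for_next(struct sg_req *next)
{
	if (eoc_interrupt_pending) {
		/* Before the patch this path left the request in a state
		 * indistinguishable from a failed configuration. */
		next->skipped = true;
		return;
	}
	next->configured = true;
	next->skipped = false;
}

/* Mirrors the check in handle_continuous_head_request(): abort only when
 * the head request was neither configured nor deliberately skipped. */
static bool head_request_ok(const struct sg_req *head)
{
	if (!head->configured && !head->skipped) {
		printf("Error in dma transfer, aborting dma\n");
		return false;
	}
	return true;
}

/* Models the interrupt path: clear the pending EOC, then program the
 * request whose configuration was skipped earlier. */
static void handle_eoc_interrupt(struct sg_req *head)
{
	eoc_interrupt_pending = false;
	if (head->skipped)
		configure_for_next(head);
}

int main(void)
{
	struct sg_req head = { .configured = false, .skipped = false };

	eoc_interrupt_pending = true;   /* transfer done, ISR not yet run */
	configure_for_next(&head);      /* programming skipped, not failed */

	/* With the skipped flag, this no longer aborts the transfer. */
	if (!head_request_ok(&head))
		return 1;

	handle_eoc_interrupt(&head);    /* ISR re-programs the request */
	printf("configured=%d skipped=%d\n", head.configured, head.skipped);
	return 0;
}

The sketch prints "configured=1 skipped=0" after the simulated interrupt,
matching the patch's intent: a skipped request survives the head check and
is eventually configured, whereas a genuinely unconfigured one still aborts.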

drivers/dma/tegra20-apb-dma.c

index 11b0d3602eac460bfde69ab931429822ff799e16..91888b078bcea82caa84a4129e7537a4293e813e 100644 (file)
@@ -159,6 +159,7 @@ struct tegra_dma_sg_req {
        struct tegra_dma_channel_regs   ch_regs;
        int                             req_len;
        bool                            configured;
+       bool                            skipped;
        bool                            last_sg;
        bool                            half_done;
        struct list_head                node;
@@ -470,6 +471,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
        if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
                dev_err(tdc2dev(tdc),
                        "Skipping new configuration as interrupt is pending\n");
+               nsg_req->skipped = true;
                tegra_dma_resume(tdc);
                return;
        }
@@ -483,6 +485,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
        tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
                                nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
        nsg_req->configured = true;
+       nsg_req->skipped = false;
 
        tegra_dma_resume(tdc);
 }
@@ -498,6 +501,7 @@ static void tdc_start_head_req(struct tegra_dma_channel *tdc)
                                        typeof(*sg_req), node);
        tegra_dma_start(tdc, sg_req);
        sg_req->configured = true;
+       sg_req->skipped = false;
        tdc->busy = true;
 }
 
@@ -564,7 +568,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
         * looping of transfer can not continue.
         */
        hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
-       if (!hsgreq->configured) {
+       if (!hsgreq->configured && !hsgreq->skipped) {
                tegra_dma_stop(tdc);
                dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
                tegra_dma_abort_all(tdc);
@@ -632,6 +636,7 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
        if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
                list_move_tail(&sgreq->node, &tdc->pending_sg_req);
                sgreq->configured = false;
+               sgreq->skipped = false;
                st = handle_continuous_head_request(tdc, sgreq, to_terminate);
                if (!st)
                        dma_desc->dma_status = DMA_ERROR;
@@ -1041,6 +1046,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
                sg_req->ch_regs.apb_seq = apb_seq;
                sg_req->ch_regs.ahb_seq = ahb_seq;
                sg_req->configured = false;
+               sg_req->skipped = false;
                sg_req->last_sg = false;
                sg_req->dma_desc = dma_desc;
                sg_req->req_len = len;
@@ -1175,6 +1181,7 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
                sg_req->ch_regs.apb_seq = apb_seq;
                sg_req->ch_regs.ahb_seq = ahb_seq;
                sg_req->configured = false;
+               sg_req->skipped = false;
                sg_req->half_done = false;
                sg_req->last_sg = false;
                sg_req->dma_desc = dma_desc;