From: Mahantesh Kumbar
Date: Tue, 14 Oct 2014 12:14:34 +0000 (+0530)
Subject: gk20a: Moved bind fecs to init_gr_support
X-Git-Tag: daily-2014.11.28.0_rel-st8-l-r1-partner-shieldtablet8~13
X-Git-Url: https://rtime.felk.cvut.cz/gitweb/sojka/nv-tegra/linux-3.10.git/commitdiff_plain/d5adb025c1b6dc0a075a3bc1c7f9854018e9073e

gk20a: Moved bind fecs to init_gr_support

-Moved bind fecs from the work queue to init_gr_support.
-This makes all CPU->FECS communication happen before the PMU is booted;
after the PMU boots, only the PMU talks to FECS. This removes the
possibility of a race between the CPU and the PMU talking to FECS.

Bug 200032923

Change-Id: I01d6d7f61f5e3c0e788d9d77fcabe5a91fe86c84
Reviewed-on: http://git-master/r/559733
Signed-off-by: Gagan Grover
Reviewed-on: http://git-master/r/590405
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Prafull Suryawanshi
Reviewed-by: Gaurav Singh
Reviewed-by: Vijayakumar Subbu
Reviewed-by: Mahantesh Kumbar
Reviewed-by: Dhiren Parmar
Tested-by: Dhiren Parmar
---

diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 7fcfdfeb7cb..2fe7fcdd890 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -57,6 +57,7 @@
 
 #define BLK_SIZE (256)
 
+static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g);
 static int gr_gk20a_commit_inst(struct channel_gk20a *c, u64 gpu_va);
 
 /* global ctx buffer */
@@ -4530,6 +4531,91 @@ clean_up:
 	return err;
 }
 
+static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
+{
+	struct pmu_gk20a *pmu = &g->pmu;
+	struct mm_gk20a *mm = &g->mm;
+	struct vm_gk20a *vm = &mm->pmu.vm;
+	struct device *d = dev_from_gk20a(g);
+	int err = 0;
+
+	u32 size;
+	struct sg_table *sgt_pg_buf;
+	dma_addr_t iova;
+
+	gk20a_dbg_fn("");
+
+	size = 0;
+
+	err = gr_gk20a_fecs_get_reglist_img_size(g, &size);
+	if (err) {
+		gk20a_err(dev_from_gk20a(g),
+			"fail to query fecs pg buffer size");
+		return err;
+	}
+
+	if (!pmu->pg_buf.cpuva) {
+		pmu->pg_buf.cpuva = dma_alloc_coherent(d, size,
+						&iova,
+						GFP_KERNEL);
+		if (!pmu->pg_buf.cpuva) {
+			gk20a_err(d, "failed to allocate memory\n");
+			return -ENOMEM;
+		}
+
+		pmu->pg_buf.iova = iova;
+		pmu->pg_buf.size = size;
+
+		err = gk20a_get_sgtable(d, &sgt_pg_buf,
+					pmu->pg_buf.cpuva,
+					pmu->pg_buf.iova,
+					size);
+		if (err) {
+			gk20a_err(d, "failed to create sg table\n");
+			goto err_free_pg_buf;
+		}
+
+		pmu->pg_buf.pmu_va = gk20a_gmmu_map(vm,
+					&sgt_pg_buf,
+					size,
+					0, /* flags */
+					gk20a_mem_flag_none);
+		if (!pmu->pg_buf.pmu_va) {
+			gk20a_err(d, "failed to map fecs pg buffer");
+			err = -ENOMEM;
+			goto err_free_sgtable;
+		}
+
+		gk20a_free_sgtable(&sgt_pg_buf);
+	}
+
+
+	err = gr_gk20a_fecs_set_reglist_bind_inst(g, mm->pmu.inst_block.cpu_pa);
+	if (err) {
+		gk20a_err(dev_from_gk20a(g),
+			"fail to bind pmu inst to gr");
+		return err;
+	}
+
+	err = gr_gk20a_fecs_set_reglist_virtual_addr(g, pmu->pg_buf.pmu_va);
+	if (err) {
+		gk20a_err(dev_from_gk20a(g),
+			"fail to set pg buffer pmu va");
+		return err;
+	}
+
+	return err;
+
+err_free_sgtable:
+	gk20a_free_sgtable(&sgt_pg_buf);
+err_free_pg_buf:
+	dma_free_coherent(d, size,
+		pmu->pg_buf.cpuva, pmu->pg_buf.iova);
+	pmu->pg_buf.cpuva = NULL;
+	pmu->pg_buf.iova = 0;
+	return err;
+}
+
 int gk20a_init_gr_support(struct gk20a *g)
 {
 	u32 err;
@@ -4555,6 +4641,10 @@ int gk20a_init_gr_support(struct gk20a *g)
 	if (err)
 		return err;
 
+	err = gk20a_init_gr_bind_fecs_elpg(g);
+	if (err)
+		return err;
+
 	/* GR is inialized, signal possible waiters */
 	g->gr.initialized = true;
 	wake_up(&g->gr.init_wq);
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index d1d1a842bed..b7301ee4420 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -1817,76 +1817,12 @@ static void pmu_setup_hw(struct work_struct *work)
 int gk20a_init_pmu_bind_fecs(struct gk20a *g)
 {
 	struct pmu_gk20a *pmu = &g->pmu;
-	struct mm_gk20a *mm = &g->mm;
-	struct vm_gk20a *vm = &mm->pmu.vm;
-	struct device *d = dev_from_gk20a(g);
 	struct pmu_cmd cmd;
 	u32 desc;
-	int err;
-	u32 size;
-	struct sg_table *sgt_pg_buf;
-	dma_addr_t iova;
-
+	int err = 0;
 	gk20a_dbg_fn("");
 
-	size = 0;
 	gk20a_gr_wait_initialized(g);
-	err = gr_gk20a_fecs_get_reglist_img_size(g, &size);
-	if (err) {
-		gk20a_err(dev_from_gk20a(g),
-			"fail to query fecs pg buffer size");
-		return err;
-	}
-
-	if (!pmu->pg_buf.cpuva) {
-		pmu->pg_buf.cpuva = dma_alloc_coherent(d, size,
-						&iova,
-						GFP_KERNEL);
-		if (!pmu->pg_buf.cpuva) {
-			gk20a_err(d, "failed to allocate memory\n");
-			return -ENOMEM;
-		}
-
-		pmu->pg_buf.iova = iova;
-		pmu->pg_buf.size = size;
-
-		err = gk20a_get_sgtable(d, &sgt_pg_buf,
-					pmu->pg_buf.cpuva,
-					pmu->pg_buf.iova,
-					size);
-		if (err) {
-			gk20a_err(d, "failed to create sg table\n");
-			goto err_free_pg_buf;
-		}
-
-		pmu->pg_buf.pmu_va = gk20a_gmmu_map(vm,
-					&sgt_pg_buf,
-					size,
-					0, /* flags */
-					gk20a_mem_flag_none);
-		if (!pmu->pg_buf.pmu_va) {
-			gk20a_err(d, "failed to map fecs pg buffer");
-			err = -ENOMEM;
-			goto err_free_sgtable;
-		}
-
-		gk20a_free_sgtable(&sgt_pg_buf);
-	}
-
-	err = gr_gk20a_fecs_set_reglist_bind_inst(g, mm->pmu.inst_block.cpu_pa);
-	if (err) {
-		gk20a_err(dev_from_gk20a(g),
-			"fail to bind pmu inst to gr");
-		return err;
-	}
-
-	err = gr_gk20a_fecs_set_reglist_virtual_addr(g, pmu->pg_buf.pmu_va);
-	if (err) {
-		gk20a_err(dev_from_gk20a(g),
-			"fail to set pg buffer pmu va");
-		return err;
-	}
-
 	memset(&cmd, 0, sizeof(struct pmu_cmd));
 	cmd.hdr.unit_id = PMU_UNIT_PG;
 	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_eng_buf_load);
@@ -1904,15 +1840,6 @@ int gk20a_init_pmu_bind_fecs(struct gk20a *g)
 			pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
 	pmu->pmu_state = PMU_STATE_LOADING_PG_BUF;
 	return err;
-
-err_free_sgtable:
-	gk20a_free_sgtable(&sgt_pg_buf);
-err_free_pg_buf:
-	dma_free_coherent(d, size,
-		pmu->pg_buf.cpuva, pmu->pg_buf.iova);
-	pmu->pg_buf.cpuva = NULL;
-	pmu->pg_buf.iova = 0;
-	return err;
 }
 
 static void pmu_setup_hw_load_zbc(struct gk20a *g)
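
The net effect of the patch is an ordering constraint: the CPU-driven FECS setup
(reglist image size query, instance block bind, reglist virtual address) now runs
from gk20a_init_gr_support(), and gk20a_init_pmu_bind_fecs() afterwards only posts
the PG buffer load command to the PMU. The sketch below is a minimal user-space
model of that ordering, not driver code; the demo_* functions and the
pmu_owns_fecs flag are hypothetical stand-ins for the real gk20a entry points.

/*
 * Illustrative sketch only (not part of the patch): a user-space model of
 * the init ordering this change enforces. demo_* names are hypothetical
 * stand-ins for gk20a_init_gr_support()/gk20a_init_gr_bind_fecs_elpg()
 * and gk20a_init_pmu_bind_fecs().
 */
#include <stdbool.h>
#include <stdio.h>

static bool pmu_owns_fecs;

/* CPU-driven FECS setup, modeled on gk20a_init_gr_bind_fecs_elpg():
 * query the reglist image size, bind the instance block, and set the
 * reglist virtual address. */
static int demo_gr_bind_fecs_elpg(void)
{
	if (pmu_owns_fecs) {
		/* The race the patch removes: CPU and PMU both driving FECS. */
		fprintf(stderr, "BUG: CPU->FECS traffic after PMU took over\n");
		return -1;
	}
	printf("CPU: FECS reglist size/bind/virtual-address setup\n");
	return 0;
}

/* GR init now performs the FECS binding itself, before the PMU is involved. */
static int demo_init_gr_support(void)
{
	return demo_gr_bind_fecs_elpg();
}

/* PMU side, modeled on gk20a_init_pmu_bind_fecs(): it only posts the PG
 * eng_buf_load command; from this point on, only the PMU talks to FECS. */
static int demo_init_pmu_bind_fecs(void)
{
	pmu_owns_fecs = true;
	printf("PMU: post eng_buf_load for the FECS PG buffer\n");
	return 0;
}

int main(void)
{
	if (demo_init_gr_support())
		return 1;
	if (demo_init_pmu_bind_fecs())
		return 1;
	return 0;
}

Built with any C compiler, the sketch prints the two stages in order; swapping the
two calls in main() triggers the error path, which corresponds to the CPU/PMU race
described in the commit message.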