int nvhost_read_module_regs(struct platform_device *ndev,
u32 offset, int count, u32 *values)
{
- void __iomem *p = get_aperture(ndev);
int err;
- if (!p)
- return -ENODEV;
-
/* verify offset */
err = validate_reg(ndev, offset, count);
if (err)
if (err)
return err;
- p += offset;
while (count--) {
- *(values++) = readl(p);
- p += 4;
+ *(values++) = host1x_readl(ndev, offset);
+ offset += 4;
}
rmb();
nvhost_module_idle(ndev);
u32 offset, int count, const u32 *values)
{
int err;
- void __iomem *p = get_aperture(ndev);
-
- if (!p)
- return -ENODEV;
/* verify offset */
err = validate_reg(ndev, offset, count);
if (err)
return err;
- p += offset;
while (count--) {
- writel(*(values++), p);
- p += 4;
+ host1x_writel(ndev, offset, *(values++));
+ offset += 4;
}
wmb();
nvhost_module_idle(ndev);
*/
static void cdma_start(struct nvhost_cdma *cdma)
{
- void __iomem *chan_regs;
struct nvhost_channel *ch;
if (cdma->running)
return;
}
- chan_regs = ch->aperture;
cdma->last_put = cdma_pb_op().putptr(&cdma->push_buffer);
- writel(host1x_channel_dmactrl(true, false, false),
- chan_regs + host1x_channel_dmactrl_r());
+ host1x_channel_writel(ch, host1x_channel_dmactrl_r(),
+ host1x_channel_dmactrl(true, false, false));
/* set base, put, end pointer (all of memory) */
- writel(0, chan_regs + host1x_channel_dmastart_r());
- writel(cdma->last_put, chan_regs + host1x_channel_dmaput_r());
- writel(0xFFFFFFFF, chan_regs + host1x_channel_dmaend_r());
+ host1x_channel_writel(ch, host1x_channel_dmastart_r(), 0);
+ host1x_channel_writel(ch, host1x_channel_dmaput_r(), cdma->last_put);
+ host1x_channel_writel(ch, host1x_channel_dmaend_r(), 0xFFFFFFFF);
/* reset GET */
- writel(host1x_channel_dmactrl(true, true, true),
- chan_regs + host1x_channel_dmactrl_r());
+ host1x_channel_writel(ch, host1x_channel_dmactrl_r(),
+ host1x_channel_dmactrl(true, true, true));
/* prevent using setclass inside gathers */
nvhost_channel_init_gather_filter(cdma_to_channel(cdma));
/* start the command DMA */
wmb();
- writel_relaxed(host1x_channel_dmactrl(false, false, false),
- chan_regs + host1x_channel_dmactrl_r());
+ host1x_channel_writel(ch, host1x_channel_dmactrl_r(),
+ host1x_channel_dmactrl(false, false, false));
cdma->running = true;
}
/*
 * Restart command DMA on a channel after a timeout, resuming execution
 * from @getptr. Resolved from leftover diff markers to the post-patch
 * form: all raw readl()/writel() on the channel aperture are replaced by
 * host1x_channel_readl()/host1x_channel_writel() accessors.
 */
static void cdma_timeout_restart(struct nvhost_cdma *cdma, u32 getptr)
{
	struct nvhost_master *dev = cdma_to_dev(cdma);
	struct nvhost_channel *ch = cdma_to_channel(cdma);

	/* nothing to do if DMA is already running */
	if (cdma->running)
		return;

	cdma->last_put = cdma_pb_op().putptr(&cdma->push_buffer);

	/* stop DMA while we reprogram the pointers */
	host1x_channel_writel(ch, host1x_channel_dmactrl_r(),
			host1x_channel_dmactrl(true, false, false));

	/* set base, end pointer (all of memory) */
	host1x_channel_writel(ch, host1x_channel_dmastart_r(), 0);
	host1x_channel_writel(ch, host1x_channel_dmaend_r(), 0xFFFFFFFF);

	/* set GET, by loading the value in PUT (then reset GET) */
	host1x_channel_writel(ch, host1x_channel_dmaput_r(), getptr);
	host1x_channel_writel(ch, host1x_channel_dmactrl_r(),
			host1x_channel_dmactrl(true, true, true));

	dev_dbg(&dev->dev->dev,
		"%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
		__func__,
		host1x_channel_readl(ch, host1x_channel_dmaget_r()),
		host1x_channel_readl(ch, host1x_channel_dmaput_r()),
		cdma->last_put);

	/* deassert GET reset and set PUT */
	host1x_channel_writel(ch, host1x_channel_dmactrl_r(),
			host1x_channel_dmactrl(true, false, false));
	host1x_channel_writel(ch, host1x_channel_dmaput_r(), cdma->last_put);

	/* reinitialise gather filter for the channel */
	nvhost_channel_init_gather_filter(cdma_to_channel(cdma));

	/* start the command DMA; wmb() orders prior pushbuffer writes */
	wmb();
	host1x_channel_writel(ch, host1x_channel_dmactrl_r(),
			host1x_channel_dmactrl(false, false, false));

	cdma->running = true;
}
/*
 * Push the shadowed PUT pointer to hardware if it has advanced since the
 * last kick. Resolved from leftover diff markers to the post-patch form
 * (host1x_channel_writel() accessor instead of writel_relaxed()).
 */
static void cdma_kick(struct nvhost_cdma *cdma)
{
	u32 put;
	struct nvhost_channel *ch = cdma_to_channel(cdma);

	put = cdma_pb_op().putptr(&cdma->push_buffer);

	if (put != cdma->last_put) {
		/* ensure pushbuffer contents are visible before PUT moves */
		wmb();
		host1x_channel_writel(ch, host1x_channel_dmaput_r(), put);
		cdma->last_put = put;
	}
}
mutex_lock(&cdma->lock);
if (cdma->running) {
nvhost_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
- writel(host1x_channel_dmactrl(true, false, false),
- chan_regs + host1x_channel_dmactrl_r());
+ host1x_channel_writel(ch, host1x_channel_dmactrl_r(),
+ host1x_channel_dmactrl(true, false, false));
cdma->running = false;
}
mutex_unlock(&cdma->lock);
dev_dbg(&dev->dev->dev,
"begin channel teardown (channel id %d)\n", ch->chid);
- cmdproc_stop = readl(dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+ cmdproc_stop = host1x_sync_readl(dev->dev,
+ host1x_sync_cmdproc_stop_r());
cmdproc_stop |= BIT(ch->chid);
- writel(cmdproc_stop, dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+ host1x_sync_writel(dev->dev, host1x_sync_cmdproc_stop_r(),
+ cmdproc_stop);
dev_dbg(&dev->dev->dev,
"%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
__func__,
- readl(ch->aperture + host1x_channel_dmaget_r()),
- readl(ch->aperture + host1x_channel_dmaput_r()),
+ host1x_channel_readl(ch, host1x_channel_dmaget_r()),
+ host1x_channel_readl(ch, host1x_channel_dmaput_r()),
cdma->last_put);
- writel(host1x_channel_dmactrl(true, false, false),
- ch->aperture + host1x_channel_dmactrl_r());
+ host1x_channel_writel(ch, host1x_channel_dmactrl_r(),
+ host1x_channel_dmactrl(true, false, false));
- writel(BIT(ch->chid), dev->sync_aperture + host1x_sync_ch_teardown_r());
+ host1x_sync_writel(dev->dev,
+ host1x_sync_ch_teardown_r(), BIT(ch->chid));
nvhost_module_reset(ch->dev, true);
cdma_timeout_release_mlocks(cdma);
"end channel teardown (id %d, DMAGET restart = 0x%x)\n",
ch->chid, getptr);
- cmdproc_stop = readl(dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+ cmdproc_stop = host1x_sync_readl(dev->dev,
+ host1x_sync_cmdproc_stop_r());
cmdproc_stop &= ~(BIT(ch->chid));
- writel(cmdproc_stop, dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+ host1x_sync_writel(dev->dev, host1x_sync_cmdproc_stop_r(),
+ cmdproc_stop);
cdma->torndown = false;
cdma_timeout_restart(cdma, getptr);
{
struct nvhost_channel *ch = cdma_to_channel(cdma);
struct nvhost_master *dev = cdma_to_dev(cdma);
- u32 cbstat = readl(dev->sync_aperture +
+ u32 cbstat = host1x_sync_readl(dev->dev,
host1x_sync_cbstat_0_r() + 4 * ch->chid);
- u32 cbread = readl(dev->sync_aperture +
+ u32 cbread = host1x_sync_readl(dev->dev,
host1x_sync_cbread0_r() + 4 * ch->chid);
u32 waiting = cbstat == 0x00010008;
u32 syncpt_id = cbread >> 24;
}
/* stop processing to get a clean snapshot */
- prev_cmdproc = readl(dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+ prev_cmdproc = host1x_sync_readl(dev->dev,
+ host1x_sync_cmdproc_stop_r());
cmdproc_stop = prev_cmdproc | BIT(ch->chid);
- writel(cmdproc_stop, dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+ host1x_sync_writel(dev->dev,
+ host1x_sync_cmdproc_stop_r(), cmdproc_stop);
dev_dbg(&dev->dev->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n",
prev_cmdproc, cmdproc_stop);
"cdma_timeout: expired, but buffer had completed\n");
/* restore */
cmdproc_stop = prev_cmdproc & ~(BIT(ch->chid));
- writel(cmdproc_stop,
- dev->sync_aperture + host1x_sync_cmdproc_stop_r());
+ host1x_sync_writel(dev->dev,
+ host1x_sync_cmdproc_stop_r(), cmdproc_stop);
mutex_unlock(&cdma->lock);
mutex_unlock(&dev->timeout_mutex);
return;
{
struct platform_device *pdev = ch->dev;
- void __iomem *regs = ch->aperture;
struct nvhost_master *master = nvhost_get_host(pdev);
int err;
return err;
}
- writel(host1x_channel_channelctrl_kernel_filter_gbuffer_f(1),
- regs + host1x_channel_channelctrl_r());
+ host1x_channel_writel(ch, host1x_channel_channelctrl_r(),
+ host1x_channel_channelctrl_kernel_filter_gbuffer_f(1));
nvhost_module_idle(nvhost_get_parent(pdev));
return 0;
u32 cbstat, cbread, cmdstat;
u32 val, base, baseval;
- dmaput = readl(channel->aperture + host1x_channel_dmaput_r());
- dmaget = readl(channel->aperture + host1x_channel_dmaget_r());
- dmactrl = readl(channel->aperture + host1x_channel_dmactrl_r());
- cbread = readl(m->sync_aperture + host1x_sync_cbread0_r() + 4 * chid);
- cbstat = readl(m->sync_aperture + host1x_sync_cbstat_0_r() + 4 * chid);
- cmdstat = readl(m->sync_aperture + host1x_sync_cmdproc_stat_r());
+ dmaput = host1x_channel_readl(channel, host1x_channel_dmaput_r());
+ dmaget = host1x_channel_readl(channel, host1x_channel_dmaget_r());
+ dmactrl = host1x_channel_readl(channel, host1x_channel_dmactrl_r());
+ cbread = host1x_sync_readl(channel->dev,
+ host1x_sync_cbread0_r() + 4 * chid);
+ cbstat = host1x_sync_readl(channel->dev,
+ host1x_sync_cbstat_0_r() + 4 * chid);
+ cmdstat = host1x_sync_readl(channel->dev, host1x_sync_cmdproc_stat_r());
#ifdef CONFIG_PM_RUNTIME
nvhost_debug_output(o, "%d-%s (%d): ", chid,
case 0x00010009:
base = (cbread >> 16) & 0xff;
- baseval = readl(m->sync_aperture +
+ baseval = host1x_sync_readl(channel->dev,
host1x_sync_syncpt_base_0_r() + 4 * base);
val = cbread & 0xffff;
nvhost_debug_output(o, "waiting on syncpt %d val %d "
nvhost_debug_output(o, "%d: fifo:\n", chid);
- writel(host1x_sync_cfpeek_ctrl_cfpeek_ena_f(1)
- | host1x_sync_cfpeek_ctrl_cfpeek_channr_f(chid),
- m->sync_aperture + host1x_sync_cfpeek_ctrl_r());
+ host1x_sync_writel(ch->dev, host1x_sync_cfpeek_ctrl_r(),
+ host1x_sync_cfpeek_ctrl_cfpeek_ena_f(1)
+ | host1x_sync_cfpeek_ctrl_cfpeek_channr_f(chid));
wmb();
- val = readl(channel->aperture + host1x_channel_fifostat_r());
+ val = host1x_channel_readl(channel, host1x_channel_fifostat_r());
if (host1x_channel_fifostat_cfempty_v(val)) {
- writel(0x0, m->sync_aperture + host1x_sync_cfpeek_ctrl_r());
+ host1x_sync_writel(ch->dev, host1x_sync_cfpeek_ctrl_r(), 0x0);
nvhost_debug_output(o, "FIFOSTAT %08x\n[empty]\n",
val);
return;
}
- val = readl(m->sync_aperture + host1x_sync_cfpeek_ptrs_r());
+ val = host1x_sync_readl(channel->dev, host1x_sync_cfpeek_ptrs_r());
rd_ptr = host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(val);
wr_ptr = host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(val);
- val = readl(m->sync_aperture + host1x_sync_cf0_setup_r() + 4 * chid);
+ val = host1x_sync_readl(channel->dev,
+ host1x_sync_cf0_setup_r() + 4 * chid);
start = host1x_sync_cf0_setup_cf0_base_v(val);
end = host1x_sync_cf0_setup_cf0_limit_v(val);
nvhost_debug_output(o, "FIFOSTAT %08x, %03x - %03x, RD %03x, WR %03x\n",
val, start, end, rd_ptr, wr_ptr);
do {
- writel(host1x_sync_cfpeek_ctrl_cfpeek_ena_f(1)
- | host1x_sync_cfpeek_ctrl_cfpeek_channr_f(chid)
- | host1x_sync_cfpeek_ctrl_cfpeek_addr_f(rd_ptr),
- m->sync_aperture + host1x_sync_cfpeek_ctrl_r());
+ host1x_sync_writel(ch->dev, host1x_sync_cfpeek_ctrl_r(),
+ host1x_sync_cfpeek_ctrl_cfpeek_ena_f(1)
+ | host1x_sync_cfpeek_ctrl_cfpeek_channr_f(chid)
+ | host1x_sync_cfpeek_ctrl_cfpeek_addr_f(rd_ptr));
wmb();
- val = readl(m->sync_aperture + host1x_sync_cfpeek_read_r());
+ val = host1x_sync_readl(channel->dev,
+ host1x_sync_cfpeek_read_r());
rmb();
nvhost_debug_output(o, "%08x ", val);
nvhost_debug_output(o, "\n");
- writel(0x0, m->sync_aperture + host1x_sync_cfpeek_ctrl_r());
+ host1x_sync_writel(ch->dev, host1x_sync_cfpeek_ctrl_r(), 0x0);
}
static void t20_debug_show_mlocks(struct nvhost_master *m, struct output *o)
{
- u32 __iomem *mlo_regs = m->sync_aperture +
- host1x_sync_mlock_owner_0_r();
int i;
nvhost_debug_output(o, "---- mlocks ----\n");
for (i = 0; i < NV_HOST1X_NB_MLOCKS; i++) {
- u32 owner = readl(mlo_regs + i);
+ u32 owner = host1x_sync_readl(m->dev,
+ host1x_sync_mlock_owner_0_r() + i * 4);
if (host1x_sync_mlock_owner_0_mlock_ch_owns_0_v(owner))
nvhost_debug_output(o, "%d: locked by channel %d\n",
i,
static irqreturn_t syncpt_thresh_cascade_isr(int irq, void *dev_id)
{
struct nvhost_master *dev = dev_id;
- void __iomem *sync_regs = dev->sync_aperture;
struct nvhost_intr *intr = &dev->intr;
unsigned long reg;
int i, id;
for (i = 0; i < DIV_ROUND_UP(nvhost_syncpt_nb_hw_pts(&dev->syncpt), 32);
i++) {
- reg = readl(sync_regs +
+ reg = host1x_sync_readl(dev->dev,
host1x_sync_syncpt_thresh_cpu0_int_status_r() +
i * REGISTER_STRIDE);
static void t20_intr_init_host_sync(struct nvhost_intr *intr)
{
struct nvhost_master *dev = intr_to_dev(intr);
- void __iomem *sync_regs = dev->sync_aperture;
int i, err;
intr_op().disable_all_syncpt_intrs(intr);
/* increase the auto-ack timout to the maximum value. 2d will hang
* otherwise on ap20.
*/
- writel(0xff, sync_regs + host1x_sync_ctxsw_timeout_cfg_r());
+ host1x_sync_writel(dev->dev, host1x_sync_ctxsw_timeout_cfg_r(), 0xff);
/* enable graphics host syncpoint interrupt */
t20_intr_set_syncpt_threshold(intr,
/*
 * Program the host1x microsecond clock divider (@cpm = clocks per
 * microsecond) and derive the IP busy timeout from it. Resolved from
 * leftover diff markers to the post-patch host1x_sync_writel() form.
 */
static void t20_intr_set_host_clocks_per_usec(struct nvhost_intr *intr, u32 cpm)
{
	struct nvhost_master *dev = intr_to_dev(intr);

	/* write microsecond clock register */
	host1x_sync_writel(dev->dev, host1x_sync_usec_clk_r(), cpm);

	/* set the ip_busy_timeout (500000 us worth of clocks) */
	host1x_sync_writel(dev->dev,
			host1x_sync_ip_busy_timeout_r(), cpm * 500000);
}
/*
 * Set the interrupt threshold for syncpoint @id to @thresh. Resolved
 * from leftover diff markers to the post-patch host1x_sync_writel() form.
 */
static void t20_intr_set_syncpt_threshold(struct nvhost_intr *intr,
	u32 id, u32 thresh)
{
	struct nvhost_master *dev = intr_to_dev(intr);

	/* per-syncpoint threshold registers are REGISTER_STRIDE apart */
	host1x_sync_writel(dev->dev, (host1x_sync_syncpt_int_thresh_0_r() +
			id * REGISTER_STRIDE), thresh);
}
/*
 * Enable the CPU0 threshold interrupt for syncpoint @id. Resolved from
 * leftover diff markers to the post-patch host1x_sync_writel() form.
 */
static void t20_intr_enable_syncpt_intr(struct nvhost_intr *intr, u32 id)
{
	struct nvhost_master *dev = intr_to_dev(intr);

	/* one enable bit per syncpoint, 32 syncpoints per register word */
	host1x_sync_writel(dev->dev,
			host1x_sync_syncpt_thresh_int_enable_cpu0_r() +
			bit_word(id) * REGISTER_STRIDE, bit_mask(id));
}
/*
 * Disable the threshold interrupt for syncpoint @id and clear any
 * pending status on both CPUs. Resolved from leftover diff markers to
 * the post-patch host1x_sync_writel() form.
 */
static void t20_intr_disable_syncpt_intr(struct nvhost_intr *intr, u32 id)
{
	struct nvhost_master *dev = intr_to_dev(intr);

	host1x_sync_writel(dev->dev,
			host1x_sync_syncpt_thresh_int_disable_r() +
			bit_word(id) * REGISTER_STRIDE, bit_mask(id));

	/* clear status for both cpu's */
	host1x_sync_writel(dev->dev,
			host1x_sync_syncpt_thresh_cpu0_int_status_r() +
			bit_word(id) * REGISTER_STRIDE, bit_mask(id));
	host1x_sync_writel(dev->dev,
			host1x_sync_syncpt_thresh_cpu1_int_status_r() +
			bit_word(id) * REGISTER_STRIDE, bit_mask(id));
}
/*
 * Disable and acknowledge every syncpoint threshold interrupt on both
 * CPUs, one 32-bit register word at a time. Resolved from leftover diff
 * markers to the post-patch host1x_sync_writel() form.
 */
static void t20_intr_disable_all_syncpt_intrs(struct nvhost_intr *intr)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	u32 reg;

	for (reg = 0; reg < bit_word(nvhost_syncpt_nb_hw_pts(&dev->syncpt))
			* REGISTER_STRIDE; reg += REGISTER_STRIDE) {
		/* disable interrupts for both cpu's */
		host1x_sync_writel(dev->dev,
				host1x_sync_syncpt_thresh_int_disable_r() +
				reg, 0xffffffffu);

		/* clear status for both cpu's */
		host1x_sync_writel(dev->dev,
				host1x_sync_syncpt_thresh_cpu0_int_status_r() +
				reg, 0xffffffffu);
		host1x_sync_writel(dev->dev,
				host1x_sync_syncpt_thresh_cpu1_int_status_r() +
				reg, 0xffffffffu);
	}
}
{
unsigned int id = syncpt->id;
struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);
-
- void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+ struct nvhost_master *dev = intr_to_dev(intr);
u32 reg = bit_word(id) * REGISTER_STRIDE;
if (disable_intr)
- writel(bit_mask(id), sync_regs +
- host1x_sync_syncpt_thresh_int_disable_r() + reg);
+ host1x_sync_writel(dev->dev,
+ host1x_sync_syncpt_thresh_int_disable_r() + reg,
+ bit_mask(id));
- writel(bit_mask(id), sync_regs +
- host1x_sync_syncpt_thresh_cpu0_int_status_r() + reg);
+ host1x_sync_writel(dev->dev,
+ host1x_sync_syncpt_thresh_cpu0_int_status_r() + reg,
+ bit_mask(id));
}
/**
static irqreturn_t t20_intr_host1x_isr(int irq, void *dev_id)
{
struct nvhost_intr *intr = dev_id;
- void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+ struct nvhost_master *dev = intr_to_dev(intr);
unsigned long stat;
u32 ext_stat;
u32 addr;
unsigned long intstat;
int i;
- intstat = readl(sync_regs + host1x_sync_intstatus_r());
+ intstat = host1x_sync_readl(dev->dev, host1x_sync_intstatus_r());
intr->intstatus = intstat;
/* Handle host1x interrupt in ISR */
- stat = readl(sync_regs + host1x_sync_hintstatus_r());
- ext_stat = readl(sync_regs + host1x_sync_hintstatus_ext_r());
+ stat = host1x_sync_readl(dev->dev, host1x_sync_hintstatus_r());
+ ext_stat = host1x_sync_readl(dev->dev, host1x_sync_hintstatus_ext_r());
for_each_set_bit(i, &stat, 32) {
if (intr->host_isr[i])
}
if (host1x_sync_hintstatus_ext_ip_read_int_v(ext_stat)) {
- addr = readl(sync_regs + host1x_sync_ip_read_timeout_addr_r());
+ addr = host1x_sync_readl(dev->dev,
+ host1x_sync_ip_read_timeout_addr_r());
pr_err("Host read timeout at address %x\n", addr);
}
if (host1x_sync_hintstatus_ext_ip_write_int_v(ext_stat)) {
- addr = readl(sync_regs + host1x_sync_ip_write_timeout_addr_r());
+ addr = host1x_sync_readl(dev->dev,
+ host1x_sync_ip_write_timeout_addr_r());
pr_err("Host write timeout at address %x\n", addr);
}
- writel(ext_stat, sync_regs + host1x_sync_hintstatus_ext_r());
- writel(stat, sync_regs + host1x_sync_hintstatus_r());
+ host1x_sync_writel(dev->dev, host1x_sync_hintstatus_ext_r(), ext_stat);
+ host1x_sync_writel(dev->dev, host1x_sync_hintstatus_r(), stat);
- writel(intstat, sync_regs + host1x_sync_intstatus_r());
+ host1x_sync_writel(dev->dev, host1x_sync_intstatus_r(), intstat);
return IRQ_HANDLED;
}
static int t20_intr_request_host_general_irq(struct nvhost_intr *intr)
{
- void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+ struct nvhost_master *dev = intr_to_dev(intr);
int err;
u32 val;
/* master disable for general (not syncpt) host interrupts */
- writel(0, sync_regs + host1x_sync_intmask_r());
+ host1x_sync_writel(dev->dev, host1x_sync_intmask_r(), 0);
/* clear status & extstatus */
- writel(0xfffffffful, sync_regs + host1x_sync_hintstatus_ext_r());
- writel(0xfffffffful, sync_regs + host1x_sync_hintstatus_r());
+ host1x_sync_writel(dev->dev, host1x_sync_hintstatus_ext_r(),
+ 0xfffffffful);
+ host1x_sync_writel(dev->dev, host1x_sync_hintstatus_r(),
+ 0xfffffffful);
err = request_irq(intr->general_irq, t20_intr_host1x_isr,
0, "host_status", intr);
return err;
/* enable extra interrupt sources IP_READ_INT and IP_WRITE_INT */
- writel(BIT(30) | BIT(31), sync_regs + host1x_sync_hintmask_ext_r());
+ host1x_sync_writel(dev->dev, host1x_sync_hintmask_ext_r(),
+ BIT(30) | BIT(31));
/* enable extra interrupt sources */
- val = readl(sync_regs + host1x_sync_hintmask_r());
+ val = host1x_sync_readl(dev->dev, host1x_sync_hintmask_r());
val |= BIT(31);
- writel(val, sync_regs + host1x_sync_hintmask_r());
+ host1x_sync_writel(dev->dev, host1x_sync_hintmask_r(), val);
/* enable host module interrupt to CPU0 */
- writel(BIT(0), sync_regs + host1x_sync_intc0mask_r());
+ host1x_sync_writel(dev->dev, host1x_sync_intc0mask_r(), BIT(0));
/* master enable for general (not syncpt) host interrupts */
- writel(BIT(0), sync_regs + host1x_sync_intmask_r());
+ host1x_sync_writel(dev->dev, host1x_sync_intmask_r(), BIT(0));
return err;
}
static void t20_intr_free_host_general_irq(struct nvhost_intr *intr)
{
- void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+ struct nvhost_master *dev = intr_to_dev(intr);
/* master disable for general (not syncpt) host interrupts */
- writel(0, sync_regs + host1x_sync_intmask_r());
+ host1x_sync_writel(dev->dev, host1x_sync_intmask_r(), 0);
free_irq(intr->general_irq, intr);
}
static int intr_debug_dump(struct nvhost_intr *intr, struct output *o)
{
struct nvhost_master *dev = intr_to_dev(intr);
- void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
int i;
nvhost_debug_output(o, "\n---- host general irq ----\n\n");
nvhost_debug_output(o, "sync_hintmask_ext = 0x%08x\n",
- readl(sync_regs + host1x_sync_hintmask_ext_r()));
+ host1x_sync_readl(dev->dev, host1x_sync_hintmask_ext_r()));
nvhost_debug_output(o, "sync_hintmask = 0x%08x\n",
- readl(sync_regs + host1x_sync_hintmask_r()));
+ host1x_sync_readl(dev->dev, host1x_sync_hintmask_r()));
nvhost_debug_output(o, "sync_intc0mask = 0x%08x\n",
- readl(sync_regs + host1x_sync_intc0mask_r()));
+ host1x_sync_readl(dev->dev, host1x_sync_intc0mask_r()));
nvhost_debug_output(o, "sync_intmask = 0x%08x\n",
- readl(sync_regs + host1x_sync_intmask_r()));
+ host1x_sync_readl(dev->dev, host1x_sync_intmask_r()));
nvhost_debug_output(o, "\n---- host syncpt irq mask ----\n\n");
for (i = 0; i < DIV_ROUND_UP(nvhost_syncpt_nb_hw_pts(&dev->syncpt), 16);
i++)
nvhost_debug_output(o, "syncpt_thresh_int_mask(%d) = 0x%08x\n",
- i, readl(sync_regs +
+ i, host1x_sync_readl(dev->dev,
host1x_sync_syncpt_thresh_int_mask_r() +
i * REGISTER_STRIDE));
for (i = 0; i < DIV_ROUND_UP(nvhost_syncpt_nb_hw_pts(&dev->syncpt), 32);
i++)
nvhost_debug_output(o, "syncpt_thresh_cpu0_int_status(%d) = 0x%08x\n",
- i, readl(sync_regs +
+ i, host1x_sync_readl(dev->dev,
host1x_sync_syncpt_thresh_cpu0_int_status_r() +
i * REGISTER_STRIDE));
nvhost_debug_output(o, "\n---- host syncpt thresh ----\n\n");
for (i = 0; i < nvhost_syncpt_nb_hw_pts(&dev->syncpt); i++) {
- u32 reg = readl(sync_regs +
+ u32 reg = host1x_sync_readl(dev->dev,
host1x_sync_syncpt_thresh_int_mask_r() +
bit_word(i * 2) * REGISTER_STRIDE);
if (!(reg & bit_mask(i * 2)))
continue;
nvhost_debug_output(o, "syncpt_int_thresh_thresh_0(%d) = %u\n",
- i, readl(sync_regs +
+ i, host1x_sync_readl(dev->dev,
host1x_sync_syncpt_int_thresh_0_r() +
i * REGISTER_STRIDE));
}
static void intr_enable_host_irq(struct nvhost_intr *intr, int irq)
{
- void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+ struct nvhost_master *dev = intr_to_dev(intr);
long val;
- val = readl(sync_regs + host1x_sync_hintmask_r());
+ val = host1x_sync_readl(dev->dev, host1x_sync_hintmask_r());
val |= BIT(irq);
- writel(val, sync_regs + host1x_sync_hintmask_r());
+ host1x_sync_writel(dev->dev, host1x_sync_hintmask_r(), val);
}
static void intr_disable_host_irq(struct nvhost_intr *intr, int irq)
{
- void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
+ struct nvhost_master *dev = intr_to_dev(intr);
long val;
- val = readl(sync_regs + host1x_sync_hintmask_r());
+ val = host1x_sync_readl(dev->dev, host1x_sync_hintmask_r());
val &= ~BIT(irq);
- writel(val, sync_regs + host1x_sync_hintmask_r());
+ host1x_sync_writel(dev->dev, host1x_sync_hintmask_r(), val);
}
static const struct nvhost_intr_ops host1x_intr_ops = {
{
struct nvhost_master *dev = syncpt_to_dev(sp);
int min = nvhost_syncpt_read_min(sp, id);
- writel(min, dev->sync_aperture + (host1x_sync_syncpt_0_r() + id * 4));
+ host1x_sync_writel(dev->dev, (host1x_sync_syncpt_0_r() + id * 4), min);
}
/**
static u32 t20_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
struct nvhost_master *dev = syncpt_to_dev(sp);
- void __iomem *sync_regs = dev->sync_aperture;
u32 old, live;
do {
old = nvhost_syncpt_read_min(sp, id);
- live = readl(sync_regs + (host1x_sync_syncpt_0_r() + id * 4));
+ live = host1x_sync_readl(dev->dev,
+ (host1x_sync_syncpt_0_r() + id * 4));
} while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);
return live;
nvhost_debug_dump(syncpt_to_dev(sp));
return;
}
- writel(bit_mask(id), dev->sync_aperture +
- host1x_sync_syncpt_cpu_incr_r() + reg_offset * 4);
+ host1x_sync_writel(dev->dev,
+ host1x_sync_syncpt_cpu_incr_r() + reg_offset * 4, bit_mask(id));
}
/* remove a wait pointed to by patch_addr */
static int syncpt_mutex_try_lock(struct nvhost_syncpt *sp,
unsigned int idx)
{
- void __iomem *sync_regs = syncpt_to_dev(sp)->sync_aperture;
+ struct nvhost_master *dev = syncpt_to_dev(sp);
/* mlock registers returns 0 when the lock is aquired.
* writing 0 clears the lock. */
- return !!readl(sync_regs + (host1x_sync_mlock_0_r() + idx * 4));
+ return !!host1x_sync_readl(dev->dev,
+ (host1x_sync_mlock_0_r() + idx * 4));
}
static void syncpt_mutex_unlock(struct nvhost_syncpt *sp,
unsigned int idx)
{
- void __iomem *sync_regs = syncpt_to_dev(sp)->sync_aperture;
+ struct nvhost_master *dev = syncpt_to_dev(sp);
- writel(0, sync_regs + (host1x_sync_mlock_0_r() + idx * 4));
+ host1x_sync_writel(dev->dev, (host1x_sync_mlock_0_r() + idx * 4), 0);
}
static void syncpt_mutex_owner(struct nvhost_syncpt *sp,
unsigned int *chid)
{
struct nvhost_master *dev = syncpt_to_dev(sp);
- u32 __iomem *mlo_regs = dev->sync_aperture +
- host1x_sync_mlock_owner_0_r();
- u32 owner = readl(mlo_regs + idx);
+ u32 owner = host1x_sync_readl(dev->dev,
+ host1x_sync_mlock_owner_0_r() + idx * 4);
*chid = host1x_sync_mlock_owner_0_mlock_owner_chid_0_v(owner);
*cpu = host1x_sync_mlock_owner_0_mlock_cpu_owns_0_v(owner);
unsigned int idx)
{
struct nvhost_master *dev = cpuaccess_to_dev(ctx);
- void __iomem *sync_regs = dev->sync_aperture;
nvhost_dbg_fn("");
/* mlock registers returns 0 when the lock is aquired.
* writing 0 clears the lock. */
- return !!readl(sync_regs + (host1x_sync_mlock_0_0_r() + idx * 4));
+ return !!host1x_sync_readl(dev->dev, (host1x_sync_mlock_0_0_r() + idx * 4));
}
/*
 * Release hardware mlock @idx on T124 (writing 0 clears the lock).
 * Resolved from leftover diff markers to the post-patch
 * host1x_sync_writel() form.
 */
static void t124_cpuaccess_mutex_unlock(struct nvhost_cpuaccess *ctx,
	unsigned int idx)
{
	struct nvhost_master *dev = cpuaccess_to_dev(ctx);

	nvhost_dbg_fn("");

	host1x_sync_writel(dev->dev, (host1x_sync_mlock_0_0_r() + idx * 4), 0);
}
int nvhost_init_t124_cpuaccess_support(struct nvhost_master *host,
static void t124_debug_show_mlocks(struct nvhost_master *m, struct output *o)
{
- u32 __iomem *mlo_regs = m->sync_aperture +
- host1x_sync_mlock_owner_0_r();
int i;
nvhost_debug_output(o, "---- mlocks ----\n");
for (i = 0; i < NV_HOST1X_NB_MLOCKS; i++) {
- u32 owner = readl(mlo_regs + i * 4);
+ u32 owner = host1x_sync_readl(m->dev,
+ host1x_sync_mlock_owner_0_r() + i * 4);
if (owner & 0x1)
nvhost_debug_output(o, "%d: locked by channel %d\n",
i, (owner >> 8) & 0xf);