Add a new field "nb_hw_pts" to struct host1x_device_info to store
the number of syncpoints supported in h/w.
The existing field "nb_pts" stores the number of syncpoints
supported in s/w.
Hence, nb_pts <= nb_hw_pts.
Add an API to get "nb_hw_pts":
  nvhost_syncpt_nb_hw_pts()
Add an API to check whether a syncpoint id is within the h/w
supported syncpoint range:
  nvhost_syncpt_is_valid_hw_pt(id), which checks if
  0 <= id < nb_hw_pts
Note that the existing API nvhost_syncpt_is_valid_pt(id) checks if
  pts_base <= id < pts_limit
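As a hedged illustration only (not part of this change), the two
checks reduce to the following; the numeric values are assumptions
picked for the example, and sp denotes the struct nvhost_syncpt
instance in scope:

  /* sketch: assume nb_hw_pts = 192, pts_base = 64,
   * pts_limit = 128 (so nb_pts = 64)
   */
  u32 id = 160;
  nvhost_syncpt_is_valid_hw_pt(sp, id); /* true:  0 <= 160 < 192 */
  nvhost_syncpt_is_valid_pt(sp, id);    /* false: 160 >= 128     */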
Use nvhost_syncpt_nb_hw_pts() wherever we want to allocate
resources for syncpoints, i.e. we allocate syncpoint resources
for all the syncpoints supported in h/w.
Use nvhost_syncpt_is_valid_hw_pt() where a relaxed syncpoint
check is acceptable (syncpoint read/wait etc.).
Otherwise, use nvhost_syncpt_is_valid_pt() where we need to
restrict access to syncpoints supported in s/w only.
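A hedged usage sketch of this split; error handling is elided and
names other than the nvhost APIs (syncpt_data, val) are
hypothetical:

  /* size per-syncpoint resources by the h/w count */
  syncpt_data = kcalloc(nvhost_syncpt_nb_hw_pts(sp),
			sizeof(*syncpt_data), GFP_KERNEL);

  /* relaxed check: read/wait may target any h/w syncpoint */
  if (nvhost_syncpt_is_valid_hw_pt(sp, id))
	  val = nvhost_syncpt_read(sp, id);

  /* strict check: keep client-facing paths in the s/w window */
  if (!nvhost_syncpt_is_valid_pt(sp, id))
	  return -EINVAL;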
Bug 1611482
Change-Id: I009028b81bcf1fa80b64f64a9372bec0e96d96f7
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/719039
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
static int nvhost_ioctl_ctrl_syncpt_read(struct nvhost_ctrl_userctx *ctx,
struct nvhost_ctrl_syncpt_read_args *args)
{
- if (!nvhost_syncpt_is_valid_pt(&ctx->dev->syncpt, args->id))
+ if (!nvhost_syncpt_is_valid_hw_pt(&ctx->dev->syncpt, args->id))
return -EINVAL;
args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
trace_nvhost_ioctl_ctrl_syncpt_read(args->id, args->value);
{
u32 timeout;
int err;
- if (!nvhost_syncpt_is_valid_pt(&ctx->dev->syncpt, args->id))
+ if (!nvhost_syncpt_is_valid_hw_pt(&ctx->dev->syncpt, args->id))
return -EINVAL;
if (args->timeout == NVHOST_NO_TIMEOUT)
/* FIXME: MAX_SCHEDULE_TIMEOUT is ulong which can be bigger
ulong timeout;
int err;
struct timespec ts;
- if (!nvhost_syncpt_is_valid_pt(&ctx->dev->syncpt, args->id))
+ if (!nvhost_syncpt_is_valid_hw_pt(&ctx->dev->syncpt, args->id))
return -EINVAL;
if (args->timeout == NVHOST_NO_TIMEOUT)
timeout = MAX_SCHEDULE_TIMEOUT;
}
for (i = 0; i < args->num_pts; i++) {
- if (!nvhost_syncpt_is_valid_pt(&ctx->dev->syncpt, pts[i].id)) {
+ if (!nvhost_syncpt_is_valid_hw_pt(&ctx->dev->syncpt,
+ pts[i].id)) {
err = -EINVAL;
goto out;
}
static int nvhost_ioctl_ctrl_syncpt_read_max(struct nvhost_ctrl_userctx *ctx,
struct nvhost_ctrl_syncpt_read_args *args)
{
- if (!nvhost_syncpt_is_valid_pt(&ctx->dev->syncpt, args->id))
+ if (!nvhost_syncpt_is_valid_hw_pt(&ctx->dev->syncpt, args->id))
return -EINVAL;
args->value = nvhost_syncpt_read_max(&ctx->dev->syncpt, args->id);
return 0;
return err;
host->intr.syncpt = kzalloc(sizeof(struct nvhost_intr_syncpt) *
- nvhost_syncpt_nb_pts(&host->syncpt),
+ nvhost_syncpt_nb_hw_pts(&host->syncpt),
GFP_KERNEL);
if (!host->intr.syncpt) {
enum nvhost_channel_policy channel_policy; /* host1x: channel policy */
/* Syncpoint info */
- int nb_pts; /* host1x: num syncpoints supported */
+ int nb_hw_pts; /* host1x: num syncpoints supported
+ in h/w */
+ int nb_pts; /* host1x: num syncpoints supported
+ in s/w where nb_pts <= nb_hw_pts */
int pts_base; /* host1x: syncpoint base */
int pts_limit; /* host1x: syncpoint limit */
enum nvhost_syncpt_policy syncpt_policy; /* host1x: syncpoint policy */
u32 id;
pt = container_of(pos, struct sync_pt, pt_list);
id = nvhost_sync_pt_id(pt);
- if (!id || !nvhost_syncpt_is_valid_pt(sp, id)) {
+ if (!id || !nvhost_syncpt_is_valid_hw_pt(sp, id)) {
sync_fence_put(fence);
return;
}
ktime_get_ts(&isr_recv);
- for (i = 0; i < DIV_ROUND_UP(dev->info.nb_pts, 32); i++) {
+ for (i = 0; i < DIV_ROUND_UP(nvhost_syncpt_nb_hw_pts(&dev->syncpt), 32);
+ i++) {
reg = readl(sync_regs +
host1x_sync_syncpt_thresh_cpu0_int_status_r() +
i * REGISTER_STRIDE);
int graphics_host_sp =
nvhost_syncpt_graphics_host_sp(&dev->syncpt);
- if (unlikely(sp_id >= dev->info.nb_pts)) {
+ if (unlikely(!nvhost_syncpt_is_valid_hw_pt(&dev->syncpt,
+ sp_id))) {
dev_err(&dev->dev->dev, "%s(): syncpoint id %d is beyond the number of syncpoints (%d)\n",
- __func__, sp_id, dev->info.nb_pts);
+ __func__, sp_id,
+ nvhost_syncpt_nb_hw_pts(&dev->syncpt));
goto out;
}
intr_op().disable_all_syncpt_intrs(intr);
- for (i = 0; i < dev->info.nb_pts; i++)
+ for (i = 0; i < nvhost_syncpt_nb_hw_pts(&dev->syncpt); i++)
INIT_WORK(&intr->syncpt[i].work, syncpt_thresh_cascade_fn);
err = request_irq(intr->syncpt_irq,
void __iomem *sync_regs = dev->sync_aperture;
u32 reg;
- for (reg = 0; reg < bit_word(dev->info.nb_pts) * REGISTER_STRIDE;
- reg += REGISTER_STRIDE) {
+ for (reg = 0; reg < bit_word(nvhost_syncpt_nb_hw_pts(&dev->syncpt))
+ * REGISTER_STRIDE; reg += REGISTER_STRIDE) {
/* disable interrupts for both cpu's */
writel(0xffffffffu, sync_regs +
host1x_sync_syncpt_thresh_int_disable_r() +
readl(sync_regs + host1x_sync_intmask_r()));
nvhost_debug_output(o, "\n---- host syncpt irq mask ----\n\n");
- for (i = 0; i < DIV_ROUND_UP(dev->info.nb_pts, 16); i++)
+ for (i = 0; i < DIV_ROUND_UP(nvhost_syncpt_nb_hw_pts(&dev->syncpt), 16);
+ i++)
nvhost_debug_output(o, "syncpt_thresh_int_mask(%d) = 0x%08x\n",
i, readl(sync_regs +
host1x_sync_syncpt_thresh_int_mask_r() +
i * REGISTER_STRIDE));
nvhost_debug_output(o, "\n---- host syncpt irq status ----\n\n");
- for (i = 0; i < DIV_ROUND_UP(dev->info.nb_pts, 32); i++)
+ for (i = 0; i < DIV_ROUND_UP(nvhost_syncpt_nb_hw_pts(&dev->syncpt), 32);
+ i++)
nvhost_debug_output(o, "syncpt_thresh_cpu0_int_status(%d) = 0x%08x\n",
i, readl(sync_regs +
host1x_sync_syncpt_thresh_cpu0_int_status_r() +
i * REGISTER_STRIDE));
nvhost_debug_output(o, "\n---- host syncpt thresh ----\n\n");
- for (i = 0; i < dev->info.nb_pts; i++) {
+ for (i = 0; i < nvhost_syncpt_nb_hw_pts(&dev->syncpt); i++) {
u32 reg = readl(sync_regs +
host1x_sync_syncpt_thresh_int_mask_r() +
bit_word(i * 2) * REGISTER_STRIDE);
{
u32 get_restart;
struct nvhost_job *job = NULL;
- int nb_pts = nvhost_syncpt_nb_pts(syncpt);
+ int nb_pts = nvhost_syncpt_nb_hw_pts(syncpt);
DECLARE_BITMAP(syncpt_used, nb_pts);
bitmap_zero(syncpt_used, nb_pts);
unsigned int id;
struct nvhost_intr_syncpt *syncpt;
struct nvhost_master *host = intr_to_dev(intr);
- u32 nb_pts = nvhost_syncpt_nb_pts(&host->syncpt);
+ u32 nb_pts = nvhost_syncpt_nb_hw_pts(&host->syncpt);
mutex_init(&intr->mutex);
intr->syncpt_irq = irq_sync;
{
unsigned int id;
struct nvhost_intr_syncpt *syncpt;
- u32 nb_pts = nvhost_syncpt_nb_pts(&intr_to_dev(intr)->syncpt);
+ u32 nb_pts = nvhost_syncpt_nb_hw_pts(&intr_to_dev(intr)->syncpt);
mutex_lock(&intr->mutex);
struct nvhost_waitchk *wait = &job->waitchk[i];
/* validate syncpt id */
- if (!nvhost_syncpt_is_valid_pt(sp, wait->syncpt_id))
+ if (!nvhost_syncpt_is_valid_hw_pt(sp, wait->syncpt_id))
continue;
if (!wait->mem)
int nvhost_job_pin(struct nvhost_job *job, struct nvhost_syncpt *sp)
{
int err = 0, i = 0, j = 0;
- DECLARE_BITMAP(waitchk_mask, nvhost_syncpt_nb_pts(sp));
+ int nb_hw_pts = nvhost_syncpt_nb_hw_pts(sp);
+ DECLARE_BITMAP(waitchk_mask, nb_hw_pts);
- bitmap_zero(waitchk_mask, nvhost_syncpt_nb_pts(sp));
+ bitmap_zero(waitchk_mask, nb_hw_pts);
for (i = 0; i < job->num_waitchk; i++) {
u32 syncpt_id = job->waitchk[i].syncpt_id;
- if (nvhost_syncpt_is_valid_pt(sp, syncpt_id))
+ if (nvhost_syncpt_is_valid_hw_pt(sp, syncpt_id))
set_bit(syncpt_id, waitchk_mask);
}
/* get current syncpt values for waitchk */
- for_each_set_bit(i, waitchk_mask, nvhost_syncpt_nb_pts(sp))
+ for_each_set_bit(i, waitchk_mask, nb_hw_pts)
nvhost_syncpt_update_min(sp, i);
/* pin memory */
struct sync_fence *fence = NULL;
for (i = 0; i < num_pts; i++) {
- if (!nvhost_syncpt_is_valid_pt(sp, pts[i].id)) {
+ if (!nvhost_syncpt_is_valid_hw_pt(sp, pts[i].id)) {
WARN_ON(1);
return ERR_PTR(-EINVAL);
}
u32 id,
u32 thresh);
- if (!id || !nvhost_syncpt_is_valid_pt(sp, id))
+ if (!id || !nvhost_syncpt_is_valid_hw_pt(sp, id))
return -EINVAL;
if (value)
{
int i;
struct nvhost_master *host = syncpt_to_dev(sp);
- int nb_pts = nvhost_syncpt_nb_pts(sp);
+ int nb_pts = nvhost_syncpt_nb_hw_pts(sp);
int err = 0;
/* Allocate structs for min, max and base values */
}
/* Fill in the attributes */
- for (i = nvhost_syncpt_pts_base(sp);
- i < nvhost_syncpt_pts_limit(sp); i++) {
+ for (i = 0; i < nvhost_syncpt_nb_hw_pts(sp); i++) {
struct nvhost_syncpt_attr *min =
&sp->syncpt_attrs[i*NUM_SYSFS_ENTRY];
struct nvhost_syncpt_attr *max =
{
#ifdef CONFIG_TEGRA_GRHOST_SYNC
int i;
- for (i = nvhost_syncpt_pts_base(sp);
- i < nvhost_syncpt_pts_limit(sp); i++) {
+ for (i = 0; i < nvhost_syncpt_nb_hw_pts(sp); i++) {
if (sp->timeline && sp->timeline[i]) {
sync_timeline_destroy(
(struct sync_timeline *)sp->timeline[i]);
return sp->client_managed[id];
}
+int nvhost_syncpt_nb_hw_pts(struct nvhost_syncpt *sp)
+{
+ return syncpt_to_dev(sp)->info.nb_hw_pts;
+}
+
int nvhost_syncpt_nb_pts(struct nvhost_syncpt *sp)
{
return syncpt_to_dev(sp)->info.nb_pts;
return syncpt_to_dev(sp)->info.pts_base;
}
+bool nvhost_syncpt_is_valid_hw_pt(struct nvhost_syncpt *sp, u32 id)
+{
+	return (id < nvhost_syncpt_nb_hw_pts(sp) &&
+		id != NVSYNCPT_INVALID);
+}
+
bool nvhost_syncpt_is_valid_pt(struct nvhost_syncpt *sp, u32 id)
{
return (id >= nvhost_syncpt_pts_base(sp) &&
}
EXPORT_SYMBOL(nvhost_syncpt_set_min_eq_max_ext);
+/*
+ * For external clients, check validity against the full
+ * h/w supported syncpoint range
+ */
bool nvhost_syncpt_is_valid_pt_ext(struct platform_device *dev, u32 id)
{
struct nvhost_master *master = nvhost_get_host(dev);
struct nvhost_syncpt *sp = &master->syncpt;
- return nvhost_syncpt_is_valid_pt(sp, id);
+ return nvhost_syncpt_is_valid_hw_pt(sp, id);
}
EXPORT_SYMBOL(nvhost_syncpt_is_valid_pt_ext);
void nvhost_syncpt_patch_check(struct nvhost_syncpt *sp);
void nvhost_syncpt_set_min_eq_max(struct nvhost_syncpt *sp, u32 id);
int nvhost_syncpt_client_managed(struct nvhost_syncpt *sp, u32 id);
+int nvhost_syncpt_nb_hw_pts(struct nvhost_syncpt *sp);
int nvhost_syncpt_nb_pts(struct nvhost_syncpt *sp);
int nvhost_syncpt_pts_base(struct nvhost_syncpt *sp);
+bool nvhost_syncpt_is_valid_hw_pt(struct nvhost_syncpt *sp, u32 id);
bool nvhost_syncpt_is_valid_pt(struct nvhost_syncpt *sp, u32 id);
int nvhost_nb_syncpts_store(struct nvhost_syncpt *sp, const char *buf);
int nvhost_syncpt_nb_mlocks(struct nvhost_syncpt *sp);
.ch_limit = T124_NVHOST_NUMCHANNELS,
.nb_mlocks = NV_HOST1X_NB_MLOCKS,
.initialize_chip_support = nvhost_init_t124_support,
+ .nb_hw_pts = NV_HOST1X_SYNCPT_NB_PTS,
.nb_pts = NV_HOST1X_SYNCPT_NB_PTS,
.pts_base = 0,
.pts_limit = NV_HOST1X_SYNCPT_NB_PTS,
.ch_limit = T124_NVHOST_NUMCHANNELS,
.nb_mlocks = NV_HOST1X_NB_MLOCKS,
.initialize_chip_support = nvhost_init_t210_support,
+ .nb_hw_pts = NV_HOST1X_SYNCPT_NB_PTS,
.nb_pts = NV_HOST1X_SYNCPT_NB_PTS,
.pts_base = 0,
.pts_limit = NV_HOST1X_SYNCPT_NB_PTS,
}
sp_id = msg->id;
- if (unlikely(sp_id >= dev->info.nb_pts)) {
+ if (unlikely(!nvhost_syncpt_is_valid_hw_pt(&dev->syncpt,
+ sp_id))) {
dev_err(&dev->dev->dev,
"%s(): syncpoint id %d is beyond the number of syncpoints (%d)\n",
- __func__, sp_id, dev->info.nb_pts);
+ __func__, sp_id,
+ nvhost_syncpt_nb_hw_pts(&dev->syncpt));
tegra_gr_comm_release(handle);
continue;
}
intr_op().disable_all_syncpt_intrs(intr);
- for (i = 0; i < dev->info.nb_pts; i++)
+ for (i = 0; i < nvhost_syncpt_nb_hw_pts(&dev->syncpt); i++)
INIT_WORK(&intr->syncpt[i].work, syncpt_thresh_cascade_fn);
ctx->syncpt_handler =