u32 __user *class_ids = (u32 __user *)(uintptr_t)args->class_ids;
struct nvhost_device_data *pdata = platform_get_drvdata(ctx->pdev);
- struct nvhost_master *host = nvhost_get_host(ctx->pdev);
const u32 *syncpt_array =
(nvhost_get_syncpt_policy() == SYNCPT_PER_CHANNEL_INSTANCE) ?
ctx->syncpts :
u32 *local_class_ids = NULL;
int err, i;
- if (num_syncpt_incrs > host->info.nb_pts)
- return -EINVAL;
-
job = nvhost_job_alloc(ctx->ch,
num_cmdbufs,
num_relocs,
static void show_syncpts(struct nvhost_master *m, struct output *o)
{
int i;
+
nvhost_debug_output(o, "---- syncpts ----\n");
mutex_lock(&m->syncpt.syncpt_mutex);
- for (i = 0; i < nvhost_syncpt_nb_pts(&m->syncpt); i++) {
+ for (i = nvhost_syncpt_pts_base(&m->syncpt);
+ i < nvhost_syncpt_pts_limit(&m->syncpt); i++) {
u32 max = nvhost_syncpt_read_max(&m->syncpt, i);
u32 min = nvhost_syncpt_update_min(&m->syncpt, i);
if (!min && !max)
static int nvhost_ioctl_ctrl_syncpt_read(struct nvhost_ctrl_userctx *ctx,
struct nvhost_ctrl_syncpt_read_args *args)
{
- if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
+ if (!nvhost_syncpt_is_valid_pt(&ctx->dev->syncpt, args->id))
return -EINVAL;
args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
trace_nvhost_ioctl_ctrl_syncpt_read(args->id, args->value);
static int nvhost_ioctl_ctrl_syncpt_incr(struct nvhost_ctrl_userctx *ctx,
struct nvhost_ctrl_syncpt_incr_args *args)
{
- if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
+ if (!nvhost_syncpt_is_valid_pt(&ctx->dev->syncpt, args->id))
return -EINVAL;
trace_nvhost_ioctl_ctrl_syncpt_incr(args->id);
nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
{
u32 timeout;
int err;
- if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
+ if (!nvhost_syncpt_is_valid_pt(&ctx->dev->syncpt, args->id))
return -EINVAL;
if (args->timeout == NVHOST_NO_TIMEOUT)
/* FIXME: MAX_SCHEDULE_TIMEOUT is ulong which can be bigger
ulong timeout;
int err;
struct timespec ts;
- if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
+ if (!nvhost_syncpt_is_valid_pt(&ctx->dev->syncpt, args->id))
return -EINVAL;
if (args->timeout == NVHOST_NO_TIMEOUT)
timeout = MAX_SCHEDULE_TIMEOUT;
}
for (i = 0; i < args->num_pts; i++) {
- if (pts[i].id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt) &&
- pts[i].id != NVSYNCPT_INVALID) {
+ if (!nvhost_syncpt_is_valid_pt(&ctx->dev->syncpt, pts[i].id)) {
err = -EINVAL;
goto out;
}
static int nvhost_ioctl_ctrl_syncpt_read_max(struct nvhost_ctrl_userctx *ctx,
struct nvhost_ctrl_syncpt_read_args *args)
{
- if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
+ if (!nvhost_syncpt_is_valid_pt(&ctx->dev->syncpt, args->id))
return -EINVAL;
args->value = nvhost_syncpt_read_max(&ctx->dev->syncpt, args->id);
return 0;
u32 id;
pt = container_of(pos, struct sync_pt, pt_list);
id = nvhost_sync_pt_id(pt);
- if (!id || id >= nvhost_syncpt_nb_pts(sp)) {
+ if (!id || !nvhost_syncpt_is_valid_pt(sp, id)) {
sync_fence_put(fence);
return;
}
struct nvhost_waitchk *wait = &job->waitchk[i];
/* validate syncpt id */
- if (wait->syncpt_id > nvhost_syncpt_nb_pts(sp))
+ if (!nvhost_syncpt_is_valid_pt(sp, wait->syncpt_id))
continue;
/* skip all other gathers */
bitmap_zero(waitchk_mask, nvhost_syncpt_nb_pts(sp));
for (i = 0; i < job->num_waitchk; i++) {
u32 syncpt_id = job->waitchk[i].syncpt_id;
- if (syncpt_id < nvhost_syncpt_nb_pts(sp))
+ if (nvhost_syncpt_is_valid_pt(sp, syncpt_id))
set_bit(syncpt_id, waitchk_mask);
}
*
* Tegra Graphics Host Syncpoint Integration to linux/sync Framework
*
- * Copyright (c) 2013-2014, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2013-2015, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
u32 i;
struct sync_fence *fence = NULL;
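+ /* reject the whole request up front if any syncpt id is invalid,
+  * so no sync_pt is created for a bad id */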
+ for (i = 0; i < num_pts; i++) {
+ if (!nvhost_syncpt_is_valid_pt(sp, pts[i].id)) {
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
for (i = 0; i < num_pts; i++) {
struct nvhost_sync_timeline *obj;
struct sync_pt *pt;
u32 id = pts[i].id;
u32 thresh = pts[i].thresh;
- BUG_ON(id >= nvhost_syncpt_nb_pts(sp) &&
- (id != NVSYNCPT_INVALID));
obj = nvhost_syncpt_timeline(sp, id);
pt = nvhost_sync_pt_create_inst(obj, thresh);
if (pt == NULL) {
*/
void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
{
- struct nvhost_master *host = syncpt_to_dev(sp);
u32 i;
- for (i = host->info.pts_base; i < host->info.pts_limit; i++)
+ for (i = nvhost_syncpt_pts_base(sp);
+ i < nvhost_syncpt_pts_limit(sp); i++)
syncpt_op().reset(sp, i);
wmb();
}
u32 i;
struct nvhost_master *master = syncpt_to_dev(sp);
- for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
+ for (i = nvhost_syncpt_pts_base(sp);
+ i < nvhost_syncpt_pts_limit(sp); i++) {
if (nvhost_syncpt_client_managed(sp, i))
syncpt_op().update_min(sp, i);
else
u32 id,
u32 thresh);
- if (!id || id >= nvhost_syncpt_nb_pts(sp))
+ if (!id || !nvhost_syncpt_is_valid_pt(sp, id))
return -EINVAL;
if (value)
int i;
struct nvhost_master *host = syncpt_to_dev(sp);
struct nvhost_device_data *data = platform_get_drvdata(dev);
+ int nb_pts = nvhost_syncpt_nb_pts(sp);
int err = 0;
/* Allocate structs for min, max and base values */
- sp->assigned = kzalloc(sizeof(bool) * nvhost_syncpt_nb_pts(sp),
- GFP_KERNEL);
- sp->client_managed = kzalloc(sizeof(bool) * nvhost_syncpt_nb_pts(sp),
- GFP_KERNEL);
- sp->syncpt_names = kzalloc(sizeof(char *) * nvhost_syncpt_nb_pts(sp),
- GFP_KERNEL);
- sp->last_used_by = kzalloc(sizeof(char *) * nvhost_syncpt_nb_pts(sp),
- GFP_KERNEL);
- sp->min_val = kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_pts(sp),
- GFP_KERNEL);
- sp->max_val = kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_pts(sp),
- GFP_KERNEL);
+ sp->assigned = kzalloc(sizeof(bool) * nb_pts, GFP_KERNEL);
+ sp->client_managed = kzalloc(sizeof(bool) * nb_pts, GFP_KERNEL);
+ sp->syncpt_names = kzalloc(sizeof(char *) * nb_pts, GFP_KERNEL);
+ sp->last_used_by = kzalloc(sizeof(char *) * nb_pts, GFP_KERNEL);
+ sp->min_val = kzalloc(sizeof(atomic_t) * nb_pts, GFP_KERNEL);
+ sp->max_val = kzalloc(sizeof(atomic_t) * nb_pts, GFP_KERNEL);
sp->lock_counts =
kzalloc(sizeof(atomic_t) * nvhost_syncpt_nb_mlocks(sp),
GFP_KERNEL);
#ifdef CONFIG_TEGRA_GRHOST_SYNC
sp->timeline = kzalloc(sizeof(struct nvhost_sync_timeline *) *
- nvhost_syncpt_nb_pts(sp), GFP_KERNEL);
+ nb_pts, GFP_KERNEL);
if (!sp->timeline) {
err = -ENOMEM;
goto fail;
/* Allocate two attributes for each sync point: min and max */
sp->syncpt_attrs = kzalloc(sizeof(*sp->syncpt_attrs)
- * nvhost_syncpt_nb_pts(sp) * NUM_SYSFS_ENTRY,
+ * nb_pts * NUM_SYSFS_ENTRY,
GFP_KERNEL);
if (!sp->syncpt_attrs) {
err = -ENOMEM;
}
/* Fill in the attributes */
- for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
+ for (i = nvhost_syncpt_pts_base(sp);
+ i < nvhost_syncpt_pts_limit(sp); i++) {
struct nvhost_syncpt_attr *min =
&sp->syncpt_attrs[i*NUM_SYSFS_ENTRY];
struct nvhost_syncpt_attr *max =
{
#ifdef CONFIG_TEGRA_GRHOST_SYNC
int i;
- for (i = 0; i < nvhost_syncpt_nb_pts(sp); i++) {
+ for (i = nvhost_syncpt_pts_base(sp);
+ i < nvhost_syncpt_pts_limit(sp); i++) {
if (sp->timeline && sp->timeline[i]) {
sync_timeline_destroy(
(struct sync_timeline *)sp->timeline[i]);
return syncpt_to_dev(sp)->info.nb_pts;
}
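+/* id of the graphics host syncpt (the base of this host's syncpt range) */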
+int nvhost_syncpt_graphics_host_sp(struct nvhost_syncpt *sp)
+{
+ return syncpt_to_dev(sp)->info.pts_base;
+}
+
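+/* exclusive upper bound of this host's syncpt id range */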
+int nvhost_syncpt_pts_limit(struct nvhost_syncpt *sp)
+{
+ return syncpt_to_dev(sp)->info.pts_limit;
+}
+
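+/* first (lowest) syncpt id owned by this host */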
+int nvhost_syncpt_pts_base(struct nvhost_syncpt *sp)
+{
+ return syncpt_to_dev(sp)->info.pts_base;
+}
+
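+/* a syncpt id is valid if it lies in [pts_base, pts_limit) and is not
+ * NVSYNCPT_INVALID */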
+bool nvhost_syncpt_is_valid_pt(struct nvhost_syncpt *sp, u32 id)
+{
+ return (id >= nvhost_syncpt_pts_base(sp) &&
+ id < nvhost_syncpt_pts_limit(sp) && id != NVSYNCPT_INVALID);
+}
+
int nvhost_nb_syncpts_store(struct nvhost_syncpt *sp, const char *buf)
{
struct nvhost_master *master = syncpt_to_dev(sp);
nvhost_warn(d, "number of syncpts modified from %d to %d\n",
master->info.nb_pts, nb_syncpts);
master->info.nb_pts = nb_syncpts;
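+ /* keep pts_limit consistent with the updated nb_pts */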
+ master->info.pts_limit = master->info.pts_base +
+ master->info.nb_pts;
} else
ret = -EIO;
sp->client_managed[id] = client;
}
-int nvhost_syncpt_graphics_host_sp(struct nvhost_syncpt *sp)
-{
- return syncpt_to_dev(sp)->info.pts_base;
-}
-
-int nvhost_syncpt_pts_limit(struct nvhost_syncpt *sp)
-{
- return syncpt_to_dev(sp)->info.pts_limit;
-}
-
/* public sync point API */
u32 nvhost_syncpt_incr_max_ext(struct platform_device *dev, u32 id, u32 incrs)
{
}
EXPORT_SYMBOL(nvhost_syncpt_set_min_eq_max_ext);
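+/* exported wrapper around nvhost_syncpt_is_valid_pt() for other drivers */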
+bool nvhost_syncpt_is_valid_pt_ext(struct platform_device *dev, u32 id)
+{
+ struct nvhost_master *master = nvhost_get_host(dev);
+ struct nvhost_syncpt *sp = &master->syncpt;
+
+ return nvhost_syncpt_is_valid_pt(sp, id);
+}
+EXPORT_SYMBOL(nvhost_syncpt_is_valid_pt_ext);
+
int nvhost_syncpt_nb_pts_ext(struct platform_device *dev)
{
struct nvhost_master *master = nvhost_get_host(dev);
struct nvhost_syncpt *sp = &master->syncpt;
- return syncpt_to_dev(sp)->info.nb_pts;
+ return nvhost_syncpt_nb_pts(sp);
}
EXPORT_SYMBOL(nvhost_syncpt_nb_pts_ext);
/* when searching for free syncpt id, start from this base */
#define NVHOST_FREE_SYNCPT_BASE(sp) \
- (nvhost_syncpt_graphics_host_sp(sp) + 1)
+ (nvhost_syncpt_pts_base(sp) + 1)
/* timeout to wait for a syncpt to become free */
#define NVHOST_SYNCPT_FREE_WAIT_TIMEOUT (1 * HZ)
void nvhost_syncpt_set_min_eq_max(struct nvhost_syncpt *sp, u32 id);
int nvhost_syncpt_client_managed(struct nvhost_syncpt *sp, u32 id);
int nvhost_syncpt_nb_pts(struct nvhost_syncpt *sp);
+int nvhost_syncpt_pts_base(struct nvhost_syncpt *sp);
+bool nvhost_syncpt_is_valid_pt(struct nvhost_syncpt *sp, u32 id);
int nvhost_nb_syncpts_store(struct nvhost_syncpt *sp, const char *buf);
int nvhost_syncpt_nb_mlocks(struct nvhost_syncpt *sp);
void nvhost_syncpt_set_manager(struct nvhost_syncpt *sp, int id, bool client);
int nvhost_syncpt_patch_wait(struct nvhost_syncpt *sp, void *patch_addr);
-static inline int nvhost_syncpt_is_valid(struct nvhost_syncpt *sp, u32 id)
-{
- return id != NVSYNCPT_INVALID && id < nvhost_syncpt_nb_pts(sp);
-}
-
int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx);
void nvhost_mutex_unlock(struct nvhost_syncpt *sp, int idx);
u32 id, u32 thresh);
void nvhost_syncpt_set_min_eq_max_ext(struct platform_device *dev, u32 id);
int nvhost_syncpt_nb_pts_ext(struct platform_device *dev);
+bool nvhost_syncpt_is_valid_pt_ext(struct platform_device *dev, u32 id);
/* public host1x interrupt management APIs */
int nvhost_intr_register_notifier(struct platform_device *pdev,