struct pdev_archdata {
};
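+/* Return the IOMMU mapping stored in the device archdata, or NULL when ARM IOMMU-backed DMA is not in use. */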
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
+#else
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
#endif
source "drivers/video/backlight/Kconfig"
source "drivers/video/adf/Kconfig"
+source "drivers/video/tegra/Kconfig"
+
config VGASTATE
tristate
default n
obj-y += fbdev/
+obj-y += tegra/
+
obj-$(CONFIG_VIDEOMODE_HELPERS) += display_timing.o videomode.o
ifeq ($(CONFIG_OF),y)
obj-$(CONFIG_VIDEOMODE_HELPERS) += of_display_timing.o of_videomode.o
Once last allocated FD reaches this number, allocation of subsequent
FD's start from NVMAP_START_FD.
+config NVMAP_SUPPORT_RESIZABLE_CMA
+ bool "Support resizable CMA heaps"
+ depends on DMA_CMA
+ default n
+ help
+ Support for resizable carveouts is an enhancement rather than a requirement.
+ Select this option on platforms that need resizable CMA carveouts.
+
endif
*/
#include <linux/backing-dev.h>
+#include <linux/backing-dev-defs.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
static struct backing_dev_info nvmap_bdi = {
.ra_pages = 0,
- .capabilities = (BDI_CAP_NO_ACCT_AND_WRITEBACK |
- BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
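+ /* The BDI_CAP_*_MAP bits no longer exist; no-MMU mapping capabilities are reported through mmap_capabilities() below instead. */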
+ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
+#ifndef CONFIG_MMU
+static unsigned nvmap_mmap_capabilities(struct file *filp);
+#endif
+
static struct device_dma_parameters nvmap_dma_parameters = {
.max_segment_size = UINT_MAX,
};
.compat_ioctl = nvmap_ioctl,
#endif
.mmap = nvmap_map,
+#ifndef CONFIG_MMU
+ .mmap_capabilities = nvmap_mmap_capabilities,
+#endif
};
/*
priv->kernel_client = false;
- filp->f_mapping->backing_dev_info = &nvmap_bdi;
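+ /* struct address_space no longer carries a backing_dev_info; point the superblock's bdi at nvmap's so inode_to_bdi() finds it. */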
+ inode->i_sb->s_bdi = &nvmap_bdi;
filp->private_data = priv;
return 0;
return -EPERM;
}
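+/* On no-MMU kernels the mmap path queries this hook; nvmap buffers may be mapped for both read and write. */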
+#ifndef CONFIG_MMU
+static unsigned nvmap_mmap_capabilities(struct file *filp)
+{
+ return NOMMU_MAP_READ | NOMMU_MAP_WRITE;
+}
+#endif
+
static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int err = 0;
int err;
struct dma_buf *dmabuf;
struct nvmap_handle_info *info;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
INIT_LIST_HEAD(&info->maps);
mutex_init(&info->maps_lock);
- dmabuf = dma_buf_export(info, &nvmap_dma_buf_ops, handle->size,
- O_RDWR, NULL);
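+ /* dma_buf_export() takes a single dma_buf_export_info argument; fill it in before exporting. */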
+ exp_info.priv = info;
+ exp_info.ops = &nvmap_dma_buf_ops;
+ exp_info.size = handle->size;
+ exp_info.flags = O_RDWR;
+
+ dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
err = PTR_ERR(dmabuf);
goto err_export;
unsigned int mem_prot;
phys_addr_t orig_addr;
size_t size;
+ size_t align; /* requested alignment of the allocation */
struct nvmap_heap *heap;
struct list_head free_list;
};
dma_set_attr(DMA_ATTR_ALLOC_EXACT_SIZE, &attrs);
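+ /* The IVM reservation path is only built for virtualized Tegra configurations; all other allocations go through dma_alloc_attrs(). */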
+#ifdef CONFIG_TEGRA_VIRTUALIZATION
if (start && h->is_ivm) {
void *ret;
pa = h->base + (*start);
dev_dbg(dev, "reserved (%pa) len(%zu)\n",
&pa, len);
}
- } else {
+ } else
+#endif
+ {
(void)dma_alloc_attrs(dev, len, &pa,
DMA_MEMORY_NOMAP, &attrs);
if (!dma_mapping_error(dev, pa))
DEFINE_DMA_ATTRS(attrs);
dev_dbg(dev, "Free base (%pa) size (%zu)\n", &base, len);
+#ifdef CONFIG_TEGRA_VIRTUALIZATION
if (h->is_ivm && !h->can_alloc) {
dma_mark_declared_memory_unoccupied(dev, base, len);
- } else {
+ } else
+#endif
+ {
dma_set_attr(DMA_ATTR_ALLOC_EXACT_SIZE, &attrs);
dma_free_attrs(dev, len,
(void *)(uintptr_t)base,
return b;
}
-/* nvmap_heap_free: frees block b*/
-void nvmap_heap_free(struct nvmap_heap_block *b)
+struct nvmap_heap *nvmap_block_to_heap(struct nvmap_heap_block *b)
{
struct list_block *lb;
lb = container_of(b, struct list_block, block);
h->dma_dev = co->dma_dev;
if (co->cma_dev) {
-#ifdef CONFIG_DMA_CMA
+#ifdef CONFIG_NVMAP_SUPPORT_RESIZABLE_CMA
struct dma_contiguous_stats stats;
if (dma_get_contiguous_stats(co->cma_dev, &stats))
*
* GPU heap allocator.
*
- * Copyright (c) 2010-2015, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2010-2016, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
struct nvmap_heap_block {
unsigned long base;
unsigned int type;
+ struct nvmap_handle *handle; /* handle that owns this block */
};
struct nvmap_heap *nvmap_heap_create(struct device *parent,
/*
- * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#define pr_fmt(fmt) "%s: " fmt, __func__
-#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/nvmap.h>
#include <linux/tegra-ivc.h>
#include <linux/dma-contiguous.h>
+#include <linux/cma.h>
#include <asm/dma-contiguous.h>
#include "iomap.h"
#include "board.h"
#include <linux/platform/tegra/common.h>
+#ifdef CONFIG_TEGRA_VIRTUALIZATION
#include "../../../drivers/virt/tegra/syscalls.h"
+#endif
phys_addr_t __weak tegra_carveout_start;
phys_addr_t __weak tegra_carveout_size;
struct device __weak tegra_iram_dev;
struct device __weak tegra_generic_cma_dev;
struct device __weak tegra_vpr_cma_dev;
-struct dma_resize_notifier_ops __weak vpr_dev_ops;
static const struct of_device_id nvmap_of_ids[] = {
{ .compatible = "nvidia,carveouts" },
{ }
};
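+/* Resizable CMA carveout declarations, only built when NVMAP_SUPPORT_RESIZABLE_CMA is enabled. */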
+#ifdef CONFIG_NVMAP_SUPPORT_RESIZABLE_CMA
+struct dma_resize_notifier_ops __weak vpr_dev_ops;
+
static struct dma_declare_info generic_dma_info = {
.name = "generic",
.size = 0,
.size = SZ_32M,
.notifier.ops = &vpr_dev_ops,
};
+#endif
static struct nvmap_platform_carveout nvmap_carveouts[4] = {
[0] = {
.size = 0,
.dma_dev = &tegra_generic_dev,
.cma_dev = &tegra_generic_cma_dev,
+#ifdef CONFIG_NVMAP_SUPPORT_RESIZABLE_CMA
.dma_info = &generic_dma_info,
+#endif
},
[2] = {
.name = "vpr",
.size = 0,
.dma_dev = &tegra_vpr_dev,
.cma_dev = &tegra_vpr_cma_dev,
+#ifdef CONFIG_NVMAP_SUPPORT_RESIZABLE_CMA
.dma_info = &vpr_dma_info,
+#endif
.enable_static_dma_map = true,
},
};
"%s :dma coherent mem declare fail %pa,%zu\n",
co->name, &co->base, co->size);
} else {
+#ifndef CONFIG_NVMAP_SUPPORT_RESIZABLE_CMA
+ return -ENODEV;
+#else
/*
* When vpr memory is reserved, kmemleak tries to scan vpr
* memory for pointers. vpr memory should not be accessed
co->name);
else
co->init_done = true;
+#endif
}
return err;
}
if (copy_from_user(&floor_size, arg, sizeof(floor_size)))
return -EFAULT;
+#ifdef CONFIG_NVMAP_SUPPORT_RESIZABLE_CMA
err = dma_set_resizable_heap_floor_size(&tegra_vpr_dev, floor_size);
+#endif
return err;
}
DMA_ATTR_NO_KERNEL_MAPPING,
DMA_ATTR_SKIP_CPU_SYNC,
DMA_ATTR_FORCE_CONTIGUOUS,
+ DMA_ATTR_SKIP_IOVA_GAP,
DMA_ATTR_ALLOC_EXACT_SIZE,
DMA_ATTR_MAX,
};
/* common carveout heaps */
#define NVMAP_HEAP_CARVEOUT_IRAM (1ul<<29)
#define NVMAP_HEAP_CARVEOUT_VPR (1ul<<28)
+#define NVMAP_HEAP_CARVEOUT_IVM (1ul<<1) /* inter-VM shared carveout */
#define NVMAP_HEAP_CARVEOUT_GENERIC (1ul<<0)
#define NVMAP_HEAP_CARVEOUT_MASK (NVMAP_HEAP_IOVMM - 1)
#define NVMAP_HANDLE_KIND_SPECIFIED (0x1ul << 3)
#define NVMAP_HANDLE_COMPR_SPECIFIED (0x1ul << 4)
-#define NVMAP_HANDLE_ZEROED_PAGES (0x1ul << 5)
#define NVMAP_HANDLE_PHYS_CONTIG (0x1ul << 6)
#define NVMAP_HANDLE_CACHE_SYNC (0x1ul << 7)
#define NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE (0x1ul << 8)
__u32 align; /* min alignment necessary */
};
+struct nvmap_alloc_ivm_handle {
+ __u32 handle; /* nvmap handle */
+ __u32 heap_mask; /* heaps to allocate from */
+ __u32 flags; /* wb/wc/uc/iwb etc. */
+ __u32 align; /* min alignment necessary */
+ __u32 peer; /* peer with whom handle must be shared. Used
+ * only for NVMAP_HEAP_CARVEOUT_IVM
+ */
+};
struct nvmap_alloc_kind_handle {
__u32 handle; /* nvmap handle */
/* Perform reserve operation on a list of handles. */
#define NVMAP_IOC_RESERVE _IOW(NVMAP_IOC_MAGIC, 18, \
struct nvmap_cache_op_list)
+
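+/* Inter-VM sharing: create a handle from an IVC id, look up a handle's IVC id, and query the available IVM heaps. */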
+#define NVMAP_IOC_FROM_IVC_ID _IOWR(NVMAP_IOC_MAGIC, 19, struct nvmap_create_handle)
+#define NVMAP_IOC_GET_IVC_ID _IOWR(NVMAP_IOC_MAGIC, 20, struct nvmap_create_handle)
+#define NVMAP_IOC_GET_IVM_HEAPS _IOR(NVMAP_IOC_MAGIC, 21, unsigned int)
+
/* START of T124 IOCTLS */
/* Actually allocates memory for the specified handle, with kind */
#define NVMAP_IOC_ALLOC_KIND _IOW(NVMAP_IOC_MAGIC, 100, struct nvmap_alloc_kind_handle)