* synchronization; we might still wait and do an increment */
size = args->num_entries * sizeof(struct nvgpu_gpfifo);
if (size) {
- gpfifo = vmalloc(size);
+ gpfifo = nvgpu_alloc(size, false);
if (!gpfifo)
return -ENOMEM;
gk20a_fence_put(fence_out);
clean_up:
- vfree(gpfifo);
+ nvgpu_free(gpfifo);
return ret;
}
mutex_lock(&vm->update_gmmu_lock);
- buffer_list = kzalloc(sizeof(*buffer_list) *
- vm->num_user_mapped_buffers, GFP_KERNEL);
+ buffer_list = nvgpu_alloc(sizeof(*buffer_list) *
+ vm->num_user_mapped_buffers, true);
if (!buffer_list) {
mutex_unlock(&vm->update_gmmu_lock);
return -ENOMEM;
mutex_unlock(&vm->update_gmmu_lock);
- kfree(mapped_buffers);
+ nvgpu_free(mapped_buffers);
}
static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset)
#include <linux/dma-attrs.h>
#include <linux/iommu.h>
#include <linux/tegra-soc.h>
+#include <linux/vmalloc.h>
#include <asm/dma-iommu.h>
#include <asm/cacheflush.h>
#include "gk20a_allocator.h"
extern const struct gk20a_mmu_level gk20a_mm_levels_64k[];
extern const struct gk20a_mmu_level gk20a_mm_levels_128k[];
+/*
+ * nvgpu_alloc - size-aware allocator for driver metadata buffers.
+ *
+ * Requests larger than PAGE_SIZE use vmalloc()/vzalloc() (virtually
+ * contiguous; avoids high-order page allocation failures for big
+ * gpfifo / mapped-buffer arrays), smaller requests use
+ * kmalloc()/kzalloc() with GFP_KERNEL.  @clear selects a
+ * zero-initialized buffer.  Returns NULL on allocation failure.
+ *
+ * Must be paired with nvgpu_free(), which detects which allocator
+ * was used.  NOTE(review): GFP_KERNEL and vmalloc may sleep --
+ * callers must be in process context and hold no spinlocks; confirm
+ * all call sites converted by this patch allow blocking.
+ */
+static inline void *nvgpu_alloc(size_t size, bool clear)
+{
+	void *p;
+
+	if (size > PAGE_SIZE) {
+		if (clear)
+			p = vzalloc(size);
+		else
+			p = vmalloc(size);
+	} else {
+		if (clear)
+			p = kzalloc(size, GFP_KERNEL);
+		else
+			p = kmalloc(size, GFP_KERNEL);
+	}
+
+	return p;
+}
+
+/*
+ * nvgpu_free - release memory obtained from nvgpu_alloc().
+ *
+ * Dispatches to the matching deallocator: virt_addr_valid() holds for
+ * directly-mapped (kmalloc) addresses, so those take kfree();
+ * everything else is assumed to be in the vmalloc range and takes
+ * vfree().  Both kfree(NULL) and vfree(NULL) are no-ops, so a NULL
+ * @p is safe here.
+ *
+ * NOTE(review): is_vmalloc_addr() is the more conventional
+ * discriminator (it is what kvfree() uses); virt_addr_valid() also
+ * returns false for non-lowmem, non-vmalloc pointers, which would
+ * then be mis-routed to vfree() -- harmless for pointers that really
+ * came from nvgpu_alloc(), but confirm nothing else is passed here.
+ */
+static inline void nvgpu_free(void *p)
+{
+	if (virt_addr_valid(p))
+		kfree(p);
+	else
+		vfree(p);
+}
+
#endif /* MM_GK20A_H */