dma-buf: provide explicit function to release all mappings of dev
author		Pritesh Raithatha <praithatha@nvidia.com>
		Fri, 15 Sep 2017 10:48:04 +0000 (16:18 +0530)
committer	mobile promotions <svcmobile_promotions@nvidia.com>
		Mon, 9 Oct 2017 16:00:10 +0000 (09:00 -0700)
With lazy unmap enabled, a device can accumulate a list of pending
(stashed) unmaps, and those mappings remain visible to others. To
address this, provide an explicit function that releases all of a
device's stashed mappings.

Bug 1950747

Change-Id: I8b11b8cc132e5a8266b5e4dedf6974037734e3a6
Signed-off-by: Pritesh Raithatha <praithatha@nvidia.com>
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1560791
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Tested-by: Bharat Nihalani <bnihalani@nvidia.com>
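
As a usage sketch (not part of this commit): a driver for a host1x context
device would call the new export when tearing its context down. The hook
name below is hypothetical; only dma_buf_release_stash() is introduced by
this change.

	#include <linux/device.h>
	#include <linux/dma-buf.h>

	/* Hypothetical teardown path for a host1x context device. */
	static void example_context_dev_shutdown(struct device *dev)
	{
		/*
		 * Drop every stashed (lazily unmapped) dma-buf attachment
		 * still recorded on dev->attachments. This is a no-op unless
		 * dev->context_dev was set before buffers were attached.
		 */
		dma_buf_release_stash(dev);
	}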
drivers/base/core.c
drivers/dma-buf/dma-buf.c
include/linux/device.h
include/linux/dma-buf.h

diff --git a/drivers/base/core.c b/drivers/base/core.c
index 5dde455fcb50688d8d2aa9b5b14db16b0c784f08..7e5163c46f63349d6a7f83ae8cd2bdfcb3c06add 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -706,12 +706,15 @@ void device_initialize(struct device *dev)
        lockdep_set_novalidate_class(&dev->mutex);
        spin_lock_init(&dev->devres_lock);
        INIT_LIST_HEAD(&dev->devres_head);
+       INIT_LIST_HEAD(&dev->attachments);
+
        device_pm_init(dev);
        set_dev_node(dev, -1);
 #ifdef CONFIG_GENERIC_MSI_IRQ
        INIT_LIST_HEAD(&dev->msi_list);
 #endif
        dev->no_dmabuf_defer_unmap = 0;
+       dev->context_dev = false;
 }
 EXPORT_SYMBOL_GPL(device_initialize);
 
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 75714cb80b2e66b49f09e5c804456ccaf5a3782e..d740607a96216b1f86f1198f79e8d25a13d85113 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -43,6 +43,8 @@ struct dma_buf_list {
 
 static struct dma_buf_list db_list;
 
+static struct mutex context_dev_lock;
+
 /**
  * dma_buf_set_drvdata - Set driver specific data to dmabuf. The data
  * will remain even if the device is detached from the device. This is useful
@@ -134,10 +136,69 @@ static bool dmabuf_can_defer_unmap(struct dma_buf *dmabuf,
        return !device->no_dmabuf_defer_unmap;
 }
 
+static void dma_buf_release_attachment(struct dma_buf_attachment *attach)
+{
+       struct dma_buf *dmabuf = attach->dmabuf;
+
+       BUG_ON(atomic_read(&attach->ref) != 1);
+       BUG_ON(atomic_read(&attach->maps));
+
+       if (attach->dev->context_dev)
+               list_del(&attach->dev_node);
+
+       list_del(&attach->node);
+       if (dmabuf_can_defer_unmap(dmabuf, attach->dev)) {
+               /* sg_table is -ENOMEM if map fails before release */
+               if (!IS_ERR_OR_NULL(attach->sg_table))
+                       dmabuf->ops->unmap_dma_buf(attach,
+                               attach->sg_table, DMA_BIDIRECTIONAL);
+               if (dmabuf->ops->detach)
+                       dmabuf->ops->detach(dmabuf, attach);
+               kzfree(attach);
+       }
+}
+
+void dma_buf_release_stash(struct device *dev)
+{
+       struct dma_buf_attachment *attach, *next;
+       struct dma_buf_attachment *attach_inner, *next_inner;
+       struct dma_buf *dmabuf;
+       bool other_context_dev_attached = false;
+
+       if (!dev->context_dev)
+               return;
+
+       mutex_lock(&context_dev_lock);
+
+       list_for_each_entry_safe(attach, next, &dev->attachments, dev_node) {
+               dmabuf = attach->dmabuf;
+
+               mutex_lock(&dmabuf->lock);
+               dma_buf_release_attachment(attach);
+
+               list_for_each_entry_safe(attach_inner, next_inner,
+                       &dmabuf->attachments, node) {
+                       if (attach_inner->dev->context_dev) {
+                               other_context_dev_attached = true;
+                               break;
+                       }
+               }
+
+               if (!other_context_dev_attached)
+                       dmabuf->context_dev = false;
+
+               mutex_unlock(&dmabuf->lock);
+       }
+
+       mutex_unlock(&context_dev_lock);
+}
+EXPORT_SYMBOL(dma_buf_release_stash);
+
 static int dma_buf_release(struct inode *inode, struct file *file)
 {
        struct dma_buf *dmabuf;
        struct dma_buf_attachment *attach, *next;
+       bool context_dev_locked = false;
 
        if (!is_dma_buf_file(file))
                return -EINVAL;
@@ -145,25 +206,23 @@ static int dma_buf_release(struct inode *inode, struct file *file)
        dmabuf = file->private_data;
 
        BUG_ON(dmabuf->vmapping_counter);
+
+       if (dmabuf->context_dev) {
+               mutex_lock(&context_dev_lock);
+               context_dev_locked = true;
+       }
+
        mutex_lock(&dmabuf->lock);
-       list_for_each_entry_safe(attach, next, &dmabuf->attachments, node) {
-               BUG_ON(atomic_read(&attach->ref) != 1);
-               BUG_ON(atomic_read(&attach->maps));
-
-               list_del(&attach->node);
-               if (dmabuf_can_defer_unmap(dmabuf, attach->dev)) {
-                       /* sg_table is -ENOMEM if map fails before release */
-                       if (!IS_ERR_OR_NULL(attach->sg_table))
-                               attach->dmabuf->ops->unmap_dma_buf(attach,
-                                       attach->sg_table, DMA_BIDIRECTIONAL);
-                       if (dmabuf->ops->detach)
-                               dmabuf->ops->detach(dmabuf, attach);
-                       kzfree(attach);
-               }
 
+       list_for_each_entry_safe(attach, next, &dmabuf->attachments, node) {
+               dma_buf_release_attachment(attach);
        }
+
        mutex_unlock(&dmabuf->lock);
 
+       if (context_dev_locked)
+               mutex_unlock(&context_dev_lock);
+
        /*
         * Any fences that a dma-buf poll can wait on should be signaled
         * before releasing dma-buf. This is the responsibility of each
@@ -546,6 +605,9 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
        if (WARN_ON(!dmabuf || !dev))
                return ERR_PTR(-EINVAL);
 
+       if (dev->context_dev)
+               mutex_lock(&context_dev_lock);
+
        mutex_lock(&dmabuf->lock);
        if (dmabuf_can_defer_unmap(dmabuf, dev)) {
                /* Don't allow multiple attachments for a device */
@@ -562,6 +624,8 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
                                continue;
 
                        mutex_unlock(&dmabuf->lock);
+                       if (dev->context_dev)
+                               mutex_unlock(&context_dev_lock);
                        return attach;
                }
        }
@@ -569,6 +633,8 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
        attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
        if (attach == NULL) {
                mutex_unlock(&dmabuf->lock);
+               if (dev->context_dev)
+                       mutex_unlock(&context_dev_lock);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -594,14 +660,27 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
                if (ret)
                        goto err_attach;
        }
-       list_add(&attach->node, &dmabuf->attachments);
+
+       if (dev->context_dev) {
+               dmabuf->context_dev = true;
+               list_add(&attach->dev_node, &dev->attachments);
+               list_add(&attach->node, &dmabuf->attachments);
+       } else {
+               list_add(&attach->node, &dmabuf->attachments);
+       }
 
        mutex_unlock(&dmabuf->lock);
+
+       if (dev->context_dev)
+               mutex_unlock(&context_dev_lock);
+
        return attach;
 
 err_attach:
        kfree(attach);
        mutex_unlock(&dmabuf->lock);
+       if (dev->context_dev)
+               mutex_unlock(&context_dev_lock);
        return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(dma_buf_attach);
@@ -615,6 +694,8 @@ EXPORT_SYMBOL_GPL(dma_buf_attach);
  */
 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 {
+       bool is_locked = false;
+
        if (WARN_ON(!dmabuf || !attach))
                return;
 
@@ -626,12 +707,21 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
        if (dmabuf_can_defer_unmap(dmabuf, attach->dev))
                return;
 
+       if (dmabuf->context_dev) {
+               mutex_lock(&context_dev_lock);
+               is_locked = true;
+       }
+
        mutex_lock(&dmabuf->lock);
        list_del(&attach->node);
        if (dmabuf->ops->detach)
                dmabuf->ops->detach(dmabuf, attach);
 
        mutex_unlock(&dmabuf->lock);
+
+       if (is_locked)
+               mutex_unlock(&context_dev_lock);
+
        kzfree(attach);
 }
 EXPORT_SYMBOL_GPL(dma_buf_detach);
@@ -1093,6 +1183,9 @@ static int __init dma_buf_init(void)
 {
        mutex_init(&db_list.lock);
        INIT_LIST_HEAD(&db_list.head);
+
+       mutex_init(&context_dev_lock);
+
        dma_buf_init_debugfs();
        return 0;
 }
diff --git a/include/linux/device.h b/include/linux/device.h
index 8fa50e21f6669329d8174ee7b311c7cc56dc750f..2f7116466abdc365475a99193f917f84c6c04e25 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -839,6 +839,15 @@ struct device {
        void    (*release)(struct device *dev);
        struct iommu_group      *iommu_group;
 
+       /* dma-buf stashing is optimized for host1x context device. Adding
+        * flag to find out whether device is context device or not.
+        * To iterate over all dma-bufs attached to dev for stashing, we need
+        * dev to dma-buf mappings list stored in dev node, adding attachments
+        * for that purpose.
+        */
+       bool                    context_dev;
+       struct list_head        attachments;
+
        bool                    offline_disabled:1;
        bool                    offline:1;
        bool                    no_dmabuf_defer_unmap:1;
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 6cce5f4fc37a8e3ee010290cfc6539558f735553..b7c8106f4ce83574d4a98aa600b05b65f087d447 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -144,6 +144,11 @@ struct dma_buf {
        void *priv;
        struct reservation_object *resv;
 
+       /* dma-buf stashing is optimized for host1x context device. Adding flag
+        * to find out whether dma_buf is attached to any context device or not.
+        */
+       bool context_dev;
+
        /* poll support */
        wait_queue_head_t poll;
 
@@ -170,6 +175,9 @@ struct dma_buf_attachment {
        struct dma_buf *dmabuf;
        struct device *dev;
        struct list_head node;
+
+       /* Adding list node for device attachments. */
+       struct list_head dev_node;
        void *priv;
        struct sg_table *sg_table;
        atomic_t ref;
@@ -232,6 +240,8 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags);
 struct dma_buf *dma_buf_get(int fd);
 void dma_buf_put(struct dma_buf *dmabuf);
 
+void dma_buf_release_stash(struct device *dev);
+
 int dma_buf_set_drvdata(struct dma_buf *, struct device *,
                        void *, void (*destroy)(void *));
 void *dma_buf_get_drvdata(struct dma_buf *, struct device *);
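
For completeness, a minimal sketch (assumptions, not part of the patch) of
how a device might opt in to stashing: the commit itself only adds the
context_dev flag, initialized to false in device_initialize(), and the
dev->attachments list; the probe function below is hypothetical.

	#include <linux/device.h>
	#include <linux/platform_device.h>

	/*
	 * Hypothetical probe: mark the device as a host1x context device so
	 * that dma_buf_attach() records attachments on dev->attachments and
	 * dma_buf_release_stash() can later drop them.
	 */
	static int example_context_dev_probe(struct platform_device *pdev)
	{
		pdev->dev.context_dev = true;
		return 0;
	}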