rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
Revert "zram: don't grab mutex in zram_slot_free_noity"
author Sri Krishna chowdary <schowdary@nvidia.com>
Mon, 1 Sep 2014 10:02:39 +0000 (15:32 +0530)
committer Sachin Nikam <snikam@nvidia.com>
Fri, 5 Sep 2014 10:08:06 +0000 (03:08 -0700)
This reverts commit 7e9bd604d145d743bc1e62f23656b851dad368c1.

Reverting this as it causes conflicts when synced to latest upstream
zram code.

This patch will be properly applied after all dependent patches are picked.

Bug 200034063

Change-Id: If85d5e49a38aeb3f18a19c638475777e3a83ff7a
Signed-off-by: Sri Krishna chowdary <schowdary@nvidia.com>
Reviewed-on: http://git-master/r/494442
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
drivers/staging/zram/zram_drv.c
drivers/staging/zram/zram_drv.h

index cab18d22467721bca9e37f544a0fa61231719215..a333d44d0cffbeaa1412155a197f88f43ed063c9 100644 (file)
@@ -278,14 +278,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                goto out;
        }
 
-       /*
-        * zram_slot_free_notify could miss free so that let's
-        * double check.
-        */
-       if (unlikely(meta->table[index].handle ||
-                       zram_test_flag(meta, index, ZRAM_ZERO)))
-               zram_free_page(zram, index);
-
        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
                               meta->compress_workmem);
 
@@ -343,20 +335,6 @@ out:
        return ret;
 }
 
-static void handle_pending_slot_free(struct zram *zram)
-{
-       struct zram_slot_free *free_rq;
-
-       spin_lock(&zram->slot_free_lock);
-       while (zram->slot_free_rq) {
-               free_rq = zram->slot_free_rq;
-               zram->slot_free_rq = free_rq->next;
-               zram_free_page(zram, free_rq->index);
-               kfree(free_rq);
-       }
-       spin_unlock(&zram->slot_free_lock);
-}
-
 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, struct bio *bio, int rw)
 {
@@ -364,12 +342,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
 
        if (rw == READ) {
                down_read(&zram->lock);
-               handle_pending_slot_free(zram);
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
                up_read(&zram->lock);
        } else {
                down_write(&zram->lock);
-               handle_pending_slot_free(zram);
                ret = zram_bvec_write(zram, bvec, index, offset);
                up_write(&zram->lock);
        }
@@ -605,40 +581,16 @@ void zram_init_device(struct zram *zram, struct zram_meta *meta)
        pr_debug("Initialization done!\n");
 }
 
-static void zram_slot_free(struct work_struct *work)
-{
-       struct zram *zram;
-
-       zram = container_of(work, struct zram, free_work);
-       down_write(&zram->lock);
-       handle_pending_slot_free(zram);
-       up_write(&zram->lock);
-}
-
-static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
-{
-       spin_lock(&zram->slot_free_lock);
-       free_rq->next = zram->slot_free_rq;
-       zram->slot_free_rq = free_rq;
-       spin_unlock(&zram->slot_free_lock);
-}
-
 static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
 {
        struct zram *zram;
-       struct zram_slot_free *free_rq;
 
        zram = bdev->bd_disk->private_data;
+       down_write(&zram->lock);
+       zram_free_page(zram, index);
+       up_write(&zram->lock);
        zram_stat64_inc(zram, &zram->stats.notify_free);
-
-       free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
-       if (!free_rq)
-               return;
-
-       free_rq->index = index;
-       add_slot_free(zram, free_rq);
-       schedule_work(&zram->free_work);
 }
 
 static const struct block_device_operations zram_devops = {
@@ -654,10 +606,6 @@ static int create_device(struct zram *zram, int device_id)
        init_rwsem(&zram->init_lock);
        spin_lock_init(&zram->stat64_lock);
 
-       INIT_WORK(&zram->free_work, zram_slot_free);
-       spin_lock_init(&zram->slot_free_lock);
-       zram->slot_free_rq = NULL;
-
        zram->queue = blk_alloc_queue(GFP_KERNEL);
        if (!zram->queue) {
                pr_err("Error allocating disk queue for device %d\n",
index ece3524ea403b7bff4189f0c6dcc354320512ef3..d542eee81357582cf95bd4e296bb1610c2a7d7f8 100644 (file)
@@ -90,21 +90,12 @@ struct zram_meta {
        struct zs_pool *mem_pool;
 };
 
-struct zram_slot_free {
-       unsigned long index;
-       struct zram_slot_free *next;
-};
-
 struct zram {
        struct zram_meta *meta;
        spinlock_t stat64_lock; /* protect 64-bit stats */
        struct rw_semaphore lock; /* protect compression buffers, table,
                                   * 32bit stat counters against concurrent
                                   * notifications, reads and writes */
-
-       struct work_struct free_work;  /* handle pending free request */
-       struct zram_slot_free *slot_free_rq; /* list head of free request */
-
        struct request_queue *queue;
        struct gendisk *disk;
        int init_done;
@@ -115,7 +106,6 @@ struct zram {
         * we can store in a disk.
         */
        u64 disksize;   /* bytes */
-       spinlock_t slot_free_lock;
 
        struct zram_stats stats;
 };