drivers/staging/zram/zram_drv.c
/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline struct zram *dev_to_zram(struct device *dev)
{
        return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%u\n", zram->init_done);
}

static ssize_t num_reads_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                        (u64)atomic64_read(&zram->stats.num_reads));
}

static ssize_t num_writes_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                        (u64)atomic64_read(&zram->stats.num_writes));
}

static ssize_t invalid_io_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                        (u64)atomic64_read(&zram->stats.invalid_io));
}

static ssize_t notify_free_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                        (u64)atomic64_read(&zram->stats.notify_free));
}

static ssize_t zero_pages_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%u\n", zram->stats.pages_zero);
}

static ssize_t orig_data_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                (u64)(zram->stats.pages_stored) << PAGE_SHIFT);
}

static ssize_t compr_data_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return sprintf(buf, "%llu\n",
                        (u64)atomic64_read(&zram->stats.compr_size));
}

static ssize_t mem_used_total_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);
        struct zram_meta *meta = zram->meta;

        down_read(&zram->init_lock);
        if (zram->init_done)
                val = zs_get_total_size_bytes(meta->mem_pool);
        up_read(&zram->init_lock);

        return sprintf(buf, "%llu\n", val);
}

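/* Helpers to test/set/clear per-page state flags in the zram page table */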
static int zram_test_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].flags &= ~BIT(flag);
}

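/* A bio_vec that covers less than a whole page is a partial I/O */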
static inline int is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
        u64 start, end, bound;

        /* unaligned request */
        if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return 0;
        if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return 0;

        start = bio->bi_sector;
        end = start + (bio->bi_size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
                return 0;

        /* I/O request is valid */
        return 1;
}

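/* Free all per-device metadata: zsmalloc pool, work buffers and page table */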
static void zram_meta_free(struct zram_meta *meta)
{
        zs_destroy_pool(meta->mem_pool);
        kfree(meta->compress_workmem);
        free_pages((unsigned long)meta->compress_buffer, 1);
        vfree(meta->table);
        kfree(meta);
}

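/*
 * Allocate per-device metadata for the given disksize: LZO work memory,
 * a two-page compression buffer, the page table and the zsmalloc pool.
 * Returns NULL if any allocation fails.
 */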
static struct zram_meta *zram_meta_alloc(u64 disksize)
{
        size_t num_pages;
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
        if (!meta)
                goto out;

        meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        if (!meta->compress_workmem)
                goto free_meta;

        meta->compress_buffer =
                (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
        if (!meta->compress_buffer) {
                pr_err("Error allocating compressor buffer space\n");
                goto free_workmem;
        }

        num_pages = disksize >> PAGE_SHIFT;
        meta->table = vzalloc(num_pages * sizeof(*meta->table));
        if (!meta->table) {
                pr_err("Error allocating zram address table\n");
                goto free_buffer;
        }

        meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
                goto free_table;
        }

        return meta;

free_table:
        vfree(meta->table);
free_buffer:
        free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
        kfree(meta->compress_workmem);
free_meta:
        kfree(meta);
        meta = NULL;
out:
        return meta;
}

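/*
 * Advance (index, offset) past the current bio_vec; index moves to the
 * next page whenever the transfer reaches or crosses a page boundary.
 */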
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

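/* Return 1 iff the page contains only zero bytes */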
static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

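/* Serve a read of a zero-filled page: just clear the target buffer */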
static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec))
                memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        else
                clear_page(user_mem);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}

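/*
 * Free the compressed object backing this page index and update the
 * zero-page, compression-quality and size statistics accordingly.
 */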
static void zram_free_page(struct zram *zram, size_t index)
{
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;
        u16 size = meta->table[index].size;

        if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(meta, index, ZRAM_ZERO)) {
                        zram_clear_flag(meta, index, ZRAM_ZERO);
                        zram->stats.pages_zero--;
                }
                return;
        }

        if (unlikely(size > max_zpage_size))
                zram->stats.bad_compress--;

        zs_free(meta->mem_pool, handle);

        if (size <= PAGE_SIZE / 2)
                zram->stats.good_compress--;

        atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
        zram->stats.pages_stored--;

        meta->table[index].handle = 0;
        meta->table[index].size = 0;
}

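/*
 * Decompress the object at @index into @mem (one full page). Pages
 * stored uncompressed are copied verbatim; zero pages are cleared.
 */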
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
        int ret = LZO_E_OK;
        size_t clen = PAGE_SIZE;
        unsigned char *cmem;
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;

        if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
                clear_page(mem);
                return 0;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (meta->table[index].size == PAGE_SIZE)
                copy_page(mem, cmem);
        else
                ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
                                                mem, &clen);
        zs_unmap_object(meta->mem_pool, handle);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                atomic64_inc(&zram->stats.failed_reads);
                return ret;
        }

        return 0;
}

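/*
 * Read a bio_vec worth of data. Partial reads decompress into a
 * bounce buffer first and then copy out only the requested span.
 */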
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset, struct bio *bio)
{
        int ret;
        struct page *page;
        unsigned char *user_mem, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        page = bvec->bv_page;

        if (unlikely(!meta->table[index].handle) ||
                        zram_test_flag(meta, index, ZRAM_ZERO)) {
                handle_zero_page(bvec);
                return 0;
        }

        if (is_partial_io(bvec))
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

        user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;

        if (!uncmem) {
                pr_info("Unable to allocate temp memory\n");
                ret = -ENOMEM;
                goto out_cleanup;
        }

        ret = zram_decompress_page(zram, uncmem, index);
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK))
                goto out_cleanup;

        if (is_partial_io(bvec))
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                                bvec->bv_len);

        flush_dcache_page(page);
        ret = 0;
out_cleanup:
        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

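/*
 * Write a bio_vec worth of data. Partial writes read-modify-write a
 * full page; zero-filled pages are recorded with a flag only, and
 * poorly compressible pages are stored uncompressed.
 */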
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
{
        int ret = 0;
        size_t clen;
        unsigned long handle;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;

        page = bvec->bv_page;
        src = meta->compress_buffer;

        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
                if (!uncmem) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
                if (ret)
                        goto out;
        }

        /*
         * System overwrites unused sectors. Free memory associated
         * with this sector now.
         */
        if (meta->table[index].handle ||
            zram_test_flag(meta, index, ZRAM_ZERO))
                zram_free_page(zram, index);

        user_mem = kmap_atomic(page);

        if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                       bvec->bv_len);
                kunmap_atomic(user_mem);
                user_mem = NULL;
        } else {
                uncmem = user_mem;
        }

        if (page_zero_filled(uncmem)) {
                /* user_mem is already unmapped in the partial-IO path */
                if (user_mem)
                        kunmap_atomic(user_mem);
                zram->stats.pages_zero++;
                zram_set_flag(meta, index, ZRAM_ZERO);
                ret = 0;
                goto out;
        }

        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
                               meta->compress_workmem);

        if (!is_partial_io(bvec)) {
                kunmap_atomic(user_mem);
                user_mem = NULL;
                uncmem = NULL;
        }

        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }

        if (unlikely(clen > max_zpage_size)) {
                zram->stats.bad_compress++;
                clen = PAGE_SIZE;
                src = NULL;
                if (is_partial_io(bvec))
                        src = uncmem;
        }

        handle = zs_malloc(meta->mem_pool, clen);
        if (!handle) {
                pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
                        index, clen);
                ret = -ENOMEM;
                goto out;
        }
        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
                copy_page(cmem, src);
                kunmap_atomic(src);
        } else {
                memcpy(cmem, src, clen);
        }

        zs_unmap_object(meta->mem_pool, handle);

        meta->table[index].handle = handle;
        meta->table[index].size = clen;

        /* Update stats */
        atomic64_add(clen, &zram->stats.compr_size);
        zram->stats.pages_stored++;
        if (clen <= PAGE_SIZE / 2)
                zram->stats.good_compress++;

out:
        if (is_partial_io(bvec))
                kfree(uncmem);

        if (ret)
                atomic64_inc(&zram->stats.failed_writes);
        return ret;
}

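/* Dispatch a single-page bio_vec to the read or write path under zram->lock */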
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, struct bio *bio, int rw)
{
        int ret;

        if (rw == READ) {
                down_read(&zram->lock);
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
                up_read(&zram->lock);
        } else {
                down_write(&zram->lock);
                ret = zram_bvec_write(zram, bvec, index, offset);
                up_write(&zram->lock);
        }

        return ret;
}

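/*
 * Tear the device down to its uninitialized state: free every stored
 * object, drop the metadata, clear the stats and zero the capacity.
 */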
static void zram_reset_device(struct zram *zram)
{
        size_t index;
        struct zram_meta *meta;

        down_write(&zram->init_lock);
        if (!zram->init_done) {
                up_write(&zram->init_lock);
                return;
        }

        meta = zram->meta;
        zram->init_done = 0;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
                unsigned long handle = meta->table[index].handle;
                if (!handle)
                        continue;

                zs_free(meta->mem_pool, handle);
        }

        zram_meta_free(zram->meta);
        zram->meta = NULL;
        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));

        zram->disksize = 0;
        set_capacity(zram->disk, 0);
        up_write(&zram->init_lock);
}

static void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
        if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
                pr_info(
                "There is little point creating a zram of greater than "
                "twice the size of memory since we expect a 2:1 compression "
                "ratio. Note that zram uses about 0.1%% of the size of "
                "the disk when not in use so a huge zram is "
                "wasteful.\n"
                "\tMemory Size: %lu kB\n"
                "\tSize you selected: %llu kB\n"
                "Continuing anyway ...\n",
                (totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
                );
        }

        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

        zram->meta = meta;
        zram->init_done = 1;

        pr_debug("Initialization done!\n");
}

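/* Set the device capacity via sysfs; rejected once the device is initialized */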
static ssize_t disksize_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 disksize;
        struct zram_meta *meta;
        struct zram *zram = dev_to_zram(dev);

        disksize = memparse(buf, NULL);
        if (!disksize)
                return -EINVAL;

        disksize = PAGE_ALIGN(disksize);
        meta = zram_meta_alloc(disksize);
        if (!meta)
                return -ENOMEM;

        down_write(&zram->init_lock);
        if (zram->init_done) {
                up_write(&zram->init_lock);
                zram_meta_free(meta);
                pr_info("Cannot change disksize for initialized device\n");
                return -EBUSY;
        }

        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
        zram_init_device(zram, meta);
        up_write(&zram->init_lock);

        return len;
}

static ssize_t reset_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int ret;
        unsigned short do_reset;
        struct zram *zram;
        struct block_device *bdev;

        zram = dev_to_zram(dev);
        bdev = bdget_disk(zram->disk, 0);
        if (!bdev)
                return -ENOMEM;

        /* Do not reset an active device! */
        if (bdev->bd_holders) {
                ret = -EBUSY;
                goto out;
        }

        ret = kstrtou16(buf, 10, &do_reset);
        if (ret)
                goto out;

        if (!do_reset) {
                ret = -EINVAL;
                goto out;
        }

        /* Make sure all pending I/O is finished */
        fsync_bdev(bdev);
        bdput(bdev);

        zram_reset_device(zram);
        return len;

out:
        bdput(bdev);
        return ret;
}

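/*
 * Walk the bio segment by segment, splitting any bio_vec that would
 * cross a zram page boundary before handing it to zram_bvec_rw().
 */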
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
        int i, offset;
        u32 index;
        struct bio_vec *bvec;

        switch (rw) {
        case READ:
                atomic64_inc(&zram->stats.num_reads);
                break;
        case WRITE:
                atomic64_inc(&zram->stats.num_writes);
                break;
        }

        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        bio_for_each_segment(bvec, bio, i) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec->bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only operate on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec->bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec->bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
                                goto out;

                        bv.bv_len = bvec->bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
                                goto out;
                } else
                        if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
                            < 0)
                                goto out;

                update_position(&index, &offset, bvec);
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return;

out:
        bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        down_read(&zram->init_lock);
        if (unlikely(!zram->init_done))
                goto error;

        if (!valid_io_request(zram, bio)) {
                atomic64_inc(&zram->stats.invalid_io);
                goto error;
        }

        __zram_make_request(zram, bio, bio_data_dir(bio));
        up_read(&zram->init_lock);

        return;

error:
        up_read(&zram->init_lock);
        bio_io_error(bio);
}

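/*
 * Called by the swap layer when a swap slot is freed so the backing
 * compressed object can be released immediately.
 */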
static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;

        zram = bdev->bd_disk->private_data;
        down_write(&zram->lock);
        zram_free_page(zram, index);
        up_write(&zram->lock);
        atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
        .swap_slot_free_notify = zram_slot_free_notify,
        .owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
                disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);

static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
        &dev_attr_initstate.attr,
        &dev_attr_reset.attr,
        &dev_attr_num_reads.attr,
        &dev_attr_num_writes.attr,
        &dev_attr_invalid_io.attr,
        &dev_attr_notify_free.attr,
        &dev_attr_zero_pages.attr,
        &dev_attr_orig_data_size.attr,
        &dev_attr_compr_data_size.attr,
        &dev_attr_mem_used_total.attr,
        NULL,
};

static struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
};

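/*
 * Allocate the request queue and gendisk for one device, configure
 * page-aligned I/O limits and publish the sysfs attribute group.
 */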
static int create_device(struct zram *zram, int device_id)
{
        int ret = -ENOMEM;

        init_rwsem(&zram->lock);
        init_rwsem(&zram->init_lock);

        zram->queue = blk_alloc_queue(GFP_KERNEL);
        if (!zram->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                goto out;
        }

        blk_queue_make_request(zram->queue, zram_make_request);
        zram->queue->queuedata = zram;

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                pr_warn("Error allocating disk structure for device %d\n",
                        device_id);
                goto out_free_queue;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = zram->queue;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);

        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZE sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
                pr_warn("Error creating sysfs group\n");
                goto out_free_disk;
        }

        zram->init_done = 0;
        return 0;

out_free_disk:
        del_gendisk(zram->disk);
        put_disk(zram->disk);
out_free_queue:
        blk_cleanup_queue(zram->queue);
out:
        return ret;
}


static void destroy_device(struct zram *zram)
{
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        if (zram->disk) {
                del_gendisk(zram->disk);
                put_disk(zram->disk);
        }

        if (zram->queue)
                blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warn("Invalid value for num_devices: %u\n",
                                num_devices);
                ret = -EINVAL;
                goto out;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_warn("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        /* Allocate the device array and initialize each one */
        zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
        if (!zram_devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&zram_devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        pr_info("Created %u device(s) ...\n", num_devices);

        return 0;

free_devices:
        while (dev_id)
                destroy_device(&zram_devices[--dev_id]);
        kfree(zram_devices);
unregister:
        unregister_blkdev(zram_major, "zram");
out:
        return ret;
}

static void __exit zram_exit(void)
{
        int i;
        struct zram *zram;

        for (i = 0; i < num_devices; i++) {
                zram = &zram_devices[i];

                get_disk(zram->disk);
                destroy_device(zram);
                zram_reset_device(zram);
                put_disk(zram->disk);
        }

        unregister_blkdev(zram_major, "zram");

        kfree(zram_devices);
        pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");