4 * Copyright (c) 2010,2011, Dan Magenheimer, Oracle Corp.
5 * Copyright (c) 2010,2011, Nitin Gupta
7 * Zcache provides an in-kernel "host implementation" for transcendent memory
8 * and, thus indirectly, for cleancache and frontswap. Zcache includes two
9 * page-accessible memory [1] interfaces, both utilizing lzo1x compression:
10 * 1) "compression buddies" ("zbud") is used for ephemeral pages
11 * 2) xvmalloc is used for persistent pages.
12 * Xvmalloc (based on the TLSF allocator) has very low fragmentation
13 * so it maximizes space efficiency, while zbud allows pairs of (and potentially,
14 * in the future, more than a pair of) compressed pages to be closely linked
15 * so that reclaiming can be done via the kernel's physical-page-oriented
16 * "shrinker" interface.
18 * [1] For a definition of page-accessible memory (aka PAM), see:
19 * http://marc.info/?l=linux-mm&m=127811271605009
22 #include <linux/cpu.h>
23 #include <linux/highmem.h>
24 #include <linux/list.h>
25 #include <linux/lzo.h>
26 #include <linux/slab.h>
27 #include <linux/spinlock.h>
28 #include <linux/types.h>
29 #include <linux/atomic.h>
32 #include "../zram/xvmalloc.h" /* if built in drivers/staging */
34 #if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
35 #error "zcache is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
37 #ifdef CONFIG_CLEANCACHE
38 #include <linux/cleancache.h>
40 #ifdef CONFIG_FRONTSWAP
41 #include <linux/frontswap.h>
45 /* this is more aggressive but may cause other problems? */
46 #define ZCACHE_GFP_MASK (GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN)
48 #define ZCACHE_GFP_MASK \
49 (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
52 #define MAX_POOLS_PER_CLIENT 16
54 #define MAX_CLIENTS 16
55 #define LOCAL_CLIENT ((uint16_t)-1)
57 MODULE_LICENSE("GPL");
59 struct zcache_client {
60 struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
61 struct xv_pool *xvpool;
66 static struct zcache_client zcache_host;
67 static struct zcache_client zcache_clients[MAX_CLIENTS];
69 static inline uint16_t get_client_id_from_client(struct zcache_client *cli)
72 if (cli == &zcache_host)
74 return cli - &zcache_clients[0];
77 static inline bool is_local_client(struct zcache_client *cli)
79 return cli == &zcache_host;
83 * Compression buddies ("zbud") provides for packing two (or, possibly
84 * in the future, more) compressed ephemeral pages into a single "raw"
85 * (physical) page and tracking them with data structures so that
86 * the raw pages can be easily reclaimed.
88 * A zbud page ("zbpg") is an aligned page containing a list_head,
89 * a lock, and two "zbud headers". The remainder of the physical
90 * page is divided up into aligned 64-byte "chunks" which contain
91 * the compressed data for zero, one, or two zbuds. Each zbpg
92 * resides on: (1) an "unused list" if it has no zbuds; (2) a
93 * "buddied" list if it is fully populated with two zbuds; or
94 * (3) one of PAGE_SIZE/64 "unbuddied" lists indexed by how many chunks
95 * the one unbuddied zbud uses. The data inside a zbpg cannot be
96 * read or written unless the zbpg's lock is held.
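/*
 * Editor's illustration (not part of the original description): assuming 4KB
 * pages, a zbpg laid out as described above and defined below looks roughly
 * like this, with buddy 0's data placed just past the header (rounded up to
 * a chunk boundary) and buddy 1's data packed against the end of the page
 * (see zbud_data()):
 *
 *  +----------------------------------------------------------------+
 *  | bud_list | lock | zbud_hdr[0] | zbud_hdr[1] | pad to next chunk|
 *  +----------------------------------------------------------------+
 *  | buddy 0 data (rounded up to 64-byte chunks) -->                |
 *  |                    ... unused chunks ...                       |
 *  |              <-- buddy 1 data (rounded up to 64-byte chunks)   |
 *  +----------------------------------------------------------------+
 */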
99 #define ZBH_SENTINEL 0x43214321
100 #define ZBPG_SENTINEL 0xdeadbeef
102 #define ZBUD_MAX_BUDS 2
109 uint16_t size; /* compressed size in bytes, zero means unused */
114 struct list_head bud_list;
116 struct zbud_hdr buddy[ZBUD_MAX_BUDS];
118 /* followed by NCHUNKS aligned CHUNK_SIZE-byte chunks */
121 #define CHUNK_SHIFT 6
122 #define CHUNK_SIZE (1 << CHUNK_SHIFT)
123 #define CHUNK_MASK (~(CHUNK_SIZE-1))
124 #define NCHUNKS (((PAGE_SIZE - sizeof(struct zbud_page)) & \
125 CHUNK_MASK) >> CHUNK_SHIFT)
126 #define MAX_CHUNK (NCHUNKS-1)
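/*
 * Editor's worked example of the chunk arithmetic above, assuming 4KB pages
 * and a struct zbud_page header of roughly 104 bytes (the exact size depends
 * on configuration):
 *   (4096 - 104) & ~63 = 3968, so NCHUNKS = 3968 >> 6 = 62 and MAX_CHUNK = 61,
 *   giving zbud_max_buddy_size() = 61 * 64 = 3904 bytes.
 * A compressed page of, say, 1000 bytes then occupies
 * zbud_size_to_chunks(1000) = (1000 + 63) >> 6 = 16 chunks (1024 bytes).
 */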
129 struct list_head list;
131 } zbud_unbuddied[NCHUNKS];
132 /* list N contains pages with N chunks USED and NCHUNKS-N unused */
133 /* element 0 is never used but optimizing that isn't worth it */
134 static unsigned long zbud_cumul_chunk_counts[NCHUNKS];
136 struct list_head zbud_buddied_list;
137 static unsigned long zcache_zbud_buddied_count;
139 /* protects the buddied list and all unbuddied lists */
140 static DEFINE_SPINLOCK(zbud_budlists_spinlock);
142 static LIST_HEAD(zbpg_unused_list);
143 static unsigned long zcache_zbpg_unused_list_count;
145 /* protects the unused page list */
146 static DEFINE_SPINLOCK(zbpg_unused_list_spinlock);
148 static atomic_t zcache_zbud_curr_raw_pages;
149 static atomic_t zcache_zbud_curr_zpages;
150 static unsigned long zcache_zbud_curr_zbytes;
151 static unsigned long zcache_zbud_cumul_zpages;
152 static unsigned long zcache_zbud_cumul_zbytes;
153 static unsigned long zcache_compress_poor;
154 static unsigned long zcache_mean_compress_poor;
156 /* forward references */
157 static void *zcache_get_free_page(void);
158 static void zcache_free_page(void *p);
161 * zbud helper functions
164 static inline unsigned zbud_max_buddy_size(void)
166 return MAX_CHUNK << CHUNK_SHIFT;
169 static inline unsigned zbud_size_to_chunks(unsigned size)
171 BUG_ON(size == 0 || size > zbud_max_buddy_size());
172 return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
175 static inline int zbud_budnum(struct zbud_hdr *zh)
177 unsigned offset = (unsigned long)zh & (PAGE_SIZE - 1);
178 struct zbud_page *zbpg = NULL;
179 unsigned budnum = -1U;
182 for (i = 0; i < ZBUD_MAX_BUDS; i++)
183 if (offset == offsetof(typeof(*zbpg), buddy[i])) {
187 BUG_ON(budnum == -1U);
191 static char *zbud_data(struct zbud_hdr *zh, unsigned size)
193 struct zbud_page *zbpg;
197 ASSERT_SENTINEL(zh, ZBH);
198 budnum = zbud_budnum(zh);
199 BUG_ON(size == 0 || size > zbud_max_buddy_size());
200 zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
201 ASSERT_SPINLOCK(&zbpg->lock);
204 p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
206 else if (budnum == 1)
207 p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
212 * zbud raw page management
215 static struct zbud_page *zbud_alloc_raw_page(void)
217 struct zbud_page *zbpg = NULL;
218 struct zbud_hdr *zh0, *zh1;
221 /* if any pages on the zbpg list, use one */
222 spin_lock(&zbpg_unused_list_spinlock);
223 if (!list_empty(&zbpg_unused_list)) {
224 zbpg = list_first_entry(&zbpg_unused_list,
225 struct zbud_page, bud_list);
226 list_del_init(&zbpg->bud_list);
227 zcache_zbpg_unused_list_count--;
230 spin_unlock(&zbpg_unused_list_spinlock);
232 /* none on zbpg list, try to get a kernel page */
233 zbpg = zcache_get_free_page();
234 if (likely(zbpg != NULL)) {
235 INIT_LIST_HEAD(&zbpg->bud_list);
236 zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
237 spin_lock_init(&zbpg->lock);
239 ASSERT_INVERTED_SENTINEL(zbpg, ZBPG);
240 SET_SENTINEL(zbpg, ZBPG);
241 BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
242 BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
244 atomic_inc(&zcache_zbud_curr_raw_pages);
245 INIT_LIST_HEAD(&zbpg->bud_list);
246 SET_SENTINEL(zbpg, ZBPG);
247 zh0->size = 0; zh1->size = 0;
248 tmem_oid_set_invalid(&zh0->oid);
249 tmem_oid_set_invalid(&zh1->oid);
255 static void zbud_free_raw_page(struct zbud_page *zbpg)
257 struct zbud_hdr *zh0 = &zbpg->buddy[0], *zh1 = &zbpg->buddy[1];
259 ASSERT_SENTINEL(zbpg, ZBPG);
260 BUG_ON(!list_empty(&zbpg->bud_list));
261 ASSERT_SPINLOCK(&zbpg->lock);
262 BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
263 BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
264 INVERT_SENTINEL(zbpg, ZBPG);
265 spin_unlock(&zbpg->lock);
266 spin_lock(&zbpg_unused_list_spinlock);
267 list_add(&zbpg->bud_list, &zbpg_unused_list);
268 zcache_zbpg_unused_list_count++;
269 spin_unlock(&zbpg_unused_list_spinlock);
273 * core zbud handling routines
276 static unsigned zbud_free(struct zbud_hdr *zh)
280 ASSERT_SENTINEL(zh, ZBH);
281 BUG_ON(!tmem_oid_valid(&zh->oid));
283 BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
285 tmem_oid_set_invalid(&zh->oid);
286 INVERT_SENTINEL(zh, ZBH);
287 zcache_zbud_curr_zbytes -= size;
288 atomic_dec(&zcache_zbud_curr_zpages);
292 static void zbud_free_and_delist(struct zbud_hdr *zh)
295 struct zbud_hdr *zh_other;
296 unsigned budnum = zbud_budnum(zh), size;
297 struct zbud_page *zbpg =
298 container_of(zh, struct zbud_page, buddy[budnum]);
300 spin_lock(&zbpg->lock);
301 if (list_empty(&zbpg->bud_list)) {
302 /* ignore zombie page... see zbud_evict_pages() */
303 spin_unlock(&zbpg->lock);
306 size = zbud_free(zh);
307 ASSERT_SPINLOCK(&zbpg->lock);
308 zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
309 if (zh_other->size == 0) { /* was unbuddied: unlist and free */
310 chunks = zbud_size_to_chunks(size);
311 spin_lock(&zbud_budlists_spinlock);
312 BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
313 list_del_init(&zbpg->bud_list);
314 zbud_unbuddied[chunks].count--;
315 spin_unlock(&zbud_budlists_spinlock);
316 zbud_free_raw_page(zbpg);
317 } else { /* was buddied: move remaining buddy to unbuddied list */
318 chunks = zbud_size_to_chunks(zh_other->size);
319 spin_lock(&zbud_budlists_spinlock);
320 list_del_init(&zbpg->bud_list);
321 zcache_zbud_buddied_count--;
322 list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
323 zbud_unbuddied[chunks].count++;
324 spin_unlock(&zbud_budlists_spinlock);
325 spin_unlock(&zbpg->lock);
329 static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
330 struct tmem_oid *oid,
331 uint32_t index, struct page *page,
332 void *cdata, unsigned size)
334 struct zbud_hdr *zh0, *zh1, *zh = NULL;
335 struct zbud_page *zbpg = NULL, *ztmp;
338 int i, found_good_buddy = 0;
340 nchunks = zbud_size_to_chunks(size);
341 for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
342 spin_lock(&zbud_budlists_spinlock);
343 if (!list_empty(&zbud_unbuddied[i].list)) {
344 list_for_each_entry_safe(zbpg, ztmp,
345 &zbud_unbuddied[i].list, bud_list) {
346 if (spin_trylock(&zbpg->lock)) {
347 found_good_buddy = i;
348 goto found_unbuddied;
352 spin_unlock(&zbud_budlists_spinlock);
354 /* didn't find a good buddy, try allocating a new page */
355 zbpg = zbud_alloc_raw_page();
356 if (unlikely(zbpg == NULL))
358 /* ok, have a fresh page; the data is already compressed, so take the locks and list it */
359 spin_lock(&zbpg->lock);
360 spin_lock(&zbud_budlists_spinlock);
361 list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list);
362 zbud_unbuddied[nchunks].count++;
363 zh = &zbpg->buddy[0];
367 ASSERT_SPINLOCK(&zbpg->lock);
368 zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
369 BUG_ON(!((zh0->size == 0) ^ (zh1->size == 0)));
370 if (zh0->size != 0) { /* buddy0 in use, buddy1 is vacant */
371 ASSERT_SENTINEL(zh0, ZBH);
373 } else if (zh1->size != 0) { /* buddy1 in use, buddy0 is vacant */
374 ASSERT_SENTINEL(zh1, ZBH);
378 list_del_init(&zbpg->bud_list);
379 zbud_unbuddied[found_good_buddy].count--;
380 list_add_tail(&zbpg->bud_list, &zbud_buddied_list);
381 zcache_zbud_buddied_count++;
384 SET_SENTINEL(zh, ZBH);
388 zh->pool_id = pool_id;
389 zh->client_id = client_id;
390 /* can wait to copy the data until the list locks are dropped */
391 spin_unlock(&zbud_budlists_spinlock);
393 to = zbud_data(zh, size);
394 memcpy(to, cdata, size);
395 spin_unlock(&zbpg->lock);
396 zbud_cumul_chunk_counts[nchunks]++;
397 atomic_inc(&zcache_zbud_curr_zpages);
398 zcache_zbud_cumul_zpages++;
399 zcache_zbud_curr_zbytes += size;
400 zcache_zbud_cumul_zbytes += size;
405 static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
407 struct zbud_page *zbpg;
408 unsigned budnum = zbud_budnum(zh);
409 size_t out_len = PAGE_SIZE;
410 char *to_va, *from_va;
414 zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
415 spin_lock(&zbpg->lock);
416 if (list_empty(&zbpg->bud_list)) {
417 /* ignore zombie page... see zbud_evict_pages() */
421 ASSERT_SENTINEL(zh, ZBH);
422 BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
423 to_va = kmap_atomic(page, KM_USER0);
425 from_va = zbud_data(zh, size);
426 ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
427 BUG_ON(ret != LZO_E_OK);
428 BUG_ON(out_len != PAGE_SIZE);
429 kunmap_atomic(to_va, KM_USER0);
431 spin_unlock(&zbpg->lock);
436 * The following routines handle shrinking of ephemeral pages by evicting
437 * pages "least valuable" first.
440 static unsigned long zcache_evicted_raw_pages;
441 static unsigned long zcache_evicted_buddied_pages;
442 static unsigned long zcache_evicted_unbuddied_pages;
444 static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id,
446 static void zcache_put_pool(struct tmem_pool *pool);
449 * Flush and free all zbuds in a zbpg, then free the pageframe
451 static void zbud_evict_zbpg(struct zbud_page *zbpg)
455 uint32_t pool_id[ZBUD_MAX_BUDS], client_id[ZBUD_MAX_BUDS];
456 uint32_t index[ZBUD_MAX_BUDS];
457 struct tmem_oid oid[ZBUD_MAX_BUDS];
458 struct tmem_pool *pool;
460 ASSERT_SPINLOCK(&zbpg->lock);
461 BUG_ON(!list_empty(&zbpg->bud_list));
462 for (i = 0, j = 0; i < ZBUD_MAX_BUDS; i++) {
463 zh = &zbpg->buddy[i];
465 client_id[j] = zh->client_id;
466 pool_id[j] = zh->pool_id;
468 index[j] = zh->index;
473 spin_unlock(&zbpg->lock);
474 for (i = 0; i < j; i++) {
475 pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
477 tmem_flush_page(pool, &oid[i], index[i]);
478 zcache_put_pool(pool);
481 ASSERT_SENTINEL(zbpg, ZBPG);
482 spin_lock(&zbpg->lock);
483 zbud_free_raw_page(zbpg);
487 * Free nr pages. This code is funky because we want to hold the locks
488 * protecting various lists for as short a time as possible, and in some
489 * circumstances the list may change asynchronously when the list lock is
490 * not held. In some cases we also trylock not only to avoid waiting on a
491 * page in use by another cpu, but also to avoid potential deadlock due to
494 static void zbud_evict_pages(int nr)
496 struct zbud_page *zbpg;
499 /* first try freeing any pages on unused list */
501 spin_lock_bh(&zbpg_unused_list_spinlock);
502 if (!list_empty(&zbpg_unused_list)) {
503 /* can't walk list here, since it may change when unlocked */
504 zbpg = list_first_entry(&zbpg_unused_list,
505 struct zbud_page, bud_list);
506 list_del_init(&zbpg->bud_list);
507 zcache_zbpg_unused_list_count--;
508 atomic_dec(&zcache_zbud_curr_raw_pages);
509 spin_unlock_bh(&zbpg_unused_list_spinlock);
510 zcache_free_page(zbpg);
511 zcache_evicted_raw_pages++;
514 goto retry_unused_list;
516 spin_unlock_bh(&zbpg_unused_list_spinlock);
518 /* now try freeing unbuddied pages, starting with least space avail */
519 for (i = 0; i < MAX_CHUNK; i++) {
521 spin_lock_bh(&zbud_budlists_spinlock);
522 if (list_empty(&zbud_unbuddied[i].list)) {
523 spin_unlock_bh(&zbud_budlists_spinlock);
526 list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) {
527 if (unlikely(!spin_trylock(&zbpg->lock)))
529 list_del_init(&zbpg->bud_list);
530 zbud_unbuddied[i].count--;
531 spin_unlock(&zbud_budlists_spinlock);
532 zcache_evicted_unbuddied_pages++;
533 /* want budlists unlocked when doing zbpg eviction */
534 zbud_evict_zbpg(zbpg);
538 goto retry_unbud_list_i;
540 spin_unlock_bh(&zbud_budlists_spinlock);
543 /* as a last resort, free buddied pages */
545 spin_lock_bh(&zbud_budlists_spinlock);
546 if (list_empty(&zbud_buddied_list)) {
547 spin_unlock_bh(&zbud_budlists_spinlock);
550 list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) {
551 if (unlikely(!spin_trylock(&zbpg->lock)))
553 list_del_init(&zbpg->bud_list);
554 zcache_zbud_buddied_count--;
555 spin_unlock(&zbud_budlists_spinlock);
556 zcache_evicted_buddied_pages++;
557 /* want budlists unlocked when doing zbpg eviction */
558 zbud_evict_zbpg(zbpg);
564 spin_unlock_bh(&zbud_budlists_spinlock);
569 static void zbud_init(void)
573 INIT_LIST_HEAD(&zbud_buddied_list);
574 zcache_zbud_buddied_count = 0;
575 for (i = 0; i < NCHUNKS; i++) {
576 INIT_LIST_HEAD(&zbud_unbuddied[i].list);
577 zbud_unbuddied[i].count = 0;
583 * These sysfs routines show a nice distribution of how many zbpg's are
584 * currently (and have ever been placed) in each unbuddied list. It's fun
585 * to watch but can probably go away before final merge.
587 static int zbud_show_unbuddied_list_counts(char *buf)
592 for (i = 0; i < NCHUNKS; i++)
593 p += sprintf(p, "%u ", zbud_unbuddied[i].count);
597 static int zbud_show_cumul_chunk_counts(char *buf)
599 unsigned long i, chunks = 0, total_chunks = 0, sum_total_chunks = 0;
600 unsigned long total_chunks_lte_21 = 0, total_chunks_lte_32 = 0;
601 unsigned long total_chunks_lte_42 = 0;
604 for (i = 0; i < NCHUNKS; i++) {
605 p += sprintf(p, "%lu ", zbud_cumul_chunk_counts[i]);
606 chunks += zbud_cumul_chunk_counts[i];
607 total_chunks += zbud_cumul_chunk_counts[i];
608 sum_total_chunks += i * zbud_cumul_chunk_counts[i];
610 total_chunks_lte_21 = total_chunks;
612 total_chunks_lte_32 = total_chunks;
614 total_chunks_lte_42 = total_chunks;
616 p += sprintf(p, "<=21:%lu <=32:%lu <=42:%lu, mean:%lu\n",
617 total_chunks_lte_21, total_chunks_lte_32, total_chunks_lte_42,
618 chunks == 0 ? 0 : sum_total_chunks / chunks);
624 * This "zv" PAM implementation combines the TLSF-based xvMalloc
625 * with lzo1x compression to maximize the amount of data that can
626 * be packed into a physical page.
628 * Zv represents a PAM page with the index and object (plus a "size" value
629 * necessary for decompression) immediately preceding the compressed data.
632 #define ZVH_SENTINEL 0x43214321
641 /* rudimentary policy limits */
642 /* total number of persistent pages may not exceed this percentage of total RAM */
643 static unsigned int zv_page_count_policy_percent = 75;
645 * byte count defining poor compression; pages with greater zsize will be
648 static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
650 * byte count defining poor *mean* compression; pages with greater zsize
651 * will be rejected until sufficient better-compressed pages are accepted
652 * driving the mean below this threshold
654 static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
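/*
 * Editor's worked example of the policy values above, assuming 4KB pages:
 *   zv_max_zsize      = (4096 / 8) * 7 = 3584 bytes
 *   zv_max_mean_zsize = (4096 / 8) * 5 = 2560 bytes
 * With zv_page_count_policy_percent = 75 and, say, 1048576 pages of total
 * RAM (4GB), at most (75 * 1048576) / 100 = 786432 persistent zpages are
 * retained before further puts are rejected.
 */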
656 static unsigned long zv_curr_dist_counts[NCHUNKS];
657 static unsigned long zv_cumul_dist_counts[NCHUNKS];
659 static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
660 struct tmem_oid *oid, uint32_t index,
661 void *cdata, unsigned clen)
664 struct zv_hdr *zv = NULL;
666 int alloc_size = clen + sizeof(struct zv_hdr);
667 int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
670 BUG_ON(!irqs_disabled());
671 BUG_ON(chunks >= NCHUNKS);
672 ret = xv_malloc(xvpool, alloc_size,
673 &page, &offset, ZCACHE_GFP_MASK);
676 zv_curr_dist_counts[chunks]++;
677 zv_cumul_dist_counts[chunks]++;
678 zv = kmap_atomic(page, KM_USER0) + offset;
681 zv->pool_id = pool_id;
682 SET_SENTINEL(zv, ZVH);
683 memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
684 kunmap_atomic(zv, KM_USER0);
689 static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
694 uint16_t size = xv_get_object_size(zv);
695 int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
697 ASSERT_SENTINEL(zv, ZVH);
698 BUG_ON(chunks >= NCHUNKS);
699 zv_curr_dist_counts[chunks]--;
702 INVERT_SENTINEL(zv, ZVH);
703 page = virt_to_page(zv);
704 offset = (unsigned long)zv & ~PAGE_MASK;
705 local_irq_save(flags);
706 xv_free(xvpool, page, offset);
707 local_irq_restore(flags);
710 static void zv_decompress(struct page *page, struct zv_hdr *zv)
712 size_t clen = PAGE_SIZE;
717 ASSERT_SENTINEL(zv, ZVH);
718 size = xv_get_object_size(zv) - sizeof(*zv);
720 to_va = kmap_atomic(page, KM_USER0);
721 ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
723 kunmap_atomic(to_va, KM_USER0);
724 BUG_ON(ret != LZO_E_OK);
725 BUG_ON(clen != PAGE_SIZE);
730 * show a distribution of compression stats for zv pages.
733 static int zv_curr_dist_counts_show(char *buf)
735 unsigned long i, n, chunks = 0, sum_total_chunks = 0;
738 for (i = 0; i < NCHUNKS; i++) {
739 n = zv_curr_dist_counts[i];
740 p += sprintf(p, "%lu ", n);
742 sum_total_chunks += i * n;
744 p += sprintf(p, "mean:%lu\n",
745 chunks == 0 ? 0 : sum_total_chunks / chunks);
749 static int zv_cumul_dist_counts_show(char *buf)
751 unsigned long i, n, chunks = 0, sum_total_chunks = 0;
754 for (i = 0; i < NCHUNKS; i++) {
755 n = zv_cumul_dist_counts[i];
756 p += sprintf(p, "%lu ", n);
758 sum_total_chunks += i * n;
760 p += sprintf(p, "mean:%lu\n",
761 chunks == 0 ? 0 : sum_total_chunks / chunks);
766 * setting zv_max_zsize via sysfs causes all persistent (e.g. swap)
767 * pages that don't compress to less than this value (including metadata
768 * overhead) to be rejected. We don't allow the value to get too close
771 static ssize_t zv_max_zsize_show(struct kobject *kobj,
772 struct kobj_attribute *attr,
775 return sprintf(buf, "%u\n", zv_max_zsize);
778 static ssize_t zv_max_zsize_store(struct kobject *kobj,
779 struct kobj_attribute *attr,
780 const char *buf, size_t count)
785 if (!capable(CAP_SYS_ADMIN))
788 err = strict_strtoul(buf, 10, &val);
789 if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
796 * setting zv_max_mean_zsize via sysfs causes all persistent (e.g. swap)
797 * pages that don't compress to less than this value (including metadata
798 * overhead) to be rejected UNLESS the mean compression is also smaller
799 * than this value. In other words, we are load-balancing-by-zsize the
800 * accepted pages. Again, we don't allow the value to get too close
803 static ssize_t zv_max_mean_zsize_show(struct kobject *kobj,
804 struct kobj_attribute *attr,
807 return sprintf(buf, "%u\n", zv_max_mean_zsize);
810 static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
811 struct kobj_attribute *attr,
812 const char *buf, size_t count)
817 if (!capable(CAP_SYS_ADMIN))
820 err = strict_strtoul(buf, 10, &val);
821 if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
823 zv_max_mean_zsize = val;
828 * setting zv_page_count_policy_percent via sysfs sets an upper bound of
829 * persistent (e.g. swap) pages that will be retained according to:
830 * (zv_page_count_policy_percent * totalram_pages) / 100
831 * when that limit is reached, further puts will be rejected (until
832 * some pages have been flushed). Note that, due to compression,
833 * this number may exceed 100; it defaults to 75 and we set an
834 * arbitrary limit of 150. A poor choice will almost certainly result
835 * in OOMs, so this value should only be changed prudently.
837 static ssize_t zv_page_count_policy_percent_show(struct kobject *kobj,
838 struct kobj_attribute *attr,
841 return sprintf(buf, "%u\n", zv_page_count_policy_percent);
844 static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj,
845 struct kobj_attribute *attr,
846 const char *buf, size_t count)
851 if (!capable(CAP_SYS_ADMIN))
854 err = strict_strtoul(buf, 10, &val);
855 if (err || (val == 0) || (val > 150))
857 zv_page_count_policy_percent = val;
861 static struct kobj_attribute zcache_zv_max_zsize_attr = {
862 .attr = { .name = "zv_max_zsize", .mode = 0644 },
863 .show = zv_max_zsize_show,
864 .store = zv_max_zsize_store,
867 static struct kobj_attribute zcache_zv_max_mean_zsize_attr = {
868 .attr = { .name = "zv_max_mean_zsize", .mode = 0644 },
869 .show = zv_max_mean_zsize_show,
870 .store = zv_max_mean_zsize_store,
873 static struct kobj_attribute zcache_zv_page_count_policy_percent_attr = {
874 .attr = { .name = "zv_page_count_policy_percent",
876 .show = zv_page_count_policy_percent_show,
877 .store = zv_page_count_policy_percent_store,
882 * zcache core code starts here
885 /* useful stats not collected by cleancache or frontswap */
886 static unsigned long zcache_flush_total;
887 static unsigned long zcache_flush_found;
888 static unsigned long zcache_flobj_total;
889 static unsigned long zcache_flobj_found;
890 static unsigned long zcache_failed_eph_puts;
891 static unsigned long zcache_failed_pers_puts;
894 * Tmem operations assume the poolid implies the invoking client.
895 * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
896 * RAMster has each client numbered by cluster node, and a KVM version
897 * of zcache would have one client per guest and each client might
900 static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
902 struct tmem_pool *pool = NULL;
903 struct zcache_client *cli = NULL;
905 if (cli_id == LOCAL_CLIENT)
908 if (cli_id >= MAX_CLIENTS)
910 cli = &zcache_clients[cli_id];
913 atomic_inc(&cli->refcount);
915 if (poolid < MAX_POOLS_PER_CLIENT) {
916 pool = cli->tmem_pools[poolid];
918 atomic_inc(&pool->refcount);
924 static void zcache_put_pool(struct tmem_pool *pool)
926 struct zcache_client *cli = NULL;
931 atomic_dec(&pool->refcount);
932 atomic_dec(&cli->refcount);
935 int zcache_new_client(uint16_t cli_id)
937 struct zcache_client *cli = NULL;
940 if (cli_id == LOCAL_CLIENT)
942 else if ((unsigned int)cli_id < MAX_CLIENTS)
943 cli = &zcache_clients[cli_id];
949 #ifdef CONFIG_FRONTSWAP
950 cli->xvpool = xv_create_pool();
951 if (cli->xvpool == NULL)
959 /* counters for debugging */
960 static unsigned long zcache_failed_get_free_pages;
961 static unsigned long zcache_failed_alloc;
962 static unsigned long zcache_put_to_flush;
963 static unsigned long zcache_aborted_preload;
964 static unsigned long zcache_aborted_shrink;
967 * Ensure that memory allocation requests in zcache don't result
968 * in direct reclaim requests via the shrinker, which would cause
969 * an infinite loop. Maybe a GFP flag would be better?
971 static DEFINE_SPINLOCK(zcache_direct_reclaim_lock);
974 * for now, use named slabs so we can easily track usage; later we can
975 * either just use kmalloc, or perhaps add a slab-like allocator
976 * to more carefully manage total memory utilization
978 static struct kmem_cache *zcache_objnode_cache;
979 static struct kmem_cache *zcache_obj_cache;
980 static atomic_t zcache_curr_obj_count = ATOMIC_INIT(0);
981 static unsigned long zcache_curr_obj_count_max;
982 static atomic_t zcache_curr_objnode_count = ATOMIC_INIT(0);
983 static unsigned long zcache_curr_objnode_count_max;
986 * to avoid memory allocation recursion (e.g. due to direct reclaim), we
987 * preload all necessary data structures so the hostops callbacks never
988 * actually do a malloc
990 struct zcache_preload {
992 struct tmem_obj *obj;
994 struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH];
996 static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
998 static int zcache_do_preload(struct tmem_pool *pool)
1000 struct zcache_preload *kp;
1001 struct tmem_objnode *objnode;
1002 struct tmem_obj *obj;
1006 if (unlikely(zcache_objnode_cache == NULL))
1008 if (unlikely(zcache_obj_cache == NULL))
1010 if (!spin_trylock(&zcache_direct_reclaim_lock)) {
1011 zcache_aborted_preload++;
1015 kp = &__get_cpu_var(zcache_preloads);
1016 while (kp->nr < ARRAY_SIZE(kp->objnodes)) {
1017 preempt_enable_no_resched();
1018 objnode = kmem_cache_alloc(zcache_objnode_cache,
1020 if (unlikely(objnode == NULL)) {
1021 zcache_failed_alloc++;
1025 kp = &__get_cpu_var(zcache_preloads);
1026 if (kp->nr < ARRAY_SIZE(kp->objnodes))
1027 kp->objnodes[kp->nr++] = objnode;
1029 kmem_cache_free(zcache_objnode_cache, objnode);
1031 preempt_enable_no_resched();
1032 obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
1033 if (unlikely(obj == NULL)) {
1034 zcache_failed_alloc++;
1037 page = (void *)__get_free_page(ZCACHE_GFP_MASK);
1038 if (unlikely(page == NULL)) {
1039 zcache_failed_get_free_pages++;
1040 kmem_cache_free(zcache_obj_cache, obj);
1044 kp = &__get_cpu_var(zcache_preloads);
1045 if (kp->obj == NULL)
1048 kmem_cache_free(zcache_obj_cache, obj);
1049 if (kp->page == NULL)
1052 free_page((unsigned long)page);
1055 spin_unlock(&zcache_direct_reclaim_lock);
1060 static void *zcache_get_free_page(void)
1062 struct zcache_preload *kp;
1065 kp = &__get_cpu_var(zcache_preloads);
1067 BUG_ON(page == NULL);
1072 static void zcache_free_page(void *p)
1074 free_page((unsigned long)p);
1078 * zcache implementation for tmem host ops
1081 static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
1083 struct tmem_objnode *objnode = NULL;
1084 unsigned long count;
1085 struct zcache_preload *kp;
1087 kp = &__get_cpu_var(zcache_preloads);
1090 objnode = kp->objnodes[kp->nr - 1];
1091 BUG_ON(objnode == NULL);
1092 kp->objnodes[kp->nr - 1] = NULL;
1094 count = atomic_inc_return(&zcache_curr_objnode_count);
1095 if (count > zcache_curr_objnode_count_max)
1096 zcache_curr_objnode_count_max = count;
1101 static void zcache_objnode_free(struct tmem_objnode *objnode,
1102 struct tmem_pool *pool)
1104 atomic_dec(&zcache_curr_objnode_count);
1105 BUG_ON(atomic_read(&zcache_curr_objnode_count) < 0);
1106 kmem_cache_free(zcache_objnode_cache, objnode);
1109 static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
1111 struct tmem_obj *obj = NULL;
1112 unsigned long count;
1113 struct zcache_preload *kp;
1115 kp = &__get_cpu_var(zcache_preloads);
1117 BUG_ON(obj == NULL);
1119 count = atomic_inc_return(&zcache_curr_obj_count);
1120 if (count > zcache_curr_obj_count_max)
1121 zcache_curr_obj_count_max = count;
1125 static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
1127 atomic_dec(&zcache_curr_obj_count);
1128 BUG_ON(atomic_read(&zcache_curr_obj_count) < 0);
1129 kmem_cache_free(zcache_obj_cache, obj);
1132 static struct tmem_hostops zcache_hostops = {
1133 .obj_alloc = zcache_obj_alloc,
1134 .obj_free = zcache_obj_free,
1135 .objnode_alloc = zcache_objnode_alloc,
1136 .objnode_free = zcache_objnode_free,
1140 * zcache implementations for PAM page descriptor ops
1143 static atomic_t zcache_curr_eph_pampd_count = ATOMIC_INIT(0);
1144 static unsigned long zcache_curr_eph_pampd_count_max;
1145 static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
1146 static unsigned long zcache_curr_pers_pampd_count_max;
1148 /* forward reference */
1149 static int zcache_compress(struct page *from, void **out_va, size_t *out_len);
1151 static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
1152 struct tmem_pool *pool, struct tmem_oid *oid,
1155 void *pampd = NULL, *cdata;
1158 unsigned long count;
1159 struct page *page = virt_to_page(data);
1160 struct zcache_client *cli = pool->client;
1161 uint16_t client_id = get_client_id_from_client(cli);
1162 unsigned long zv_mean_zsize;
1163 unsigned long curr_pers_pampd_count;
1166 ret = zcache_compress(page, &cdata, &clen);
1169 if (clen == 0 || clen > zbud_max_buddy_size()) {
1170 zcache_compress_poor++;
1173 pampd = (void *)zbud_create(client_id, pool->pool_id, oid,
1174 index, page, cdata, clen);
1175 if (pampd != NULL) {
1176 count = atomic_inc_return(&zcache_curr_eph_pampd_count);
1177 if (count > zcache_curr_eph_pampd_count_max)
1178 zcache_curr_eph_pampd_count_max = count;
1181 curr_pers_pampd_count =
1182 atomic_read(&zcache_curr_pers_pampd_count);
1183 if (curr_pers_pampd_count >
1184 (zv_page_count_policy_percent * totalram_pages) / 100)
1186 ret = zcache_compress(page, &cdata, &clen);
1189 /* reject if compression is too poor */
1190 if (clen > zv_max_zsize) {
1191 zcache_compress_poor++;
1194 /* reject if mean compression is too poor */
1195 if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
1196 zv_mean_zsize = xv_get_total_size_bytes(cli->xvpool) /
1197 curr_pers_pampd_count;
1198 if (zv_mean_zsize > zv_max_mean_zsize) {
1199 zcache_mean_compress_poor++;
1203 pampd = (void *)zv_create(cli->xvpool, pool->pool_id,
1204 oid, index, cdata, clen);
1207 count = atomic_inc_return(&zcache_curr_pers_pampd_count);
1208 if (count > zcache_curr_pers_pampd_count_max)
1209 zcache_curr_pers_pampd_count_max = count;
1216 * fill the pageframe corresponding to the struct page with the data
1217 * from the passed pampd
1219 static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
1220 void *pampd, struct tmem_pool *pool,
1221 struct tmem_oid *oid, uint32_t index)
1225 BUG_ON(is_ephemeral(pool));
1226 zv_decompress(virt_to_page(data), pampd);
1231 * fill the pageframe corresponding to the struct page with the data
1232 * from the passed pampd, then free the pampd (ephemeral pools only)
1234 static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
1235 void *pampd, struct tmem_pool *pool,
1236 struct tmem_oid *oid, uint32_t index)
1240 BUG_ON(!is_ephemeral(pool));
1241 zbud_decompress(virt_to_page(data), pampd);
1242 zbud_free_and_delist((struct zbud_hdr *)pampd);
1243 atomic_dec(&zcache_curr_eph_pampd_count);
1248 * free the pampd and remove it from any zcache lists
1249 * pampd must no longer be pointed to from any tmem data structures!
1251 static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
1252 struct tmem_oid *oid, uint32_t index)
1254 struct zcache_client *cli = pool->client;
1256 if (is_ephemeral(pool)) {
1257 zbud_free_and_delist((struct zbud_hdr *)pampd);
1258 atomic_dec(&zcache_curr_eph_pampd_count);
1259 BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0);
1261 zv_free(cli->xvpool, (struct zv_hdr *)pampd);
1262 atomic_dec(&zcache_curr_pers_pampd_count);
1263 BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0);
1267 static void zcache_pampd_free_obj(struct tmem_pool *pool, struct tmem_obj *obj)
1271 static void zcache_pampd_new_obj(struct tmem_obj *obj)
1275 static int zcache_pampd_replace_in_obj(void *pampd, struct tmem_obj *obj)
1280 static bool zcache_pampd_is_remote(void *pampd)
1285 static struct tmem_pamops zcache_pamops = {
1286 .create = zcache_pampd_create,
1287 .get_data = zcache_pampd_get_data,
1288 .get_data_and_free = zcache_pampd_get_data_and_free,
1289 .free = zcache_pampd_free,
1290 .free_obj = zcache_pampd_free_obj,
1291 .new_obj = zcache_pampd_new_obj,
1292 .replace_in_obj = zcache_pampd_replace_in_obj,
1293 .is_remote = zcache_pampd_is_remote,
1297 * zcache compression/decompression and related per-cpu stuff
1300 #define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
1301 #define LZO_DSTMEM_PAGE_ORDER 1
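/*
 * Editor's note on the sizing above: lzo1x output can exceed its input
 * (the kernel's worst-case bound is roughly in + in/16 + 64 + 3, i.e.
 * about 4419 bytes for a 4KB page), so a single page is not a safe
 * destination buffer; the order-1 (two-page) per-cpu buffer is.
 */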
1302 static DEFINE_PER_CPU(unsigned char *, zcache_workmem);
1303 static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
1305 static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
1308 unsigned char *dmem = __get_cpu_var(zcache_dstmem);
1309 unsigned char *wmem = __get_cpu_var(zcache_workmem);
1312 BUG_ON(!irqs_disabled());
1313 if (unlikely(dmem == NULL || wmem == NULL))
1314 goto out; /* no buffer, so can't compress */
1315 from_va = kmap_atomic(from, KM_USER0);
1317 ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
1318 BUG_ON(ret != LZO_E_OK);
1320 kunmap_atomic(from_va, KM_USER0);
1327 static int zcache_cpu_notifier(struct notifier_block *nb,
1328 unsigned long action, void *pcpu)
1330 int cpu = (long)pcpu;
1331 struct zcache_preload *kp;
1334 case CPU_UP_PREPARE:
1335 per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
1336 GFP_KERNEL | __GFP_REPEAT,
1337 LZO_DSTMEM_PAGE_ORDER);
1338 per_cpu(zcache_workmem, cpu) =
1339 kzalloc(LZO_WORKMEM_BYTES,
1340 GFP_KERNEL | __GFP_REPEAT);
1343 case CPU_UP_CANCELED:
1344 free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
1345 LZO_DSTMEM_PAGE_ORDER);
1346 per_cpu(zcache_dstmem, cpu) = NULL;
1347 kfree(per_cpu(zcache_workmem, cpu));
1348 per_cpu(zcache_workmem, cpu) = NULL;
1349 kp = &per_cpu(zcache_preloads, cpu);
1351 kmem_cache_free(zcache_objnode_cache,
1352 kp->objnodes[kp->nr - 1]);
1353 kp->objnodes[kp->nr - 1] = NULL;
1356 kmem_cache_free(zcache_obj_cache, kp->obj);
1357 free_page((unsigned long)kp->page);
1365 static struct notifier_block zcache_cpu_notifier_block = {
1366 .notifier_call = zcache_cpu_notifier
1370 #define ZCACHE_SYSFS_RO(_name) \
1371 static ssize_t zcache_##_name##_show(struct kobject *kobj, \
1372 struct kobj_attribute *attr, char *buf) \
1374 return sprintf(buf, "%lu\n", zcache_##_name); \
1376 static struct kobj_attribute zcache_##_name##_attr = { \
1377 .attr = { .name = __stringify(_name), .mode = 0444 }, \
1378 .show = zcache_##_name##_show, \
1381 #define ZCACHE_SYSFS_RO_ATOMIC(_name) \
1382 static ssize_t zcache_##_name##_show(struct kobject *kobj, \
1383 struct kobj_attribute *attr, char *buf) \
1385 return sprintf(buf, "%d\n", atomic_read(&zcache_##_name)); \
1387 static struct kobj_attribute zcache_##_name##_attr = { \
1388 .attr = { .name = __stringify(_name), .mode = 0444 }, \
1389 .show = zcache_##_name##_show, \
1392 #define ZCACHE_SYSFS_RO_CUSTOM(_name, _func) \
1393 static ssize_t zcache_##_name##_show(struct kobject *kobj, \
1394 struct kobj_attribute *attr, char *buf) \
1396 return _func(buf); \
1398 static struct kobj_attribute zcache_##_name##_attr = { \
1399 .attr = { .name = __stringify(_name), .mode = 0444 }, \
1400 .show = zcache_##_name##_show, \
1403 ZCACHE_SYSFS_RO(curr_obj_count_max);
1404 ZCACHE_SYSFS_RO(curr_objnode_count_max);
1405 ZCACHE_SYSFS_RO(flush_total);
1406 ZCACHE_SYSFS_RO(flush_found);
1407 ZCACHE_SYSFS_RO(flobj_total);
1408 ZCACHE_SYSFS_RO(flobj_found);
1409 ZCACHE_SYSFS_RO(failed_eph_puts);
1410 ZCACHE_SYSFS_RO(failed_pers_puts);
1411 ZCACHE_SYSFS_RO(zbud_curr_zbytes);
1412 ZCACHE_SYSFS_RO(zbud_cumul_zpages);
1413 ZCACHE_SYSFS_RO(zbud_cumul_zbytes);
1414 ZCACHE_SYSFS_RO(zbud_buddied_count);
1415 ZCACHE_SYSFS_RO(zbpg_unused_list_count);
1416 ZCACHE_SYSFS_RO(evicted_raw_pages);
1417 ZCACHE_SYSFS_RO(evicted_unbuddied_pages);
1418 ZCACHE_SYSFS_RO(evicted_buddied_pages);
1419 ZCACHE_SYSFS_RO(failed_get_free_pages);
1420 ZCACHE_SYSFS_RO(failed_alloc);
1421 ZCACHE_SYSFS_RO(put_to_flush);
1422 ZCACHE_SYSFS_RO(aborted_preload);
1423 ZCACHE_SYSFS_RO(aborted_shrink);
1424 ZCACHE_SYSFS_RO(compress_poor);
1425 ZCACHE_SYSFS_RO(mean_compress_poor);
1426 ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages);
1427 ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages);
1428 ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count);
1429 ZCACHE_SYSFS_RO_ATOMIC(curr_objnode_count);
1430 ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts,
1431 zbud_show_unbuddied_list_counts);
1432 ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts,
1433 zbud_show_cumul_chunk_counts);
1434 ZCACHE_SYSFS_RO_CUSTOM(zv_curr_dist_counts,
1435 zv_curr_dist_counts_show);
1436 ZCACHE_SYSFS_RO_CUSTOM(zv_cumul_dist_counts,
1437 zv_cumul_dist_counts_show);
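/*
 * For reference, an invocation such as ZCACHE_SYSFS_RO(flush_total) above
 * expands to approximately the following (a read-only sysfs attribute plus
 * its show routine):
 *
 *	static ssize_t zcache_flush_total_show(struct kobject *kobj,
 *				struct kobj_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%lu\n", zcache_flush_total);
 *	}
 *	static struct kobj_attribute zcache_flush_total_attr = {
 *		.attr = { .name = "flush_total", .mode = 0444 },
 *		.show = zcache_flush_total_show,
 *	};
 */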
1439 static struct attribute *zcache_attrs[] = {
1440 &zcache_curr_obj_count_attr.attr,
1441 &zcache_curr_obj_count_max_attr.attr,
1442 &zcache_curr_objnode_count_attr.attr,
1443 &zcache_curr_objnode_count_max_attr.attr,
1444 &zcache_flush_total_attr.attr,
1445 &zcache_flobj_total_attr.attr,
1446 &zcache_flush_found_attr.attr,
1447 &zcache_flobj_found_attr.attr,
1448 &zcache_failed_eph_puts_attr.attr,
1449 &zcache_failed_pers_puts_attr.attr,
1450 &zcache_compress_poor_attr.attr,
1451 &zcache_mean_compress_poor_attr.attr,
1452 &zcache_zbud_curr_raw_pages_attr.attr,
1453 &zcache_zbud_curr_zpages_attr.attr,
1454 &zcache_zbud_curr_zbytes_attr.attr,
1455 &zcache_zbud_cumul_zpages_attr.attr,
1456 &zcache_zbud_cumul_zbytes_attr.attr,
1457 &zcache_zbud_buddied_count_attr.attr,
1458 &zcache_zbpg_unused_list_count_attr.attr,
1459 &zcache_evicted_raw_pages_attr.attr,
1460 &zcache_evicted_unbuddied_pages_attr.attr,
1461 &zcache_evicted_buddied_pages_attr.attr,
1462 &zcache_failed_get_free_pages_attr.attr,
1463 &zcache_failed_alloc_attr.attr,
1464 &zcache_put_to_flush_attr.attr,
1465 &zcache_aborted_preload_attr.attr,
1466 &zcache_aborted_shrink_attr.attr,
1467 &zcache_zbud_unbuddied_list_counts_attr.attr,
1468 &zcache_zbud_cumul_chunk_counts_attr.attr,
1469 &zcache_zv_curr_dist_counts_attr.attr,
1470 &zcache_zv_cumul_dist_counts_attr.attr,
1471 &zcache_zv_max_zsize_attr.attr,
1472 &zcache_zv_max_mean_zsize_attr.attr,
1473 &zcache_zv_page_count_policy_percent_attr.attr,
1477 static struct attribute_group zcache_attr_group = {
1478 .attrs = zcache_attrs,
1482 #endif /* CONFIG_SYSFS */
1484 * When zcache is disabled ("frozen"), pools can be created and destroyed,
1485 * but all puts (and thus all other operations that require memory allocation)
1486 * must fail. If zcache is unfrozen, accepts puts, and is then frozen again,
1487 * data consistency requires all puts while frozen to be converted into
1490 static bool zcache_freeze;
1493 * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
1495 static int shrink_zcache_memory(struct shrinker *shrink,
1496 struct shrink_control *sc)
1499 int nr = sc->nr_to_scan;
1500 gfp_t gfp_mask = sc->gfp_mask;
1503 if (!(gfp_mask & __GFP_FS))
1504 /* does this case really need to be skipped? */
1506 if (spin_trylock(&zcache_direct_reclaim_lock)) {
1507 zbud_evict_pages(nr);
1508 spin_unlock(&zcache_direct_reclaim_lock);
1510 zcache_aborted_shrink++;
1512 ret = (int)atomic_read(&zcache_zbud_curr_raw_pages);
1517 static struct shrinker zcache_shrinker = {
1518 .shrink = shrink_zcache_memory,
1519 .seeks = DEFAULT_SEEKS,
1523 * zcache shims between cleancache/frontswap ops and tmem
1526 static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
1527 uint32_t index, struct page *page)
1529 struct tmem_pool *pool;
1532 BUG_ON(!irqs_disabled());
1533 pool = zcache_get_pool_by_id(cli_id, pool_id);
1534 if (unlikely(pool == NULL))
1536 if (!zcache_freeze && zcache_do_preload(pool) == 0) {
1537 /* preload does preempt_disable on success */
1538 ret = tmem_put(pool, oidp, index, page_address(page),
1539 PAGE_SIZE, 0, is_ephemeral(pool));
1541 if (is_ephemeral(pool))
1542 zcache_failed_eph_puts++;
1544 zcache_failed_pers_puts++;
1546 zcache_put_pool(pool);
1547 preempt_enable_no_resched();
1549 zcache_put_to_flush++;
1550 if (atomic_read(&pool->obj_count) > 0)
1551 /* the put fails whether the flush succeeds or not */
1552 (void)tmem_flush_page(pool, oidp, index);
1553 zcache_put_pool(pool);
1559 static int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
1560 uint32_t index, struct page *page)
1562 struct tmem_pool *pool;
1564 unsigned long flags;
1565 size_t size = PAGE_SIZE;
1567 local_irq_save(flags);
1568 pool = zcache_get_pool_by_id(cli_id, pool_id);
1569 if (likely(pool != NULL)) {
1570 if (atomic_read(&pool->obj_count) > 0)
1571 ret = tmem_get(pool, oidp, index, page_address(page),
1572 &size, 0, is_ephemeral(pool));
1573 zcache_put_pool(pool);
1575 local_irq_restore(flags);
1579 static int zcache_flush_page(int cli_id, int pool_id,
1580 struct tmem_oid *oidp, uint32_t index)
1582 struct tmem_pool *pool;
1584 unsigned long flags;
1586 local_irq_save(flags);
1587 zcache_flush_total++;
1588 pool = zcache_get_pool_by_id(cli_id, pool_id);
1589 if (likely(pool != NULL)) {
1590 if (atomic_read(&pool->obj_count) > 0)
1591 ret = tmem_flush_page(pool, oidp, index);
1592 zcache_put_pool(pool);
1595 zcache_flush_found++;
1596 local_irq_restore(flags);
1600 static int zcache_flush_object(int cli_id, int pool_id,
1601 struct tmem_oid *oidp)
1603 struct tmem_pool *pool;
1605 unsigned long flags;
1607 local_irq_save(flags);
1608 zcache_flobj_total++;
1609 pool = zcache_get_pool_by_id(cli_id, pool_id);
1610 if (likely(pool != NULL)) {
1611 if (atomic_read(&pool->obj_count) > 0)
1612 ret = tmem_flush_object(pool, oidp);
1613 zcache_put_pool(pool);
1616 zcache_flobj_found++;
1617 local_irq_restore(flags);
1621 static int zcache_destroy_pool(int cli_id, int pool_id)
1623 struct tmem_pool *pool = NULL;
1624 struct zcache_client *cli = NULL;
1629 if (cli_id == LOCAL_CLIENT)
1631 else if ((unsigned int)cli_id < MAX_CLIENTS)
1632 cli = &zcache_clients[cli_id];
1635 atomic_inc(&cli->refcount);
1636 pool = cli->tmem_pools[pool_id];
1639 cli->tmem_pools[pool_id] = NULL;
1640 /* wait for pool activity on other cpus to quiesce */
1641 while (atomic_read(&pool->refcount) != 0)
1643 atomic_dec(&cli->refcount);
1645 ret = tmem_destroy_pool(pool);
1648 pr_info("zcache: destroyed pool id=%d, cli_id=%d\n",
1654 static int zcache_new_pool(uint16_t cli_id, uint32_t flags)
1657 struct tmem_pool *pool;
1658 struct zcache_client *cli = NULL;
1660 if (cli_id == LOCAL_CLIENT)
1662 else if ((unsigned int)cli_id < MAX_CLIENTS)
1663 cli = &zcache_clients[cli_id];
1666 atomic_inc(&cli->refcount);
1667 pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
1669 pr_info("zcache: pool creation failed: out of memory\n");
1673 for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
1674 if (cli->tmem_pools[poolid] == NULL)
1676 if (poolid >= MAX_POOLS_PER_CLIENT) {
1677 pr_info("zcache: pool creation failed: max exceeded\n");
1682 atomic_set(&pool->refcount, 0);
1684 pool->pool_id = poolid;
1685 tmem_new_pool(pool, flags);
1686 cli->tmem_pools[poolid] = pool;
1687 pr_info("zcache: created %s tmem pool, id=%d, client=%d\n",
1688 flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
1692 atomic_dec(&cli->refcount);
1697 * Two kernel functionalities currently can be layered on top of tmem.
1698 * These are "cleancache" which is used as a second-chance cache for clean
1699 * page cache pages; and "frontswap" which is used for swap pages
1700 * to avoid writes to disk. A generic "shim" is provided here for each
1701 * to translate in-kernel semantics to zcache semantics.
1704 #ifdef CONFIG_CLEANCACHE
1705 static void zcache_cleancache_put_page(int pool_id,
1706 struct cleancache_filekey key,
1707 pgoff_t index, struct page *page)
1709 u32 ind = (u32) index;
1710 struct tmem_oid oid = *(struct tmem_oid *)&key;
1712 if (likely(ind == index))
1713 (void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index, page);
1716 static int zcache_cleancache_get_page(int pool_id,
1717 struct cleancache_filekey key,
1718 pgoff_t index, struct page *page)
1720 u32 ind = (u32) index;
1721 struct tmem_oid oid = *(struct tmem_oid *)&key;
1724 if (likely(ind == index))
1725 ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index, page);
1729 static void zcache_cleancache_flush_page(int pool_id,
1730 struct cleancache_filekey key,
1733 u32 ind = (u32) index;
1734 struct tmem_oid oid = *(struct tmem_oid *)&key;
1736 if (likely(ind == index))
1737 (void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind);
1740 static void zcache_cleancache_flush_inode(int pool_id,
1741 struct cleancache_filekey key)
1743 struct tmem_oid oid = *(struct tmem_oid *)&key;
1745 (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
1748 static void zcache_cleancache_flush_fs(int pool_id)
1751 (void)zcache_destroy_pool(LOCAL_CLIENT, pool_id);
1754 static int zcache_cleancache_init_fs(size_t pagesize)
1756 BUG_ON(sizeof(struct cleancache_filekey) !=
1757 sizeof(struct tmem_oid));
1758 BUG_ON(pagesize != PAGE_SIZE);
1759 return zcache_new_pool(LOCAL_CLIENT, 0);
1762 static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
1764 /* shared pools are unsupported and map to private */
1765 BUG_ON(sizeof(struct cleancache_filekey) !=
1766 sizeof(struct tmem_oid));
1767 BUG_ON(pagesize != PAGE_SIZE);
1768 return zcache_new_pool(LOCAL_CLIENT, 0);
1771 static struct cleancache_ops zcache_cleancache_ops = {
1772 .put_page = zcache_cleancache_put_page,
1773 .get_page = zcache_cleancache_get_page,
1774 .flush_page = zcache_cleancache_flush_page,
1775 .flush_inode = zcache_cleancache_flush_inode,
1776 .flush_fs = zcache_cleancache_flush_fs,
1777 .init_shared_fs = zcache_cleancache_init_shared_fs,
1778 .init_fs = zcache_cleancache_init_fs
1781 struct cleancache_ops zcache_cleancache_register_ops(void)
1783 struct cleancache_ops old_ops =
1784 cleancache_register_ops(&zcache_cleancache_ops);
1790 #ifdef CONFIG_FRONTSWAP
1791 /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
1792 static int zcache_frontswap_poolid = -1;
1795 * Swizzling increases objects per swaptype, increasing tmem concurrency
1796 * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
1799 #define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
1800 #define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
1801 #define iswiz(_ind) (_ind >> SWIZ_BITS)
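/*
 * Editor's worked example of the swizzling above (assuming, for
 * illustration, that SWIZ_BITS were 4): a put of swap type 2 at offset
 * 0x1234 uses oid.oid[0] = _oswiz(2, 0x1234) = (2 << 4) | 0x4 = 0x24 and
 * page index iswiz(0x1234) = 0x123, so consecutive swap offsets are spread
 * across 1 << SWIZ_BITS distinct tmem objects per swaptype.
 */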
1803 static inline struct tmem_oid oswiz(unsigned type, u32 ind)
1805 struct tmem_oid oid = { .oid = { 0 } };
1806 oid.oid[0] = _oswiz(type, ind);
1810 static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
1813 u64 ind64 = (u64)offset;
1814 u32 ind = (u32)offset;
1815 struct tmem_oid oid = oswiz(type, ind);
1817 unsigned long flags;
1819 BUG_ON(!PageLocked(page));
1820 if (likely(ind64 == ind)) {
1821 local_irq_save(flags);
1822 ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid,
1823 &oid, iswiz(ind), page);
1824 local_irq_restore(flags);
1829 /* returns 0 if the page was successfully gotten from frontswap, -1 if
1830 * it was not present (should never happen!) */
1831 static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
1834 u64 ind64 = (u64)offset;
1835 u32 ind = (u32)offset;
1836 struct tmem_oid oid = oswiz(type, ind);
1839 BUG_ON(!PageLocked(page));
1840 if (likely(ind64 == ind))
1841 ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid,
1842 &oid, iswiz(ind), page);
1846 /* flush a single page from frontswap */
1847 static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
1849 u64 ind64 = (u64)offset;
1850 u32 ind = (u32)offset;
1851 struct tmem_oid oid = oswiz(type, ind);
1853 if (likely(ind64 == ind))
1854 (void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid,
1858 /* flush all pages from the passed swaptype */
1859 static void zcache_frontswap_flush_area(unsigned type)
1861 struct tmem_oid oid;
1864 for (ind = SWIZ_MASK; ind >= 0; ind--) {
1865 oid = oswiz(type, ind);
1866 (void)zcache_flush_object(LOCAL_CLIENT,
1867 zcache_frontswap_poolid, &oid);
1871 static void zcache_frontswap_init(unsigned ignored)
1873 /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
1874 if (zcache_frontswap_poolid < 0)
1875 zcache_frontswap_poolid =
1876 zcache_new_pool(LOCAL_CLIENT, TMEM_POOL_PERSIST);
1879 static struct frontswap_ops zcache_frontswap_ops = {
1880 .put_page = zcache_frontswap_put_page,
1881 .get_page = zcache_frontswap_get_page,
1882 .flush_page = zcache_frontswap_flush_page,
1883 .flush_area = zcache_frontswap_flush_area,
1884 .init = zcache_frontswap_init
1887 struct frontswap_ops zcache_frontswap_register_ops(void)
1889 struct frontswap_ops old_ops =
1890 frontswap_register_ops(&zcache_frontswap_ops);
1897 * zcache initialization
1898 * NOTE FOR NOW zcache MUST BE PROVIDED AS A KERNEL BOOT PARAMETER OR
1902 static int zcache_enabled;
1904 static int __init enable_zcache(char *s)
1909 __setup("zcache", enable_zcache);
1911 /* allow independent dynamic disabling of cleancache and frontswap */
1913 static int use_cleancache = 1;
1915 static int __init no_cleancache(char *s)
1921 __setup("nocleancache", no_cleancache);
1923 static int use_frontswap = 1;
1925 static int __init no_frontswap(char *s)
1931 __setup("nofrontswap", no_frontswap);
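/*
 * Example usage (editor's note): with the __setup() hooks above, booting
 * with "zcache" on the kernel command line enables zcache, and adding
 * "nocleancache" or "nofrontswap" disables the corresponding shim; e.g.
 * "zcache nofrontswap" enables only the cleancache side.
 */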
1933 static int __init zcache_init(void)
1938 ret = sysfs_create_group(mm_kobj, &zcache_attr_group);
1940 pr_err("zcache: can't create sysfs\n");
1943 #endif /* CONFIG_SYSFS */
1944 #if defined(CONFIG_CLEANCACHE) || defined(CONFIG_FRONTSWAP)
1945 if (zcache_enabled) {
1948 tmem_register_hostops(&zcache_hostops);
1949 tmem_register_pamops(&zcache_pamops);
1950 ret = register_cpu_notifier(&zcache_cpu_notifier_block);
1952 pr_err("zcache: can't register cpu notifier\n");
1955 for_each_online_cpu(cpu) {
1956 void *pcpu = (void *)(long)cpu;
1957 zcache_cpu_notifier(&zcache_cpu_notifier_block,
1958 CPU_UP_PREPARE, pcpu);
1961 zcache_objnode_cache = kmem_cache_create("zcache_objnode",
1962 sizeof(struct tmem_objnode), 0, 0, NULL);
1963 zcache_obj_cache = kmem_cache_create("zcache_obj",
1964 sizeof(struct tmem_obj), 0, 0, NULL);
1965 ret = zcache_new_client(LOCAL_CLIENT);
1967 pr_err("zcache: can't create client\n");
1971 #ifdef CONFIG_CLEANCACHE
1972 if (zcache_enabled && use_cleancache) {
1973 struct cleancache_ops old_ops;
1976 register_shrinker(&zcache_shrinker);
1977 old_ops = zcache_cleancache_register_ops();
1978 pr_info("zcache: cleancache enabled using kernel "
1979 "transcendent memory and compression buddies\n");
1980 if (old_ops.init_fs != NULL)
1981 pr_warning("zcache: cleancache_ops overridden");
1984 #ifdef CONFIG_FRONTSWAP
1985 if (zcache_enabled && use_frontswap) {
1986 struct frontswap_ops old_ops;
1988 old_ops = zcache_frontswap_register_ops();
1989 pr_info("zcache: frontswap enabled using kernel "
1990 "transcendent memory and xvmalloc\n");
1991 if (old_ops.init != NULL)
1992 pr_warning("zcache: frontswap_ops overridden");
1999 module_init(zcache_init)