/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are important
 * but the code is named as if it's the priorities) are written in an arbitrary
 * list of buckets on disk, with a pointer to them in the journal header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on the
 * unused list move to the free list, optionally being discarded in the process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one or more buckets from different caches
 * out of a cache set.
 *
 * free_some_buckets() drives all the processes described above. It's called
 * from bch_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */

#include "bcache.h"
#include "btree.h"

#include <linux/random.h>

#define MAX_IN_FLIGHT_DISCARDS		8U

/* Bucket heap / gen */

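/*
 * bch_inc_gen() bumps a bucket's generation and records how far the in-memory
 * and on-disk gens have drifted: need_gc tracks the largest gap versus the
 * oldest gen in the btree, and need_save_prio the gap versus the gen last
 * written to disk, so gc and a prio write happen before either can wrap.
 */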
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	if (CACHE_SYNC(&ca->set->sb)) {
		ca->need_save_prio = max(ca->need_save_prio,
					 bucket_disk_gen(b));
		WARN_ON_ONCE(ca->need_save_prio > BUCKET_DISK_GEN_MAX);
	}

	return ret;
}

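/*
 * bch_rescale_priorities() charges @sectors against c->rescale; each time the
 * counter goes negative it is topped back up (by roughly nbuckets *
 * bucket_size / 1024) and the priority of every unpinned, non-btree data
 * bucket is decayed by one, with c->min_prio tracking the new minimum.
 */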
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
	unsigned i;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca)
			if (b->prio &&
			    b->prio != BTREE_PRIO &&
			    !atomic_read(&b->pin)) {
				b->prio--;
				c->min_prio = min(c->min_prio, b->prio);
			}

	mutex_unlock(&c->bucket_lock);
}

/* Discard/TRIM */

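/*
 * struct discard is a preallocated discard request: ca->discards holds the
 * idle ones, discards_in_flight counts those currently queued, and each
 * request carries its own bio/bio_vec so issuing a discard never has to
 * allocate memory.
 */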
struct discard {
	struct list_head	list;
	struct work_struct	work;
	struct cache		*ca;
	long			bucket;

	struct bio		bio;
	struct bio_vec		bv;
};

static void discard_finish(struct work_struct *w)
{
	struct discard *d = container_of(w, struct discard, work);
	struct cache *ca = d->ca;
	char buf[BDEVNAME_SIZE];

	if (!test_bit(BIO_UPTODATE, &d->bio.bi_flags)) {
		pr_notice("discard error on %s, disabling",
			 bdevname(ca->bdev, buf));
		d->ca->discard = 0;
	}

	mutex_lock(&ca->set->bucket_lock);

	fifo_push(&ca->free, d->bucket);
	list_add(&d->list, &ca->discards);
	atomic_dec(&ca->discards_in_flight);

	mutex_unlock(&ca->set->bucket_lock);

	closure_wake_up(&ca->set->bucket_wait);
	wake_up(&ca->set->alloc_wait);

	closure_put(&ca->set->cl);
}

static void discard_endio(struct bio *bio, int error)
{
	struct discard *d = container_of(bio, struct discard, bio);
	schedule_work(&d->work);
}

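/*
 * do_discard() takes an idle request off ca->discards and issues a
 * REQ_DISCARD bio covering the whole bucket; discard_endio() punts completion
 * to a workqueue, and discard_finish() then pushes the bucket onto ca->free
 * and returns the request to the idle list.
 */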
static void do_discard(struct cache *ca, long bucket)
{
	struct discard *d = list_first_entry(&ca->discards,
					     struct discard, list);

	list_del(&d->list);
	d->bucket = bucket;

	atomic_inc(&ca->discards_in_flight);
	closure_get(&ca->set->cl);

	bio_init(&d->bio);

	d->bio.bi_sector	= bucket_to_sector(ca->set, d->bucket);
	d->bio.bi_bdev		= ca->bdev;
	d->bio.bi_rw		= REQ_WRITE|REQ_DISCARD;
	d->bio.bi_max_vecs	= 1;
	d->bio.bi_io_vec	= d->bio.bi_inline_vecs;
	d->bio.bi_size		= bucket_bytes(ca);
	d->bio.bi_end_io	= discard_endio;
	bio_set_prio(&d->bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	submit_bio(0, &d->bio);
}

/* Allocation */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX &&
		bucket_disk_gen(b) < BUCKET_DISK_GEN_MAX;
}

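/*
 * bch_bucket_add_unused() tries to put a clean bucket (no mark, no used
 * sectors) on the unused list so it can be reallocated without waiting for a
 * prio write. It refuses when a FIFO cache already has free buckets above the
 * moving gc watermark, or when bumping the gen again would risk wrapping it.
 */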
bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
{
	BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));

	if (fifo_used(&ca->free) > ca->watermark[WATERMARK_MOVINGGC] &&
	    CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO)
		return false;

	b->prio = 0;

	if (can_inc_bucket_gen(b) &&
	    fifo_push(&ca->unused, b - ca->buckets)) {
		atomic_inc(&b->pin);
		return true;
	}

	return false;
}

static bool can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	return GC_MARK(b) == GC_MARK_RECLAIMABLE &&
		!atomic_read(&b->pin) &&
		can_inc_bucket_gen(b);
}

static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
	fifo_push(&ca->free_inc, b - ca->buckets);
}

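/*
 * The heap comparisons below order buckets by bucket_prio(): priority
 * relative to the oldest data in the cache set, weighted by how many sectors
 * the bucket still holds. Lower values make better eviction candidates.
 */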
#define bucket_prio(b)				\
	(((unsigned) (b->prio - ca->set->min_prio)) * GC_SECTORS_USED(b))

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))

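/*
 * invalidate_buckets_lru() scans every bucket, keeping the lowest-bucket_prio
 * candidates in a fixed-size heap, then re-heapifies and invalidates them
 * smallest first until free_inc is full. If it runs out of candidates it
 * queues garbage collection instead of rescanning uselessly.
 */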
static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		/*
		 * If we fill up the unused list, then if we return before
		 * adding anything to the free_inc list we'll skip writing
		 * prios/gens and just go back to allocating from the unused
		 * list:
		 */
		if (fifo_full(&ca->unused))
			return;

		if (!can_invalidate_bucket(ca, b))
			continue;

		if (!GC_SECTORS_USED(b) &&
		    bch_bucket_add_unused(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			bch_queue_gc(ca->set);
			return;
		}

		invalidate_one_bucket(ca, b);
	}
}

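/*
 * invalidate_buckets_fifo() walks buckets in order starting from where it
 * left off (fifo_last_bucket), wrapping at the end of the device; if a full
 * pass finds too few reclaimable buckets it queues garbage collection.
 */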
static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (can_invalidate_bucket(ca, b))
			invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			bch_queue_gc(ca->set);
			return;
		}
	}
}

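/*
 * invalidate_buckets_random() picks buckets uniformly at random; after
 * checking half of the device's buckets without filling free_inc it gives up
 * and queues garbage collection.
 */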
static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;
		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (can_invalidate_bucket(ca, b))
			invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			bch_queue_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets(struct cache *ca)
{
	if (ca->invalidate_needs_gc)
		return;

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}

	pr_debug("free %zu/%zu free_inc %zu/%zu unused %zu/%zu",
		 fifo_used(&ca->free), ca->free.size,
		 fifo_used(&ca->free_inc), ca->free_inc.size,
		 fifo_used(&ca->unused), ca->unused.size);
}

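/*
 * allocator_wait() works like wait_event(), but drops bucket_lock around
 * schedule() and, if the cache set is being stopped, finishes the wait and
 * returns from the allocator closure instead of blocking forever.
 */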
#define allocator_wait(ca, cond)					\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	while (1) {							\
		prepare_to_wait(&ca->set->alloc_wait,			\
				&__wait, TASK_INTERRUPTIBLE);		\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) {	\
			finish_wait(&ca->set->alloc_wait, &__wait);	\
			closure_return(cl);				\
		}							\
									\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
									\
	finish_wait(&ca->set->alloc_wait, &__wait);			\
} while (0)

void bch_allocator_thread(struct closure *cl)
{
	struct cache *ca = container_of(cl, struct cache, alloc);

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (1) {
			long bucket;

			if ((!atomic_read(&ca->set->prio_blocked) ||
			     !CACHE_SYNC(&ca->set->sb)) &&
			    !fifo_empty(&ca->unused))
				fifo_pop(&ca->unused, bucket);
			else if (!fifo_empty(&ca->free_inc))
				fifo_pop(&ca->free_inc, bucket);
			else
				break;

			allocator_wait(ca, (int) fifo_free(&ca->free) >
				       atomic_read(&ca->discards_in_flight));

			if (ca->discard) {
				allocator_wait(ca, !list_empty(&ca->discards));
				do_discard(ca, bucket);
			} else {
				fifo_push(&ca->free, bucket);
				closure_wake_up(&ca->set->bucket_wait);
			}
		}

		/*
		 * We've run out of free buckets; we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */

		allocator_wait(ca, ca->set->gc_mark_valid &&
			       (ca->need_save_prio > 64 ||
				!ca->invalidate_needs_gc));
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->set->sb) &&
		    (!fifo_empty(&ca->free_inc) ||
		     ca->need_save_prio > 64))
			bch_prio_write(ca);
	}
}

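/*
 * bch_bucket_alloc() wakes the allocator thread and then tries to pop a
 * bucket off ca->free, but only while the list is above the watermark for
 * this allocation type. If no bucket is available and @cl is a blocking
 * closure, it waits on bucket_wait and retries; otherwise it returns -1.
 */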
long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
{
	long r = -1;
again:
	wake_up(&ca->set->alloc_wait);

	if (fifo_used(&ca->free) > ca->watermark[watermark] &&
	    fifo_pop(&ca->free, r)) {
		struct bucket *b = ca->buckets + r;
#ifdef CONFIG_BCACHE_EDEBUG
		size_t iter;
		long i;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		fifo_for_each(i, &ca->free, iter)
			BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
		fifo_for_each(i, &ca->unused, iter)
			BUG_ON(i == r);
#endif
		BUG_ON(atomic_read(&b->pin) != 1);

		SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

		if (watermark <= WATERMARK_METADATA) {
			SET_GC_MARK(b, GC_MARK_METADATA);
			b->prio = BTREE_PRIO;
		} else {
			SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
			b->prio = INITIAL_PRIO;
		}

		return r;
	}

	pr_debug("alloc failure: blocked %i free %zu free_inc %zu unused %zu",
		 atomic_read(&ca->set->prio_blocked), fifo_used(&ca->free),
		 fifo_used(&ca->free_inc), fifo_used(&ca->unused));

	if (cl) {
		closure_wait(&ca->set->bucket_wait, cl);

		if (closure_blocking(cl)) {
			mutex_unlock(&ca->set->bucket_lock);
			closure_sync(cl);
			mutex_lock(&ca->set->bucket_lock);
			goto again;
		}
	}

	return -1;
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bucket *b = PTR_BUCKET(c, k, i);

		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_SECTORS_USED(b, 0);
		bch_bucket_add_unused(PTR_CACHE(c, k, i), b);
	}
}

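/*
 * __bch_bucket_alloc_set() allocates one bucket from each of the first @n
 * caches (in cache_by_alloc order) and builds @k to point at them; if any
 * allocation fails it frees what it already got and returns -1. Must be
 * called with bucket_lock held; bch_bucket_alloc_set() is the locking
 * wrapper.
 */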
int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
			   struct bkey *k, int n, struct closure *cl)
{
	int i;

	lockdep_assert_held(&c->bucket_lock);
	BUG_ON(!n || n > c->caches_loaded || n > 8);

	bkey_init(k);

	/* sort by free space/prio of oldest data in caches */

	for (i = 0; i < n; i++) {
		struct cache *ca = c->cache_by_alloc[i];
		long b = bch_bucket_alloc(ca, watermark, cl);

		if (b == -1)
			goto err;

		k->ptr[i] = PTR(ca->buckets[b].gen,
				bucket_to_sector(c, b),
				ca->sb.nr_this_dev);

		SET_KEY_PTRS(k, i + 1);
	}

	return 0;
err:
	bch_bucket_free(c, k);
	__bkey_put(c, k);
	return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
			 struct bkey *k, int n, struct closure *cl)
{
	int ret;
	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, watermark, k, n, cl);
	mutex_unlock(&c->bucket_lock);
	return ret;
}

/* Init */

void bch_cache_allocator_exit(struct cache *ca)
{
	struct discard *d;

	while (!list_empty(&ca->discards)) {
		d = list_first_entry(&ca->discards, struct discard, list);
		cancel_work_sync(&d->work);
		list_del(&d->list);
		kfree(d);
	}
}

int bch_cache_allocator_init(struct cache *ca)
{
	unsigned i;

	/*
	 * Reserve:
	 * Prio/gen writes first
	 * Then 8 for btree allocations
	 * Then half for the moving garbage collector
	 */

	ca->watermark[WATERMARK_PRIO] = 0;

	ca->watermark[WATERMARK_METADATA] = prio_buckets(ca);

	ca->watermark[WATERMARK_MOVINGGC] = 8 +
		ca->watermark[WATERMARK_METADATA];

	ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
		ca->watermark[WATERMARK_MOVINGGC];

	for (i = 0; i < MAX_IN_FLIGHT_DISCARDS; i++) {
		struct discard *d = kzalloc(sizeof(*d), GFP_KERNEL);
		if (!d)
			return -ENOMEM;

		d->ca = ca;
		INIT_WORK(&d->work, discard_finish);
		list_add(&d->list, &ca->discards);
	}

	return 0;
}