md: fix another deadlock with removing sysfs attributes.
1 /*
2    md.c : Multiple Devices driver for Linux
3           Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35 #include <linux/kthread.h>
36 #include <linux/blkdev.h>
37 #include <linux/sysctl.h>
38 #include <linux/seq_file.h>
39 #include <linux/buffer_head.h> /* for invalidate_bdev */
40 #include <linux/poll.h>
41 #include <linux/ctype.h>
42 #include <linux/string.h>
43 #include <linux/hdreg.h>
44 #include <linux/proc_fs.h>
45 #include <linux/random.h>
46 #include <linux/reboot.h>
47 #include <linux/file.h>
48 #include <linux/compat.h>
49 #include <linux/delay.h>
50 #include <linux/raid/md_p.h>
51 #include <linux/raid/md_u.h>
52 #include <linux/slab.h>
53 #include "md.h"
54 #include "bitmap.h"
55
56 #define DEBUG 0
57 #define dprintk(x...) ((void)(DEBUG && printk(x)))
58
59
60 #ifndef MODULE
61 static void autostart_arrays(int part);
62 #endif
63
64 static LIST_HEAD(pers_list);
65 static DEFINE_SPINLOCK(pers_lock);
66
67 static void md_print_devices(void);
68
69 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
70
71 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
72
73 /*
74  * Default number of read corrections we'll attempt on an rdev
75  * before ejecting it from the array. We divide the read error
76  * count by 2 for every hour elapsed between read errors.
77  */
78 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
79 /*
80  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
81  * is 1000 KB/sec, so the extra system load does not show up that much.
82  * Increase it if you want to have more _guaranteed_ speed. Note that
83  * the RAID driver will use the maximum available bandwidth if the IO
84  * subsystem is idle. There is also an 'absolute maximum' reconstruction
85  * speed limit - in case reconstruction slows down your system despite
86  * idle IO detection.
87  *
88  * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
89  * or /sys/block/mdX/md/sync_speed_{min,max}
90  */
91
92 static int sysctl_speed_limit_min = 1000;
93 static int sysctl_speed_limit_max = 200000;
94 static inline int speed_min(mddev_t *mddev)
95 {
96         return mddev->sync_speed_min ?
97                 mddev->sync_speed_min : sysctl_speed_limit_min;
98 }
99
100 static inline int speed_max(mddev_t *mddev)
101 {
102         return mddev->sync_speed_max ?
103                 mddev->sync_speed_max : sysctl_speed_limit_max;
104 }
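/*
 * Illustrative sketch (not part of the driver): how the resync loop is
 * expected to consult these limits.  md_do_sync, later in this file,
 * throttles along these lines; is_mddev_idle() is also defined later,
 * and 'currspeed' is the current resync throughput in KB/sec.
 */
static inline int resync_should_throttle(mddev_t *mddev, int currspeed)
{
        if (currspeed <= speed_min(mddev))
                return 0;       /* below the guaranteed minimum: never throttle */
        return currspeed > speed_max(mddev) || !is_mddev_idle(mddev, 0);
}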
105
106 static struct ctl_table_header *raid_table_header;
107
108 static ctl_table raid_table[] = {
109         {
110                 .procname       = "speed_limit_min",
111                 .data           = &sysctl_speed_limit_min,
112                 .maxlen         = sizeof(int),
113                 .mode           = S_IRUGO|S_IWUSR,
114                 .proc_handler   = proc_dointvec,
115         },
116         {
117                 .procname       = "speed_limit_max",
118                 .data           = &sysctl_speed_limit_max,
119                 .maxlen         = sizeof(int),
120                 .mode           = S_IRUGO|S_IWUSR,
121                 .proc_handler   = proc_dointvec,
122         },
123         { }
124 };
125
126 static ctl_table raid_dir_table[] = {
127         {
128                 .procname       = "raid",
129                 .maxlen         = 0,
130                 .mode           = S_IRUGO|S_IXUGO,
131                 .child          = raid_table,
132         },
133         { }
134 };
135
136 static ctl_table raid_root_table[] = {
137         {
138                 .procname       = "dev",
139                 .maxlen         = 0,
140                 .mode           = 0555,
141                 .child          = raid_dir_table,
142         },
143         {  }
144 };
145
146 static const struct block_device_operations md_fops;
147
148 static int start_readonly;
149
150 /*
151  * We have a system wide 'event count' that is incremented
152  * on any 'interesting' event, and readers of /proc/mdstat
153  * can use 'poll' or 'select' to find out when the event
154  * count increases.
155  *
156  * Events are:
157  *  start array, stop array, error, add device, remove device,
158  *  start build, activate spare
159  */
160 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
161 static atomic_t md_event_count;
162 void md_new_event(mddev_t *mddev)
163 {
164         atomic_inc(&md_event_count);
165         wake_up(&md_event_waiters);
166 }
167 EXPORT_SYMBOL_GPL(md_new_event);
168
169 /* Alternate version that can be called from interrupts
170  * when calling sysfs_notify isn't needed.
171  */
172 static void md_new_event_inintr(mddev_t *mddev)
173 {
174         atomic_inc(&md_event_count);
175         wake_up(&md_event_waiters);
176 }
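/*
 * Illustrative userspace sketch (not kernel code): waiting for the next
 * md event by polling /proc/mdstat, as described above.  mdstat's poll
 * method flags a new event with POLLERR|POLLPRI.
 */
#include <fcntl.h>
#include <poll.h>

static int wait_for_md_event(void)
{
        struct pollfd pfd = { .events = POLLPRI };

        pfd.fd = open("/proc/mdstat", O_RDONLY);
        if (pfd.fd < 0)
                return -1;
        return poll(&pfd, 1, -1);  /* wakes when md_event_count changes */
}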
177
178 /*
179  * Enables iteration over all existing md arrays;
180  * all_mddevs_lock protects this list.
181  */
182 static LIST_HEAD(all_mddevs);
183 static DEFINE_SPINLOCK(all_mddevs_lock);
184
185
186 /*
187  * iterates through all used mddevs in the system.
188  * We take care to grab the all_mddevs_lock whenever navigating
189  * the list, and to always hold a refcount when unlocked.
190  * Any code which breaks out of this loop while owning
191  * a reference to the current mddev must mddev_put it.
192  */
193 #define for_each_mddev(mddev,tmp)                                       \
194                                                                         \
195         for (({ spin_lock(&all_mddevs_lock);                            \
196                 tmp = all_mddevs.next;                                  \
197                 mddev = NULL;});                                        \
198              ({ if (tmp != &all_mddevs)                                 \
199                         mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
200                 spin_unlock(&all_mddevs_lock);                          \
201                 if (mddev) mddev_put(mddev);                            \
202                 mddev = list_entry(tmp, mddev_t, all_mddevs);           \
203                 tmp != &all_mddevs;});                                  \
204              ({ spin_lock(&all_mddevs_lock);                            \
205                 tmp = tmp->next;})                                      \
206                 )
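/*
 * Illustrative sketch: iterating over all arrays with the macro above.
 * 'tmp' must be a struct list_head pointer; a caller that breaks out of
 * the loop early still owns a reference and must mddev_put() it.
 */
static inline int count_all_mddevs(void)
{
        mddev_t *mddev;
        struct list_head *tmp;
        int cnt = 0;

        for_each_mddev(mddev, tmp)
                cnt++;
        return cnt;
}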
207
208
209 /* Rather than calling directly into the personality make_request function,
210  * IO requests come here first so that we can check if the device is
211  * being suspended pending a reconfiguration.
212  * We hold a refcount over the call to ->make_request.  By the time that
213  * call has finished, the bio has been linked into some internal structure
214  * and so is visible to ->quiesce(), so we don't need the refcount any more.
215  */
216 static int md_make_request(struct request_queue *q, struct bio *bio)
217 {
218         const int rw = bio_data_dir(bio);
219         mddev_t *mddev = q->queuedata;
220         int rv;
221         int cpu;
222
223         if (mddev == NULL || mddev->pers == NULL) {
224                 bio_io_error(bio);
225                 return 0;
226         }
227         rcu_read_lock();
228         if (mddev->suspended || mddev->barrier) {
229                 DEFINE_WAIT(__wait);
230                 for (;;) {
231                         prepare_to_wait(&mddev->sb_wait, &__wait,
232                                         TASK_UNINTERRUPTIBLE);
233                         if (!mddev->suspended && !mddev->barrier)
234                                 break;
235                         rcu_read_unlock();
236                         schedule();
237                         rcu_read_lock();
238                 }
239                 finish_wait(&mddev->sb_wait, &__wait);
240         }
241         atomic_inc(&mddev->active_io);
242         rcu_read_unlock();
243
244         rv = mddev->pers->make_request(mddev, bio);
245
246         cpu = part_stat_lock();
247         part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
248         part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
249                       bio_sectors(bio));
250         part_stat_unlock();
251
252         if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
253                 wake_up(&mddev->sb_wait);
254
255         return rv;
256 }
257
258 /* mddev_suspend makes sure no new requests are submitted
259  * to the device, and that any requests that have been submitted
260  * are completely handled.
261  * Once ->stop is called and completes, the module will be completely
262  * unused.
263  */
264 static void mddev_suspend(mddev_t *mddev)
265 {
266         BUG_ON(mddev->suspended);
267         mddev->suspended = 1;
268         synchronize_rcu();
269         wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
270         mddev->pers->quiesce(mddev, 1);
271 }
272
273 static void mddev_resume(mddev_t *mddev)
274 {
275         mddev->suspended = 0;
276         wake_up(&mddev->sb_wait);
277         mddev->pers->quiesce(mddev, 0);
278 }
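/*
 * Illustrative sketch of the intended usage bracket around a personality
 * reconfiguration ('rebuild_internals' is a hypothetical stand-in):
 *
 *      mddev_suspend(mddev);        new IO blocks, in-flight IO drains
 *      rebuild_internals(mddev);    hypothetical: no IO in flight here
 *      mddev_resume(mddev);         new IO may proceed again
 */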
279
280 int mddev_congested(mddev_t *mddev, int bits)
281 {
282         if (mddev->barrier)
283                 return 1;
284         return mddev->suspended;
285 }
286 EXPORT_SYMBOL(mddev_congested);
287
288 /*
289  * Generic barrier handling for md
290  */
291
292 #define POST_REQUEST_BARRIER ((void*)1)
293
294 static void md_end_barrier(struct bio *bio, int err)
295 {
296         mdk_rdev_t *rdev = bio->bi_private;
297         mddev_t *mddev = rdev->mddev;
298         if (err == -EOPNOTSUPP && mddev->barrier != POST_REQUEST_BARRIER)
299                 set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags);
300
301         rdev_dec_pending(rdev, mddev);
302
303         if (atomic_dec_and_test(&mddev->flush_pending)) {
304                 if (mddev->barrier == POST_REQUEST_BARRIER) {
305                         /* This was a post-request barrier */
306                         mddev->barrier = NULL;
307                         wake_up(&mddev->sb_wait);
308                 } else
309                         /* The pre-request barrier has finished */
310                         schedule_work(&mddev->barrier_work);
311         }
312         bio_put(bio);
313 }
314
315 static void submit_barriers(mddev_t *mddev)
316 {
317         mdk_rdev_t *rdev;
318
319         rcu_read_lock();
320         list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
321                 if (rdev->raid_disk >= 0 &&
322                     !test_bit(Faulty, &rdev->flags)) {
323                         /* Take two references, one is dropped
324                          * when request finishes, one after
325                          * we re-take rcu_read_lock
326                          */
327                         struct bio *bi;
328                         atomic_inc(&rdev->nr_pending);
329                         atomic_inc(&rdev->nr_pending);
330                         rcu_read_unlock();
331                         bi = bio_alloc(GFP_KERNEL, 0);
332                         bi->bi_end_io = md_end_barrier;
333                         bi->bi_private = rdev;
334                         bi->bi_bdev = rdev->bdev;
335                         atomic_inc(&mddev->flush_pending);
336                         submit_bio(WRITE_BARRIER, bi);
337                         rcu_read_lock();
338                         rdev_dec_pending(rdev, mddev);
339                 }
340         rcu_read_unlock();
341 }
342
343 static void md_submit_barrier(struct work_struct *ws)
344 {
345         mddev_t *mddev = container_of(ws, mddev_t, barrier_work);
346         struct bio *bio = mddev->barrier;
347
348         atomic_set(&mddev->flush_pending, 1);
349
350         if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
351                 bio_endio(bio, -EOPNOTSUPP);
352         else if (bio->bi_size == 0)
353                 /* an empty barrier - all done */
354                 bio_endio(bio, 0);
355         else {
356                 bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
357                 if (mddev->pers->make_request(mddev, bio))
358                         generic_make_request(bio);
359                 mddev->barrier = POST_REQUEST_BARRIER;
360                 submit_barriers(mddev);
361         }
362         if (atomic_dec_and_test(&mddev->flush_pending)) {
363                 mddev->barrier = NULL;
364                 wake_up(&mddev->sb_wait);
365         }
366 }
367
368 void md_barrier_request(mddev_t *mddev, struct bio *bio)
369 {
370         spin_lock_irq(&mddev->write_lock);
371         wait_event_lock_irq(mddev->sb_wait,
372                             !mddev->barrier,
373                             mddev->write_lock, /*nothing*/);
374         mddev->barrier = bio;
375         spin_unlock_irq(&mddev->write_lock);
376
377         atomic_set(&mddev->flush_pending, 1);
378         INIT_WORK(&mddev->barrier_work, md_submit_barrier);
379
380         submit_barriers(mddev);
381
382         if (atomic_dec_and_test(&mddev->flush_pending))
383                 schedule_work(&mddev->barrier_work);
384 }
385 EXPORT_SYMBOL(md_barrier_request);
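/*
 * Illustrative sketch: how a personality's make_request hands barrier
 * bios to md_barrier_request() above; raid0 and linear follow this
 * pattern ('example_make_request' is a hypothetical name).
 */
static int example_make_request(mddev_t *mddev, struct bio *bio)
{
        if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
                md_barrier_request(mddev, bio);
                return 0;
        }
        /* ... handle ordinary reads and writes ... */
        return 0;
}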
386
387 static inline mddev_t *mddev_get(mddev_t *mddev)
388 {
389         atomic_inc(&mddev->active);
390         return mddev;
391 }
392
393 static void mddev_delayed_delete(struct work_struct *ws);
394
395 static void mddev_put(mddev_t *mddev)
396 {
397         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
398                 return;
399         if (!mddev->raid_disks && list_empty(&mddev->disks) &&
400             mddev->ctime == 0 && !mddev->hold_active) {
401                 /* Array is not configured at all, and not held active,
402                  * so destroy it */
403                 list_del(&mddev->all_mddevs);
404                 if (mddev->gendisk) {
405                         /* we did a probe so need to clean up.
406                          * Call schedule_work inside the spinlock
407                          * so that flush_scheduled_work() after
408                          * mddev_find will succeed in waiting for the
409                          * work to be done.
410                          */
411                         INIT_WORK(&mddev->del_work, mddev_delayed_delete);
412                         schedule_work(&mddev->del_work);
413                 } else
414                         kfree(mddev);
415         }
416         spin_unlock(&all_mddevs_lock);
417 }
418
419 static void mddev_init(mddev_t *mddev)
420 {
421         mutex_init(&mddev->open_mutex);
422         mutex_init(&mddev->reconfig_mutex);
423         mutex_init(&mddev->bitmap_info.mutex);
424         INIT_LIST_HEAD(&mddev->disks);
425         INIT_LIST_HEAD(&mddev->all_mddevs);
426         init_timer(&mddev->safemode_timer);
427         atomic_set(&mddev->active, 1);
428         atomic_set(&mddev->openers, 0);
429         atomic_set(&mddev->active_io, 0);
430         spin_lock_init(&mddev->write_lock);
431         atomic_set(&mddev->flush_pending, 0);
432         init_waitqueue_head(&mddev->sb_wait);
433         init_waitqueue_head(&mddev->recovery_wait);
434         mddev->reshape_position = MaxSector;
435         mddev->resync_min = 0;
436         mddev->resync_max = MaxSector;
437         mddev->level = LEVEL_NONE;
438 }
439
440 static mddev_t * mddev_find(dev_t unit)
441 {
442         mddev_t *mddev, *new = NULL;
443
444  retry:
445         spin_lock(&all_mddevs_lock);
446
447         if (unit) {
448                 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
449                         if (mddev->unit == unit) {
450                                 mddev_get(mddev);
451                                 spin_unlock(&all_mddevs_lock);
452                                 kfree(new);
453                                 return mddev;
454                         }
455
456                 if (new) {
457                         list_add(&new->all_mddevs, &all_mddevs);
458                         spin_unlock(&all_mddevs_lock);
459                         new->hold_active = UNTIL_IOCTL;
460                         return new;
461                 }
462         } else if (new) {
463                 /* find an unused unit number */
464                 static int next_minor = 512;
465                 int start = next_minor;
466                 int is_free = 0;
467                 int dev = 0;
468                 while (!is_free) {
469                         dev = MKDEV(MD_MAJOR, next_minor);
470                         next_minor++;
471                         if (next_minor > MINORMASK)
472                                 next_minor = 0;
473                         if (next_minor == start) {
474                                 /* Oh dear, all in use. */
475                                 spin_unlock(&all_mddevs_lock);
476                                 kfree(new);
477                                 return NULL;
478                         }
479                                 
480                         is_free = 1;
481                         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
482                                 if (mddev->unit == dev) {
483                                         is_free = 0;
484                                         break;
485                                 }
486                 }
487                 new->unit = dev;
488                 new->md_minor = MINOR(dev);
489                 new->hold_active = UNTIL_STOP;
490                 list_add(&new->all_mddevs, &all_mddevs);
491                 spin_unlock(&all_mddevs_lock);
492                 return new;
493         }
494         spin_unlock(&all_mddevs_lock);
495
496         new = kzalloc(sizeof(*new), GFP_KERNEL);
497         if (!new)
498                 return NULL;
499
500         new->unit = unit;
501         if (MAJOR(unit) == MD_MAJOR)
502                 new->md_minor = MINOR(unit);
503         else
504                 new->md_minor = MINOR(unit) >> MdpMinorShift;
505
506         mddev_init(new);
507
508         goto retry;
509 }
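/*
 * Illustrative sketch (hypothetical caller): find-or-allocate plus the
 * matching put; md_probe/md_alloc use the same pattern.
 */
static void example_touch_array(dev_t dev)
{
        mddev_t *mddev = mddev_find(dev);

        if (!mddev)
                return;         /* allocation failed */
        /* ... the reference from mddev_find keeps the array alive ... */
        mddev_put(mddev);
}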
510
511 static inline int mddev_lock(mddev_t * mddev)
512 {
513         return mutex_lock_interruptible(&mddev->reconfig_mutex);
514 }
515
516 static inline int mddev_is_locked(mddev_t *mddev)
517 {
518         return mutex_is_locked(&mddev->reconfig_mutex);
519 }
520
521 static inline int mddev_trylock(mddev_t * mddev)
522 {
523         return mutex_trylock(&mddev->reconfig_mutex);
524 }
525
526 static struct attribute_group md_redundancy_group;
527
528 static void mddev_unlock(mddev_t * mddev)
529 {
530         if (mddev->to_remove) {
531                 /* These cannot be removed under reconfig_mutex as
532                  * an access to the files will try to take reconfig_mutex
533                  * while the access keeps the file pinned (unremovable),
534                  * which leads to a deadlock.
535                  * So set sysfs_active while the removal is happening,
536                  * and anything else which might set ->to_remove or may
537                  * otherwise change the sysfs namespace will fail with
538                  * -EBUSY if sysfs_active is still set.
539                  * We set sysfs_active under reconfig_mutex and elsewhere
540                  * test it under the same mutex to ensure its correct value
541                  * is seen.
542                  */
543                 struct attribute_group *to_remove = mddev->to_remove;
544                 mddev->to_remove = NULL;
545                 mddev->sysfs_active = 1;
546                 mutex_unlock(&mddev->reconfig_mutex);
547
548                 if (to_remove != &md_redundancy_group)
549                         sysfs_remove_group(&mddev->kobj, to_remove);
550                 if (mddev->pers == NULL ||
551                     mddev->pers->sync_request == NULL) {
552                         sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
553                         if (mddev->sysfs_action)
554                                 sysfs_put(mddev->sysfs_action);
555                         mddev->sysfs_action = NULL;
556                 }
557                 mddev->sysfs_active = 0;
558         } else
559                 mutex_unlock(&mddev->reconfig_mutex);
560
561         md_wakeup_thread(mddev->thread);
562 }
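/*
 * Illustrative sketch of the usual reconfiguration pattern (hypothetical
 * caller): any group queued on ->to_remove is actually removed inside
 * mddev_unlock(), after reconfig_mutex has been dropped.
 */
static int example_reconfig(mddev_t *mddev)
{
        int err = mddev_lock(mddev);

        if (err)
                return err;     /* interrupted while waiting for the mutex */
        /* ... change level/layout; this may set mddev->to_remove ... */
        mddev_unlock(mddev);    /* deferred sysfs removal happens in here */
        return 0;
}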
563
564 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
565 {
566         mdk_rdev_t *rdev;
567
568         list_for_each_entry(rdev, &mddev->disks, same_set)
569                 if (rdev->desc_nr == nr)
570                         return rdev;
571
572         return NULL;
573 }
574
575 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
576 {
577         mdk_rdev_t *rdev;
578
579         list_for_each_entry(rdev, &mddev->disks, same_set)
580                 if (rdev->bdev->bd_dev == dev)
581                         return rdev;
582
583         return NULL;
584 }
585
586 static struct mdk_personality *find_pers(int level, char *clevel)
587 {
588         struct mdk_personality *pers;
589         list_for_each_entry(pers, &pers_list, list) {
590                 if (level != LEVEL_NONE && pers->level == level)
591                         return pers;
592                 if (strcmp(pers->name, clevel)==0)
593                         return pers;
594         }
595         return NULL;
596 }
597
598 /* return the offset of the super block in 512-byte sectors */
599 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
600 {
601         sector_t num_sectors = bdev->bd_inode->i_size / 512;
602         return MD_NEW_SIZE_SECTORS(num_sectors);
603 }
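/*
 * Worked example (constants from md_p.h, where MD_RESERVED_SECTORS is
 * 128, i.e. 64KB): a 1000000-sector device gives
 * MD_NEW_SIZE_SECTORS(1000000) = (1000000 & ~127) - 128 = 999808,
 * so the 0.90 superblock occupies the last 64KB-aligned 64KB block.
 */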
604
605 static int alloc_disk_sb(mdk_rdev_t * rdev)
606 {
607         if (rdev->sb_page)
608                 MD_BUG();
609
610         rdev->sb_page = alloc_page(GFP_KERNEL);
611         if (!rdev->sb_page) {
612                 printk(KERN_ALERT "md: out of memory.\n");
613                 return -ENOMEM;
614         }
615
616         return 0;
617 }
618
619 static void free_disk_sb(mdk_rdev_t * rdev)
620 {
621         if (rdev->sb_page) {
622                 put_page(rdev->sb_page);
623                 rdev->sb_loaded = 0;
624                 rdev->sb_page = NULL;
625                 rdev->sb_start = 0;
626                 rdev->sectors = 0;
627         }
628 }
629
630
631 static void super_written(struct bio *bio, int error)
632 {
633         mdk_rdev_t *rdev = bio->bi_private;
634         mddev_t *mddev = rdev->mddev;
635
636         if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
637                 printk("md: super_written gets error=%d, uptodate=%d\n",
638                        error, test_bit(BIO_UPTODATE, &bio->bi_flags));
639                 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
640                 md_error(mddev, rdev);
641         }
642
643         if (atomic_dec_and_test(&mddev->pending_writes))
644                 wake_up(&mddev->sb_wait);
645         bio_put(bio);
646 }
647
648 static void super_written_barrier(struct bio *bio, int error)
649 {
650         struct bio *bio2 = bio->bi_private;
651         mdk_rdev_t *rdev = bio2->bi_private;
652         mddev_t *mddev = rdev->mddev;
653
654         if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
655             error == -EOPNOTSUPP) {
656                 unsigned long flags;
657                 /* barriers don't appear to be supported :-( */
658                 set_bit(BarriersNotsupp, &rdev->flags);
659                 mddev->barriers_work = 0;
660                 spin_lock_irqsave(&mddev->write_lock, flags);
661                 bio2->bi_next = mddev->biolist;
662                 mddev->biolist = bio2;
663                 spin_unlock_irqrestore(&mddev->write_lock, flags);
664                 wake_up(&mddev->sb_wait);
665                 bio_put(bio);
666         } else {
667                 bio_put(bio2);
668                 bio->bi_private = rdev;
669                 super_written(bio, error);
670         }
671 }
672
673 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
674                    sector_t sector, int size, struct page *page)
675 {
676         /* write first size bytes of page to sector of rdev
677          * Increment mddev->pending_writes before returning
678          * and decrement it on completion, waking up sb_wait
679          * if zero is reached.
680          * If an error occurred, call md_error
681          *
682          * As we might need to resubmit the request if BIO_RW_BARRIER
683          * causes ENOTSUPP, we allocate a spare bio...
684          */
685         struct bio *bio = bio_alloc(GFP_NOIO, 1);
686         int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
687
688         bio->bi_bdev = rdev->bdev;
689         bio->bi_sector = sector;
690         bio_add_page(bio, page, size, 0);
691         bio->bi_private = rdev;
692         bio->bi_end_io = super_written;
693         bio->bi_rw = rw;
694
695         atomic_inc(&mddev->pending_writes);
696         if (!test_bit(BarriersNotsupp, &rdev->flags)) {
697                 struct bio *rbio;
698                 rw |= (1<<BIO_RW_BARRIER);
699                 rbio = bio_clone(bio, GFP_NOIO);
700                 rbio->bi_private = bio;
701                 rbio->bi_end_io = super_written_barrier;
702                 submit_bio(rw, rbio);
703         } else
704                 submit_bio(rw, bio);
705 }
706
707 void md_super_wait(mddev_t *mddev)
708 {
709         /* wait for all superblock writes that were scheduled to complete.
710          * if any had to be retried (due to BARRIER problems), retry them
711          */
712         DEFINE_WAIT(wq);
713         for(;;) {
714                 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
715                 if (atomic_read(&mddev->pending_writes)==0)
716                         break;
717                 while (mddev->biolist) {
718                         struct bio *bio;
719                         spin_lock_irq(&mddev->write_lock);
720                         bio = mddev->biolist;
721                         mddev->biolist = bio->bi_next ;
722                         bio->bi_next = NULL;
723                         spin_unlock_irq(&mddev->write_lock);
724                         submit_bio(bio->bi_rw, bio);
725                 }
726                 schedule();
727         }
728         finish_wait(&mddev->sb_wait, &wq);
729 }
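/*
 * Illustrative sketch: the usual pairing of the two calls above
 * (super_90_rdev_size_change below does exactly this):
 *
 *      md_super_write(mddev, rdev, rdev->sb_start, rdev->sb_size,
 *                     rdev->sb_page);
 *      md_super_wait(mddev);     blocks until the write, and any
 *                                barrier retry, has completed
 */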
730
731 static void bi_complete(struct bio *bio, int error)
732 {
733         complete((struct completion*)bio->bi_private);
734 }
735
736 int sync_page_io(struct block_device *bdev, sector_t sector, int size,
737                    struct page *page, int rw)
738 {
739         struct bio *bio = bio_alloc(GFP_NOIO, 1);
740         struct completion event;
741         int ret;
742
743         rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
744
745         bio->bi_bdev = bdev;
746         bio->bi_sector = sector;
747         bio_add_page(bio, page, size, 0);
748         init_completion(&event);
749         bio->bi_private = &event;
750         bio->bi_end_io = bi_complete;
751         submit_bio(rw, bio);
752         wait_for_completion(&event);
753
754         ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
755         bio_put(bio);
756         return ret;
757 }
758 EXPORT_SYMBOL_GPL(sync_page_io);
759
760 static int read_disk_sb(mdk_rdev_t * rdev, int size)
761 {
762         char b[BDEVNAME_SIZE];
763         if (!rdev->sb_page) {
764                 MD_BUG();
765                 return -EINVAL;
766         }
767         if (rdev->sb_loaded)
768                 return 0;
769
770
771         if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
772                 goto fail;
773         rdev->sb_loaded = 1;
774         return 0;
775
776 fail:
777         printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
778                 bdevname(rdev->bdev,b));
779         return -EINVAL;
780 }
781
782 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
783 {
784         return  sb1->set_uuid0 == sb2->set_uuid0 &&
785                 sb1->set_uuid1 == sb2->set_uuid1 &&
786                 sb1->set_uuid2 == sb2->set_uuid2 &&
787                 sb1->set_uuid3 == sb2->set_uuid3;
788 }
789
790 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
791 {
792         int ret;
793         mdp_super_t *tmp1, *tmp2;
794
795         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
796         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
797
798         if (!tmp1 || !tmp2) {
799                 ret = 0;
800                 printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
801                 goto abort;
802         }
803
804         *tmp1 = *sb1;
805         *tmp2 = *sb2;
806
807         /*
808          * nr_disks is not constant
809          */
810         tmp1->nr_disks = 0;
811         tmp2->nr_disks = 0;
812
813         ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
814 abort:
815         kfree(tmp1);
816         kfree(tmp2);
817         return ret;
818 }
819
820
821 static u32 md_csum_fold(u32 csum)
822 {
823         csum = (csum & 0xffff) + (csum >> 16);
824         return (csum & 0xffff) + (csum >> 16);
825 }
826
827 static unsigned int calc_sb_csum(mdp_super_t * sb)
828 {
829         u64 newcsum = 0;
830         u32 *sb32 = (u32*)sb;
831         int i;
832         unsigned int disk_csum, csum;
833
834         disk_csum = sb->sb_csum;
835         sb->sb_csum = 0;
836
837         for (i = 0; i < MD_SB_BYTES/4 ; i++)
838                 newcsum += sb32[i];
839         csum = (newcsum & 0xffffffff) + (newcsum>>32);
840
841
842 #ifdef CONFIG_ALPHA
843         /* This used to use csum_partial, which was wrong for several
844          * reasons including that different results are returned on
845          * different architectures.  It isn't critical that we get exactly
846          * the same return value as before (we always csum_fold before
847          * testing, and that removes any differences).  However as we
848          * know that csum_partial always returned a 16bit value on
849          * alphas, do a fold to maximise conformity to previous behaviour.
850          */
851         sb->sb_csum = md_csum_fold(disk_csum);
852 #else
853         sb->sb_csum = disk_csum;
854 #endif
855         return csum;
856 }
857
858
859 /*
860  * Handle superblock details.
861  * We want to be able to handle multiple superblock formats
862  * so we have a common interface to them all, and an array of
863  * different handlers.
864  * We rely on user-space to write the initial superblock, and support
865  * reading and updating of superblocks.
866  * Interface methods are:
867  *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
868  *      loads and validates a superblock on dev.
869  *      if refdev != NULL, compare superblocks on both devices
870  *    Return:
871  *      0 - dev has a superblock that is compatible with refdev
872  *      1 - dev has a superblock that is compatible and newer than refdev
873  *          so dev should be used as the refdev in future
874  *     -EINVAL superblock incompatible or invalid
875  *     -othererror e.g. -EIO
876  *
877  *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
878  *      Verify that dev is acceptable into mddev.
879  *       The first time, mddev->raid_disks will be 0, and data from
880  *       dev should be merged in.  Subsequent calls check that dev
881  *       is new enough.  Return 0 or -EINVAL
882  *
883  *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
884  *     Update the superblock for rdev with data in mddev
885  *     This does not write to disc.
886  *
887  */
888
889 struct super_type  {
890         char                *name;
891         struct module       *owner;
892         int                 (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
893                                           int minor_version);
894         int                 (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
895         void                (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
896         unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
897                                                 sector_t num_sectors);
898 };
899
900 /*
901  * Check that the given mddev has no bitmap.
902  *
903  * This function is called from the run method of all personalities that do not
904  * support bitmaps. It prints an error message and returns non-zero if mddev
905  * has a bitmap. Otherwise, it returns 0.
906  *
907  */
908 int md_check_no_bitmap(mddev_t *mddev)
909 {
910         if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
911                 return 0;
912         printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
913                 mdname(mddev), mddev->pers->name);
914         return 1;
915 }
916 EXPORT_SYMBOL(md_check_no_bitmap);
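/*
 * Illustrative sketch: the typical call from a personality's run method
 * ('example_run' is a hypothetical name; raid0 and linear do this).
 */
static int example_run(mddev_t *mddev)
{
        if (md_check_no_bitmap(mddev))
                return -EINVAL;
        /* ... personality-specific setup ... */
        return 0;
}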
917
918 /*
919  * load_super for 0.90.0 
920  */
921 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
922 {
923         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
924         mdp_super_t *sb;
925         int ret;
926
927         /*
928          * Calculate the position of the superblock (512-byte sectors),
929          * it's at the end of the disk.
930          *
931          * It also happens to be a multiple of 4Kb.
932          */
933         rdev->sb_start = calc_dev_sboffset(rdev->bdev);
934
935         ret = read_disk_sb(rdev, MD_SB_BYTES);
936         if (ret) return ret;
937
938         ret = -EINVAL;
939
940         bdevname(rdev->bdev, b);
941         sb = (mdp_super_t*)page_address(rdev->sb_page);
942
943         if (sb->md_magic != MD_SB_MAGIC) {
944                 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
945                        b);
946                 goto abort;
947         }
948
949         if (sb->major_version != 0 ||
950             sb->minor_version < 90 ||
951             sb->minor_version > 91) {
952                 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
953                         sb->major_version, sb->minor_version,
954                         b);
955                 goto abort;
956         }
957
958         if (sb->raid_disks <= 0)
959                 goto abort;
960
961         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
962                 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
963                         b);
964                 goto abort;
965         }
966
967         rdev->preferred_minor = sb->md_minor;
968         rdev->data_offset = 0;
969         rdev->sb_size = MD_SB_BYTES;
970
971         if (sb->level == LEVEL_MULTIPATH)
972                 rdev->desc_nr = -1;
973         else
974                 rdev->desc_nr = sb->this_disk.number;
975
976         if (!refdev) {
977                 ret = 1;
978         } else {
979                 __u64 ev1, ev2;
980                 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
981                 if (!uuid_equal(refsb, sb)) {
982                         printk(KERN_WARNING "md: %s has different UUID to %s\n",
983                                 b, bdevname(refdev->bdev,b2));
984                         goto abort;
985                 }
986                 if (!sb_equal(refsb, sb)) {
987                         printk(KERN_WARNING "md: %s has same UUID"
988                                " but different superblock to %s\n",
989                                b, bdevname(refdev->bdev, b2));
990                         goto abort;
991                 }
992                 ev1 = md_event(sb);
993                 ev2 = md_event(refsb);
994                 if (ev1 > ev2)
995                         ret = 1;
996                 else 
997                         ret = 0;
998         }
999         rdev->sectors = rdev->sb_start;
1000
1001         if (rdev->sectors < sb->size * 2 && sb->level > 1)
1002                 /* "this cannot possibly happen" ... */
1003                 ret = -EINVAL;
1004
1005  abort:
1006         return ret;
1007 }
1008
1009 /*
1010  * validate_super for 0.90.0
1011  */
1012 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1013 {
1014         mdp_disk_t *desc;
1015         mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
1016         __u64 ev1 = md_event(sb);
1017
1018         rdev->raid_disk = -1;
1019         clear_bit(Faulty, &rdev->flags);
1020         clear_bit(In_sync, &rdev->flags);
1021         clear_bit(WriteMostly, &rdev->flags);
1022         clear_bit(BarriersNotsupp, &rdev->flags);
1023
1024         if (mddev->raid_disks == 0) {
1025                 mddev->major_version = 0;
1026                 mddev->minor_version = sb->minor_version;
1027                 mddev->patch_version = sb->patch_version;
1028                 mddev->external = 0;
1029                 mddev->chunk_sectors = sb->chunk_size >> 9;
1030                 mddev->ctime = sb->ctime;
1031                 mddev->utime = sb->utime;
1032                 mddev->level = sb->level;
1033                 mddev->clevel[0] = 0;
1034                 mddev->layout = sb->layout;
1035                 mddev->raid_disks = sb->raid_disks;
1036                 mddev->dev_sectors = sb->size * 2;
1037                 mddev->events = ev1;
1038                 mddev->bitmap_info.offset = 0;
1039                 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1040
1041                 if (mddev->minor_version >= 91) {
1042                         mddev->reshape_position = sb->reshape_position;
1043                         mddev->delta_disks = sb->delta_disks;
1044                         mddev->new_level = sb->new_level;
1045                         mddev->new_layout = sb->new_layout;
1046                         mddev->new_chunk_sectors = sb->new_chunk >> 9;
1047                 } else {
1048                         mddev->reshape_position = MaxSector;
1049                         mddev->delta_disks = 0;
1050                         mddev->new_level = mddev->level;
1051                         mddev->new_layout = mddev->layout;
1052                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1053                 }
1054
1055                 if (sb->state & (1<<MD_SB_CLEAN))
1056                         mddev->recovery_cp = MaxSector;
1057                 else {
1058                         if (sb->events_hi == sb->cp_events_hi && 
1059                                 sb->events_lo == sb->cp_events_lo) {
1060                                 mddev->recovery_cp = sb->recovery_cp;
1061                         } else
1062                                 mddev->recovery_cp = 0;
1063                 }
1064
1065                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1066                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1067                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1068                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1069
1070                 mddev->max_disks = MD_SB_DISKS;
1071
1072                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1073                     mddev->bitmap_info.file == NULL)
1074                         mddev->bitmap_info.offset =
1075                                 mddev->bitmap_info.default_offset;
1076
1077         } else if (mddev->pers == NULL) {
1078                 /* Insist on a good event counter while assembling, except
1079                  * for spares (which don't need an event count) */
1080                 ++ev1;
1081                 if (sb->disks[rdev->desc_nr].state & (
1082                             (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1083                         if (ev1 < mddev->events) 
1084                                 return -EINVAL;
1085         } else if (mddev->bitmap) {
1086                 /* if adding to array with a bitmap, then we can accept an
1087                  * older device ... but not too old.
1088                  */
1089                 if (ev1 < mddev->bitmap->events_cleared)
1090                         return 0;
1091         } else {
1092                 if (ev1 < mddev->events)
1093                         /* just a hot-add of a new device, leave raid_disk at -1 */
1094                         return 0;
1095         }
1096
1097         if (mddev->level != LEVEL_MULTIPATH) {
1098                 desc = sb->disks + rdev->desc_nr;
1099
1100                 if (desc->state & (1<<MD_DISK_FAULTY))
1101                         set_bit(Faulty, &rdev->flags);
1102                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1103                             desc->raid_disk < mddev->raid_disks */) {
1104                         set_bit(In_sync, &rdev->flags);
1105                         rdev->raid_disk = desc->raid_disk;
1106                 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1107                         /* active but not in sync implies recovery up to
1108                          * reshape position.  We don't know exactly where
1109                          * that is, so set to zero for now */
1110                         if (mddev->minor_version >= 91) {
1111                                 rdev->recovery_offset = 0;
1112                                 rdev->raid_disk = desc->raid_disk;
1113                         }
1114                 }
1115                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1116                         set_bit(WriteMostly, &rdev->flags);
1117         } else /* MULTIPATH devices are always in_sync */
1118                 set_bit(In_sync, &rdev->flags);
1119         return 0;
1120 }
1121
1122 /*
1123  * sync_super for 0.90.0
1124  */
1125 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1126 {
1127         mdp_super_t *sb;
1128         mdk_rdev_t *rdev2;
1129         int next_spare = mddev->raid_disks;
1130
1131
1132         /* make rdev->sb match mddev data..
1133          *
1134          * 1/ zero out disks
1135          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1136          * 3/ any empty disks < next_spare become removed
1137          *
1138          * disks[0] gets initialised to REMOVED because
1139          * we cannot be sure from other fields if it has
1140          * been initialised or not.
1141          */
1142         int i;
1143         int active=0, working=0,failed=0,spare=0,nr_disks=0;
1144
1145         rdev->sb_size = MD_SB_BYTES;
1146
1147         sb = (mdp_super_t*)page_address(rdev->sb_page);
1148
1149         memset(sb, 0, sizeof(*sb));
1150
1151         sb->md_magic = MD_SB_MAGIC;
1152         sb->major_version = mddev->major_version;
1153         sb->patch_version = mddev->patch_version;
1154         sb->gvalid_words  = 0; /* ignored */
1155         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1156         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1157         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1158         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1159
1160         sb->ctime = mddev->ctime;
1161         sb->level = mddev->level;
1162         sb->size = mddev->dev_sectors / 2;
1163         sb->raid_disks = mddev->raid_disks;
1164         sb->md_minor = mddev->md_minor;
1165         sb->not_persistent = 0;
1166         sb->utime = mddev->utime;
1167         sb->state = 0;
1168         sb->events_hi = (mddev->events>>32);
1169         sb->events_lo = (u32)mddev->events;
1170
1171         if (mddev->reshape_position == MaxSector)
1172                 sb->minor_version = 90;
1173         else {
1174                 sb->minor_version = 91;
1175                 sb->reshape_position = mddev->reshape_position;
1176                 sb->new_level = mddev->new_level;
1177                 sb->delta_disks = mddev->delta_disks;
1178                 sb->new_layout = mddev->new_layout;
1179                 sb->new_chunk = mddev->new_chunk_sectors << 9;
1180         }
1181         mddev->minor_version = sb->minor_version;
1182         if (mddev->in_sync)
1183         {
1184                 sb->recovery_cp = mddev->recovery_cp;
1185                 sb->cp_events_hi = (mddev->events>>32);
1186                 sb->cp_events_lo = (u32)mddev->events;
1187                 if (mddev->recovery_cp == MaxSector)
1188                         sb->state = (1<< MD_SB_CLEAN);
1189         } else
1190                 sb->recovery_cp = 0;
1191
1192         sb->layout = mddev->layout;
1193         sb->chunk_size = mddev->chunk_sectors << 9;
1194
1195         if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1196                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1197
1198         sb->disks[0].state = (1<<MD_DISK_REMOVED);
1199         list_for_each_entry(rdev2, &mddev->disks, same_set) {
1200                 mdp_disk_t *d;
1201                 int desc_nr;
1202                 int is_active = test_bit(In_sync, &rdev2->flags);
1203
1204                 if (rdev2->raid_disk >= 0 &&
1205                     sb->minor_version >= 91)
1206                         /* we have nowhere to store the recovery_offset,
1207                          * but if it is not below the reshape_position,
1208                          * we can piggy-back on that.
1209                          */
1210                         is_active = 1;
1211                 if (rdev2->raid_disk < 0 ||
1212                     test_bit(Faulty, &rdev2->flags))
1213                         is_active = 0;
1214                 if (is_active)
1215                         desc_nr = rdev2->raid_disk;
1216                 else
1217                         desc_nr = next_spare++;
1218                 rdev2->desc_nr = desc_nr;
1219                 d = &sb->disks[rdev2->desc_nr];
1220                 nr_disks++;
1221                 d->number = rdev2->desc_nr;
1222                 d->major = MAJOR(rdev2->bdev->bd_dev);
1223                 d->minor = MINOR(rdev2->bdev->bd_dev);
1224                 if (is_active)
1225                         d->raid_disk = rdev2->raid_disk;
1226                 else
1227                         d->raid_disk = rdev2->desc_nr; /* compatibility */
1228                 if (test_bit(Faulty, &rdev2->flags))
1229                         d->state = (1<<MD_DISK_FAULTY);
1230                 else if (is_active) {
1231                         d->state = (1<<MD_DISK_ACTIVE);
1232                         if (test_bit(In_sync, &rdev2->flags))
1233                                 d->state |= (1<<MD_DISK_SYNC);
1234                         active++;
1235                         working++;
1236                 } else {
1237                         d->state = 0;
1238                         spare++;
1239                         working++;
1240                 }
1241                 if (test_bit(WriteMostly, &rdev2->flags))
1242                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
1243         }
1244         /* now set the "removed" and "faulty" bits on any missing devices */
1245         for (i=0 ; i < mddev->raid_disks ; i++) {
1246                 mdp_disk_t *d = &sb->disks[i];
1247                 if (d->state == 0 && d->number == 0) {
1248                         d->number = i;
1249                         d->raid_disk = i;
1250                         d->state = (1<<MD_DISK_REMOVED);
1251                         d->state |= (1<<MD_DISK_FAULTY);
1252                         failed++;
1253                 }
1254         }
1255         sb->nr_disks = nr_disks;
1256         sb->active_disks = active;
1257         sb->working_disks = working;
1258         sb->failed_disks = failed;
1259         sb->spare_disks = spare;
1260
1261         sb->this_disk = sb->disks[rdev->desc_nr];
1262         sb->sb_csum = calc_sb_csum(sb);
1263 }
1264
1265 /*
1266  * rdev_size_change for 0.90.0
1267  */
1268 static unsigned long long
1269 super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1270 {
1271         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1272                 return 0; /* component must fit device */
1273         if (rdev->mddev->bitmap_info.offset)
1274                 return 0; /* can't move bitmap */
1275         rdev->sb_start = calc_dev_sboffset(rdev->bdev);
1276         if (!num_sectors || num_sectors > rdev->sb_start)
1277                 num_sectors = rdev->sb_start;
1278         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1279                        rdev->sb_page);
1280         md_super_wait(rdev->mddev);
1281         return num_sectors / 2; /* kB for sysfs */
1282 }
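/*
 * Illustrative sketch: how the four 0.90.0 handlers above plug into the
 * struct super_type interface (the real super_types[] table appears
 * later in the file; 'example_super_types' is a hypothetical name).
 */
static struct super_type example_super_types[] = {
        {
                .name             = "0.90.0",
                .owner            = THIS_MODULE,
                .load_super       = super_90_load,
                .validate_super   = super_90_validate,
                .sync_super       = super_90_sync,
                .rdev_size_change = super_90_rdev_size_change,
        },
};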
1283
1284
1285 /*
1286  * version 1 superblock
1287  */
1288
1289 static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
1290 {
1291         __le32 disk_csum;
1292         u32 csum;
1293         unsigned long long newcsum;
1294         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1295         __le32 *isuper = (__le32*)sb;
1296         int i;
1297
1298         disk_csum = sb->sb_csum;
1299         sb->sb_csum = 0;
1300         newcsum = 0;
1301         for (i=0; size>=4; size -= 4 )
1302                 newcsum += le32_to_cpu(*isuper++);
1303
1304         if (size == 2)
1305                 newcsum += le16_to_cpu(*(__le16*) isuper);
1306
1307         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1308         sb->sb_csum = disk_csum;
1309         return cpu_to_le32(csum);
1310 }
1311
1312 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1313 {
1314         struct mdp_superblock_1 *sb;
1315         int ret;
1316         sector_t sb_start;
1317         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1318         int bmask;
1319
1320         /*
1321          * Calculate the position of the superblock in 512-byte sectors.
1322          * It is always aligned to a 4K boundary and
1323          * depending on minor_version, it can be:
1324          * 0: At least 8K, but less than 12K, from end of device
1325          * 1: At start of device
1326          * 2: 4K from start of device.
1327          */
1328         switch(minor_version) {
1329         case 0:
1330                 sb_start = rdev->bdev->bd_inode->i_size >> 9;
1331                 sb_start -= 8*2;
1332                 sb_start &= ~(sector_t)(4*2-1);
1333                 break;
1334         case 1:
1335                 sb_start = 0;
1336                 break;
1337         case 2:
1338                 sb_start = 8;
1339                 break;
1340         default:
1341                 return -EINVAL;
1342         }
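        /*
         * Worked example for minor_version 0: a 1000000-sector device
         * gives sb_start = (1000000 - 16) & ~7 = 999984, i.e. 8KB below
         * the end of the device and 4KB-aligned.
         */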
1343         rdev->sb_start = sb_start;
1344
1345         /* superblock is rarely larger than 1K, but it can be larger,
1346          * and it is safe to read 4k, so we do that
1347          */
1348         ret = read_disk_sb(rdev, 4096);
1349         if (ret) return ret;
1350
1351
1352         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1353
1354         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1355             sb->major_version != cpu_to_le32(1) ||
1356             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1357             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1358             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1359                 return -EINVAL;
1360
1361         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1362                 printk("md: invalid superblock checksum on %s\n",
1363                         bdevname(rdev->bdev,b));
1364                 return -EINVAL;
1365         }
1366         if (le64_to_cpu(sb->data_size) < 10) {
1367                 printk("md: data_size too small on %s\n",
1368                        bdevname(rdev->bdev,b));
1369                 return -EINVAL;
1370         }
1371
1372         rdev->preferred_minor = 0xffff;
1373         rdev->data_offset = le64_to_cpu(sb->data_offset);
1374         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1375
1376         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1377         bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1378         if (rdev->sb_size & bmask)
1379                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1380
1381         if (minor_version
1382             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1383                 return -EINVAL;
1384
1385         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1386                 rdev->desc_nr = -1;
1387         else
1388                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1389
1390         if (!refdev) {
1391                 ret = 1;
1392         } else {
1393                 __u64 ev1, ev2;
1394                 struct mdp_superblock_1 *refsb = 
1395                         (struct mdp_superblock_1*)page_address(refdev->sb_page);
1396
1397                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1398                     sb->level != refsb->level ||
1399                     sb->layout != refsb->layout ||
1400                     sb->chunksize != refsb->chunksize) {
1401                         printk(KERN_WARNING "md: %s has strangely different"
1402                                 " superblock to %s\n",
1403                                 bdevname(rdev->bdev,b),
1404                                 bdevname(refdev->bdev,b2));
1405                         return -EINVAL;
1406                 }
1407                 ev1 = le64_to_cpu(sb->events);
1408                 ev2 = le64_to_cpu(refsb->events);
1409
1410                 if (ev1 > ev2)
1411                         ret = 1;
1412                 else
1413                         ret = 0;
1414         }
1415         if (minor_version)
1416                 rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
1417                         le64_to_cpu(sb->data_offset);
1418         else
1419                 rdev->sectors = rdev->sb_start;
1420         if (rdev->sectors < le64_to_cpu(sb->data_size))
1421                 return -EINVAL;
1422         rdev->sectors = le64_to_cpu(sb->data_size);
1423         if (le64_to_cpu(sb->size) > rdev->sectors)
1424                 return -EINVAL;
1425         return ret;
1426 }
1427
1428 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1429 {
1430         struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1431         __u64 ev1 = le64_to_cpu(sb->events);
1432
1433         rdev->raid_disk = -1;
1434         clear_bit(Faulty, &rdev->flags);
1435         clear_bit(In_sync, &rdev->flags);
1436         clear_bit(WriteMostly, &rdev->flags);
1437         clear_bit(BarriersNotsupp, &rdev->flags);
1438
1439         if (mddev->raid_disks == 0) {
1440                 mddev->major_version = 1;
1441                 mddev->patch_version = 0;
1442                 mddev->external = 0;
1443                 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1444                 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1445                 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1446                 mddev->level = le32_to_cpu(sb->level);
1447                 mddev->clevel[0] = 0;
1448                 mddev->layout = le32_to_cpu(sb->layout);
1449                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1450                 mddev->dev_sectors = le64_to_cpu(sb->size);
1451                 mddev->events = ev1;
1452                 mddev->bitmap_info.offset = 0;
1453                 mddev->bitmap_info.default_offset = 1024 >> 9;
1454                 
1455                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1456                 memcpy(mddev->uuid, sb->set_uuid, 16);
1457
1458                 mddev->max_disks =  (4096-256)/2;
1459
1460                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1461                     mddev->bitmap_info.file == NULL )
1462                         mddev->bitmap_info.offset =
1463                                 (__s32)le32_to_cpu(sb->bitmap_offset);
1464
1465                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1466                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1467                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1468                         mddev->new_level = le32_to_cpu(sb->new_level);
1469                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1470                         mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1471                 } else {
1472                         mddev->reshape_position = MaxSector;
1473                         mddev->delta_disks = 0;
1474                         mddev->new_level = mddev->level;
1475                         mddev->new_layout = mddev->layout;
1476                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1477                 }
1478
1479         } else if (mddev->pers == NULL) {
1480                 /* Insist on a good event counter while assembling, except for
1481                  * spares (which don't need an event count) */
1482                 ++ev1;
1483                 if (rdev->desc_nr >= 0 &&
1484                     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1485                     le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
1486                         if (ev1 < mddev->events)
1487                                 return -EINVAL;
1488         } else if (mddev->bitmap) {
1489                 /* If adding to array with a bitmap, then we can accept an
1490                  * older device, but not too old.
1491                  */
1492                 if (ev1 < mddev->bitmap->events_cleared)
1493                         return 0;
1494         } else {
1495                 if (ev1 < mddev->events)
1496                         /* just a hot-add of a new device, leave raid_disk at -1 */
1497                         return 0;
1498         }
1499         if (mddev->level != LEVEL_MULTIPATH) {
1500                 int role;
1501                 if (rdev->desc_nr < 0 ||
1502                     rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1503                         role = 0xffff;
1504                         rdev->desc_nr = -1;
1505                 } else
1506                         role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1507                 switch(role) {
1508                 case 0xffff: /* spare */
1509                         break;
1510                 case 0xfffe: /* faulty */
1511                         set_bit(Faulty, &rdev->flags);
1512                         break;
1513                 default:
1514                         if ((le32_to_cpu(sb->feature_map) &
1515                              MD_FEATURE_RECOVERY_OFFSET))
1516                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1517                         else
1518                                 set_bit(In_sync, &rdev->flags);
1519                         rdev->raid_disk = role;
1520                         break;
1521                 }
1522                 if (sb->devflags & WriteMostly1)
1523                         set_bit(WriteMostly, &rdev->flags);
1524         } else /* MULTIPATH are always insync */
1525                 set_bit(In_sync, &rdev->flags);
1526
1527         return 0;
1528 }
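/*
 * Editor's note (illustrative, hypothetical superblock contents): a
 * device's role is looked up in sb->dev_roles[] by its desc_nr, with two
 * magic values reserved:
 *
 *      dev_roles[desc_nr] == 0xffff  =>  spare   (raid_disk stays -1)
 *      dev_roles[desc_nr] == 0xfffe  =>  faulty  (Faulty flag set)
 *      dev_roles[3]       == 2       =>  this device is raid slot 2, and
 *                                        is In_sync unless the superblock
 *                                        carries MD_FEATURE_RECOVERY_OFFSET
 */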
1529
1530 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1531 {
1532         struct mdp_superblock_1 *sb;
1533         mdk_rdev_t *rdev2;
1534         int max_dev, i;
1535         /* make rdev->sb match mddev and rdev data. */
1536
1537         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1538
1539         sb->feature_map = 0;
1540         sb->pad0 = 0;
1541         sb->recovery_offset = cpu_to_le64(0);
1542         memset(sb->pad1, 0, sizeof(sb->pad1));
1543         memset(sb->pad2, 0, sizeof(sb->pad2));
1544         memset(sb->pad3, 0, sizeof(sb->pad3));
1545
1546         sb->utime = cpu_to_le64((__u64)mddev->utime);
1547         sb->events = cpu_to_le64(mddev->events);
1548         if (mddev->in_sync)
1549                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1550         else
1551                 sb->resync_offset = cpu_to_le64(0);
1552
1553         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1554
1555         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1556         sb->size = cpu_to_le64(mddev->dev_sectors);
1557         sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1558         sb->level = cpu_to_le32(mddev->level);
1559         sb->layout = cpu_to_le32(mddev->layout);
1560
1561         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1562                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1563                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1564         }
1565
1566         if (rdev->raid_disk >= 0 &&
1567             !test_bit(In_sync, &rdev->flags)) {
1568                 sb->feature_map |=
1569                         cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1570                 sb->recovery_offset =
1571                         cpu_to_le64(rdev->recovery_offset);
1572         }
1573
1574         if (mddev->reshape_position != MaxSector) {
1575                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1576                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1577                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1578                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1579                 sb->new_level = cpu_to_le32(mddev->new_level);
1580                 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1581         }
1582
1583         max_dev = 0;
1584         list_for_each_entry(rdev2, &mddev->disks, same_set)
1585                 if (rdev2->desc_nr+1 > max_dev)
1586                         max_dev = rdev2->desc_nr+1;
1587
1588         if (max_dev > le32_to_cpu(sb->max_dev)) {
1589                 int bmask;
1590                 sb->max_dev = cpu_to_le32(max_dev);
1591                 rdev->sb_size = max_dev * 2 + 256;
1592                 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1593                 if (rdev->sb_size & bmask)
1594                         rdev->sb_size = (rdev->sb_size | bmask) + 1;
1595         }
1596         for (i = 0; i < max_dev; i++)
1597                 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1598
1599         list_for_each_entry(rdev2, &mddev->disks, same_set) {
1600                 i = rdev2->desc_nr;
1601                 if (test_bit(Faulty, &rdev2->flags))
1602                         sb->dev_roles[i] = cpu_to_le16(0xfffe);
1603                 else if (test_bit(In_sync, &rdev2->flags))
1604                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1605                 else if (rdev2->raid_disk >= 0)
1606                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1607                 else
1608                         sb->dev_roles[i] = cpu_to_le16(0xffff);
1609         }
1610
1611         sb->sb_csum = calc_sb_1_csum(sb);
1612 }
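/*
 * Worked example for the sb_size rounding above (hypothetical numbers):
 * with max_dev = 384, sb_size = 384 * 2 + 256 = 1024 bytes.  On a device
 * with 4096-byte logical blocks, bmask = 4095, so
 *
 *      sb_size = (1024 | 4095) + 1 = 4096
 *
 * i.e. the superblock is padded up to the next logical-block boundary.
 */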
1613
1614 static unsigned long long
1615 super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1616 {
1617         struct mdp_superblock_1 *sb;
1618         sector_t max_sectors;
1619         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1620                 return 0; /* component must fit device */
1621         if (rdev->sb_start < rdev->data_offset) {
1622                 /* minor versions 1 and 2; superblock before data */
1623                 max_sectors = rdev->bdev->bd_inode->i_size >> 9;
1624                 max_sectors -= rdev->data_offset;
1625                 if (!num_sectors || num_sectors > max_sectors)
1626                         num_sectors = max_sectors;
1627         } else if (rdev->mddev->bitmap_info.offset) {
1628                 /* minor version 0 with bitmap we can't move */
1629                 return 0;
1630         } else {
1631                 /* minor version 0; superblock after data */
1632                 sector_t sb_start;
1633                 sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
1634                 sb_start &= ~(sector_t)(4*2 - 1);
1635                 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1636                 if (!num_sectors || num_sectors > max_sectors)
1637                         num_sectors = max_sectors;
1638                 rdev->sb_start = sb_start;
1639         }
1640         sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
1641         sb->data_size = cpu_to_le64(num_sectors);
1642         sb->super_offset = rdev->sb_start;
1643         sb->sb_csum = calc_sb_1_csum(sb);
1644         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1645                        rdev->sb_page);
1646         md_super_wait(rdev->mddev);
1647         return num_sectors / 2; /* kB for sysfs */
1648 }
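/*
 * Worked example for the minor-version-0 branch above (hypothetical
 * device of 1000005 512-byte sectors): the superblock sits near the end
 * of the device, aligned down to an 8-sector (4K) boundary:
 *
 *      sb_start = 1000005 - 8*2 = 999989
 *      sb_start &= ~7           = 999984
 *
 * and the value returned to sysfs is num_sectors / 2, i.e. kilobytes.
 */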
1649
1650 static struct super_type super_types[] = {
1651         [0] = {
1652                 .name   = "0.90.0",
1653                 .owner  = THIS_MODULE,
1654                 .load_super         = super_90_load,
1655                 .validate_super     = super_90_validate,
1656                 .sync_super         = super_90_sync,
1657                 .rdev_size_change   = super_90_rdev_size_change,
1658         },
1659         [1] = {
1660                 .name   = "md-1",
1661                 .owner  = THIS_MODULE,
1662                 .load_super         = super_1_load,
1663                 .validate_super     = super_1_validate,
1664                 .sync_super         = super_1_sync,
1665                 .rdev_size_change   = super_1_rdev_size_change,
1666         },
1667 };
1668
1669 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1670 {
1671         mdk_rdev_t *rdev, *rdev2;
1672
1673         rcu_read_lock();
1674         rdev_for_each_rcu(rdev, mddev1)
1675                 rdev_for_each_rcu(rdev2, mddev2)
1676                         if (rdev->bdev->bd_contains ==
1677                             rdev2->bdev->bd_contains) {
1678                                 rcu_read_unlock();
1679                                 return 1;
1680                         }
1681         rcu_read_unlock();
1682         return 0;
1683 }
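/*
 * Editor's note (illustrative, hypothetical device names): two arrays
 * "match" here when any of their members share the same underlying whole
 * disk.  E.g. if md0 contains /dev/sda1 and md1 contains /dev/sda2, both
 * rdevs have bd_contains == sda, so match_mddev_units() returns 1.
 */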
1684
1685 static LIST_HEAD(pending_raid_disks);
1686
1687 /*
1688  * Try to register data integrity profile for an mddev
1689  *
1690  * This is called when an array is started and after a disk has been kicked
1691  * from the array. It only succeeds if all working and active component devices
1692  * are integrity capable with matching profiles.
1693  */
1694 int md_integrity_register(mddev_t *mddev)
1695 {
1696         mdk_rdev_t *rdev, *reference = NULL;
1697
1698         if (list_empty(&mddev->disks))
1699                 return 0; /* nothing to do */
1700         if (blk_get_integrity(mddev->gendisk))
1701                 return 0; /* already registered */
1702         list_for_each_entry(rdev, &mddev->disks, same_set) {
1703                 /* skip spares and non-functional disks */
1704                 if (test_bit(Faulty, &rdev->flags))
1705                         continue;
1706                 if (rdev->raid_disk < 0)
1707                         continue;
1708                 /*
1709                  * If at least one rdev is not integrity capable, we cannot
1710                  * enable data integrity for the md device.
1711                  */
1712                 if (!bdev_get_integrity(rdev->bdev))
1713                         return -EINVAL;
1714                 if (!reference) {
1715                         /* Use the first rdev as the reference */
1716                         reference = rdev;
1717                         continue;
1718                 }
1719                 /* does this rdev's profile match the reference profile? */
1720                 if (blk_integrity_compare(reference->bdev->bd_disk,
1721                                 rdev->bdev->bd_disk) < 0)
1722                         return -EINVAL;
1723         }
1724         /*
1725          * All component devices are integrity capable and have matching
1726          * profiles, register the common profile for the md device.
1727          */
1728         if (blk_integrity_register(mddev->gendisk,
1729                         bdev_get_integrity(reference->bdev)) != 0) {
1730                 printk(KERN_ERR "md: failed to register integrity for %s\n",
1731                         mdname(mddev));
1732                 return -EINVAL;
1733         }
1734         printk(KERN_NOTICE "md: data integrity on %s enabled\n",
1735                 mdname(mddev));
1736         return 0;
1737 }
1738 EXPORT_SYMBOL(md_integrity_register);
1739
1740 /* Disable data integrity if non-capable/non-matching disk is being added */
1741 void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
1742 {
1743         struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
1744         struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);
1745
1746         if (!bi_mddev) /* nothing to do */
1747                 return;
1748         if (rdev->raid_disk < 0) /* skip spares */
1749                 return;
1750         if (bi_rdev && blk_integrity_compare(mddev->gendisk,
1751                                              rdev->bdev->bd_disk) >= 0)
1752                 return;
1753         printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
1754         blk_integrity_unregister(mddev->gendisk);
1755 }
1756 EXPORT_SYMBOL(md_integrity_add_rdev);
1757
1758 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1759 {
1760         char b[BDEVNAME_SIZE];
1761         struct kobject *ko;
1762         char *s;
1763         int err;
1764
1765         if (rdev->mddev) {
1766                 MD_BUG();
1767                 return -EINVAL;
1768         }
1769
1770         /* prevent duplicates */
1771         if (find_rdev(mddev, rdev->bdev->bd_dev))
1772                 return -EEXIST;
1773
1774         /* make sure rdev->sectors exceeds mddev->dev_sectors */
1775         if (rdev->sectors && (mddev->dev_sectors == 0 ||
1776                         rdev->sectors < mddev->dev_sectors)) {
1777                 if (mddev->pers) {
1778                         /* Cannot change size, so fail
1779                          * If mddev->level <= 0, then we don't care
1780                          * about aligning sizes (e.g. linear)
1781                          */
1782                         if (mddev->level > 0)
1783                                 return -ENOSPC;
1784                 } else
1785                         mddev->dev_sectors = rdev->sectors;
1786         }
1787
1788         /* Verify rdev->desc_nr is unique.
1789          * If it is -1, assign a free number, else
1790          * check that the number is not in use
1791          */
1792         if (rdev->desc_nr < 0) {
1793                 int choice = 0;
1794                 if (mddev->pers) choice = mddev->raid_disks;
1795                 while (find_rdev_nr(mddev, choice))
1796                         choice++;
1797                 rdev->desc_nr = choice;
1798         } else {
1799                 if (find_rdev_nr(mddev, rdev->desc_nr))
1800                         return -EBUSY;
1801         }
1802         if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
1803                 printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
1804                        mdname(mddev), mddev->max_disks);
1805                 return -EBUSY;
1806         }
1807         bdevname(rdev->bdev,b);
1808         while ((s = strchr(b, '/')) != NULL)
1809                 *s = '!';
1810
1811         rdev->mddev = mddev;
1812         printk(KERN_INFO "md: bind<%s>\n", b);
1813
1814         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
1815                 goto fail;
1816
1817         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
1818         if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
1819                 kobject_del(&rdev->kobj);
1820                 goto fail;
1821         }
1822         rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, NULL, "state");
1823
1824         list_add_rcu(&rdev->same_set, &mddev->disks);
1825         bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
1826
1827         /* May as well allow recovery to be retried once */
1828         mddev->recovery_disabled = 0;
1829
1830         return 0;
1831
1832  fail:
1833         printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
1834                b, mdname(mddev));
1835         return err;
1836 }
1837
1838 static void md_delayed_delete(struct work_struct *ws)
1839 {
1840         mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
1841         kobject_del(&rdev->kobj);
1842         kobject_put(&rdev->kobj);
1843 }
1844
1845 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1846 {
1847         char b[BDEVNAME_SIZE];
1848         if (!rdev->mddev) {
1849                 MD_BUG();
1850                 return;
1851         }
1852         bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
1853         list_del_rcu(&rdev->same_set);
1854         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1855         rdev->mddev = NULL;
1856         sysfs_remove_link(&rdev->kobj, "block");
1857         sysfs_put(rdev->sysfs_state);
1858         rdev->sysfs_state = NULL;
1859         /* We need to delay this, otherwise we can deadlock when
1860          * writing 'remove' to "dev/state".  We also need
1861          * to delay it due to rcu usage.
1862          */
1863         synchronize_rcu();
1864         INIT_WORK(&rdev->del_work, md_delayed_delete);
1865         kobject_get(&rdev->kobj);
1866         schedule_work(&rdev->del_work);
1867 }
1868
1869 /*
1870  * prevent the device from being mounted, repartitioned or
1871  * otherwise reused by a RAID array (or any other kernel
1872  * subsystem), by bd_claiming the device.
1873  */
1874 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
1875 {
1876         int err = 0;
1877         struct block_device *bdev;
1878         char b[BDEVNAME_SIZE];
1879
1880         bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1881         if (IS_ERR(bdev)) {
1882                 printk(KERN_ERR "md: could not open %s.\n",
1883                         __bdevname(dev, b));
1884                 return PTR_ERR(bdev);
1885         }
1886         err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
1887         if (err) {
1888                 printk(KERN_ERR "md: could not bd_claim %s.\n",
1889                         bdevname(bdev, b));
1890                 blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
1891                 return err;
1892         }
1893         if (!shared)
1894                 set_bit(AllReserved, &rdev->flags);
1895         rdev->bdev = bdev;
1896         return err;
1897 }
1898
1899 static void unlock_rdev(mdk_rdev_t *rdev)
1900 {
1901         struct block_device *bdev = rdev->bdev;
1902         rdev->bdev = NULL;
1903         if (!bdev)
1904                 MD_BUG();
1905         bd_release(bdev);
1906         blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
1907 }
1908
1909 void md_autodetect_dev(dev_t dev);
1910
1911 static void export_rdev(mdk_rdev_t * rdev)
1912 {
1913         char b[BDEVNAME_SIZE];
1914         printk(KERN_INFO "md: export_rdev(%s)\n",
1915                 bdevname(rdev->bdev,b));
1916         if (rdev->mddev)
1917                 MD_BUG();
1918         free_disk_sb(rdev);
1919 #ifndef MODULE
1920         if (test_bit(AutoDetected, &rdev->flags))
1921                 md_autodetect_dev(rdev->bdev->bd_dev);
1922 #endif
1923         unlock_rdev(rdev);
1924         kobject_put(&rdev->kobj);
1925 }
1926
1927 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1928 {
1929         unbind_rdev_from_array(rdev);
1930         export_rdev(rdev);
1931 }
1932
1933 static void export_array(mddev_t *mddev)
1934 {
1935         mdk_rdev_t *rdev, *tmp;
1936
1937         rdev_for_each(rdev, tmp, mddev) {
1938                 if (!rdev->mddev) {
1939                         MD_BUG();
1940                         continue;
1941                 }
1942                 kick_rdev_from_array(rdev);
1943         }
1944         if (!list_empty(&mddev->disks))
1945                 MD_BUG();
1946         mddev->raid_disks = 0;
1947         mddev->major_version = 0;
1948 }
1949
1950 static void print_desc(mdp_disk_t *desc)
1951 {
1952         printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1953                 desc->major,desc->minor,desc->raid_disk,desc->state);
1954 }
1955
1956 static void print_sb_90(mdp_super_t *sb)
1957 {
1958         int i;
1959
1960         printk(KERN_INFO 
1961                 "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1962                 sb->major_version, sb->minor_version, sb->patch_version,
1963                 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1964                 sb->ctime);
1965         printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1966                 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1967                 sb->md_minor, sb->layout, sb->chunk_size);
1968         printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
1969                 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1970                 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1971                 sb->failed_disks, sb->spare_disks,
1972                 sb->sb_csum, (unsigned long)sb->events_lo);
1973
1974         printk(KERN_INFO);
1975         for (i = 0; i < MD_SB_DISKS; i++) {
1976                 mdp_disk_t *desc;
1977
1978                 desc = sb->disks + i;
1979                 if (desc->number || desc->major || desc->minor ||
1980                     desc->raid_disk || (desc->state && (desc->state != 4))) {
1981                         printk("     D %2d: ", i);
1982                         print_desc(desc);
1983                 }
1984         }
1985         printk(KERN_INFO "md:     THIS: ");
1986         print_desc(&sb->this_disk);
1987 }
1988
1989 static void print_sb_1(struct mdp_superblock_1 *sb)
1990 {
1991         __u8 *uuid;
1992
1993         uuid = sb->set_uuid;
1994         printk(KERN_INFO
1995                "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
1996                "md:    Name: \"%s\" CT:%llu\n",
1997                 le32_to_cpu(sb->major_version),
1998                 le32_to_cpu(sb->feature_map),
1999                 uuid,
2000                 sb->set_name,
2001                 (unsigned long long)le64_to_cpu(sb->ctime)
2002                        & MD_SUPERBLOCK_1_TIME_SEC_MASK);
2003
2004         uuid = sb->device_uuid;
2005         printk(KERN_INFO
2006                "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
2007                         " RO:%llu\n"
2008                "md:     Dev:%08x UUID: %pU\n"
2009                "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
2010                "md:         (MaxDev:%u) \n",
2011                 le32_to_cpu(sb->level),
2012                 (unsigned long long)le64_to_cpu(sb->size),
2013                 le32_to_cpu(sb->raid_disks),
2014                 le32_to_cpu(sb->layout),
2015                 le32_to_cpu(sb->chunksize),
2016                 (unsigned long long)le64_to_cpu(sb->data_offset),
2017                 (unsigned long long)le64_to_cpu(sb->data_size),
2018                 (unsigned long long)le64_to_cpu(sb->super_offset),
2019                 (unsigned long long)le64_to_cpu(sb->recovery_offset),
2020                 le32_to_cpu(sb->dev_number),
2021                 uuid,
2022                 sb->devflags,
2023                 (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
2024                 (unsigned long long)le64_to_cpu(sb->events),
2025                 (unsigned long long)le64_to_cpu(sb->resync_offset),
2026                 le32_to_cpu(sb->sb_csum),
2027                 le32_to_cpu(sb->max_dev)
2028                 );
2029 }
2030
2031 static void print_rdev(mdk_rdev_t *rdev, int major_version)
2032 {
2033         char b[BDEVNAME_SIZE];
2034         printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
2035                 bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
2036                 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
2037                 rdev->desc_nr);
2038         if (rdev->sb_loaded) {
2039                 printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
2040                 switch (major_version) {
2041                 case 0:
2042                         print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
2043                         break;
2044                 case 1:
2045                         print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
2046                         break;
2047                 }
2048         } else
2049                 printk(KERN_INFO "md: no rdev superblock!\n");
2050 }
2051
2052 static void md_print_devices(void)
2053 {
2054         struct list_head *tmp;
2055         mdk_rdev_t *rdev;
2056         mddev_t *mddev;
2057         char b[BDEVNAME_SIZE];
2058
2059         printk("\n");
2060         printk("md:     **********************************\n");
2061         printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
2062         printk("md:     **********************************\n");
2063         for_each_mddev(mddev, tmp) {
2064
2065                 if (mddev->bitmap)
2066                         bitmap_print_sb(mddev->bitmap);
2067                 else
2068                         printk("%s: ", mdname(mddev));
2069                 list_for_each_entry(rdev, &mddev->disks, same_set)
2070                         printk("<%s>", bdevname(rdev->bdev,b));
2071                 printk("\n");
2072
2073                 list_for_each_entry(rdev, &mddev->disks, same_set)
2074                         print_rdev(rdev, mddev->major_version);
2075         }
2076         printk("md:     **********************************\n");
2077         printk("\n");
2078 }
2079
2080
2081 static void sync_sbs(mddev_t * mddev, int nospares)
2082 {
2083         /* Update each superblock (in-memory image), but
2084          * if we are allowed to, skip spares which already
2085          * have the right event counter, or have one earlier
2086          * (which would mean they aren't being marked as dirty
2087          * with the rest of the array)
2088          */
2089         mdk_rdev_t *rdev;
2090
2091         /* First make sure individual recovery_offsets are correct */
2092         list_for_each_entry(rdev, &mddev->disks, same_set) {
2093                 if (rdev->raid_disk >= 0 &&
2094                     mddev->delta_disks >= 0 &&
2095                     !test_bit(In_sync, &rdev->flags) &&
2096                     mddev->curr_resync_completed > rdev->recovery_offset)
2097                         rdev->recovery_offset = mddev->curr_resync_completed;
2098
2099         }
2100         list_for_each_entry(rdev, &mddev->disks, same_set) {
2101                 if (rdev->sb_events == mddev->events ||
2102                     (nospares &&
2103                      rdev->raid_disk < 0 &&
2104                      rdev->sb_events+1 == mddev->events)) {
2105                         /* Don't update this superblock */
2106                         rdev->sb_loaded = 2;
2107                 } else {
2108                         super_types[mddev->major_version].
2109                                 sync_super(mddev, rdev);
2110                         rdev->sb_loaded = 1;
2111                 }
2112         }
2113 }
2114
2115 static void md_update_sb(mddev_t * mddev, int force_change)
2116 {
2117         mdk_rdev_t *rdev;
2118         int sync_req;
2119         int nospares = 0;
2120
2121         mddev->utime = get_seconds();
2122         if (mddev->external)
2123                 return;
2124 repeat:
2125         spin_lock_irq(&mddev->write_lock);
2126
2127         set_bit(MD_CHANGE_PENDING, &mddev->flags);
2128         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2129                 force_change = 1;
2130         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2131                 /* just a clean <-> dirty transition; possibly leave spares alone,
2132                  * though if 'events' doesn't have the right even/odd parity, we
2133                  * will have to update the spares after all
2134                  */
2135                 nospares = 1;
2136         if (force_change)
2137                 nospares = 0;
2138         if (mddev->degraded)
2139                 /* If the array is degraded, then skipping spares is both
2140                  * dangerous and fairly pointless.
2141                  * Dangerous because a device that was removed from the array
2142                  * might have an event count that still looks up-to-date,
2143                  * so it can be re-added without a resync.
2144                  * Pointless because if there are any spares to skip,
2145                  * then a recovery will happen and soon that array won't
2146                  * be degraded any more and the spare can go back to sleep then.
2147                  */
2148                 nospares = 0;
2149
2150         sync_req = mddev->in_sync;
2151
2152         /* If this is just a dirty<->clean transition, and the array is clean
2153          * and 'events' is odd, we can roll back to the previous clean state */
2154         if (nospares
2155             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2156             && mddev->can_decrease_events
2157             && mddev->events != 1) {
2158                 mddev->events--;
2159                 mddev->can_decrease_events = 0;
2160         } else {
2161                 /* otherwise we have to go forward and ... */
2162                 mddev->events++;
2163                 mddev->can_decrease_events = nospares;
2164         }
2165
2166         if (!mddev->events) {
2167                 /*
2168                  * oops, this 64-bit counter should never wrap.
2169                  * Either we are somewhere around the year 1 trillion (assuming
2170                  * one reboot per second), or we have a bug:
2171                  */
2172                 MD_BUG();
2173                 mddev->events--;
2174         }
2175
2176         /*
2177          * do not write anything to disk if using
2178          * nonpersistent superblocks
2179          */
2180         if (!mddev->persistent) {
2181                 if (!mddev->external)
2182                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2183
2184                 spin_unlock_irq(&mddev->write_lock);
2185                 wake_up(&mddev->sb_wait);
2186                 return;
2187         }
2188         sync_sbs(mddev, nospares);
2189         spin_unlock_irq(&mddev->write_lock);
2190
2191         dprintk(KERN_INFO
2192                 "md: updating %s RAID superblock on device (in sync %d)\n",
2193                 mdname(mddev), mddev->in_sync);
2194
2195         bitmap_update_sb(mddev->bitmap);
2196         list_for_each_entry(rdev, &mddev->disks, same_set) {
2197                 char b[BDEVNAME_SIZE];
2198                 dprintk(KERN_INFO "md: ");
2199                 if (rdev->sb_loaded != 1)
2200                         continue; /* no noise on spare devices */
2201                 if (test_bit(Faulty, &rdev->flags))
2202                         dprintk("(skipping faulty ");
2203
2204                 dprintk("%s ", bdevname(rdev->bdev,b));
2205                 if (!test_bit(Faulty, &rdev->flags)) {
2206                         md_super_write(mddev,rdev,
2207                                        rdev->sb_start, rdev->sb_size,
2208                                        rdev->sb_page);
2209                         dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
2210                                 bdevname(rdev->bdev,b),
2211                                 (unsigned long long)rdev->sb_start);
2212                         rdev->sb_events = mddev->events;
2213
2214                 } else
2215                         dprintk(")\n");
2216                 if (mddev->level == LEVEL_MULTIPATH)
2217                         /* only need to write one superblock... */
2218                         break;
2219         }
2220         md_super_wait(mddev);
2221         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
2222
2223         spin_lock_irq(&mddev->write_lock);
2224         if (mddev->in_sync != sync_req ||
2225             test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
2226                 /* have to write it out again */
2227                 spin_unlock_irq(&mddev->write_lock);
2228                 goto repeat;
2229         }
2230         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2231         spin_unlock_irq(&mddev->write_lock);
2232         wake_up(&mddev->sb_wait);
2233         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2234                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2235
2236 }
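/*
 * Editor's note (illustrative): the even/odd dance above lets a pure
 * clean <-> dirty transition be undone instead of recorded.  E.g. with
 * events == 100 on a clean array, going dirty bumps it to 101; if the
 * array returns to clean with no other change, events can drop back to
 * 100 (can_decrease_events), so spare superblocks still at 100 never
 * need to be rewritten.
 */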
2237
2238 /* Words written to sysfs files may, or may not, be \n terminated.
2239  * We want to accept either case. For this we use cmd_match.
2240  */
2241 static int cmd_match(const char *cmd, const char *str)
2242 {
2243         /* See if cmd, written into a sysfs file, matches
2244          * str.  They must either be the same, or cmd can
2245          * have a trailing newline
2246          */
2247         while (*cmd && *str && *cmd == *str) {
2248                 cmd++;
2249                 str++;
2250         }
2251         if (*cmd == '\n')
2252                 cmd++;
2253         if (*str || *cmd)
2254                 return 0;
2255         return 1;
2256 }
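/*
 * Illustrative examples (hypothetical inputs):
 *
 *      cmd_match("faulty\n", "faulty")  => 1  (trailing newline accepted)
 *      cmd_match("faulty",   "faulty")  => 1
 *      cmd_match("fault",    "faulty")  => 0  (a mere prefix is no match)
 */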
2257
2258 struct rdev_sysfs_entry {
2259         struct attribute attr;
2260         ssize_t (*show)(mdk_rdev_t *, char *);
2261         ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
2262 };
2263
2264 static ssize_t
2265 state_show(mdk_rdev_t *rdev, char *page)
2266 {
2267         char *sep = "";
2268         size_t len = 0;
2269
2270         if (test_bit(Faulty, &rdev->flags)) {
2271                 len += sprintf(page+len, "%sfaulty", sep);
2272                 sep = ",";
2273         }
2274         if (test_bit(In_sync, &rdev->flags)) {
2275                 len += sprintf(page+len, "%sin_sync", sep);
2276                 sep = ",";
2277         }
2278         if (test_bit(WriteMostly, &rdev->flags)) {
2279                 len += sprintf(page+len, "%swrite_mostly", sep);
2280                 sep = ",";
2281         }
2282         if (test_bit(Blocked, &rdev->flags)) {
2283                 len += sprintf(page+len, "%sblocked", sep);
2284                 sep = ",";
2285         }
2286         if (!test_bit(Faulty, &rdev->flags) &&
2287             !test_bit(In_sync, &rdev->flags)) {
2288                 len += sprintf(page+len, "%sspare", sep);
2289                 sep = ",";
2290         }
2291         return len + sprintf(page+len, "\n");
2292 }
2293
2294 static ssize_t
2295 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2296 {
2297         /* can write
2298          *  faulty  - simulates an error
2299          *  remove  - disconnects the device
2300          *  writemostly - sets write_mostly
2301          *  -writemostly - clears write_mostly
2302          *  blocked - sets the Blocked flag
2303          *  -blocked - clears the Blocked flag
2304          *  insync - sets In_sync provided the device isn't active
2305          */
2306         int err = -EINVAL;
2307         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2308                 md_error(rdev->mddev, rdev);
2309                 err = 0;
2310         } else if (cmd_match(buf, "remove")) {
2311                 if (rdev->raid_disk >= 0)
2312                         err = -EBUSY;
2313                 else {
2314                         mddev_t *mddev = rdev->mddev;
2315                         kick_rdev_from_array(rdev);
2316                         if (mddev->pers)
2317                                 md_update_sb(mddev, 1);
2318                         md_new_event(mddev);
2319                         err = 0;
2320                 }
2321         } else if (cmd_match(buf, "writemostly")) {
2322                 set_bit(WriteMostly, &rdev->flags);
2323                 err = 0;
2324         } else if (cmd_match(buf, "-writemostly")) {
2325                 clear_bit(WriteMostly, &rdev->flags);
2326                 err = 0;
2327         } else if (cmd_match(buf, "blocked")) {
2328                 set_bit(Blocked, &rdev->flags);
2329                 err = 0;
2330         } else if (cmd_match(buf, "-blocked")) {
2331                 clear_bit(Blocked, &rdev->flags);
2332                 wake_up(&rdev->blocked_wait);
2333                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2334                 md_wakeup_thread(rdev->mddev->thread);
2335
2336                 err = 0;
2337         } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2338                 set_bit(In_sync, &rdev->flags);
2339                 err = 0;
2340         }
2341         if (!err && rdev->sysfs_state)
2342                 sysfs_notify_dirent(rdev->sysfs_state);
2343         return err ? err : len;
2344 }
2345 static struct rdev_sysfs_entry rdev_state =
2346 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
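/*
 * Editor's note (illustrative, hypothetical device names): these
 * attributes appear under sysfs, so from userspace one can e.g.
 *
 *      echo faulty > /sys/block/md0/md/dev-sda1/state   # simulate an error
 *      echo remove > /sys/block/md0/md/dev-sda1/state   # drop the rdev
 *      cat /sys/block/md0/md/dev-sda1/state             # -> "faulty"
 */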
2347
2348 static ssize_t
2349 errors_show(mdk_rdev_t *rdev, char *page)
2350 {
2351         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2352 }
2353
2354 static ssize_t
2355 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2356 {
2357         char *e;
2358         unsigned long n = simple_strtoul(buf, &e, 10);
2359         if (*buf && (*e == 0 || *e == '\n')) {
2360                 atomic_set(&rdev->corrected_errors, n);
2361                 return len;
2362         }
2363         return -EINVAL;
2364 }
2365 static struct rdev_sysfs_entry rdev_errors =
2366 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2367
2368 static ssize_t
2369 slot_show(mdk_rdev_t *rdev, char *page)
2370 {
2371         if (rdev->raid_disk < 0)
2372                 return sprintf(page, "none\n");
2373         else
2374                 return sprintf(page, "%d\n", rdev->raid_disk);
2375 }
2376
2377 static ssize_t
2378 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2379 {
2380         char *e;
2381         int err;
2382         char nm[20];
2383         int slot = simple_strtoul(buf, &e, 10);
2384         if (strncmp(buf, "none", 4) == 0)
2385                 slot = -1;
2386         else if (e == buf || (*e && *e != '\n'))
2387                 return -EINVAL;
2388         if (rdev->mddev->pers && slot == -1) {
2389                 /* Setting 'slot' on an active array requires also
2390                  * updating the 'rd%d' link, and communicating
2391                  * with the personality with ->hot_*_disk.
2392                  * For now we only support removing
2393                  * failed/spare devices.  This normally happens automatically,
2394                  * but not when the metadata is externally managed.
2395                  */
2396                 if (rdev->raid_disk == -1)
2397                         return -EEXIST;
2398                 /* personality does all needed checks */
2399                 if (rdev->mddev->pers->hot_add_disk == NULL)
2400                         return -EINVAL;
2401                 err = rdev->mddev->pers->
2402                         hot_remove_disk(rdev->mddev, rdev->raid_disk);
2403                 if (err)
2404                         return err;
2405                 sprintf(nm, "rd%d", rdev->raid_disk);
2406                 sysfs_remove_link(&rdev->mddev->kobj, nm);
2407                 rdev->raid_disk = -1;
2408                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2409                 md_wakeup_thread(rdev->mddev->thread);
2410         } else if (rdev->mddev->pers) {
2411                 mdk_rdev_t *rdev2;
2412                 /* Activating a spare .. or possibly reactivating
2413                  * if we ever get bitmaps working here.
2414                  */
2415
2416                 if (rdev->raid_disk != -1)
2417                         return -EBUSY;
2418
2419                 if (rdev->mddev->pers->hot_add_disk == NULL)
2420                         return -EINVAL;
2421
2422                 list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
2423                         if (rdev2->raid_disk == slot)
2424                                 return -EEXIST;
2425
2426                 rdev->raid_disk = slot;
2427                 if (test_bit(In_sync, &rdev->flags))
2428                         rdev->saved_raid_disk = slot;
2429                 else
2430                         rdev->saved_raid_disk = -1;
2431                 err = rdev->mddev->pers->
2432                         hot_add_disk(rdev->mddev, rdev);
2433                 if (err) {
2434                         rdev->raid_disk = -1;
2435                         return err;
2436                 } else
2437                         sysfs_notify_dirent(rdev->sysfs_state);
2438                 sprintf(nm, "rd%d", rdev->raid_disk);
2439                 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
2440                         printk(KERN_WARNING
2441                                "md: cannot register "
2442                                "%s for %s\n",
2443                                nm, mdname(rdev->mddev));
2444
2445                 /* don't wakeup anyone, leave that to userspace. */
2446         } else {
2447                 if (slot >= rdev->mddev->raid_disks)
2448                         return -ENOSPC;
2449                 rdev->raid_disk = slot;
2450                 /* assume it is working */
2451                 clear_bit(Faulty, &rdev->flags);
2452                 clear_bit(WriteMostly, &rdev->flags);
2453                 set_bit(In_sync, &rdev->flags);
2454                 sysfs_notify_dirent(rdev->sysfs_state);
2455         }
2456         return len;
2457 }
2458
2459
2460 static struct rdev_sysfs_entry rdev_slot =
2461 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
2462
2463 static ssize_t
2464 offset_show(mdk_rdev_t *rdev, char *page)
2465 {
2466         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2467 }
2468
2469 static ssize_t
2470 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2471 {
2472         char *e;
2473         unsigned long long offset = simple_strtoull(buf, &e, 10);
2474         if (e == buf || (*e && *e != '\n'))
2475                 return -EINVAL;
2476         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2477                 return -EBUSY;
2478         if (rdev->sectors && rdev->mddev->external)
2479                 /* Must set offset before size, so overlap checks
2480                  * can be sane */
2481                 return -EBUSY;
2482         rdev->data_offset = offset;
2483         return len;
2484 }
2485
2486 static struct rdev_sysfs_entry rdev_offset =
2487 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2488
2489 static ssize_t
2490 rdev_size_show(mdk_rdev_t *rdev, char *page)
2491 {
2492         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2493 }
2494
2495 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2496 {
2497         /* check if two start/length pairs overlap */
2498         if (s1+l1 <= s2)
2499                 return 0;
2500         if (s2+l2 <= s1)
2501                 return 0;
2502         return 1;
2503 }
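/*
 * Illustrative examples (hypothetical extents, in sectors):
 *
 *      overlaps(0, 100, 50, 100) => 1  (sectors 50..99 are shared)
 *      overlaps(0,  50, 50,  10) => 0  (merely adjacent, no overlap)
 */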
2504
2505 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2506 {
2507         unsigned long long blocks;
2508         sector_t new;
2509
2510         if (strict_strtoull(buf, 10, &blocks) < 0)
2511                 return -EINVAL;
2512
2513         if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2514                 return -EINVAL; /* sector conversion overflow */
2515
2516         new = blocks * 2;
2517         if (new != blocks * 2)
2518                 return -EINVAL; /* unsigned long long to sector_t overflow */
2519
2520         *sectors = new;
2521         return 0;
2522 }
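/*
 * Editor's note (illustrative): sysfs sizes are in 1K blocks, so e.g.
 * "1048576" (1 GiB) becomes 2097152 512-byte sectors.  The two checks
 * above reject inputs whose doubling would overflow unsigned long long,
 * and results that would not fit in sector_t on a 32-bit build.
 */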
2523
2524 static ssize_t
2525 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2526 {
2527         mddev_t *my_mddev = rdev->mddev;
2528         sector_t oldsectors = rdev->sectors;
2529         sector_t sectors;
2530
2531         if (strict_blocks_to_sectors(buf, &sectors) < 0)
2532                 return -EINVAL;
2533         if (my_mddev->pers && rdev->raid_disk >= 0) {
2534                 if (my_mddev->persistent) {
2535                         sectors = super_types[my_mddev->major_version].
2536                                 rdev_size_change(rdev, sectors);
2537                         if (!sectors)
2538                                 return -EBUSY;
2539                 } else if (!sectors)
2540                         sectors = (rdev->bdev->bd_inode->i_size >> 9) -
2541                                 rdev->data_offset;
2542         }
2543         if (sectors < my_mddev->dev_sectors)
2544                 return -EINVAL; /* component must fit device */
2545
2546         rdev->sectors = sectors;
2547         if (sectors > oldsectors && my_mddev->external) {
2548                 /* need to check that all other rdevs with the same ->bdev
2549                  * do not overlap.  We need to unlock the mddev to avoid
2550                  * a deadlock.  We have already changed rdev->sectors, and if
2551                  * we have to change it back, we will have the lock again.
2552                  */
2553                 mddev_t *mddev;
2554                 int overlap = 0;
2555                 struct list_head *tmp;
2556
2557                 mddev_unlock(my_mddev);
2558                 for_each_mddev(mddev, tmp) {
2559                         mdk_rdev_t *rdev2;
2560
2561                         mddev_lock(mddev);
2562                         list_for_each_entry(rdev2, &mddev->disks, same_set)
2563                                 if (test_bit(AllReserved, &rdev2->flags) ||
2564                                     (rdev->bdev == rdev2->bdev &&
2565                                      rdev != rdev2 &&
2566                                      overlaps(rdev->data_offset, rdev->sectors,
2567                                               rdev2->data_offset,
2568                                               rdev2->sectors))) {
2569                                         overlap = 1;
2570                                         break;
2571                                 }
2572                         mddev_unlock(mddev);
2573                         if (overlap) {
2574                                 mddev_put(mddev);
2575                                 break;
2576                         }
2577                 }
2578                 mddev_lock(my_mddev);
2579                 if (overlap) {
2580                         /* Someone else could have slipped in a size
2581                          * change here, but doing so is just silly.
2582                          * We put oldsectors back because we *know* it is
2583                          * safe, and trust userspace not to race with
2584                          * itself
2585                          */
2586                         rdev->sectors = oldsectors;
2587                         return -EBUSY;
2588                 }
2589         }
2590         return len;
2591 }
2592
2593 static struct rdev_sysfs_entry rdev_size =
2594 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
2595
2596
2597 static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page)
2598 {
2599         unsigned long long recovery_start = rdev->recovery_offset;
2600
2601         if (test_bit(In_sync, &rdev->flags) ||
2602             recovery_start == MaxSector)
2603                 return sprintf(page, "none\n");
2604
2605         return sprintf(page, "%llu\n", recovery_start);
2606 }
2607
2608 static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2609 {
2610         unsigned long long recovery_start;
2611
2612         if (cmd_match(buf, "none"))
2613                 recovery_start = MaxSector;
2614         else if (strict_strtoull(buf, 10, &recovery_start))
2615                 return -EINVAL;
2616
2617         if (rdev->mddev->pers &&
2618             rdev->raid_disk >= 0)
2619                 return -EBUSY;
2620
2621         rdev->recovery_offset = recovery_start;
2622         if (recovery_start == MaxSector)
2623                 set_bit(In_sync, &rdev->flags);
2624         else
2625                 clear_bit(In_sync, &rdev->flags);
2626         return len;
2627 }
2628
2629 static struct rdev_sysfs_entry rdev_recovery_start =
2630 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
2631
2632 static struct attribute *rdev_default_attrs[] = {
2633         &rdev_state.attr,
2634         &rdev_errors.attr,
2635         &rdev_slot.attr,
2636         &rdev_offset.attr,
2637         &rdev_size.attr,
2638         &rdev_recovery_start.attr,
2639         NULL,
2640 };
2641 static ssize_t
2642 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2643 {
2644         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2645         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2646         mddev_t *mddev = rdev->mddev;
2647         ssize_t rv;
2648
2649         if (!entry->show)
2650                 return -EIO;
2651
2652         rv = mddev ? mddev_lock(mddev) : -EBUSY;
2653         if (!rv) {
2654                 if (rdev->mddev == NULL)
2655                         rv = -EBUSY;
2656                 else
2657                         rv = entry->show(rdev, page);
2658                 mddev_unlock(mddev);
2659         }
2660         return rv;
2661 }
2662
2663 static ssize_t
2664 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2665               const char *page, size_t length)
2666 {
2667         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2668         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2669         ssize_t rv;
2670         mddev_t *mddev = rdev->mddev;
2671
2672         if (!entry->store)
2673                 return -EIO;
2674         if (!capable(CAP_SYS_ADMIN))
2675                 return -EACCES;
2676         rv = mddev ? mddev_lock(mddev) : -EBUSY;
2677         if (!rv) {
2678                 if (rdev->mddev == NULL)
2679                         rv = -EBUSY;
2680                 else
2681                         rv = entry->store(rdev, page, length);
2682                 mddev_unlock(mddev);
2683         }
2684         return rv;
2685 }
2686
2687 static void rdev_free(struct kobject *ko)
2688 {
2689         mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
2690         kfree(rdev);
2691 }
2692 static const struct sysfs_ops rdev_sysfs_ops = {
2693         .show           = rdev_attr_show,
2694         .store          = rdev_attr_store,
2695 };
2696 static struct kobj_type rdev_ktype = {
2697         .release        = rdev_free,
2698         .sysfs_ops      = &rdev_sysfs_ops,
2699         .default_attrs  = rdev_default_attrs,
2700 };
2701
2702 /*
2703  * Import a device. If 'super_format' >= 0, then sanity check the superblock
2704  *
2705  * mark the device faulty if:
2706  *
2707  *   - the device is nonexistent (zero size)
2708  *   - the device has no valid superblock
2709  *
2710  * a faulty rdev _never_ has rdev->sb set.
2711  */
2712 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
2713 {
2714         char b[BDEVNAME_SIZE];
2715         int err;
2716         mdk_rdev_t *rdev;
2717         sector_t size;
2718
2719         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2720         if (!rdev) {
2721                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
2722                 return ERR_PTR(-ENOMEM);
2723         }
2724
2725         if ((err = alloc_disk_sb(rdev)))
2726                 goto abort_free;
2727
2728         err = lock_rdev(rdev, newdev, super_format == -2);
2729         if (err)
2730                 goto abort_free;
2731
2732         kobject_init(&rdev->kobj, &rdev_ktype);
2733
2734         rdev->desc_nr = -1;
2735         rdev->saved_raid_disk = -1;
2736         rdev->raid_disk = -1;
2737         rdev->flags = 0;
2738         rdev->data_offset = 0;
2739         rdev->sb_events = 0;
2740         rdev->last_read_error.tv_sec  = 0;
2741         rdev->last_read_error.tv_nsec = 0;
2742         atomic_set(&rdev->nr_pending, 0);
2743         atomic_set(&rdev->read_errors, 0);
2744         atomic_set(&rdev->corrected_errors, 0);
2745
2746         size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2747         if (!size) {
2748                 printk(KERN_WARNING 
2749                         "md: %s has zero or unknown size, marking faulty!\n",
2750                         bdevname(rdev->bdev,b));
2751                 err = -EINVAL;
2752                 goto abort_free;
2753         }
2754
2755         if (super_format >= 0) {
2756                 err = super_types[super_format].
2757                         load_super(rdev, NULL, super_minor);
2758                 if (err == -EINVAL) {
2759                         printk(KERN_WARNING
2760                                 "md: %s does not have a valid v%d.%d "
2761                                "superblock, not importing!\n",
2762                                 bdevname(rdev->bdev,b),
2763                                super_format, super_minor);
2764                         goto abort_free;
2765                 }
2766                 if (err < 0) {
2767                         printk(KERN_WARNING 
2768                                 "md: could not read %s's sb, not importing!\n",
2769                                 bdevname(rdev->bdev,b));
2770                         goto abort_free;
2771                 }
2772         }
2773
2774         INIT_LIST_HEAD(&rdev->same_set);
2775         init_waitqueue_head(&rdev->blocked_wait);
2776
2777         return rdev;
2778
2779 abort_free:
2780         if (rdev->sb_page) {
2781                 if (rdev->bdev)
2782                         unlock_rdev(rdev);
2783                 free_disk_sb(rdev);
2784         }
2785         kfree(rdev);
2786         return ERR_PTR(err);
2787 }
2788
2789 /*
2790  * Check a full RAID array for plausibility
2791  */
2792
2793
2794 static void analyze_sbs(mddev_t * mddev)
2795 {
2796         int i;
2797         mdk_rdev_t *rdev, *freshest, *tmp;
2798         char b[BDEVNAME_SIZE];
2799
2800         freshest = NULL;
2801         rdev_for_each(rdev, tmp, mddev)
2802                 switch (super_types[mddev->major_version].
2803                         load_super(rdev, freshest, mddev->minor_version)) {
2804                 case 1:
2805                         freshest = rdev;
2806                         break;
2807                 case 0:
2808                         break;
2809                 default:
2810                         printk(KERN_ERR
2811                                 "md: fatal superblock inconsistency in %s"
2812                                 " -- removing from array\n",
2813                                 bdevname(rdev->bdev,b));
2814                         kick_rdev_from_array(rdev);
2815                 }
2816
2817
2818         super_types[mddev->major_version].
2819                 validate_super(mddev, freshest);
2820
2821         i = 0;
2822         rdev_for_each(rdev, tmp, mddev) {
2823                 if (mddev->max_disks &&
2824                     (rdev->desc_nr >= mddev->max_disks ||
2825                      i > mddev->max_disks)) {
2826                         printk(KERN_WARNING
2827                                "md: %s: %s: only %d devices permitted\n",
2828                                mdname(mddev), bdevname(rdev->bdev, b),
2829                                mddev->max_disks);
2830                         kick_rdev_from_array(rdev);
2831                         continue;
2832                 }
2833                 if (rdev != freshest)
2834                         if (super_types[mddev->major_version].
2835                             validate_super(mddev, rdev)) {
2836                                 printk(KERN_WARNING "md: kicking non-fresh %s"
2837                                         " from array!\n",
2838                                         bdevname(rdev->bdev,b));
2839                                 kick_rdev_from_array(rdev);
2840                                 continue;
2841                         }
2842                 if (mddev->level == LEVEL_MULTIPATH) {
2843                         rdev->desc_nr = i++;
2844                         rdev->raid_disk = rdev->desc_nr;
2845                         set_bit(In_sync, &rdev->flags);
2846                 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
2847                         rdev->raid_disk = -1;
2848                         clear_bit(In_sync, &rdev->flags);
2849                 }
2850         }
2851 }
2852
2853 /* Read a fixed-point number.
2854  * Numbers in sysfs attributes should be in "standard" units where
2855  * possible, so time should be in seconds.
2856  * However we internally use a much smaller unit such as
2857  * milliseconds or jiffies.
2858  * This function takes a decimal number with a possible fractional
2859  * component, and produces an integer which is the result of
2860  * multiplying that number by 10^'scale'.
2861  * multiplying that number by 10^'scale', all without any
2862  * floating-point arithmetic.
2863 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
2864 {
2865         unsigned long result = 0;
2866         long decimals = -1;
2867         while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
2868                 if (*cp == '.')
2869                         decimals = 0;
2870                 else if (decimals < scale) {
2871                         unsigned int value;
2872                         value = *cp - '0';
2873                         result = result * 10 + value;
2874                         if (decimals >= 0)
2875                                 decimals++;
2876                 }
2877                 cp++;
2878         }
2879         if (*cp == '\n')
2880                 cp++;
2881         if (*cp)
2882                 return -EINVAL;
2883         if (decimals < 0)
2884                 decimals = 0;
2885         while (decimals < scale) {
2886                 result *= 10;
2887                 decimals ++;
2888         }
2889         *res = result;
2890         return 0;
2891 }
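/* Illustrative usage (not part of the driver): with 'scale' == 3 a decimal
 * seconds value is converted to milliseconds using integer arithmetic only.
 * Extra fractional digits beyond 'scale' are truncated, not rounded:
 *
 *	unsigned long res;
 *	strict_strtoul_scaled("0.2\n", &res, 3);    res == 200
 *	strict_strtoul_scaled("15", &res, 3);       res == 15000
 *	strict_strtoul_scaled("1.2345", &res, 3);   res == 1234
 */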
2892
2893
2894 static void md_safemode_timeout(unsigned long data);
2895
2896 static ssize_t
2897 safe_delay_show(mddev_t *mddev, char *page)
2898 {
2899         int msec = (mddev->safemode_delay*1000)/HZ;
2900         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2901 }
2902 static ssize_t
2903 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2904 {
2905         unsigned long msec;
2906
2907         if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
2908                 return -EINVAL;
2909         if (msec == 0)
2910                 mddev->safemode_delay = 0;
2911         else {
2912                 unsigned long old_delay = mddev->safemode_delay;
2913                 mddev->safemode_delay = (msec*HZ)/1000;
2914                 if (mddev->safemode_delay == 0)
2915                         mddev->safemode_delay = 1;
2916                 if (mddev->safemode_delay < old_delay)
2917                         md_safemode_timeout((unsigned long)mddev);
2918         }
2919         return len;
2920 }
2921 static struct md_sysfs_entry md_safe_delay =
2922 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store);
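/* Example (illustrative; the device name is an assumption): the delay is
 * written in seconds with an optional fractional part, so
 *
 *	echo 0.2 > /sys/block/md0/md/safe_mode_delay
 *
 * stores roughly 200ms worth of jiffies in safemode_delay via the scale-3
 * parse above.
 */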
2923
2924 static ssize_t
2925 level_show(mddev_t *mddev, char *page)
2926 {
2927         struct mdk_personality *p = mddev->pers;
2928         if (p)
2929                 return sprintf(page, "%s\n", p->name);
2930         else if (mddev->clevel[0])
2931                 return sprintf(page, "%s\n", mddev->clevel);
2932         else if (mddev->level != LEVEL_NONE)
2933                 return sprintf(page, "%d\n", mddev->level);
2934         else
2935                 return 0;
2936 }
2937
2938 static ssize_t
2939 level_store(mddev_t *mddev, const char *buf, size_t len)
2940 {
2941         char clevel[16];
2942         ssize_t rv = len;
2943         struct mdk_personality *pers;
2944         long level;
2945         void *priv;
2946         mdk_rdev_t *rdev;
2947
2948         if (mddev->pers == NULL) {
2949                 if (len == 0)
2950                         return 0;
2951                 if (len >= sizeof(mddev->clevel))
2952                         return -ENOSPC;
2953                 strncpy(mddev->clevel, buf, len);
2954                 if (mddev->clevel[len-1] == '\n')
2955                         len--;
2956                 mddev->clevel[len] = 0;
2957                 mddev->level = LEVEL_NONE;
2958                 return rv;
2959         }
2960
2961         /* request to change the personality.  Need to ensure:
2962          *  - array is not engaged in resync/recovery/reshape
2963          *  - old personality can be suspended
2964          *  - new personality can take over the array's data (via ->takeover).
2965          */
2966
2967         if (mddev->sync_thread ||
2968             mddev->reshape_position != MaxSector ||
2969             mddev->sysfs_active)
2970                 return -EBUSY;
2971
2972         if (!mddev->pers->quiesce) {
2973                 printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
2974                        mdname(mddev), mddev->pers->name);
2975                 return -EINVAL;
2976         }
2977
2978         /* Now find the new personality */
2979         if (len == 0 || len >= sizeof(clevel))
2980                 return -EINVAL;
2981         strncpy(clevel, buf, len);
2982         if (clevel[len-1] == '\n')
2983                 len--;
2984         clevel[len] = 0;
2985         if (strict_strtol(clevel, 10, &level))
2986                 level = LEVEL_NONE;
2987
2988         if (request_module("md-%s", clevel) != 0)
2989                 request_module("md-level-%s", clevel);
2990         spin_lock(&pers_lock);
2991         pers = find_pers(level, clevel);
2992         if (!pers || !try_module_get(pers->owner)) {
2993                 spin_unlock(&pers_lock);
2994                 printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
2995                 return -EINVAL;
2996         }
2997         spin_unlock(&pers_lock);
2998
2999         if (pers == mddev->pers) {
3000                 /* Nothing to do! */
3001                 module_put(pers->owner);
3002                 return rv;
3003         }
3004         if (!pers->takeover) {
3005                 module_put(pers->owner);
3006                 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
3007                        mdname(mddev), clevel);
3008                 return -EINVAL;
3009         }
3010
3011         list_for_each_entry(rdev, &mddev->disks, same_set)
3012                 rdev->new_raid_disk = rdev->raid_disk;
3013
3014         /* ->takeover must set new_* and/or delta_disks
3015          * if it succeeds, and may set them when it fails.
3016          */
3017         priv = pers->takeover(mddev);
3018         if (IS_ERR(priv)) {
3019                 mddev->new_level = mddev->level;
3020                 mddev->new_layout = mddev->layout;
3021                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3022                 mddev->raid_disks -= mddev->delta_disks;
3023                 mddev->delta_disks = 0;
3024                 module_put(pers->owner);
3025                 printk(KERN_WARNING "md: %s: %s would not accept array\n",
3026                        mdname(mddev), clevel);
3027                 return PTR_ERR(priv);
3028         }
3029
3030         /* Looks like we have a winner */
3031         mddev_suspend(mddev);
3032         mddev->pers->stop(mddev);
3033
3034         if (mddev->pers->sync_request == NULL &&
3035             pers->sync_request != NULL) {
3036                 /* need to add the md_redundancy_group */
3037                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3038                         printk(KERN_WARNING
3039                                "md: cannot register extra attributes for %s\n",
3040                                mdname(mddev));
3041                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
3042         }
3043         if (mddev->pers->sync_request != NULL &&
3044             pers->sync_request == NULL) {
3045                 /* need to remove the md_redundancy_group */
3046                 if (mddev->to_remove == NULL)
3047                         mddev->to_remove = &md_redundancy_group;
3048         }
3049
3050         if (mddev->pers->sync_request == NULL &&
3051             mddev->external) {
3052                 /* We are converting from a no-redundancy array
3053                  * to a redundancy array and metadata is managed
3054                  * externally so we need to be sure that writes
3055                  * won't block due to a need to transition
3056                  *      clean->dirty
3057                  * until external management is started.
3058                  */
3059                 mddev->in_sync = 0;
3060                 mddev->safemode_delay = 0;
3061                 mddev->safemode = 0;
3062         }
3063
3064         list_for_each_entry(rdev, &mddev->disks, same_set) {
3065                 char nm[20];
3066                 if (rdev->raid_disk < 0)
3067                         continue;
3068                 if (rdev->new_raid_disk >= mddev->raid_disks)
3069                         rdev->new_raid_disk = -1;
3070                 if (rdev->new_raid_disk == rdev->raid_disk)
3071                         continue;
3072                 sprintf(nm, "rd%d", rdev->raid_disk);
3073                 sysfs_remove_link(&mddev->kobj, nm);
3074         }
3075         list_for_each_entry(rdev, &mddev->disks, same_set) {
3076                 if (rdev->raid_disk < 0)
3077                         continue;
3078                 if (rdev->new_raid_disk == rdev->raid_disk)
3079                         continue;
3080                 rdev->raid_disk = rdev->new_raid_disk;
3081                 if (rdev->raid_disk < 0)
3082                         clear_bit(In_sync, &rdev->flags);
3083                 else {
3084                         char nm[20];
3085                         sprintf(nm, "rd%d", rdev->raid_disk);
3086                         if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
3087                                 printk(KERN_WARNING "md: cannot register %s for %s after level change\n",
3088                                        nm, mdname(mddev));
3089                 }
3090         }
3091
3092         module_put(mddev->pers->owner);
3093         mddev->pers = pers;
3094         mddev->private = priv;
3095         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3096         mddev->level = mddev->new_level;
3097         mddev->layout = mddev->new_layout;
3098         mddev->chunk_sectors = mddev->new_chunk_sectors;
3099         mddev->delta_disks = 0;
3100         if (mddev->pers->sync_request == NULL) {
3101                 /* this is now an array without redundancy, so
3102                  * it must always be in_sync
3103                  */
3104                 mddev->in_sync = 1;
3105                 del_timer_sync(&mddev->safemode_timer);
3106         }
3107         pers->run(mddev);
3108         mddev_resume(mddev);
3109         set_bit(MD_CHANGE_DEVS, &mddev->flags);
3110         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3111         md_wakeup_thread(mddev->thread);
3112         sysfs_notify(&mddev->kobj, NULL, "level");
3113         md_new_event(mddev);
3114         return rv;
3115 }
3116
3117 static struct md_sysfs_entry md_level =
3118 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
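/* Example (illustrative; device name and levels are assumptions): an online
 * takeover is requested by writing the new personality name, e.g.
 *
 *	echo raid5 > /sys/block/md0/md/level
 *
 * which only succeeds if the current personality can be quiesced and the new
 * one provides a ->takeover method that accepts the array's current layout.
 */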
3119
3120
3121 static ssize_t
3122 layout_show(mddev_t *mddev, char *page)
3123 {
3124         /* just a number, not meaningful for all levels */
3125         if (mddev->reshape_position != MaxSector &&
3126             mddev->layout != mddev->new_layout)
3127                 return sprintf(page, "%d (%d)\n",
3128                                mddev->new_layout, mddev->layout);
3129         return sprintf(page, "%d\n", mddev->layout);
3130 }
3131
3132 static ssize_t
3133 layout_store(mddev_t *mddev, const char *buf, size_t len)
3134 {
3135         char *e;
3136         unsigned long n = simple_strtoul(buf, &e, 10);
3137
3138         if (!*buf || (*e && *e != '\n'))
3139                 return -EINVAL;
3140
3141         if (mddev->pers) {
3142                 int err;
3143                 if (mddev->pers->check_reshape == NULL)
3144                         return -EBUSY;
3145                 mddev->new_layout = n;
3146                 err = mddev->pers->check_reshape(mddev);
3147                 if (err) {
3148                         mddev->new_layout = mddev->layout;
3149                         return err;
3150                 }
3151         } else {
3152                 mddev->new_layout = n;
3153                 if (mddev->reshape_position == MaxSector)
3154                         mddev->layout = n;
3155         }
3156         return len;
3157 }
3158 static struct md_sysfs_entry md_layout =
3159 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
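/* Example (illustrative): the layout number is personality-specific; for
 * RAID5 it selects the parity algorithm (2 is conventionally
 * left-symmetric, but treat that mapping as an assumption here):
 *
 *	echo 2 > /sys/block/md0/md/layout
 *
 * On an active array this is routed through ->check_reshape and reverted
 * on failure.
 */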
3160
3161
3162 static ssize_t
3163 raid_disks_show(mddev_t *mddev, char *page)
3164 {
3165         if (mddev->raid_disks == 0)
3166                 return 0;
3167         if (mddev->reshape_position != MaxSector &&
3168             mddev->delta_disks != 0)
3169                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3170                                mddev->raid_disks - mddev->delta_disks);
3171         return sprintf(page, "%d\n", mddev->raid_disks);
3172 }
3173
3174 static int update_raid_disks(mddev_t *mddev, int raid_disks);
3175
3176 static ssize_t
3177 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
3178 {
3179         char *e;
3180         int rv = 0;
3181         unsigned long n = simple_strtoul(buf, &e, 10);
3182
3183         if (!*buf || (*e && *e != '\n'))
3184                 return -EINVAL;
3185
3186         if (mddev->pers)
3187                 rv = update_raid_disks(mddev, n);
3188         else if (mddev->reshape_position != MaxSector) {
3189                 int olddisks = mddev->raid_disks - mddev->delta_disks;
3190                 mddev->delta_disks = n - olddisks;
3191                 mddev->raid_disks = n;
3192         } else
3193                 mddev->raid_disks = n;
3194         return rv ? rv : len;
3195 }
3196 static struct md_sysfs_entry md_raid_disks =
3197 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
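/* Example (illustrative; device name assumed): growing an active array with
 *
 *	echo 4 > /sys/block/md0/md/raid_disks
 *
 * goes through update_raid_disks(); on an inactive array the value is simply
 * recorded, with delta_disks adjusted when a reshape position is set.
 */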
3198
3199 static ssize_t
3200 chunk_size_show(mddev_t *mddev, char *page)
3201 {
3202         if (mddev->reshape_position != MaxSector &&
3203             mddev->chunk_sectors != mddev->new_chunk_sectors)
3204                 return sprintf(page, "%d (%d)\n",
3205                                mddev->new_chunk_sectors << 9,
3206                                mddev->chunk_sectors << 9);
3207         return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3208 }
3209
3210 static ssize_t
3211 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
3212 {
3213         char *e;
3214         unsigned long n = simple_strtoul(buf, &e, 10);
3215
3216         if (!*buf || (*e && *e != '\n'))
3217                 return -EINVAL;
3218
3219         if (mddev->pers) {
3220                 int err;
3221                 if (mddev->pers->check_reshape == NULL)
3222                         return -EBUSY;
3223                 mddev->new_chunk_sectors = n >> 9;
3224                 err = mddev->pers->check_reshape(mddev);
3225                 if (err) {
3226                         mddev->new_chunk_sectors = mddev->chunk_sectors;
3227                         return err;
3228                 }
3229         } else {
3230                 mddev->new_chunk_sectors = n >> 9;
3231                 if (mddev->reshape_position == MaxSector)
3232                         mddev->chunk_sectors = n >> 9;
3233         }
3234         return len;
3235 }
3236 static struct md_sysfs_entry md_chunk_size =
3237 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
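/* Worked example (illustrative): the attribute is in bytes but stored in
 * 512-byte sectors, so writing 131072 records new_chunk_sectors =
 * 131072 >> 9 = 256, and chunk_size_show() prints 256 << 9 = 131072 again.
 */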
3238
3239 static ssize_t
3240 resync_start_show(mddev_t *mddev, char *page)
3241 {
3242         if (mddev->recovery_cp == MaxSector)
3243                 return sprintf(page, "none\n");
3244         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3245 }
3246
3247 static ssize_t
3248 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
3249 {
3250         char *e;
3251         unsigned long long n = simple_strtoull(buf, &e, 10);
3252
3253         if (mddev->pers)
3254                 return -EBUSY;
3255         if (cmd_match(buf, "none"))
3256                 n = MaxSector;
3257         else if (!*buf || (*e && *e != '\n'))
3258                 return -EINVAL;
3259
3260         mddev->recovery_cp = n;
3261         return len;
3262 }
3263 static struct md_sysfs_entry md_resync_start =
3264 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
3265
3266 /*
3267  * The array state can be:
3268  *
3269  * clear
3270  *     No devices, no size, no level
3271  *     Equivalent to STOP_ARRAY ioctl
3272  * inactive
3273  *     May have some settings, but array is not active
3274  *        all IO results in error
3275  *     When written, doesn't tear down array, but just stops it
3276  * suspended (not supported yet)
3277  *     All IO requests will block. The array can be reconfigured.
3278  *     Writing this, if accepted, will block until array is quiescent
3279  * readonly
3280  *     no resync can happen.  no superblocks get written.
3281  *     write requests fail
3282  * read-auto
3283  *     like readonly, but behaves like 'clean' on a write request.
3284  *
3285  * clean - no pending writes, but otherwise active.
3286  *     When written to inactive array, starts without resync
3287  *     If a write request arrives then
3288  *       if metadata is known, mark 'dirty' and switch to 'active'.
3289  *       if not known, block and switch to write-pending
3290  *     If written to an active array that has pending writes, then fails.
3291  * active
3292  *     fully active: IO and resync can be happening.
3293  *     When written to inactive array, starts with resync
3294  *
3295  * write-pending
3296  *     clean, but writes are blocked waiting for 'active' to be written.
3297  *
3298  * active-idle
3299  *     like active, but no writes have been seen for a while (100msec).
3300  *
3301  */
3302 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3303                    write_pending, active_idle, bad_word};
3304 static char *array_states[] = {
3305         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3306         "write-pending", "active-idle", NULL };
3307
3308 static int match_word(const char *word, char **list)
3309 {
3310         int n;
3311         for (n=0; list[n]; n++)
3312                 if (cmd_match(word, list[n]))
3313                         break;
3314         return n;
3315 }
3316
3317 static ssize_t
3318 array_state_show(mddev_t *mddev, char *page)
3319 {
3320         enum array_state st = inactive;
3321
3322         if (mddev->pers)
3323                 switch(mddev->ro) {
3324                 case 1:
3325                         st = readonly;
3326                         break;
3327                 case 2:
3328                         st = read_auto;
3329                         break;
3330                 case 0:
3331                         if (mddev->in_sync)
3332                                 st = clean;
3333                         else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
3334                                 st = write_pending;
3335                         else if (mddev->safemode)
3336                                 st = active_idle;
3337                         else
3338                                 st = active;
3339                 }
3340         else {
3341                 if (list_empty(&mddev->disks) &&
3342                     mddev->raid_disks == 0 &&
3343                     mddev->dev_sectors == 0)
3344                         st = clear;
3345                 else
3346                         st = inactive;
3347         }
3348         return sprintf(page, "%s\n", array_states[st]);
3349 }
3350
3351 static int do_md_stop(mddev_t * mddev, int ro, int is_open);
3352 static int md_set_readonly(mddev_t * mddev, int is_open);
3353 static int do_md_run(mddev_t * mddev);
3354 static int restart_array(mddev_t *mddev);
3355
3356 static ssize_t
3357 array_state_store(mddev_t *mddev, const char *buf, size_t len)
3358 {
3359         int err = -EINVAL;
3360         enum array_state st = match_word(buf, array_states);
3361         switch(st) {
3362         case bad_word:
3363                 break;
3364         case clear:
3365                 /* stopping an active array */
3366                 if (atomic_read(&mddev->openers) > 0)
3367                         return -EBUSY;
3368                 err = do_md_stop(mddev, 0, 0);
3369                 break;
3370         case inactive:
3371                 /* stopping an active array */
3372                 if (mddev->pers) {
3373                         if (atomic_read(&mddev->openers) > 0)
3374                                 return -EBUSY;
3375                         err = do_md_stop(mddev, 2, 0);
3376                 } else
3377                         err = 0; /* already inactive */
3378                 break;
3379         case suspended:
3380                 break; /* not supported yet */
3381         case readonly:
3382                 if (mddev->pers)
3383                         err = md_set_readonly(mddev, 0);
3384                 else {
3385                         mddev->ro = 1;
3386                         set_disk_ro(mddev->gendisk, 1);
3387                         err = do_md_run(mddev);
3388                 }
3389                 break;
3390         case read_auto:
3391                 if (mddev->pers) {
3392                         if (mddev->ro == 0)
3393                                 err = md_set_readonly(mddev, 0);
3394                         else if (mddev->ro == 1)
3395                                 err = restart_array(mddev);
3396                         if (err == 0) {
3397                                 mddev->ro = 2;
3398                                 set_disk_ro(mddev->gendisk, 0);
3399                         }
3400                 } else {
3401                         mddev->ro = 2;
3402                         err = do_md_run(mddev);
3403                 }
3404                 break;
3405         case clean:
3406                 if (mddev->pers) {
3407                         restart_array(mddev);
3408                         spin_lock_irq(&mddev->write_lock);
3409                         if (atomic_read(&mddev->writes_pending) == 0) {
3410                                 if (mddev->in_sync == 0) {
3411                                         mddev->in_sync = 1;
3412                                         if (mddev->safemode == 1)
3413                                                 mddev->safemode = 0;
3414                                         if (mddev->persistent)
3415                                                 set_bit(MD_CHANGE_CLEAN,
3416                                                         &mddev->flags);
3417                                 }
3418                                 err = 0;
3419                         } else
3420                                 err = -EBUSY;
3421                         spin_unlock_irq(&mddev->write_lock);
3422                 } else
3423                         err = -EINVAL;
3424                 break;
3425         case active:
3426                 if (mddev->pers) {
3427                         restart_array(mddev);
3428                         if (mddev->external)
3429                                 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
3430                         wake_up(&mddev->sb_wait);
3431                         err = 0;
3432                 } else {
3433                         mddev->ro = 0;
3434                         set_disk_ro(mddev->gendisk, 0);
3435                         err = do_md_run(mddev);
3436                 }
3437                 break;
3438         case write_pending:
3439         case active_idle:
3440                 /* these cannot be set */
3441                 break;
3442         }
3443         if (err)
3444                 return err;
3445         else {
3446                 sysfs_notify_dirent(mddev->sysfs_state);
3447                 return len;
3448         }
3449 }
3450 static struct md_sysfs_entry md_array_state =
3451 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
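/* Example (illustrative; device name assumed): the state machine documented
 * above is driven from user space, e.g.
 *
 *	echo readonly > /sys/block/md0/md/array_state   # fail further writes
 *	echo clean    > /sys/block/md0/md/array_state   # -EBUSY if writes
 *	                                                # are still pending
 *	echo inactive > /sys/block/md0/md/array_state   # stop, keep settings
 */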
3452
3453 static ssize_t
3454 max_corrected_read_errors_show(mddev_t *mddev, char *page) {
3455         return sprintf(page, "%d\n",
3456                        atomic_read(&mddev->max_corr_read_errors));
3457 }
3458
3459 static ssize_t
3460 max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len)
3461 {
3462         char *e;
3463         unsigned long n = simple_strtoul(buf, &e, 10);
3464
3465         if (*buf && (*e == 0 || *e == '\n')) {
3466                 atomic_set(&mddev->max_corr_read_errors, n);
3467                 return len;
3468         }
3469         return -EINVAL;
3470 }
3471
3472 static struct md_sysfs_entry max_corr_read_errors =
3473 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
3474         max_corrected_read_errors_store);
3475
3476 static ssize_t
3477 null_show(mddev_t *mddev, char *page)
3478 {
3479         return -EINVAL;
3480 }
3481
3482 static ssize_t
3483 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
3484 {
3485         /* buf must be "%d:%d" giving major and minor numbers, optionally followed by '\n' */
3486         /* The new device is added to the array.
3487          * If the array has a persistent superblock, we read the
3488          * superblock to initialise info and check validity.
3489          * Otherwise, only checking done is that in bind_rdev_to_array,
3490          * which mainly checks size.
3491          */
3492         char *e;
3493         int major = simple_strtoul(buf, &e, 10);
3494         int minor;
3495         dev_t dev;
3496         mdk_rdev_t *rdev;
3497         int err;
3498
3499         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
3500                 return -EINVAL;
3501         minor = simple_strtoul(e+1, &e, 10);
3502         if (*e && *e != '\n')
3503                 return -EINVAL;
3504         dev = MKDEV(major, minor);
3505         if (major != MAJOR(dev) ||
3506             minor != MINOR(dev))
3507                 return -EOVERFLOW;
3508
3509
3510         if (mddev->persistent) {
3511                 rdev = md_import_device(dev, mddev->major_version,
3512                                         mddev->minor_version);
3513                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
3514                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3515                                                        mdk_rdev_t, same_set);
3516                         err = super_types[mddev->major_version]
3517                                 .load_super(rdev, rdev0, mddev->minor_version);
3518                         if (err < 0)
3519                                 goto out;
3520                 }
3521         } else if (mddev->external)
3522                 rdev = md_import_device(dev, -2, -1);
3523         else
3524                 rdev = md_import_device(dev, -1, -1);
3525
3526         if (IS_ERR(rdev))
3527                 return PTR_ERR(rdev);
3528         err = bind_rdev_to_array(rdev, mddev);
3529  out:
3530         if (err)
3531                 export_rdev(rdev);
3532         return err ? err : len;
3533 }
3534
3535 static struct md_sysfs_entry md_new_device =
3536 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
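/* Example (illustrative; the major:minor pair is an assumption): hot-adding
 * the block device with dev_t 8:16 (conventionally /dev/sdb):
 *
 *	echo 8:16 > /sys/block/md0/md/new_dev
 */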
3537
3538 static ssize_t
3539 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
3540 {
3541         char *end;
3542         unsigned long chunk, end_chunk;
3543
3544         if (!mddev->bitmap)
3545                 goto out;
3546         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
3547         while (*buf) {
3548                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
3549                 if (buf == end) break;
3550                 if (*end == '-') { /* range */
3551                         buf = end + 1;
3552                         end_chunk = simple_strtoul(buf, &end, 0);
3553                         if (buf == end) break;
3554                 }
3555                 if (*end && !isspace(*end)) break;
3556                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
3557                 buf = skip_spaces(end);
3558         }
3559         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
3560 out:
3561         return len;
3562 }
3563
3564 static struct md_sysfs_entry md_bitmap =
3565 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
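/* Example (illustrative): marking bitmap chunks 100-200 and chunk 350 dirty
 * so those regions will be resynced:
 *
 *	echo "100-200 350" > /sys/block/md0/md/bitmap_set_bits
 */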
3566
3567 static ssize_t
3568 size_show(mddev_t *mddev, char *page)
3569 {
3570         return sprintf(page, "%llu\n",
3571                 (unsigned long long)mddev->dev_sectors / 2);
3572 }
3573
3574 static int update_size(mddev_t *mddev, sector_t num_sectors);
3575
3576 static ssize_t
3577 size_store(mddev_t *mddev, const char *buf, size_t len)
3578 {
3579         /* If array is inactive, we can reduce the component size, but
3580          * not increase it (except from 0).
3581          * If array is active, we can try an on-line resize
3582          */
3583         sector_t sectors;
3584         int err = strict_blocks_to_sectors(buf, &sectors);
3585
3586         if (err < 0)
3587                 return err;
3588         if (mddev->pers) {
3589                 err = update_size(mddev, sectors);
3590                 md_update_sb(mddev, 1);
3591         } else {
3592                 if (mddev->dev_sectors == 0 ||
3593                     mddev->dev_sectors > sectors)
3594                         mddev->dev_sectors = sectors;
3595                 else
3596                         err = -ENOSPC;
3597         }
3598         return err ? err : len;
3599 }
3600
3601 static struct md_sysfs_entry md_size =
3602 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
3603
3604
3605 /* Metadata version.
3606  * This is one of
3607  *   'none' for arrays with no metadata (good luck...)
3608  *   'external' for arrays with externally managed metadata,
3609  * or N.M for internally known formats
3610  */
3611 static ssize_t
3612 metadata_show(mddev_t *mddev, char *page)
3613 {
3614         if (mddev->persistent)
3615                 return sprintf(page, "%d.%d\n",
3616                                mddev->major_version, mddev->minor_version);
3617         else if (mddev->external)
3618                 return sprintf(page, "external:%s\n", mddev->metadata_type);
3619         else
3620                 return sprintf(page, "none\n");
3621 }
3622
3623 static ssize_t
3624 metadata_store(mddev_t *mddev, const char *buf, size_t len)
3625 {
3626         int major, minor;
3627         char *e;
3628         /* Changing the details of 'external' metadata is
3629          * always permitted.  Otherwise there must be
3630          * no devices attached to the array.
3631          */
3632         if (mddev->external && strncmp(buf, "external:", 9) == 0)
3633                 ;
3634         else if (!list_empty(&mddev->disks))
3635                 return -EBUSY;
3636
3637         if (cmd_match(buf, "none")) {
3638                 mddev->persistent = 0;
3639                 mddev->external = 0;
3640                 mddev->major_version = 0;
3641                 mddev->minor_version = 90;
3642                 return len;
3643         }
3644         if (strncmp(buf, "external:", 9) == 0) {
3645                 size_t namelen = len-9;
3646                 if (namelen >= sizeof(mddev->metadata_type))
3647                         namelen = sizeof(mddev->metadata_type)-1;
3648                 strncpy(mddev->metadata_type, buf+9, namelen);
3649                 mddev->metadata_type[namelen] = 0;
3650                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
3651                         mddev->metadata_type[--namelen] = 0;
3652                 mddev->persistent = 0;
3653                 mddev->external = 1;
3654                 mddev->major_version = 0;
3655                 mddev->minor_version = 90;
3656                 return len;
3657         }
3658         major = simple_strtoul(buf, &e, 10);
3659         if (e==buf || *e != '.')
3660                 return -EINVAL;
3661         buf = e+1;
3662         minor = simple_strtoul(buf, &e, 10);
3663         if (e==buf || (*e && *e != '\n') )
3664                 return -EINVAL;
3665         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
3666                 return -ENOENT;
3667         mddev->major_version = major;
3668         mddev->minor_version = minor;
3669         mddev->persistent = 1;
3670         mddev->external = 0;
3671         return len;
3672 }
3673
3674 static struct md_sysfs_entry md_metadata =
3675 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
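/* Examples (illustrative): selecting v1.2 native metadata on an empty array,
 * or declaring externally managed metadata (the "imsm" type name is just an
 * example of what a user-space metadata manager might write):
 *
 *	echo 1.2 > /sys/block/md0/md/metadata_version
 *	echo external:imsm > /sys/block/md0/md/metadata_version
 */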
3676
3677 static ssize_t
3678 action_show(mddev_t *mddev, char *page)
3679 {
3680         char *type = "idle";
3681         if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3682                 type = "frozen";
3683         else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3684             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
3685                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3686                         type = "reshape";
3687                 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3688                         if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3689                                 type = "resync";
3690                         else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
3691                                 type = "check";
3692                         else
3693                                 type = "repair";
3694                 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
3695                         type = "recover";
3696         }
3697         return sprintf(page, "%s\n", type);
3698 }
3699
3700 static ssize_t
3701 action_store(mddev_t *mddev, const char *page, size_t len)
3702 {
3703         if (!mddev->pers || !mddev->pers->sync_request)
3704                 return -EINVAL;
3705
3706         if (cmd_match(page, "frozen"))
3707                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3708         else
3709                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3710
3711         if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
3712                 if (mddev->sync_thread) {
3713                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3714                         md_unregister_thread(mddev->sync_thread);
3715                         mddev->sync_thread = NULL;
3716                         mddev->recovery = 0;
3717                 }
3718         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3719                    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3720                 return -EBUSY;
3721         else if (cmd_match(page, "resync"))
3722                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3723         else if (cmd_match(page, "recover")) {
3724                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3725                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3726         } else if (cmd_match(page, "reshape")) {
3727                 int err;
3728                 if (mddev->pers->start_reshape == NULL)
3729                         return -EINVAL;
3730                 err = mddev->pers->start_reshape(mddev);
3731                 if (err)
3732                         return err;
3733                 sysfs_notify(&mddev->kobj, NULL, "degraded");
3734         } else {
3735                 if (cmd_match(page, "check"))
3736                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3737                 else if (!cmd_match(page, "repair"))
3738                         return -EINVAL;
3739                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3740                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3741         }
3742         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3743         md_wakeup_thread(mddev->thread);
3744         sysfs_notify_dirent(mddev->sysfs_action);
3745         return len;
3746 }
3747
3748 static ssize_t
3749 mismatch_cnt_show(mddev_t *mddev, char *page)
3750 {
3751         return sprintf(page, "%llu\n",
3752                        (unsigned long long) mddev->resync_mismatches);
3753 }
3754
3755 static struct md_sysfs_entry md_scan_mode =
3756 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
3757
3758
3759 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
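/* Example (illustrative): a scrub is started by writing to sync_action and
 * its result read back from mismatch_cnt:
 *
 *	echo check > /sys/block/md0/md/sync_action
 *	cat /sys/block/md0/md/mismatch_cnt
 *
 * "repair" behaves like "check" but also rewrites inconsistent stripes.
 */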
3760
3761 static ssize_t
3762 sync_min_show(mddev_t *mddev, char *page)
3763 {
3764         return sprintf(page, "%d (%s)\n", speed_min(mddev),
3765                        mddev->sync_speed_min ? "local": "system");
3766 }
3767
3768 static ssize_t
3769 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
3770 {
3771         int min;
3772         char *e;
3773         if (strncmp(buf, "system", 6)==0) {
3774                 mddev->sync_speed_min = 0;
3775                 return len;
3776         }
3777         min = simple_strtoul(buf, &e, 10);
3778         if (buf == e || (*e && *e != '\n') || min <= 0)
3779                 return -EINVAL;
3780         mddev->sync_speed_min = min;
3781         return len;
3782 }
3783
3784 static struct md_sysfs_entry md_sync_min =
3785 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
3786
3787 static ssize_t
3788 sync_max_show(mddev_t *mddev, char *page)
3789 {
3790         return sprintf(page, "%d (%s)\n", speed_max(mddev),
3791                        mddev->sync_speed_max ? "local": "system");
3792 }
3793
3794 static ssize_t
3795 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
3796 {
3797         int max;
3798         char *e;
3799         if (strncmp(buf, "system", 6)==0) {
3800                 mddev->sync_speed_max = 0;
3801                 return len;
3802         }
3803         max = simple_strtoul(buf, &e, 10);
3804         if (buf == e || (*e && *e != '\n') || max <= 0)
3805                 return -EINVAL;
3806         mddev->sync_speed_max = max;
3807         return len;
3808 }
3809
3810 static struct md_sysfs_entry md_sync_max =
3811 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
3812
3813 static ssize_t
3814 degraded_show(mddev_t *mddev, char *page)
3815 {
3816         return sprintf(page, "%d\n", mddev->degraded);
3817 }
3818 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
3819
3820 static ssize_t
3821 sync_force_parallel_show(mddev_t *mddev, char *page)
3822 {
3823         return sprintf(page, "%d\n", mddev->parallel_resync);
3824 }
3825
3826 static ssize_t
3827 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
3828 {
3829         long n;
3830
3831         if (strict_strtol(buf, 10, &n))
3832                 return -EINVAL;
3833
3834         if (n != 0 && n != 1)
3835                 return -EINVAL;
3836
3837         mddev->parallel_resync = n;
3838
3839         if (mddev->sync_thread)
3840                 wake_up(&resync_wait);
3841
3842         return len;
3843 }
3844
3845 /* force parallel resync, even with shared block devices */
3846 static struct md_sysfs_entry md_sync_force_parallel =
3847 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
3848        sync_force_parallel_show, sync_force_parallel_store);
3849
3850 static ssize_t
3851 sync_speed_show(mddev_t *mddev, char *page)
3852 {
3853         unsigned long resync, dt, db;
3854         if (mddev->curr_resync == 0)
3855                 return sprintf(page, "none\n");
3856         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
3857         dt = (jiffies - mddev->resync_mark) / HZ;
3858         if (!dt) dt++;
3859         db = resync - mddev->resync_mark_cnt;
3860         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
3861 }
3862
3863 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
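/* Worked example (illustrative numbers): if 409600 sectors completed since
 * resync_mark and dt is 10 seconds, sync_speed reports
 * 409600 / 10 / 2 = 20480 K/sec; the final /2 converts 512-byte sectors
 * to KiB.
 */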
3864
3865 static ssize_t
3866 sync_completed_show(mddev_t *mddev, char *page)
3867 {
3868         unsigned long long max_sectors, resync;
3869
3870         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3871                 return sprintf(page, "none\n");
3872
3873         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3874                 max_sectors = mddev->resync_max_sectors;
3875         else
3876                 max_sectors = mddev->dev_sectors;
3877
3878         resync = mddev->curr_resync_completed;
3879         return sprintf(page, "%llu / %llu\n", resync, max_sectors);
3880 }
3881
3882 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
3883
3884 static ssize_t
3885 min_sync_show(mddev_t *mddev, char *page)
3886 {
3887         return sprintf(page, "%llu\n",
3888                        (unsigned long long)mddev->resync_min);
3889 }
3890 static ssize_t
3891 min_sync_store(mddev_t *mddev, const char *buf, size_t len)
3892 {
3893         unsigned long long min;
3894         if (strict_strtoull(buf, 10, &min))
3895                 return -EINVAL;
3896         if (min > mddev->resync_max)
3897                 return -EINVAL;
3898         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3899                 return -EBUSY;
3900
3901         /* Must be a multiple of chunk_size */
3902         if (mddev->chunk_sectors) {
3903                 sector_t temp = min;
3904                 if (sector_div(temp, mddev->chunk_sectors))
3905                         return -EINVAL;
3906         }
3907         mddev->resync_min = min;
3908
3909         return len;
3910 }
3911
3912 static struct md_sysfs_entry md_min_sync =
3913 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
3914
3915 static ssize_t
3916 max_sync_show(mddev_t *mddev, char *page)
3917 {
3918         if (mddev->resync_max == MaxSector)
3919                 return sprintf(page, "max\n");
3920         else
3921                 return sprintf(page, "%llu\n",
3922                                (unsigned long long)mddev->resync_max);
3923 }
3924 static ssize_t
3925 max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3926 {
3927         if (strncmp(buf, "max", 3) == 0)
3928                 mddev->resync_max = MaxSector;
3929         else {
3930                 unsigned long long max;
3931                 if (strict_strtoull(buf, 10, &max))
3932                         return -EINVAL;
3933                 if (max < mddev->resync_min)
3934                         return -EINVAL;
3935                 if (max < mddev->resync_max &&
3936                     mddev->ro == 0 &&
3937                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3938                         return -EBUSY;
3939
3940                 /* Must be a multiple of chunk_size */
3941                 if (mddev->chunk_sectors) {
3942                         sector_t temp = max;
3943                         if (sector_div(temp, mddev->chunk_sectors))
3944                                 return -EINVAL;
3945                 }
3946                 mddev->resync_max = max;
3947         }
3948         wake_up(&mddev->recovery_wait);
3949         return len;
3950 }
3951
3952 static struct md_sysfs_entry md_max_sync =
3953 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
3954
3955 static ssize_t
3956 suspend_lo_show(mddev_t *mddev, char *page)
3957 {
3958         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
3959 }
3960
3961 static ssize_t
3962 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
3963 {
3964         char *e;
3965         unsigned long long new = simple_strtoull(buf, &e, 10);
3966
3967         if (mddev->pers == NULL ||
3968             mddev->pers->quiesce == NULL)
3969                 return -EINVAL;
3970         if (buf == e || (*e && *e != '\n'))
3971                 return -EINVAL;
3972         if (new >= mddev->suspend_hi ||
3973             (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
3974                 mddev->suspend_lo = new;
3975                 mddev->pers->quiesce(mddev, 2);
3976                 return len;
3977         } else
3978                 return -EINVAL;
3979 }
3980 static struct md_sysfs_entry md_suspend_lo =
3981 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
3982
3983
3984 static ssize_t
3985 suspend_hi_show(mddev_t *mddev, char *page)
3986 {
3987         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
3988 }
3989
3990 static ssize_t
3991 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3992 {
3993         char *e;
3994         unsigned long long new = simple_strtoull(buf, &e, 10);
3995
3996         if (mddev->pers == NULL ||
3997             mddev->pers->quiesce == NULL)
3998                 return -EINVAL;
3999         if (buf == e || (*e && *e != '\n'))
4000                 return -EINVAL;
4001         if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
4002             (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
4003                 mddev->suspend_hi = new;
4004                 mddev->pers->quiesce(mddev, 1);
4005                 mddev->pers->quiesce(mddev, 0);
4006                 return len;
4007         } else
4008                 return -EINVAL;
4009 }
4010 static struct md_sysfs_entry md_suspend_hi =
4011 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
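/* Example (illustrative; sector values are assumptions): user-space reshape
 * helpers quiesce a sector window before relocating it, e.g.
 *
 *	echo 0     > /sys/block/md0/md/suspend_lo
 *	echo 16384 > /sys/block/md0/md/suspend_hi   # block IO to [0, 16384)
 */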
4012
4013 static ssize_t
4014 reshape_position_show(mddev_t *mddev, char *page)
4015 {
4016         if (mddev->reshape_position != MaxSector)
4017                 return sprintf(page, "%llu\n",
4018                                (unsigned long long)mddev->reshape_position);
4019         strcpy(page, "none\n");
4020         return 5;
4021 }
4022
4023 static ssize_t
4024 reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
4025 {
4026         char *e;
4027         unsigned long long new = simple_strtoull(buf, &e, 10);
4028         if (mddev->pers)
4029                 return -EBUSY;
4030         if (buf == e || (*e && *e != '\n'))
4031                 return -EINVAL;
4032         mddev->reshape_position = new;
4033         mddev->delta_disks = 0;
4034         mddev->new_level = mddev->level;
4035         mddev->new_layout = mddev->layout;
4036         mddev->new_chunk_sectors = mddev->chunk_sectors;
4037         return len;
4038 }
4039
4040 static struct md_sysfs_entry md_reshape_position =
4041 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4042        reshape_position_store);
4043
4044 static ssize_t
4045 array_size_show(mddev_t *mddev, char *page)
4046 {
4047         if (mddev->external_size)
4048                 return sprintf(page, "%llu\n",
4049                                (unsigned long long)mddev->array_sectors/2);
4050         else
4051                 return sprintf(page, "default\n");
4052 }
4053
4054 static ssize_t
4055 array_size_store(mddev_t *mddev, const char *buf, size_t len)
4056 {
4057         sector_t sectors;
4058
4059         if (strncmp(buf, "default", 7) == 0) {
4060                 if (mddev->pers)
4061                         sectors = mddev->pers->size(mddev, 0, 0);
4062                 else
4063                         sectors = mddev->array_sectors;
4064
4065                 mddev->external_size = 0;
4066         } else {
4067                 if (strict_blocks_to_sectors(buf, &sectors) < 0)
4068                         return -EINVAL;
4069                 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4070                         return -E2BIG;
4071
4072                 mddev->external_size = 1;
4073         }
4074
4075         mddev->array_sectors = sectors;
4076         set_capacity(mddev->gendisk, mddev->array_sectors);
4077         if (mddev->pers)
4078                 revalidate_disk(mddev->gendisk);
4079
4080         return len;
4081 }
4082
4083 static struct md_sysfs_entry md_array_size =
4084 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
4085        array_size_store);
4086
4087 static struct attribute *md_default_attrs[] = {
4088         &md_level.attr,
4089         &md_layout.attr,
4090         &md_raid_disks.attr,
4091         &md_chunk_size.attr,
4092         &md_size.attr,
4093         &md_resync_start.attr,
4094         &md_metadata.attr,
4095         &md_new_device.attr,
4096         &md_safe_delay.attr,
4097         &md_array_state.attr,
4098         &md_reshape_position.attr,
4099         &md_array_size.attr,
4100         &max_corr_read_errors.attr,
4101         NULL,
4102 };
4103
4104 static struct attribute *md_redundancy_attrs[] = {
4105         &md_scan_mode.attr,
4106         &md_mismatches.attr,
4107         &md_sync_min.attr,
4108         &md_sync_max.attr,
4109         &md_sync_speed.attr,
4110         &md_sync_force_parallel.attr,
4111         &md_sync_completed.attr,
4112         &md_min_sync.attr,
4113         &md_max_sync.attr,
4114         &md_suspend_lo.attr,
4115         &md_suspend_hi.attr,
4116         &md_bitmap.attr,
4117         &md_degraded.attr,
4118         NULL,
4119 };
4120 static struct attribute_group md_redundancy_group = {
4121         .name = NULL,
4122         .attrs = md_redundancy_attrs,
4123 };
4124
4125
4126 static ssize_t
4127 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4128 {
4129         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4130         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
4131         ssize_t rv;
4132
4133         if (!entry->show)
4134                 return -EIO;
4135         rv = mddev_lock(mddev);
4136         if (!rv) {
4137                 rv = entry->show(mddev, page);
4138                 mddev_unlock(mddev);
4139         }
4140         return rv;
4141 }
4142
4143 static ssize_t
4144 md_attr_store(struct kobject *kobj, struct attribute *attr,
4145               const char *page, size_t length)
4146 {
4147         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4148         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
4149         ssize_t rv;
4150
4151         if (!entry->store)
4152                 return -EIO;
4153         if (!capable(CAP_SYS_ADMIN))
4154                 return -EACCES;
4155         rv = mddev_lock(mddev);
4156         if (mddev->hold_active == UNTIL_IOCTL)
4157                 mddev->hold_active = 0;
4158         if (!rv) {
4159                 rv = entry->store(mddev, page, length);
4160                 mddev_unlock(mddev);
4161         }
4162         return rv;
4163 }
4164
4165 static void md_free(struct kobject *ko)
4166 {
4167         mddev_t *mddev = container_of(ko, mddev_t, kobj);
4168
4169         if (mddev->sysfs_state)
4170                 sysfs_put(mddev->sysfs_state);
4171
4172         if (mddev->gendisk) {
4173                 del_gendisk(mddev->gendisk);
4174                 put_disk(mddev->gendisk);
4175         }
4176         if (mddev->queue)
4177                 blk_cleanup_queue(mddev->queue);
4178
4179         kfree(mddev);
4180 }
4181
4182 static const struct sysfs_ops md_sysfs_ops = {
4183         .show   = md_attr_show,
4184         .store  = md_attr_store,
4185 };
4186 static struct kobj_type md_ktype = {
4187         .release        = md_free,
4188         .sysfs_ops      = &md_sysfs_ops,
4189         .default_attrs  = md_default_attrs,
4190 };
4191
4192 int mdp_major = 0;
4193
4194 static void mddev_delayed_delete(struct work_struct *ws)
4195 {
4196         mddev_t *mddev = container_of(ws, mddev_t, del_work);
4197
4198         sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
4199         kobject_del(&mddev->kobj);
4200         kobject_put(&mddev->kobj);
4201 }
4202
4203 static int md_alloc(dev_t dev, char *name)
4204 {
4205         static DEFINE_MUTEX(disks_mutex);
4206         mddev_t *mddev = mddev_find(dev);
4207         struct gendisk *disk;
4208         int partitioned;
4209         int shift;
4210         int unit;
4211         int error;
4212
4213         if (!mddev)
4214                 return -ENODEV;
4215
4216         partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
4217         shift = partitioned ? MdpMinorShift : 0;
4218         unit = MINOR(mddev->unit) >> shift;
4219
4220         /* wait for any previous instance of this device
4221          * to be completely removed (mddev_delayed_delete).
4222          */
4223         flush_scheduled_work();
4224
4225         mutex_lock(&disks_mutex);
4226         error = -EEXIST;
4227         if (mddev->gendisk)
4228                 goto abort;
4229
4230         if (name) {
4231                 /* Need to ensure that 'name' is not a duplicate.
4232                  */
4233                 mddev_t *mddev2;
4234                 spin_lock(&all_mddevs_lock);
4235
4236                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
4237                         if (mddev2->gendisk &&
4238                             strcmp(mddev2->gendisk->disk_name, name) == 0) {
4239                                 spin_unlock(&all_mddevs_lock);
4240                                 goto abort;
4241                         }
4242                 spin_unlock(&all_mddevs_lock);
4243         }
4244
4245         error = -ENOMEM;
4246         mddev->queue = blk_alloc_queue(GFP_KERNEL);
4247         if (!mddev->queue)
4248                 goto abort;
4249         mddev->queue->queuedata = mddev;
4250
4251         /* Can be unlocked because the queue is new: no concurrency */
4252         queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
4253
4254         blk_queue_make_request(mddev->queue, md_make_request);
4255
4256         disk = alloc_disk(1 << shift);
4257         if (!disk) {
4258                 blk_cleanup_queue(mddev->queue);
4259                 mddev->queue = NULL;
4260                 goto abort;
4261         }
4262         disk->major = MAJOR(mddev->unit);
4263         disk->first_minor = unit << shift;
4264         if (name)
4265                 strcpy(disk->disk_name, name);
4266         else if (partitioned)
4267                 sprintf(disk->disk_name, "md_d%d", unit);
4268         else
4269                 sprintf(disk->disk_name, "md%d", unit);
4270         disk->fops = &md_fops;
4271         disk->private_data = mddev;
4272         disk->queue = mddev->queue;
4273         /* Allow extended partitions.  This makes the
4274          * 'mdp' device redundant, but we can't really
4275          * remove it now.
4276          */
4277         disk->flags |= GENHD_FL_EXT_DEVT;
4278         add_disk(disk);
4279         mddev->gendisk = disk;
4280         error = kobject_init_and_add(&mddev->kobj, &md_ktype,
4281                                      &disk_to_dev(disk)->kobj, "%s", "md");
4282         if (error) {
4283                 /* This isn't possible, but as kobject_init_and_add is marked
4284                  * __must_check, we must do something with the result
4285                  */
4286                 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
4287                        disk->disk_name);
4288                 error = 0;
4289         }
4290         if (sysfs_create_group(&mddev->kobj, &md_bitmap_group))
4291                 printk(KERN_DEBUG "md: cannot register bitmap attributes for %s\n", disk->disk_name);
4292  abort:
4293         mutex_unlock(&disks_mutex);
4294         if (!error) {
4295                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
4296                 mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, NULL, "array_state");
4297         }
4298         mddev_put(mddev);
4299         return error;
4300 }
4301
4302 static struct kobject *md_probe(dev_t dev, int *part, void *data)
4303 {
4304         md_alloc(dev, NULL);
4305         return NULL;
4306 }
4307
4308 static int add_named_array(const char *val, struct kernel_param *kp)
4309 {
4310         /* val must be "md_*" where * is not all digits.
4311          * We allocate an array with a large free minor number, and
4312          * set the name to val.  val must not already be an active name.
4313          */
4314         int len = strlen(val);
4315         char buf[DISK_NAME_LEN];
4316
4317         while (len && val[len-1] == '\n')
4318                 len--;
4319         if (len >= DISK_NAME_LEN)
4320                 return -E2BIG;
4321         strlcpy(buf, val, len+1);
4322         if (strncmp(buf, "md_", 3) != 0)
4323                 return -EINVAL;
4324         return md_alloc(0, buf);
4325 }
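
/* Hypothetical userspace sketch, assuming this handler is wired up as
 * md_mod's "new_array" module parameter (as in mainline md): creating a
 * named array is just a sysfs write, which ends up in md_alloc(0, buf).
 *
 *	int fd = open("/sys/module/md_mod/parameters/new_array", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "md_home", 7);
 *		close(fd);
 *	}
 */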
4326
4327 static void md_safemode_timeout(unsigned long data)
4328 {
4329         mddev_t *mddev = (mddev_t *) data;
4330
4331         if (!atomic_read(&mddev->writes_pending)) {
4332                 mddev->safemode = 1;
4333                 if (mddev->external)
4334                         sysfs_notify_dirent(mddev->sysfs_state);
4335         }
4336         md_wakeup_thread(mddev->thread);
4337 }
4338
4339 static int start_dirty_degraded;
4340
4341 static int md_run(mddev_t *mddev)
4342 {
4343         int err;
4344         mdk_rdev_t *rdev;
4345         struct mdk_personality *pers;
4346
4347         if (list_empty(&mddev->disks))
4348                 /* cannot run an array with no devices. */
4349                 return -EINVAL;
4350
4351         if (mddev->pers)
4352                 return -EBUSY;
4353         /* Cannot run until previous stop completes properly */
4354         if (mddev->sysfs_active)
4355                 return -EBUSY;
4356
4357         /*
4358          * Analyze all RAID superblock(s)
4359          */
4360         if (!mddev->raid_disks) {
4361                 if (!mddev->persistent)
4362                         return -EINVAL;
4363                 analyze_sbs(mddev);
4364         }
4365
4366         if (mddev->level != LEVEL_NONE)
4367                 request_module("md-level-%d", mddev->level);
4368         else if (mddev->clevel[0])
4369                 request_module("md-%s", mddev->clevel);
4370
4371         /*
4372          * Drop all container device buffers, from now on
4373          * the only valid external interface is through the md
4374          * device.
4375          */
4376         list_for_each_entry(rdev, &mddev->disks, same_set) {
4377                 if (test_bit(Faulty, &rdev->flags))
4378                         continue;
4379                 sync_blockdev(rdev->bdev);
4380                 invalidate_bdev(rdev->bdev);
4381
4382                 /* Perform some consistency tests on the device.
4383                  * We don't want the data to overlap the metadata.
4384                  * Internal bitmap issues have been handled elsewhere.
4385                  */
4386                 if (rdev->data_offset < rdev->sb_start) {
4387                         if (mddev->dev_sectors &&
4388                             rdev->data_offset + mddev->dev_sectors
4389                             > rdev->sb_start) {
4390                                 printk(KERN_WARNING "md: %s: data overlaps metadata\n",
4391                                        mdname(mddev));
4392                                 return -EINVAL;
4393                         }
4394                 } else {
4395                         if (rdev->sb_start + rdev->sb_size/512
4396                             > rdev->data_offset) {
4397                                 printk(KERN_WARNING "md: %s: metadata overlaps data\n",
4398                                        mdname(mddev));
4399                                 return -EINVAL;
4400                         }
4401                 }
4402                 sysfs_notify_dirent(rdev->sysfs_state);
4403         }
4404
4405         spin_lock(&pers_lock);
4406         pers = find_pers(mddev->level, mddev->clevel);
4407         if (!pers || !try_module_get(pers->owner)) {
4408                 spin_unlock(&pers_lock);
4409                 if (mddev->level != LEVEL_NONE)
4410                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
4411                                mddev->level);
4412                 else
4413                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
4414                                mddev->clevel);
4415                 return -EINVAL;
4416         }
4417         mddev->pers = pers;
4418         spin_unlock(&pers_lock);
4419         if (mddev->level != pers->level) {
4420                 mddev->level = pers->level;
4421                 mddev->new_level = pers->level;
4422         }
4423         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4424
4425         if (mddev->reshape_position != MaxSector &&
4426             pers->start_reshape == NULL) {
4427                 /* This personality cannot handle reshaping... */
4428                 mddev->pers = NULL;
4429                 module_put(pers->owner);
4430                 return -EINVAL;
4431         }
4432
4433         if (pers->sync_request) {
4434                 /* Warn if this is a potentially silly
4435                  * configuration.
4436                  */
4437                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4438                 mdk_rdev_t *rdev2;
4439                 int warned = 0;
4440
4441                 list_for_each_entry(rdev, &mddev->disks, same_set)
4442                         list_for_each_entry(rdev2, &mddev->disks, same_set) {
4443                                 if (rdev < rdev2 &&
4444                                     rdev->bdev->bd_contains ==
4445                                     rdev2->bdev->bd_contains) {
4446                                         printk(KERN_WARNING
4447                                                "%s: WARNING: %s appears to be"
4448                                                " on the same physical disk as"
4449                                                " %s.\n",
4450                                                mdname(mddev),
4451                                                bdevname(rdev->bdev,b),
4452                                                bdevname(rdev2->bdev,b2));
4453                                         warned = 1;
4454                                 }
4455                         }
4456
4457                 if (warned)
4458                         printk(KERN_WARNING
4459                                "True protection against single-disk"
4460                                " failure might be compromised.\n");
4461         }
4462
4463         mddev->recovery = 0;
4464         /* may be overridden by personality */
4465         mddev->resync_max_sectors = mddev->dev_sectors;
4466
4467         mddev->barriers_work = 1;
4468         mddev->ok_start_degraded = start_dirty_degraded;
4469
4470         if (start_readonly && mddev->ro == 0)
4471                 mddev->ro = 2; /* read-only, but switch on first write */
4472
4473         err = mddev->pers->run(mddev);
4474         if (err)
4475                 printk(KERN_ERR "md: pers->run() failed ...\n");
4476         else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
4477                 WARN_ONCE(!mddev->external_size, "%s: default size too small,"
4478                           " but 'external_size' not in effect?\n", __func__);
4479                 printk(KERN_ERR
4480                        "md: invalid array_size %llu > default size %llu\n",
4481                        (unsigned long long)mddev->array_sectors / 2,
4482                        (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
4483                 err = -EINVAL;
4484                 mddev->pers->stop(mddev);
4485         }
4486         if (err == 0 && mddev->pers->sync_request) {
4487                 err = bitmap_create(mddev);
4488                 if (err) {
4489                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
4490                                mdname(mddev), err);
4491                         mddev->pers->stop(mddev);
4492                 }
4493         }
4494         if (err) {
4495                 module_put(mddev->pers->owner);
4496                 mddev->pers = NULL;
4497                 bitmap_destroy(mddev);
4498                 return err;
4499         }
4500         if (mddev->pers->sync_request) {
4501                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4502                         printk(KERN_WARNING
4503                                "md: cannot register extra attributes for %s\n",
4504                                mdname(mddev));
4505                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
4506         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
4507                 mddev->ro = 0;
4508
4509         atomic_set(&mddev->writes_pending,0);
4510         atomic_set(&mddev->max_corr_read_errors,
4511                    MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
4512         mddev->safemode = 0;
4513         mddev->safemode_timer.function = md_safemode_timeout;
4514         mddev->safemode_timer.data = (unsigned long) mddev;
4515         mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
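        /* For example, with HZ == 1000 this is 201 jiffies; with HZ == 100
         * it is 21 jiffies (210 msec).  The "+1" keeps the delay from
         * rounding down to zero jiffies on very low HZ values.
         */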
4516         mddev->in_sync = 1;
4517
4518         list_for_each_entry(rdev, &mddev->disks, same_set)
4519                 if (rdev->raid_disk >= 0) {
4520                         char nm[20];
4521                         sprintf(nm, "rd%d", rdev->raid_disk);
4522                         if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
4523                                 printk(KERN_WARNING "md: cannot register %s for %s\n",
4524                                        nm, mdname(mddev));
4525                 }
4526
4527         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4528
4529         if (mddev->flags)
4530                 md_update_sb(mddev, 0);
4531
4532         md_wakeup_thread(mddev->thread);
4533         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4534
4535         md_new_event(mddev);
4536         sysfs_notify_dirent(mddev->sysfs_state);
4537         if (mddev->sysfs_action)
4538                 sysfs_notify_dirent(mddev->sysfs_action);
4539         sysfs_notify(&mddev->kobj, NULL, "degraded");
4540         return 0;
4541 }
4542
4543 static int do_md_run(mddev_t *mddev)
4544 {
4545         int err;
4546
4547         err = md_run(mddev);
4548         if (err)
4549                 goto out;
4550
4551         set_capacity(mddev->gendisk, mddev->array_sectors);
4552         revalidate_disk(mddev->gendisk);
4553         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4554 out:
4555         return err;
4556 }
4557
4558 static int restart_array(mddev_t *mddev)
4559 {
4560         struct gendisk *disk = mddev->gendisk;
4561
4562         /* Complain if it has no devices */
4563         if (list_empty(&mddev->disks))
4564                 return -ENXIO;
4565         if (!mddev->pers)
4566                 return -EINVAL;
4567         if (!mddev->ro)
4568                 return -EBUSY;
4569         mddev->safemode = 0;
4570         mddev->ro = 0;
4571         set_disk_ro(disk, 0);
4572         printk(KERN_INFO "md: %s switched to read-write mode.\n",
4573                 mdname(mddev));
4574         /* Kick recovery or resync if necessary */
4575         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4576         md_wakeup_thread(mddev->thread);
4577         md_wakeup_thread(mddev->sync_thread);
4578         sysfs_notify_dirent(mddev->sysfs_state);
4579         return 0;
4580 }
4581
4582 /* similar to deny_write_access, but accounts for our holding a reference
4583  * to the file ourselves */
4584 static int deny_bitmap_write_access(struct file * file)
4585 {
4586         struct inode *inode = file->f_mapping->host;
4587
4588         spin_lock(&inode->i_lock);
4589         if (atomic_read(&inode->i_writecount) > 1) {
4590                 spin_unlock(&inode->i_lock);
4591                 return -ETXTBSY;
4592         }
4593         atomic_set(&inode->i_writecount, -1);
4594         spin_unlock(&inode->i_lock);
4595
4596         return 0;
4597 }
4598
4599 void restore_bitmap_write_access(struct file *file)
4600 {
4601         struct inode *inode = file->f_mapping->host;
4602
4603         spin_lock(&inode->i_lock);
4604         atomic_set(&inode->i_writecount, 1);
4605         spin_unlock(&inode->i_lock);
4606 }
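
/* Note on the two helpers above: i_writecount == -1 is essentially the
 * sentinel that deny_write_access() uses for running executables, so
 * while it is set, any get_write_access() on the bitmap file fails with
 * -ETXTBSY; restore_bitmap_write_access() drops back to 1, i.e. only
 * our own reference to the file remains.
 */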
4607
4608 static void md_clean(mddev_t *mddev)
4609 {
4610         mddev->array_sectors = 0;
4611         mddev->external_size = 0;
4612         mddev->dev_sectors = 0;
4613         mddev->raid_disks = 0;
4614         mddev->recovery_cp = 0;
4615         mddev->resync_min = 0;
4616         mddev->resync_max = MaxSector;
4617         mddev->reshape_position = MaxSector;
4618         mddev->external = 0;
4619         mddev->persistent = 0;
4620         mddev->level = LEVEL_NONE;
4621         mddev->clevel[0] = 0;
4622         mddev->flags = 0;
4623         mddev->ro = 0;
4624         mddev->metadata_type[0] = 0;
4625         mddev->chunk_sectors = 0;
4626         mddev->ctime = mddev->utime = 0;
4627         mddev->layout = 0;
4628         mddev->max_disks = 0;
4629         mddev->events = 0;
4630         mddev->can_decrease_events = 0;
4631         mddev->delta_disks = 0;
4632         mddev->new_level = LEVEL_NONE;
4633         mddev->new_layout = 0;
4634         mddev->new_chunk_sectors = 0;
4635         mddev->curr_resync = 0;
4636         mddev->resync_mismatches = 0;
4637         mddev->suspend_lo = mddev->suspend_hi = 0;
4638         mddev->sync_speed_min = mddev->sync_speed_max = 0;
4639         mddev->recovery = 0;
4640         mddev->in_sync = 0;
4641         mddev->degraded = 0;
4642         mddev->barriers_work = 0;
4643         mddev->safemode = 0;
4644         mddev->bitmap_info.offset = 0;
4645         mddev->bitmap_info.default_offset = 0;
4646         mddev->bitmap_info.chunksize = 0;
4647         mddev->bitmap_info.daemon_sleep = 0;
4648         mddev->bitmap_info.max_write_behind = 0;
4649 }
4650
4651 static void md_stop_writes(mddev_t *mddev)
4652 {
4653         if (mddev->sync_thread) {
4654                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4655                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4656                 md_unregister_thread(mddev->sync_thread);
4657                 mddev->sync_thread = NULL;
4658         }
4659
4660         del_timer_sync(&mddev->safemode_timer);
4661
4662         bitmap_flush(mddev);
4663         md_super_wait(mddev);
4664
4665         if (!mddev->in_sync || mddev->flags) {
4666                 /* mark array as shutdown cleanly */
4667                 mddev->in_sync = 1;
4668                 md_update_sb(mddev, 1);
4669         }
4670 }
4671
4672 static void md_stop(mddev_t *mddev)
4673 {
4674         md_stop_writes(mddev);
4675
4676         mddev->pers->stop(mddev);
4677         if (mddev->pers->sync_request && mddev->to_remove == NULL)
4678                 mddev->to_remove = &md_redundancy_group;
4679         module_put(mddev->pers->owner);
4680         mddev->pers = NULL;
4681         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4682 }
4683
4684 static int md_set_readonly(mddev_t *mddev, int is_open)
4685 {
4686         int err = 0;
4687         mutex_lock(&mddev->open_mutex);
4688         if (atomic_read(&mddev->openers) > is_open) {
4689                 printk(KERN_WARNING "md: %s still in use.\n", mdname(mddev));
4690                 err = -EBUSY;
4691                 goto out;
4692         }
4693         if (mddev->pers) {
4694                 md_stop_writes(mddev);
4695
4696                 err  = -ENXIO;
4697                 if (mddev->ro==1)
4698                         goto out;
4699                 mddev->ro = 1;
4700                 set_disk_ro(mddev->gendisk, 1);
4701                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4702                 sysfs_notify_dirent(mddev->sysfs_state);
4703                 err = 0;        
4704         }
4705 out:
4706         mutex_unlock(&mddev->open_mutex);
4707         return err;
4708 }
4709
4710 /* mode:
4711  *   0 - completely stop and disassemble array
4712  *   2 - stop but do not disassemble array
4713  */
4714 static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4715 {
4716         int err = 0, revalidate = 0;
4717         struct gendisk *disk = mddev->gendisk;
4718         mdk_rdev_t *rdev;
4719
4720         mutex_lock(&mddev->open_mutex);
4721         if (atomic_read(&mddev->openers) > is_open ||
4722             mddev->sysfs_active) {
4723                 printk(KERN_WARNING "md: %s still in use.\n", mdname(mddev));
4724                 err = -EBUSY;
4725         } else if (mddev->pers) {
4726
4727                 if (mddev->ro)
4728                         set_disk_ro(disk, 0);
4729
4730                 md_stop(mddev);
4731                 mddev->queue->merge_bvec_fn = NULL;
4732                 mddev->queue->unplug_fn = NULL;
4733                 mddev->queue->backing_dev_info.congested_fn = NULL;
4734
4735                 /* tell userspace to handle 'inactive' */
4736                 sysfs_notify_dirent(mddev->sysfs_state);
4737
4738                 list_for_each_entry(rdev, &mddev->disks, same_set)
4739                         if (rdev->raid_disk >= 0) {
4740                                 char nm[20];
4741                                 sprintf(nm, "rd%d", rdev->raid_disk);
4742                                 sysfs_remove_link(&mddev->kobj, nm);
4743                         }
4744
4745                 set_capacity(disk, 0);
4746                 revalidate = 1;
4747
4748                 if (mddev->ro)
4749                         mddev->ro = 0;
4750                 
4751                 err = 0;
4752         }
4753         mutex_unlock(&mddev->open_mutex);
4754         if (revalidate)
4755                 revalidate_disk(disk);
4756         if (err)
4757                 return err;
4758         /*
4759          * Free resources if final stop
4760          */
4761         if (mode == 0) {
4762
4763                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
4764
4765                 bitmap_destroy(mddev);
4766                 if (mddev->bitmap_info.file) {
4767                         restore_bitmap_write_access(mddev->bitmap_info.file);
4768                         fput(mddev->bitmap_info.file);
4769                         mddev->bitmap_info.file = NULL;
4770                 }
4771                 mddev->bitmap_info.offset = 0;
4772
4773                 export_array(mddev);
4774
4775                 md_clean(mddev);
4776                 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4777                 if (mddev->hold_active == UNTIL_STOP)
4778                         mddev->hold_active = 0;
4779
4780         }
4781         err = 0;
4782         blk_integrity_unregister(disk);
4783         md_new_event(mddev);
4784         sysfs_notify_dirent(mddev->sysfs_state);
4785         return err;
4786 }
4787
4788 #ifndef MODULE
4789 static void autorun_array(mddev_t *mddev)
4790 {
4791         mdk_rdev_t *rdev;
4792         int err;
4793
4794         if (list_empty(&mddev->disks))
4795                 return;
4796
4797         printk(KERN_INFO "md: running: ");
4798
4799         list_for_each_entry(rdev, &mddev->disks, same_set) {
4800                 char b[BDEVNAME_SIZE];
4801                 printk("<%s>", bdevname(rdev->bdev,b));
4802         }
4803         printk("\n");
4804
4805         err = do_md_run(mddev);
4806         if (err) {
4807                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
4808                 do_md_stop(mddev, 0, 0);
4809         }
4810 }
4811
4812 /*
4813  * Let's try to run arrays based on all disks that have arrived
4814  * until now (those are in pending_raid_disks).
4815  *
4816  * The method: pick the first pending disk, collect all disks with
4817  * the same UUID, remove all from the pending list and put them into
4818  * the 'same_array' list. Then order this list based on superblock
4819  * update time (freshest comes first), kick out 'old' disks and
4820  * compare superblocks. If everything's fine then run it.
4821  *
4822  * If "unit" is allocated, then bump its reference count
4823  */
4824 static void autorun_devices(int part)
4825 {
4826         mdk_rdev_t *rdev0, *rdev, *tmp;
4827         mddev_t *mddev;
4828         char b[BDEVNAME_SIZE];
4829
4830         printk(KERN_INFO "md: autorun ...\n");
4831         while (!list_empty(&pending_raid_disks)) {
4832                 int unit;
4833                 dev_t dev;
4834                 LIST_HEAD(candidates);
4835                 rdev0 = list_entry(pending_raid_disks.next,
4836                                          mdk_rdev_t, same_set);
4837
4838                 printk(KERN_INFO "md: considering %s ...\n",
4839                         bdevname(rdev0->bdev,b));
4840                 INIT_LIST_HEAD(&candidates);
4841                 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
4842                         if (super_90_load(rdev, rdev0, 0) >= 0) {
4843                                 printk(KERN_INFO "md:  adding %s ...\n",
4844                                         bdevname(rdev->bdev,b));
4845                                 list_move(&rdev->same_set, &candidates);
4846                         }
4847                 /*
4848                  * now we have a set of devices, with all of them having
4849                  * mostly sane superblocks. It's time to allocate the
4850                  * mddev.
4851                  */
4852                 if (part) {
4853                         dev = MKDEV(mdp_major,
4854                                     rdev0->preferred_minor << MdpMinorShift);
4855                         unit = MINOR(dev) >> MdpMinorShift;
4856                 } else {
4857                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
4858                         unit = MINOR(dev);
4859                 }
4860                 if (rdev0->preferred_minor != unit) {
4861                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
4862                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
4863                         break;
4864                 }
4865
4866                 md_probe(dev, NULL, NULL);
4867                 mddev = mddev_find(dev);
4868                 if (!mddev || !mddev->gendisk) {
4869                         if (mddev)
4870                                 mddev_put(mddev);
4871                         printk(KERN_ERR
4872                                 "md: cannot allocate memory for md drive.\n");
4873                         break;
4874                 }
4875                 if (mddev_lock(mddev)) 
4876                         printk(KERN_WARNING "md: %s locked, cannot run\n",
4877                                mdname(mddev));
4878                 else if (mddev->raid_disks || mddev->major_version
4879                          || !list_empty(&mddev->disks)) {
4880                         printk(KERN_WARNING 
4881                                 "md: %s already running, cannot run %s\n",
4882                                 mdname(mddev), bdevname(rdev0->bdev,b));
4883                         mddev_unlock(mddev);
4884                 } else {
4885                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
4886                         mddev->persistent = 1;
4887                         rdev_for_each_list(rdev, tmp, &candidates) {
4888                                 list_del_init(&rdev->same_set);
4889                                 if (bind_rdev_to_array(rdev, mddev))
4890                                         export_rdev(rdev);
4891                         }
4892                         autorun_array(mddev);
4893                         mddev_unlock(mddev);
4894                 }
4895                 /* on success, candidates will be empty, on error
4896                  * it won't...
4897                  */
4898                 rdev_for_each_list(rdev, tmp, &candidates) {
4899                         list_del_init(&rdev->same_set);
4900                         export_rdev(rdev);
4901                 }
4902                 mddev_put(mddev);
4903         }
4904         printk(KERN_INFO "md: ... autorun DONE.\n");
4905 }
4906 #endif /* !MODULE */
4907
4908 static int get_version(void __user * arg)
4909 {
4910         mdu_version_t ver;
4911
4912         ver.major = MD_MAJOR_VERSION;
4913         ver.minor = MD_MINOR_VERSION;
4914         ver.patchlevel = MD_PATCHLEVEL_VERSION;
4915
4916         if (copy_to_user(arg, &ver, sizeof(ver)))
4917                 return -EFAULT;
4918
4919         return 0;
4920 }
4921
4922 static int get_array_info(mddev_t * mddev, void __user * arg)
4923 {
4924         mdu_array_info_t info;
4925         int nr,working,insync,failed,spare;
4926         mdk_rdev_t *rdev;
4927
4928         nr=working=insync=failed=spare=0;
4929         list_for_each_entry(rdev, &mddev->disks, same_set) {
4930                 nr++;
4931                 if (test_bit(Faulty, &rdev->flags))
4932                         failed++;
4933                 else {
4934                         working++;
4935                         if (test_bit(In_sync, &rdev->flags))
4936                                 insync++;       
4937                         else
4938                                 spare++;
4939                 }
4940         }
4941
4942         info.major_version = mddev->major_version;
4943         info.minor_version = mddev->minor_version;
4944         info.patch_version = MD_PATCHLEVEL_VERSION;
4945         info.ctime         = mddev->ctime;
4946         info.level         = mddev->level;
4947         info.size          = mddev->dev_sectors / 2;
4948         if (info.size != mddev->dev_sectors / 2) /* overflow */
4949                 info.size = -1;
4950         info.nr_disks      = nr;
4951         info.raid_disks    = mddev->raid_disks;
4952         info.md_minor      = mddev->md_minor;
4953         info.not_persistent= !mddev->persistent;
4954
4955         info.utime         = mddev->utime;
4956         info.state         = 0;
4957         if (mddev->in_sync)
4958                 info.state = (1<<MD_SB_CLEAN);
4959         if (mddev->bitmap && mddev->bitmap_info.offset)
4960                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
4961         info.active_disks  = insync;
4962         info.working_disks = working;
4963         info.failed_disks  = failed;
4964         info.spare_disks   = spare;
4965
4966         info.layout        = mddev->layout;
4967         info.chunk_size    = mddev->chunk_sectors << 9;
4968
4969         if (copy_to_user(arg, &info, sizeof(info)))
4970                 return -EFAULT;
4971
4972         return 0;
4973 }
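
/* Hypothetical userspace sketch (not driver code): reading the summary
 * assembled above via the GET_ARRAY_INFO ioctl, where "md_fd" is an
 * assumed open descriptor on the array device (e.g. /dev/md0).
 *
 *	mdu_array_info_t info;
 *
 *	if (ioctl(md_fd, GET_ARRAY_INFO, &info) == 0)
 *		printf("level %d: %d active, %d working, %d failed\n",
 *		       info.level, info.active_disks,
 *		       info.working_disks, info.failed_disks);
 */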
4974
4975 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
4976 {
4977         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
4978         char *ptr, *buf = NULL;
4979         int err = -ENOMEM;
4980
4981         if (md_allow_write(mddev))
4982                 file = kmalloc(sizeof(*file), GFP_NOIO);
4983         else
4984                 file = kmalloc(sizeof(*file), GFP_KERNEL);
4985
4986         if (!file)
4987                 goto out;
4988
4989         /* bitmap disabled, zero the first byte and copy out */
4990         if (!mddev->bitmap || !mddev->bitmap->file) {
4991                 file->pathname[0] = '\0';
4992                 goto copy_out;
4993         }
4994
4995         buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
4996         if (!buf)
4997                 goto out;
4998
4999         ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
5000         if (IS_ERR(ptr))
5001                 goto out;
5002
5003         strcpy(file->pathname, ptr);
5004
5005 copy_out:
5006         err = 0;
5007         if (copy_to_user(arg, file, sizeof(*file)))
5008                 err = -EFAULT;
5009 out:
5010         kfree(buf);
5011         kfree(file);
5012         return err;
5013 }
5014
5015 static int get_disk_info(mddev_t * mddev, void __user * arg)
5016 {
5017         mdu_disk_info_t info;
5018         mdk_rdev_t *rdev;
5019
5020         if (copy_from_user(&info, arg, sizeof(info)))
5021                 return -EFAULT;
5022
5023         rdev = find_rdev_nr(mddev, info.number);
5024         if (rdev) {
5025                 info.major = MAJOR(rdev->bdev->bd_dev);
5026                 info.minor = MINOR(rdev->bdev->bd_dev);
5027                 info.raid_disk = rdev->raid_disk;
5028                 info.state = 0;
5029                 if (test_bit(Faulty, &rdev->flags))
5030                         info.state |= (1<<MD_DISK_FAULTY);
5031                 else if (test_bit(In_sync, &rdev->flags)) {
5032                         info.state |= (1<<MD_DISK_ACTIVE);
5033                         info.state |= (1<<MD_DISK_SYNC);
5034                 }
5035                 if (test_bit(WriteMostly, &rdev->flags))
5036                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
5037         } else {
5038                 info.major = info.minor = 0;
5039                 info.raid_disk = -1;
5040                 info.state = (1<<MD_DISK_REMOVED);
5041         }
5042
5043         if (copy_to_user(arg, &info, sizeof(info)))
5044                 return -EFAULT;
5045
5046         return 0;
5047 }
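
/* Hypothetical userspace sketch: GET_DISK_INFO is keyed on the slot
 * ("number") filled in by the caller, so tools typically scan slots and
 * skip the MD_DISK_REMOVED placeholders returned for empty ones.
 *
 *	mdu_disk_info_t dinfo;
 *	int i;
 *
 *	for (i = 0; i < MD_SB_DISKS; i++) {
 *		dinfo.number = i;
 *		if (ioctl(md_fd, GET_DISK_INFO, &dinfo) == 0 &&
 *		    !(dinfo.state & (1 << MD_DISK_REMOVED)))
 *			printf("slot %d: dev %d:%d state %x\n",
 *			       i, dinfo.major, dinfo.minor, dinfo.state);
 *	}
 */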
5048
5049 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
5050 {
5051         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5052         mdk_rdev_t *rdev;
5053         dev_t dev = MKDEV(info->major,info->minor);
5054
5055         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
5056                 return -EOVERFLOW;
5057
5058         if (!mddev->raid_disks) {
5059                 int err;
5060                 /* expecting a device which has a superblock */
5061                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
5062                 if (IS_ERR(rdev)) {
5063                         printk(KERN_WARNING 
5064                                 "md: md_import_device returned %ld\n",
5065                                 PTR_ERR(rdev));
5066                         return PTR_ERR(rdev);
5067                 }
5068                 if (!list_empty(&mddev->disks)) {
5069                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
5070                                                         mdk_rdev_t, same_set);
5071                         err = super_types[mddev->major_version]
5072                                 .load_super(rdev, rdev0, mddev->minor_version);
5073                         if (err < 0) {
5074                                 printk(KERN_WARNING 
5075                                         "md: %s has different UUID to %s\n",
5076                                         bdevname(rdev->bdev,b), 
5077                                         bdevname(rdev0->bdev,b2));
5078                                 export_rdev(rdev);
5079                                 return -EINVAL;
5080                         }
5081                 }
5082                 err = bind_rdev_to_array(rdev, mddev);
5083                 if (err)
5084                         export_rdev(rdev);
5085                 return err;
5086         }
5087
5088         /*
5089          * add_new_disk can be used once the array is assembled
5090          * to add "hot spares".  They must already have a superblock
5091          * written
5092          */
5093         if (mddev->pers) {
5094                 int err;
5095                 if (!mddev->pers->hot_add_disk) {
5096                         printk(KERN_WARNING 
5097                                 "%s: personality does not support diskops!\n",
5098                                mdname(mddev));
5099                         return -EINVAL;
5100                 }
5101                 if (mddev->persistent)
5102                         rdev = md_import_device(dev, mddev->major_version,
5103                                                 mddev->minor_version);
5104                 else
5105                         rdev = md_import_device(dev, -1, -1);
5106                 if (IS_ERR(rdev)) {
5107                         printk(KERN_WARNING 
5108                                 "md: md_import_device returned %ld\n",
5109                                 PTR_ERR(rdev));
5110                         return PTR_ERR(rdev);
5111                 }
5112                 /* set save_raid_disk if appropriate */
5113                 if (!mddev->persistent) {
5114                         if (info->state & (1<<MD_DISK_SYNC)  &&
5115                             info->raid_disk < mddev->raid_disks)
5116                                 rdev->raid_disk = info->raid_disk;
5117                         else
5118                                 rdev->raid_disk = -1;
5119                 } else
5120                         super_types[mddev->major_version].
5121                                 validate_super(mddev, rdev);
5122                 rdev->saved_raid_disk = rdev->raid_disk;
5123
5124                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
5125                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
5126                         set_bit(WriteMostly, &rdev->flags);
5127                 else
5128                         clear_bit(WriteMostly, &rdev->flags);
5129
5130                 rdev->raid_disk = -1;
5131                 err = bind_rdev_to_array(rdev, mddev);
5132                 if (!err && !mddev->pers->hot_remove_disk) {
5133                         /* If there is hot_add_disk but no hot_remove_disk,
5134                          * then newly added disks are for geometry changes
5135                          * and should be added to the array immediately.
5136                          */
5137                         super_types[mddev->major_version].
5138                                 validate_super(mddev, rdev);
5139                         err = mddev->pers->hot_add_disk(mddev, rdev);
5140                         if (err)
5141                                 unbind_rdev_from_array(rdev);
5142                 }
5143                 if (err)
5144                         export_rdev(rdev);
5145                 else
5146                         sysfs_notify_dirent(rdev->sysfs_state);
5147
5148                 md_update_sb(mddev, 1);
5149                 if (mddev->degraded)
5150                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5151                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5152                 md_wakeup_thread(mddev->thread);
5153                 return err;
5154         }
5155
5156         /* otherwise, add_new_disk is only allowed
5157          * for major_version==0 superblocks
5158          */
5159         if (mddev->major_version != 0) {
5160                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
5161                        mdname(mddev));
5162                 return -EINVAL;
5163         }
5164
5165         if (!(info->state & (1<<MD_DISK_FAULTY))) {
5166                 int err;
5167                 rdev = md_import_device(dev, -1, 0);
5168                 if (IS_ERR(rdev)) {
5169                         printk(KERN_WARNING 
5170                                 "md: error, md_import_device() returned %ld\n",
5171                                 PTR_ERR(rdev));
5172                         return PTR_ERR(rdev);
5173                 }
5174                 rdev->desc_nr = info->number;
5175                 if (info->raid_disk < mddev->raid_disks)
5176                         rdev->raid_disk = info->raid_disk;
5177                 else
5178                         rdev->raid_disk = -1;
5179
5180                 if (rdev->raid_disk < mddev->raid_disks)
5181                         if (info->state & (1<<MD_DISK_SYNC))
5182                                 set_bit(In_sync, &rdev->flags);
5183
5184                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
5185                         set_bit(WriteMostly, &rdev->flags);
5186
5187                 if (!mddev->persistent) {
5188                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
5189                         rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
5190                 } else 
5191                         rdev->sb_start = calc_dev_sboffset(rdev->bdev);
5192                 rdev->sectors = rdev->sb_start;
5193
5194                 err = bind_rdev_to_array(rdev, mddev);
5195                 if (err) {
5196                         export_rdev(rdev);
5197                         return err;
5198                 }
5199         }
5200
5201         return 0;
5202 }
5203
5204 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
5205 {
5206         char b[BDEVNAME_SIZE];
5207         mdk_rdev_t *rdev;
5208
5209         rdev = find_rdev(mddev, dev);
5210         if (!rdev)
5211                 return -ENXIO;
5212
5213         if (rdev->raid_disk >= 0)
5214                 goto busy;
5215
5216         kick_rdev_from_array(rdev);
5217         md_update_sb(mddev, 1);
5218         md_new_event(mddev);
5219
5220         return 0;
5221 busy:
5222         printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
5223                 bdevname(rdev->bdev,b), mdname(mddev));
5224         return -EBUSY;
5225 }
5226
5227 static int hot_add_disk(mddev_t * mddev, dev_t dev)
5228 {
5229         char b[BDEVNAME_SIZE];
5230         int err;
5231         mdk_rdev_t *rdev;
5232
5233         if (!mddev->pers)
5234                 return -ENODEV;
5235
5236         if (mddev->major_version != 0) {
5237                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
5238                         " version-0 superblocks.\n",
5239                         mdname(mddev));
5240                 return -EINVAL;
5241         }
5242         if (!mddev->pers->hot_add_disk) {
5243                 printk(KERN_WARNING 
5244                         "%s: personality does not support diskops!\n",
5245                         mdname(mddev));
5246                 return -EINVAL;
5247         }
5248
5249         rdev = md_import_device(dev, -1, 0);
5250         if (IS_ERR(rdev)) {
5251                 printk(KERN_WARNING 
5252                         "md: error, md_import_device() returned %ld\n",
5253                         PTR_ERR(rdev));
5254                 return -EINVAL;
5255         }
5256
5257         if (mddev->persistent)
5258                 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
5259         else
5260                 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
5261
5262         rdev->sectors = rdev->sb_start;
5263
5264         if (test_bit(Faulty, &rdev->flags)) {
5265                 printk(KERN_WARNING 
5266                         "md: cannot hot-add faulty %s disk to %s!\n",
5267                         bdevname(rdev->bdev,b), mdname(mddev));
5268                 err = -EINVAL;
5269                 goto abort_export;
5270         }
5271         clear_bit(In_sync, &rdev->flags);
5272         rdev->desc_nr = -1;
5273         rdev->saved_raid_disk = -1;
5274         err = bind_rdev_to_array(rdev, mddev);
5275         if (err)
5276                 goto abort_export;
5277
5278         /*
5279          * The rest should better be atomic, we can have disk failures
5280          * noticed in interrupt contexts ...
5281          */
5282
5283         rdev->raid_disk = -1;
5284
5285         md_update_sb(mddev, 1);
5286
5287         /*
5288          * Kick recovery, maybe this spare has to be added to the
5289          * array immediately.
5290          */
5291         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5292         md_wakeup_thread(mddev->thread);
5293         md_new_event(mddev);
5294         return 0;
5295
5296 abort_export:
5297         export_rdev(rdev);
5298         return err;
5299 }
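
/* Hypothetical userspace sketch: unlike ADD_NEW_DISK, HOT_ADD_DISK
 * passes the device number directly as the ioctl argument; 8:16
 * (commonly /dev/sdb) is only an example.
 *
 *	ioctl(md_fd, HOT_ADD_DISK, (unsigned long)makedev(8, 16));
 */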
5300
5301 static int set_bitmap_file(mddev_t *mddev, int fd)
5302 {
5303         int err;
5304
5305         if (mddev->pers) {
5306                 if (!mddev->pers->quiesce)
5307                         return -EBUSY;
5308                 if (mddev->recovery || mddev->sync_thread)
5309                         return -EBUSY;
5310                 /* we should be able to change the bitmap. */
5311         }
5312
5314         if (fd >= 0) {
5315                 if (mddev->bitmap)
5316                         return -EEXIST; /* cannot add when bitmap is present */
5317                 mddev->bitmap_info.file = fget(fd);
5318
5319                 if (mddev->bitmap_info.file == NULL) {
5320                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
5321                                mdname(mddev));
5322                         return -EBADF;
5323                 }
5324
5325                 err = deny_bitmap_write_access(mddev->bitmap_info.file);
5326                 if (err) {
5327                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
5328                                mdname(mddev));
5329                         fput(mddev->bitmap_info.file);
5330                         mddev->bitmap_info.file = NULL;
5331                         return err;
5332                 }
5333                 mddev->bitmap_info.offset = 0; /* file overrides offset */
5334         } else if (mddev->bitmap == NULL)
5335                 return -ENOENT; /* cannot remove what isn't there */
5336         err = 0;
5337         if (mddev->pers) {
5338                 mddev->pers->quiesce(mddev, 1);
5339                 if (fd >= 0)
5340                         err = bitmap_create(mddev);
5341                 if (fd < 0 || err) {
5342                         bitmap_destroy(mddev);
5343                         fd = -1; /* make sure to put the file */
5344                 }
5345                 mddev->pers->quiesce(mddev, 0);
5346         }
5347         if (fd < 0) {
5348                 if (mddev->bitmap_info.file) {
5349                         restore_bitmap_write_access(mddev->bitmap_info.file);
5350                         fput(mddev->bitmap_info.file);
5351                 }
5352                 mddev->bitmap_info.file = NULL;
5353         }
5354
5355         return err;
5356 }
5357
5358 /*
5359  * set_array_info is used in two different ways.
5360  * The original usage is when creating a new array.
5361  * In this usage, raid_disks is > 0 and, together with
5362  *  level, size, not_persistent, layout and chunksize, it determines
5363  *  the shape of the array.
5364  *  This will always create an array with a type-0.90.0 superblock.
5365  * The newer usage is when assembling an array.
5366  *  In this case raid_disks will be 0, and the major_version field is
5367  *  used to determine which style of superblocks are to be found on
5368  *  the devices.  The minor and patch _version numbers are also kept
5369  *  in case the superblock handler wishes to interpret them.
5370  */
5371 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
5372 {
5373
5374         if (info->raid_disks == 0) {
5375                 /* just setting version number for superblock loading */
5376                 if (info->major_version < 0 ||
5377                     info->major_version >= ARRAY_SIZE(super_types) ||
5378                     super_types[info->major_version].name == NULL) {
5379                         /* maybe try to auto-load a module? */
5380                         printk(KERN_INFO 
5381                                 "md: superblock version %d not known\n",
5382                                 info->major_version);
5383                         return -EINVAL;
5384                 }
5385                 mddev->major_version = info->major_version;
5386                 mddev->minor_version = info->minor_version;
5387                 mddev->patch_version = info->patch_version;
5388                 mddev->persistent = !info->not_persistent;
5389                 /* ensure mddev_put doesn't delete this now that there
5390                  * is some minimal configuration.
5391                  */
5392                 mddev->ctime         = get_seconds();
5393                 return 0;
5394         }
5395         mddev->major_version = MD_MAJOR_VERSION;
5396         mddev->minor_version = MD_MINOR_VERSION;
5397         mddev->patch_version = MD_PATCHLEVEL_VERSION;
5398         mddev->ctime         = get_seconds();
5399
5400         mddev->level         = info->level;
5401         mddev->clevel[0]     = 0;
5402         mddev->dev_sectors   = 2 * (sector_t)info->size;
5403         mddev->raid_disks    = info->raid_disks;
5404         /* don't set md_minor, it is determined by which /dev/md* was
5405          * opened
5406          */
5407         if (info->state & (1<<MD_SB_CLEAN))
5408                 mddev->recovery_cp = MaxSector;
5409         else
5410                 mddev->recovery_cp = 0;
5411         mddev->persistent    = !info->not_persistent;
5412         mddev->external      = 0;
5413
5414         mddev->layout        = info->layout;
5415         mddev->chunk_sectors = info->chunk_size >> 9;
5416
5417         mddev->max_disks     = MD_SB_DISKS;
5418
5419         if (mddev->persistent)
5420                 mddev->flags         = 0;
5421         set_bit(MD_CHANGE_DEVS, &mddev->flags);
5422
5423         mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
5424         mddev->bitmap_info.offset = 0;
5425
5426         mddev->reshape_position = MaxSector;
5427
5428         /*
5429          * Generate a 128 bit UUID
5430          */
5431         get_random_bytes(mddev->uuid, 16);
5432
5433         mddev->new_level = mddev->level;
5434         mddev->new_chunk_sectors = mddev->chunk_sectors;
5435         mddev->new_layout = mddev->layout;
5436         mddev->delta_disks = 0;
5437
5438         return 0;
5439 }
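
/* Hypothetical userspace sketches of the two usages described above,
 * with "md_fd" an assumed open descriptor on the md device; fields not
 * shown are left zero.
 *
 * Creating (raid_disks > 0, always a 0.90.0 superblock):
 *
 *	mdu_array_info_t create = {
 *		.level = 1,
 *		.raid_disks = 2,
 *		.chunk_size = 64 * 1024,
 *	};
 *	ioctl(md_fd, SET_ARRAY_INFO, &create);
 *
 * Assembling (raid_disks == 0, only selects the superblock format):
 *
 *	mdu_array_info_t assemble = { .major_version = 1, .minor_version = 2 };
 *	ioctl(md_fd, SET_ARRAY_INFO, &assemble);
 */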
5440
5441 void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors)
5442 {
5443         WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
5444
5445         if (mddev->external_size)
5446                 return;
5447
5448         mddev->array_sectors = array_sectors;
5449 }
5450 EXPORT_SYMBOL(md_set_array_sectors);
5451
5452 static int update_size(mddev_t *mddev, sector_t num_sectors)
5453 {
5454         mdk_rdev_t *rdev;
5455         int rv;
5456         int fit = (num_sectors == 0);
5457
5458         if (mddev->pers->resize == NULL)
5459                 return -EINVAL;
5460         /* The "num_sectors" is the number of sectors of each device that
5461          * is used.  This can only make sense for arrays with redundancy.
5462          * linear and raid0 always use whatever space is available. We can only
5463          * consider changing this number if no resync or reconstruction is
5464          * happening, and if the new size is acceptable. It must fit before the
5465          * sb_start or, if that is <data_offset, it must fit before the size
5466          * of each device.  If num_sectors is zero, we find the largest size
5467          * that fits.
5469          */
5470         if (mddev->sync_thread)
5471                 return -EBUSY;
5472         if (mddev->bitmap)
5473                 /* Sorry, cannot grow a bitmap yet, just remove it,
5474                  * grow, and re-add.
5475                  */
5476                 return -EBUSY;
5477         list_for_each_entry(rdev, &mddev->disks, same_set) {
5478                 sector_t avail = rdev->sectors;
5479
5480                 if (fit && (num_sectors == 0 || num_sectors > avail))
5481                         num_sectors = avail;
5482                 if (avail < num_sectors)
5483                         return -ENOSPC;
5484         }
5485         rv = mddev->pers->resize(mddev, num_sectors);
5486         if (!rv)
5487                 revalidate_disk(mddev->gendisk);
5488         return rv;
5489 }
5490
5491 static int update_raid_disks(mddev_t *mddev, int raid_disks)
5492 {
5493         int rv;
5494         /* change the number of raid disks */
5495         if (mddev->pers->check_reshape == NULL)
5496                 return -EINVAL;
5497         if (raid_disks <= 0 ||
5498             (mddev->max_disks && raid_disks >= mddev->max_disks))
5499                 return -EINVAL;
5500         if (mddev->sync_thread || mddev->reshape_position != MaxSector)
5501                 return -EBUSY;
5502         mddev->delta_disks = raid_disks - mddev->raid_disks;
5503
5504         rv = mddev->pers->check_reshape(mddev);
5505         return rv;
5506 }
5507
5509 /*
5510  * update_array_info is used to change the configuration of an
5511  * on-line array.
5512  * The version, ctime, level, size, raid_disks, not_persistent, layout
5513  * and chunk_size fields in the info are checked against the array.
5514  * Any differences that cannot be handled will cause an error.
5515  * Normally, only one change can be managed at a time.
5516  */
5517 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
5518 {
5519         int rv = 0;
5520         int cnt = 0;
5521         int state = 0;
5522
5523         /* calculate expected state, ignoring low bits */
5524         if (mddev->bitmap && mddev->bitmap_info.offset)
5525                 state |= (1 << MD_SB_BITMAP_PRESENT);
5526
5527         if (mddev->major_version != info->major_version ||
5528             mddev->minor_version != info->minor_version ||
5529 /*          mddev->patch_version != info->patch_version || */
5530             mddev->ctime         != info->ctime         ||
5531             mddev->level         != info->level         ||
5532 /*          mddev->layout        != info->layout        || */
5533             !mddev->persistent   != info->not_persistent||
5534             mddev->chunk_sectors != info->chunk_size >> 9 ||
5535             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
5536             ((state^info->state) & 0xfffffe00)
5537                 )
5538                 return -EINVAL;
5539         /* Check there is only one change */
5540         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5541                 cnt++;
5542         if (mddev->raid_disks != info->raid_disks)
5543                 cnt++;
5544         if (mddev->layout != info->layout)
5545                 cnt++;
5546         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
5547                 cnt++;
5548         if (cnt == 0)
5549                 return 0;
5550         if (cnt > 1)
5551                 return -EINVAL;
5552
5553         if (mddev->layout != info->layout) {
5554                 /* Change layout
5555                  * we don't need to do anything at the md level, the
5556                  * personality will take care of it all.
5557                  */
5558                 if (mddev->pers->check_reshape == NULL)
5559                         return -EINVAL;
5560                 else {
5561                         mddev->new_layout = info->layout;
5562                         rv = mddev->pers->check_reshape(mddev);
5563                         if (rv)
5564                                 mddev->new_layout = mddev->layout;
5565                         return rv;
5566                 }
5567         }
5568         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5569                 rv = update_size(mddev, (sector_t)info->size * 2);
5570
5571         if (mddev->raid_disks    != info->raid_disks)
5572                 rv = update_raid_disks(mddev, info->raid_disks);
5573
5574         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
5575                 if (mddev->pers->quiesce == NULL)
5576                         return -EINVAL;
5577                 if (mddev->recovery || mddev->sync_thread)
5578                         return -EBUSY;
5579                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
5580                         /* add the bitmap */
5581                         if (mddev->bitmap)
5582                                 return -EEXIST;
5583                         if (mddev->bitmap_info.default_offset == 0)
5584                                 return -EINVAL;
5585                         mddev->bitmap_info.offset =
5586                                 mddev->bitmap_info.default_offset;
5587                         mddev->pers->quiesce(mddev, 1);
5588                         rv = bitmap_create(mddev);
5589                         if (rv)
5590                                 bitmap_destroy(mddev);
5591                         mddev->pers->quiesce(mddev, 0);
5592                 } else {
5593                         /* remove the bitmap */
5594                         if (!mddev->bitmap)
5595                                 return -ENOENT;
5596                         if (mddev->bitmap->file)
5597                                 return -EINVAL;
5598                         mddev->pers->quiesce(mddev, 1);
5599                         bitmap_destroy(mddev);
5600                         mddev->pers->quiesce(mddev, 0);
5601                         mddev->bitmap_info.offset = 0;
5602                 }
5603         }
5604         md_update_sb(mddev, 1);
5605         return rv;
5606 }
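
/* Hypothetical userspace sketch of the "one change at a time" rule
 * enforced above: start from the current geometry and change exactly
 * one field per SET_ARRAY_INFO call.
 *
 *	mdu_array_info_t info;
 *
 *	ioctl(md_fd, GET_ARRAY_INFO, &info);
 *	info.size = new_kb;
 *	ioctl(md_fd, SET_ARRAY_INFO, &info);
 *	info.raid_disks = new_disks;
 *	ioctl(md_fd, SET_ARRAY_INFO, &info);
 */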
5607
5608 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
5609 {
5610         mdk_rdev_t *rdev;
5611
5612         if (mddev->pers == NULL)
5613                 return -ENODEV;
5614
5615         rdev = find_rdev(mddev, dev);
5616         if (!rdev)
5617                 return -ENODEV;
5618
5619         md_error(mddev, rdev);
5620         return 0;
5621 }
5622
5623 /*
5624  * We have a problem here: there is no easy way to give a CHS
5625  * virtual geometry. We currently pretend that we have a 2 heads
5626  * 4 sectors (with a BIG number of cylinders...). This drives
5627  * dosfs just mad... ;-)
5628  */
5629 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
5630 {
5631         mddev_t *mddev = bdev->bd_disk->private_data;
5632
5633         geo->heads = 2;
5634         geo->sectors = 4;
5635         geo->cylinders = mddev->array_sectors / 8;
5636         return 0;
5637 }
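
/* Worked example of the fake geometry above: a 1 GiB array has
 * array_sectors == 2097152, so we report 2 heads, 4 sectors/track and
 * 2097152 / 8 == 262144 cylinders; heads * sectors * cylinders then
 * recovers the full capacity.
 */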
5638
5639 static int md_ioctl(struct block_device *bdev, fmode_t mode,
5640                         unsigned int cmd, unsigned long arg)
5641 {
5642         int err = 0;
5643         void __user *argp = (void __user *)arg;
5644         mddev_t *mddev = NULL;
5645         int ro;
5646
5647         if (!capable(CAP_SYS_ADMIN))
5648                 return -EACCES;
5649
5650         /*
5651          * Commands dealing with the RAID driver but not any
5652          * particular array:
5653          */
5654         switch (cmd)
5655         {
5656                 case RAID_VERSION:
5657                         err = get_version(argp);
5658                         goto done;
5659
5660                 case PRINT_RAID_DEBUG:
5661                         err = 0;
5662                         md_print_devices();
5663                         goto done;
5664
5665 #ifndef MODULE
5666                 case RAID_AUTORUN:
5667                         err = 0;
5668                         autostart_arrays(arg);
5669                         goto done;
5670 #endif
5671                 default:;
5672         }
5673
5674         /*
5675          * Commands creating/starting a new array:
5676          */
5677
5678         mddev = bdev->bd_disk->private_data;
5679
5680         if (!mddev) {
5681                 BUG();
5682                 goto abort;
5683         }
5684
5685         err = mddev_lock(mddev);
5686         if (err) {
5687                 printk(KERN_INFO 
5688                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
5689                         err, cmd);
5690                 goto abort;
5691         }
5692
5693         switch (cmd)
5694         {
5695                 case SET_ARRAY_INFO:
5696                         {
5697                                 mdu_array_info_t info;
5698                                 if (!arg)
5699                                         memset(&info, 0, sizeof(info));
5700                                 else if (copy_from_user(&info, argp, sizeof(info))) {
5701                                         err = -EFAULT;
5702                                         goto abort_unlock;
5703                                 }
5704                                 if (mddev->pers) {
5705                                         err = update_array_info(mddev, &info);
5706                                         if (err) {
5707                                                 printk(KERN_WARNING "md: couldn't update"
5708                                                        " array info. %d\n", err);
5709                                                 goto abort_unlock;
5710                                         }
5711                                         goto done_unlock;
5712                                 }
5713                                 if (!list_empty(&mddev->disks)) {
5714                                         printk(KERN_WARNING
5715                                                "md: array %s already has disks!\n",
5716                                                mdname(mddev));
5717                                         err = -EBUSY;
5718                                         goto abort_unlock;
5719                                 }
5720                                 if (mddev->raid_disks) {
5721                                         printk(KERN_WARNING
5722                                                "md: array %s already initialised!\n",
5723                                                mdname(mddev));
5724                                         err = -EBUSY;
5725                                         goto abort_unlock;
5726                                 }
5727                                 err = set_array_info(mddev, &info);
5728                                 if (err) {
5729                                         printk(KERN_WARNING "md: couldn't set"
5730                                                " array info. %d\n", err);
5731                                         goto abort_unlock;
5732                                 }
5733                         }
5734                         goto done_unlock;
5735
5736                 default:;
5737         }
5738
5739         /*
5740          * Commands querying/configuring an existing array:
5741          */
5742         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
5743          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
5744         if ((!mddev->raid_disks && !mddev->external)
5745             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
5746             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
5747             && cmd != GET_BITMAP_FILE) {
5748                 err = -ENODEV;
5749                 goto abort_unlock;
5750         }
5751
5752         /*
5753          * Commands even a read-only array can execute:
5754          */
5755         switch (cmd)
5756         {
5757                 case GET_ARRAY_INFO:
5758                         err = get_array_info(mddev, argp);
5759                         goto done_unlock;
5760
5761                 case GET_BITMAP_FILE:
5762                         err = get_bitmap_file(mddev, argp);
5763                         goto done_unlock;
5764
5765                 case GET_DISK_INFO:
5766                         err = get_disk_info(mddev, argp);
5767                         goto done_unlock;
5768
5769                 case RESTART_ARRAY_RW:
5770                         err = restart_array(mddev);
5771                         goto done_unlock;
5772
5773                 case STOP_ARRAY:
5774                         err = do_md_stop(mddev, 0, 1);
5775                         goto done_unlock;
5776
5777                 case STOP_ARRAY_RO:
5778                         err = md_set_readonly(mddev, 1);
5779                         goto done_unlock;
5780
5781                 case BLKROSET:
5782                         if (get_user(ro, (int __user *)(arg))) {
5783                                 err = -EFAULT;
5784                                 goto done_unlock;
5785                         }
5786                         err = -EINVAL;
5787
5788                         /* if the bdev is going readonly the value of mddev->ro
5789                          * does not matter, no writes are coming
5790                          */
5791                         if (ro)
5792                                 goto done_unlock;
5793
5794                         /* are we already prepared for writes? */
5795                         if (mddev->ro != 1)
5796                                 goto done_unlock;
5797
5798                         /* transitioning to readauto need only happen for
5799                          * arrays that call md_write_start
5800                          */
5801                         if (mddev->pers) {
5802                                 err = restart_array(mddev);
5803                                 if (err == 0) {
5804                                         mddev->ro = 2;
5805                                         set_disk_ro(mddev->gendisk, 0);
5806                                 }
5807                         }
5808                         goto done_unlock;
5809         }
5810
5811         /*
5812          * The remaining ioctls are changing the state of the
5813          * superblock, so we do not allow them on read-only arrays.
5814          * However non-MD ioctls (e.g. get-size) will still come through
5815          * here and hit the 'default' below, so only disallow
5816          * 'md' ioctls, and switch to rw mode if started auto-readonly.
5817          */
5818         if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
5819                 if (mddev->ro == 2) {
5820                         mddev->ro = 0;
5821                         sysfs_notify_dirent(mddev->sysfs_state);
5822                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5823                         md_wakeup_thread(mddev->thread);
5824                 } else {
5825                         err = -EROFS;
5826                         goto abort_unlock;
5827                 }
5828         }
5829
5830         switch (cmd)
5831         {
5832                 case ADD_NEW_DISK:
5833                 {
5834                         mdu_disk_info_t info;
5835                         if (copy_from_user(&info, argp, sizeof(info)))
5836                                 err = -EFAULT;
5837                         else
5838                                 err = add_new_disk(mddev, &info);
5839                         goto done_unlock;
5840                 }
5841
5842                 case HOT_REMOVE_DISK:
5843                         err = hot_remove_disk(mddev, new_decode_dev(arg));
5844                         goto done_unlock;
5845
5846                 case HOT_ADD_DISK:
5847                         err = hot_add_disk(mddev, new_decode_dev(arg));
5848                         goto done_unlock;
5849
5850                 case SET_DISK_FAULTY:
5851                         err = set_disk_faulty(mddev, new_decode_dev(arg));
5852                         goto done_unlock;
5853
5854                 case RUN_ARRAY:
5855                         err = do_md_run(mddev);
5856                         goto done_unlock;
5857
5858                 case SET_BITMAP_FILE:
5859                         err = set_bitmap_file(mddev, (int)arg);
5860                         goto done_unlock;
5861
5862                 default:
5863                         err = -EINVAL;
5864                         goto abort_unlock;
5865         }
5866
5867 done_unlock:
5868 abort_unlock:
5869         if (mddev->hold_active == UNTIL_IOCTL &&
5870             err != -EINVAL)
5871                 mddev->hold_active = 0;
5872         mddev_unlock(mddev);
5873
5874         return err;
5875 done:
5876         if (err)
5877                 MD_BUG();
5878 abort:
5879         return err;
5880 }
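
     /*
      * Illustrative userspace usage (a sketch, not part of this file):
      *
      *	mdu_array_info_t info;
      *	int fd = open("/dev/md0", O_RDONLY);
      *	if (fd >= 0 && ioctl(fd, GET_ARRAY_INFO, &info) == 0)
      *		printf("level %d, %d raid disks\n",
      *		       info.level, info.raid_disks);
      *
      * GET_ARRAY_INFO is one of the read-only commands above; all
      * commands require CAP_SYS_ADMIN, and those that change the
      * superblock additionally require a writable array.
      */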
5881 #ifdef CONFIG_COMPAT
5882 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
5883                     unsigned int cmd, unsigned long arg)
5884 {
5885         switch (cmd) {
5886         case HOT_REMOVE_DISK:
5887         case HOT_ADD_DISK:
5888         case SET_DISK_FAULTY:
5889         case SET_BITMAP_FILE:
5890                 /* These take in integer arg, do not convert */
5891                 break;
5892         default:
5893                 arg = (unsigned long)compat_ptr(arg);
5894                 break;
5895         }
5896
5897         return md_ioctl(bdev, mode, cmd, arg);
5898 }
5899 #endif /* CONFIG_COMPAT */
5900
5901 static int md_open(struct block_device *bdev, fmode_t mode)
5902 {
5903         /*
5904          * Succeed if we can lock the mddev, which confirms that
5905          * it isn't being stopped right now.
5906          */
5907         mddev_t *mddev = mddev_find(bdev->bd_dev);
5908         int err;
5909
5910         if (mddev->gendisk != bdev->bd_disk) {
5911                 /* we are racing with mddev_put which is discarding this
5912                  * bd_disk.
5913                  */
5914                 mddev_put(mddev);
5915                 /* Wait until bdev->bd_disk is definitely gone */
5916                 flush_scheduled_work();
5917                 /* Then retry the open from the top */
5918                 return -ERESTARTSYS;
5919         }
5920         BUG_ON(mddev != bdev->bd_disk->private_data);
5921
5922         if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
5923                 goto out;
5924
5925         err = 0;
5926         atomic_inc(&mddev->openers);
5927         mutex_unlock(&mddev->open_mutex);
5928
5929         check_disk_size_change(mddev->gendisk, bdev);
5930  out:
5931         return err;
5932 }
5933
5934 static int md_release(struct gendisk *disk, fmode_t mode)
5935 {
5936         mddev_t *mddev = disk->private_data;
5937
5938         BUG_ON(!mddev);
5939         atomic_dec(&mddev->openers);
5940         mddev_put(mddev);
5941
5942         return 0;
5943 }
5944 static const struct block_device_operations md_fops =
5945 {
5946         .owner          = THIS_MODULE,
5947         .open           = md_open,
5948         .release        = md_release,
5949         .ioctl          = md_ioctl,
5950 #ifdef CONFIG_COMPAT
5951         .compat_ioctl   = md_compat_ioctl,
5952 #endif
5953         .getgeo         = md_getgeo,
5954 };
5955
5956 static int md_thread(void * arg)
5957 {
5958         mdk_thread_t *thread = arg;
5959
5960         /*
5961          * md_thread is a 'system-thread', its priority should be very
5962          * high. We avoid resource deadlocks individually in each
5963          * raid personality. (RAID5 does preallocation) We also use RR and
5964          * the very same RT priority as kswapd, thus we will never get
5965          * into a priority inversion deadlock.
5966          *
5967          * we definitely have to have equal or higher priority than
5968          * bdflush, otherwise bdflush will deadlock if there are too
5969          * many dirty RAID5 blocks.
5970          */
5971
5972         allow_signal(SIGKILL);
5973         while (!kthread_should_stop()) {
5974
5975                 /* We need to wait INTERRUPTIBLE so that
5976                  * we don't add to the load-average.
5977                  * That means we need to be sure no signals are
5978                  * pending
5979                  */
5980                 if (signal_pending(current))
5981                         flush_signals(current);
5982
5983                 wait_event_interruptible_timeout
5984                         (thread->wqueue,
5985                          test_bit(THREAD_WAKEUP, &thread->flags)
5986                          || kthread_should_stop(),
5987                          thread->timeout);
5988
5989                 clear_bit(THREAD_WAKEUP, &thread->flags);
5990
5991                 thread->run(thread->mddev);
5992         }
5993
5994         return 0;
5995 }
5996
5997 void md_wakeup_thread(mdk_thread_t *thread)
5998 {
5999         if (thread) {
6000                 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
6001                 set_bit(THREAD_WAKEUP, &thread->flags);
6002                 wake_up(&thread->wqueue);
6003         }
6004 }
6005
6006 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
6007                                  const char *name)
6008 {
6009         mdk_thread_t *thread;
6010
6011         thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
6012         if (!thread)
6013                 return NULL;
6014
6015         init_waitqueue_head(&thread->wqueue);
6016
6017         thread->run = run;
6018         thread->mddev = mddev;
6019         thread->timeout = MAX_SCHEDULE_TIMEOUT;
6020         thread->tsk = kthread_run(md_thread, thread,
6021                                   "%s_%s",
6022                                   mdname(thread->mddev),
6023                                   name ?: mddev->pers->name);
6024         if (IS_ERR(thread->tsk)) {
6025                 kfree(thread);
6026                 return NULL;
6027         }
6028         return thread;
6029 }
6030
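     /*
      * Typical use by a personality (illustrative): raid1's run()
      * does roughly
      *
      *	mddev->thread = md_register_thread(raid1d, mddev, NULL);
      *
      * With name == NULL the personality name is used, so the kthread
      * is named e.g. "md0_raid1", and raid1d() runs on every
      * md_wakeup_thread() call.
      */
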
6031 void md_unregister_thread(mdk_thread_t *thread)
6032 {
6033         if (!thread)
6034                 return;
6035         dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
6036
6037         kthread_stop(thread->tsk);
6038         kfree(thread);
6039 }
6040
6041 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
6042 {
6043         if (!mddev) {
6044                 MD_BUG();
6045                 return;
6046         }
6047
6048         if (!rdev || test_bit(Faulty, &rdev->flags))
6049                 return;
6050
6051         if (mddev->external)
6052                 set_bit(Blocked, &rdev->flags);
6053 /*
6054         dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
6055                 mdname(mddev),
6056                 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
6057                 __builtin_return_address(0),__builtin_return_address(1),
6058                 __builtin_return_address(2),__builtin_return_address(3));
6059 */
6060         if (!mddev->pers)
6061                 return;
6062         if (!mddev->pers->error_handler)
6063                 return;
6064         mddev->pers->error_handler(mddev,rdev);
6065         if (mddev->degraded)
6066                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6067         sysfs_notify_dirent(rdev->sysfs_state);
6068         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6069         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6070         md_wakeup_thread(mddev->thread);
6071         md_new_event_inintr(mddev);
6072 }
6073
6074 /* seq_file implementation /proc/mdstat */
6075
6076 static void status_unused(struct seq_file *seq)
6077 {
6078         int i = 0;
6079         mdk_rdev_t *rdev;
6080
6081         seq_printf(seq, "unused devices: ");
6082
6083         list_for_each_entry(rdev, &pending_raid_disks, same_set) {
6084                 char b[BDEVNAME_SIZE];
6085                 i++;
6086                 seq_printf(seq, "%s ",
6087                               bdevname(rdev->bdev,b));
6088         }
6089         if (!i)
6090                 seq_printf(seq, "<none>");
6091
6092         seq_printf(seq, "\n");
6093 }
6094
6095
6096 static void status_resync(struct seq_file *seq, mddev_t * mddev)
6097 {
6098         sector_t max_sectors, resync, res;
6099         unsigned long dt, db;
6100         sector_t rt;
6101         int scale;
6102         unsigned int per_milli;
6103
6104         resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
6105
6106         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
6107                 max_sectors = mddev->resync_max_sectors;
6108         else
6109                 max_sectors = mddev->dev_sectors;
6110
6111         /*
6112          * Should not happen.
6113          */
6114         if (!max_sectors) {
6115                 MD_BUG();
6116                 return;
6117         }
6118         /* Pick 'scale' such that (resync>>scale)*1000 will fit
6119          * in a sector_t, and (max_sectors>>scale) will fit in a
6120          * u32, as those are the requirements for sector_div.
6121          * Thus 'scale' must be at least 10
6122          */
6123         scale = 10;
6124         if (sizeof(sector_t) > sizeof(unsigned long)) {
6125                 while ( max_sectors/2 > (1ULL<<(scale+32)))
6126                         scale++;
6127         }
6128         res = (resync>>scale)*1000;
6129         sector_div(res, (u32)((max_sectors>>scale)+1));
6130
6131         per_milli = res;
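             /*
              * Worked example (illustrative): resync = 1,000,000 of
              * max_sectors = 4,000,000 with scale = 10 gives
              * res = (1000000>>10)*1000 / ((4000000>>10)+1)
              *     = 976000 / 3907 = 249,
              * so per_milli = 249 is printed below as "24.9%".
              */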
6132         {
6133                 int i, x = per_milli/50, y = 20-x;
6134                 seq_printf(seq, "[");
6135                 for (i = 0; i < x; i++)
6136                         seq_printf(seq, "=");
6137                 seq_printf(seq, ">");
6138                 for (i = 0; i < y; i++)
6139                         seq_printf(seq, ".");
6140                 seq_printf(seq, "] ");
6141         }
6142         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
6143                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
6144                     "reshape" :
6145                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
6146                      "check" :
6147                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
6148                       "resync" : "recovery"))),
6149                    per_milli/10, per_milli % 10,
6150                    (unsigned long long) resync/2,
6151                    (unsigned long long) max_sectors/2);
6152
6153         /*
6154          * dt: time from mark until now
6155          * db: blocks written from mark until now
6156          * rt: remaining time
6157          *
6158          * rt is a sector_t, so could be 32bit or 64bit.
6159          * So we divide before multiply in case it is 32bit and close
6160          * to the limit.
6161          * We scale the divisor (db) by 32 to avoid losing precision
6162          * near the end of resync when the number of remaining sectors
6163          * is close to 'db'.
6164          * We then divide rt by 32 after multiplying by db to compensate.
6165          * The '+1' avoids division by zero if db is very small.
6166          */
6167         dt = ((jiffies - mddev->resync_mark) / HZ);
6168         if (!dt) dt++;
6169         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
6170                 - mddev->resync_mark_cnt;
6171
6172         rt = max_sectors - resync;    /* number of remaining sectors */
6173         sector_div(rt, db/32+1);
6174         rt *= dt;
6175         rt >>= 5;
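             /*
              * Worked example (illustrative): 1,000,000 sectors left,
              * db = 20480 sectors in dt = 10 seconds:
              * rt = 1000000 / (20480/32 + 1) = 1560, then
              * rt = 1560 * 10 >> 5 = 487 seconds, printed as
              * "finish=8.1min" (487/60 = 8, (487%60)/6 = 1).
              */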
6176
6177         seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
6178                    ((unsigned long)rt % 60)/6);
6179
6180         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
6181 }
6182
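     /*
      * The seq_file iterator below walks all_mddevs using two sentinel
      * cursor values: (void*)1 means "emit the Personalities header"
      * and (void*)2 means "emit the trailing unused-devices line".
      * Real mddev pointers are kept alive across ->next calls with
      * mddev_get()/mddev_put().
      */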
6183 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
6184 {
6185         struct list_head *tmp;
6186         loff_t l = *pos;
6187         mddev_t *mddev;
6188
6189         if (l >= 0x10000)
6190                 return NULL;
6191         if (!l--)
6192                 /* header */
6193                 return (void*)1;
6194
6195         spin_lock(&all_mddevs_lock);
6196         list_for_each(tmp,&all_mddevs)
6197                 if (!l--) {
6198                         mddev = list_entry(tmp, mddev_t, all_mddevs);
6199                         mddev_get(mddev);
6200                         spin_unlock(&all_mddevs_lock);
6201                         return mddev;
6202                 }
6203         spin_unlock(&all_mddevs_lock);
6204         if (!l--)
6205                 return (void*)2;/* tail */
6206         return NULL;
6207 }
6208
6209 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
6210 {
6211         struct list_head *tmp;
6212         mddev_t *next_mddev, *mddev = v;
6213         
6214         ++*pos;
6215         if (v == (void*)2)
6216                 return NULL;
6217
6218         spin_lock(&all_mddevs_lock);
6219         if (v == (void*)1)
6220                 tmp = all_mddevs.next;
6221         else
6222                 tmp = mddev->all_mddevs.next;
6223         if (tmp != &all_mddevs)
6224                 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
6225         else {
6226                 next_mddev = (void*)2;
6227                 *pos = 0x10000;
6228         }               
6229         spin_unlock(&all_mddevs_lock);
6230
6231         if (v != (void*)1)
6232                 mddev_put(mddev);
6233         return next_mddev;
6234
6235 }
6236
6237 static void md_seq_stop(struct seq_file *seq, void *v)
6238 {
6239         mddev_t *mddev = v;
6240
6241         if (mddev && v != (void*)1 && v != (void*)2)
6242                 mddev_put(mddev);
6243 }
6244
6245 struct mdstat_info {
6246         int event;
6247 };
6248
6249 static int md_seq_show(struct seq_file *seq, void *v)
6250 {
6251         mddev_t *mddev = v;
6252         sector_t sectors;
6253         mdk_rdev_t *rdev;
6254         struct mdstat_info *mi = seq->private;
6255         struct bitmap *bitmap;
6256
6257         if (v == (void*)1) {
6258                 struct mdk_personality *pers;
6259                 seq_printf(seq, "Personalities : ");
6260                 spin_lock(&pers_lock);
6261                 list_for_each_entry(pers, &pers_list, list)
6262                         seq_printf(seq, "[%s] ", pers->name);
6263
6264                 spin_unlock(&pers_lock);
6265                 seq_printf(seq, "\n");
6266                 mi->event = atomic_read(&md_event_count);
6267                 return 0;
6268         }
6269         if (v == (void*)2) {
6270                 status_unused(seq);
6271                 return 0;
6272         }
6273
6274         if (mddev_lock(mddev) < 0)
6275                 return -EINTR;
6276
6277         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
6278                 seq_printf(seq, "%s : %sactive", mdname(mddev),
6279                                                 mddev->pers ? "" : "in");
6280                 if (mddev->pers) {
6281                         if (mddev->ro==1)
6282                                 seq_printf(seq, " (read-only)");
6283                         if (mddev->ro==2)
6284                                 seq_printf(seq, " (auto-read-only)");
6285                         seq_printf(seq, " %s", mddev->pers->name);
6286                 }
6287
6288                 sectors = 0;
6289                 list_for_each_entry(rdev, &mddev->disks, same_set) {
6290                         char b[BDEVNAME_SIZE];
6291                         seq_printf(seq, " %s[%d]",
6292                                 bdevname(rdev->bdev,b), rdev->desc_nr);
6293                         if (test_bit(WriteMostly, &rdev->flags))
6294                                 seq_printf(seq, "(W)");
6295                         if (test_bit(Faulty, &rdev->flags)) {
6296                                 seq_printf(seq, "(F)");
6297                                 continue;
6298                         } else if (rdev->raid_disk < 0)
6299                                 seq_printf(seq, "(S)"); /* spare */
6300                         sectors += rdev->sectors;
6301                 }
6302
6303                 if (!list_empty(&mddev->disks)) {
6304                         if (mddev->pers)
6305                                 seq_printf(seq, "\n      %llu blocks",
6306                                            (unsigned long long)
6307                                            mddev->array_sectors / 2);
6308                         else
6309                                 seq_printf(seq, "\n      %llu blocks",
6310                                            (unsigned long long)sectors / 2);
6311                 }
6312                 if (mddev->persistent) {
6313                         if (mddev->major_version != 0 ||
6314                             mddev->minor_version != 90) {
6315                                 seq_printf(seq," super %d.%d",
6316                                            mddev->major_version,
6317                                            mddev->minor_version);
6318                         }
6319                 } else if (mddev->external)
6320                         seq_printf(seq, " super external:%s",
6321                                    mddev->metadata_type);
6322                 else
6323                         seq_printf(seq, " super non-persistent");
6324
6325                 if (mddev->pers) {
6326                         mddev->pers->status(seq, mddev);
6327                         seq_printf(seq, "\n      ");
6328                         if (mddev->pers->sync_request) {
6329                                 if (mddev->curr_resync > 2) {
6330                                         status_resync(seq, mddev);
6331                                         seq_printf(seq, "\n      ");
6332                                 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
6333                                         seq_printf(seq, "\tresync=DELAYED\n      ");
6334                                 else if (mddev->recovery_cp < MaxSector)
6335                                         seq_printf(seq, "\tresync=PENDING\n      ");
6336                         }
6337                 } else
6338                         seq_printf(seq, "\n       ");
6339
6340                 if ((bitmap = mddev->bitmap)) {
6341                         unsigned long chunk_kb;
6342                         unsigned long flags;
6343                         spin_lock_irqsave(&bitmap->lock, flags);
6344                         chunk_kb = mddev->bitmap_info.chunksize >> 10;
6345                         seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
6346                                 "%lu%s chunk",
6347                                 bitmap->pages - bitmap->missing_pages,
6348                                 bitmap->pages,
6349                                 (bitmap->pages - bitmap->missing_pages)
6350                                         << (PAGE_SHIFT - 10),
6351                                 chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
6352                                 chunk_kb ? "KB" : "B");
6353                         if (bitmap->file) {
6354                                 seq_printf(seq, ", file: ");
6355                                 seq_path(seq, &bitmap->file->f_path, " \t\n");
6356                         }
6357
6358                         seq_printf(seq, "\n");
6359                         spin_unlock_irqrestore(&bitmap->lock, flags);
6360                 }
6361
6362                 seq_printf(seq, "\n");
6363         }
6364         mddev_unlock(mddev);
6365         
6366         return 0;
6367 }
6368
6369 static const struct seq_operations md_seq_ops = {
6370         .start  = md_seq_start,
6371         .next   = md_seq_next,
6372         .stop   = md_seq_stop,
6373         .show   = md_seq_show,
6374 };
6375
6376 static int md_seq_open(struct inode *inode, struct file *file)
6377 {
6378         int error;
6379         struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
6380         if (mi == NULL)
6381                 return -ENOMEM;
6382
6383         error = seq_open(file, &md_seq_ops);
6384         if (error)
6385                 kfree(mi);
6386         else {
6387                 struct seq_file *p = file->private_data;
6388                 p->private = mi;
6389                 mi->event = atomic_read(&md_event_count);
6390         }
6391         return error;
6392 }
6393
6394 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
6395 {
6396         struct seq_file *m = filp->private_data;
6397         struct mdstat_info *mi = m->private;
6398         int mask;
6399
6400         poll_wait(filp, &md_event_waiters, wait);
6401
6402         /* always allow read */
6403         mask = POLLIN | POLLRDNORM;
6404
6405         if (mi->event != atomic_read(&md_event_count))
6406                 mask |= POLLERR | POLLPRI;
6407         return mask;
6408 }
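
     /*
      * Illustrative userspace usage (a sketch): a monitor such as
      * mdadm can wait for array events with
      *
      *	struct pollfd pfd = { .fd = fd_of_proc_mdstat,
      *			      .events = POLLPRI };
      *	poll(&pfd, 1, -1);
      *
      * which returns once md_event_count has changed, e.g. on device
      * failure or resync completion; the file must then be re-read.
      */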
6409
6410 static const struct file_operations md_seq_fops = {
6411         .owner          = THIS_MODULE,
6412         .open           = md_seq_open,
6413         .read           = seq_read,
6414         .llseek         = seq_lseek,
6415         .release        = seq_release_private,
6416         .poll           = mdstat_poll,
6417 };
6418
6419 int register_md_personality(struct mdk_personality *p)
6420 {
6421         spin_lock(&pers_lock);
6422         list_add_tail(&p->list, &pers_list);
6423         printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
6424         spin_unlock(&pers_lock);
6425         return 0;
6426 }
6427
6428 int unregister_md_personality(struct mdk_personality *p)
6429 {
6430         printk(KERN_INFO "md: %s personality unregistered\n", p->name);
6431         spin_lock(&pers_lock);
6432         list_del_init(&p->list);
6433         spin_unlock(&pers_lock);
6434         return 0;
6435 }
6436
6437 static int is_mddev_idle(mddev_t *mddev, int init)
6438 {
6439         mdk_rdev_t * rdev;
6440         int idle;
6441         int curr_events;
6442
6443         idle = 1;
6444         rcu_read_lock();
6445         rdev_for_each_rcu(rdev, mddev) {
6446                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
6447                 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
6448                               (int)part_stat_read(&disk->part0, sectors[1]) -
6449                               atomic_read(&disk->sync_io);
6450                 /* sync IO will cause sync_io to increase before the disk_stats
6451                  * as sync_io is counted when a request starts, and
6452                  * disk_stats is counted when it completes.
6453                  * So resync activity will cause curr_events to be smaller than
6454                  * when there was no such activity.
6455                  * non-sync IO will cause disk_stat to increase without
6456                  * increasing sync_io so curr_events will (eventually)
6457                  * be larger than it was before.  Once it becomes
6458                  * substantially larger, the test below will cause
6459                  * the array to appear non-idle, and resync will slow
6460                  * down.
6461                  * If there is a lot of outstanding resync activity when
6462                  * we set last_events to curr_events, then all that activity
6463                  * completing might cause the array to appear non-idle
6464                  * and resync will be slowed down even though there might
6465                  * not have been non-resync activity.  This will only
6466                  * happen once though.  'last_events' will soon reflect
6467                  * the state where there is little or no outstanding
6468                  * resync requests, and further resync activity will
6469                  * always make curr_events less than last_events.
6470                  *
6471                  */
6472                 if (init || curr_events - rdev->last_events > 64) {
6473                         rdev->last_events = curr_events;
6474                         idle = 0;
6475                 }
6476         }
6477         rcu_read_unlock();
6478         return idle;
6479 }
6480
6481 void md_done_sync(mddev_t *mddev, int blocks, int ok)
6482 {
6483         /* another "blocks" (512byte) blocks have been synced */
6484         atomic_sub(blocks, &mddev->recovery_active);
6485         wake_up(&mddev->recovery_wait);
6486         if (!ok) {
6487                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6488                 md_wakeup_thread(mddev->thread);
6489                 // stop recovery, signal do_sync ....
6490         }
6491 }
6492
6493
6494 /* md_write_start(mddev, bi)
6495  * If we need to update some array metadata (e.g. 'active' flag
6496  * in superblock) before writing, schedule a superblock update
6497  * and wait for it to complete.
6498  */
6499 void md_write_start(mddev_t *mddev, struct bio *bi)
6500 {
6501         int did_change = 0;
6502         if (bio_data_dir(bi) != WRITE)
6503                 return;
6504
6505         BUG_ON(mddev->ro == 1);
6506         if (mddev->ro == 2) {
6507                 /* need to switch to read/write */
6508                 mddev->ro = 0;
6509                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6510                 md_wakeup_thread(mddev->thread);
6511                 md_wakeup_thread(mddev->sync_thread);
6512                 did_change = 1;
6513         }
6514         atomic_inc(&mddev->writes_pending);
6515         if (mddev->safemode == 1)
6516                 mddev->safemode = 0;
6517         if (mddev->in_sync) {
6518                 spin_lock_irq(&mddev->write_lock);
6519                 if (mddev->in_sync) {
6520                         mddev->in_sync = 0;
6521                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6522                         md_wakeup_thread(mddev->thread);
6523                         did_change = 1;
6524                 }
6525                 spin_unlock_irq(&mddev->write_lock);
6526         }
6527         if (did_change)
6528                 sysfs_notify_dirent(mddev->sysfs_state);
6529         wait_event(mddev->sb_wait,
6530                    !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
6531                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6532 }
6533
6534 void md_write_end(mddev_t *mddev)
6535 {
6536         if (atomic_dec_and_test(&mddev->writes_pending)) {
6537                 if (mddev->safemode == 2)
6538                         md_wakeup_thread(mddev->thread);
6539                 else if (mddev->safemode_delay)
6540                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
6541         }
6542 }
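
     /*
      * Illustrative call pattern (a sketch): a personality brackets
      * each write in its make_request function:
      *
      *	md_write_start(mddev, bio);	(may block for a sb update)
      *	...submit the write...
      *
      * and calls md_write_end(mddev) when the write completes, so the
      * array can drop back to 'clean' after safemode_delay.
      */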
6543
6544 /* md_allow_write(mddev)
6545  * Calling this ensures that the array is marked 'active' so that writes
6546  * may proceed without blocking.  It is important to call this before
6547  * attempting a GFP_KERNEL allocation while holding the mddev lock.
6548  * Must be called with mddev_lock held.
6549  *
6550  * In the ->external case MD_CHANGE_CLEAN cannot be cleared until the mddev lock
6551  * is dropped, so return -EAGAIN after notifying userspace.
6552  */
6553 int md_allow_write(mddev_t *mddev)
6554 {
6555         if (!mddev->pers)
6556                 return 0;
6557         if (mddev->ro)
6558                 return 0;
6559         if (!mddev->pers->sync_request)
6560                 return 0;
6561
6562         spin_lock_irq(&mddev->write_lock);
6563         if (mddev->in_sync) {
6564                 mddev->in_sync = 0;
6565                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6566                 if (mddev->safemode_delay &&
6567                     mddev->safemode == 0)
6568                         mddev->safemode = 1;
6569                 spin_unlock_irq(&mddev->write_lock);
6570                 md_update_sb(mddev, 0);
6571                 sysfs_notify_dirent(mddev->sysfs_state);
6572         } else
6573                 spin_unlock_irq(&mddev->write_lock);
6574
6575         if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
6576                 return -EAGAIN;
6577         else
6578                 return 0;
6579 }
6580 EXPORT_SYMBOL_GPL(md_allow_write);
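
     /*
      * Illustrative use (a sketch): code that must do a GFP_KERNEL
      * allocation while holding the mddev lock calls
      *
      *	err = md_allow_write(mddev);
      *
      * first, and treats -EAGAIN as "userspace was notified but the
      * metadata update could not complete under the lock" (external
      * metadata only).
      */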
6581
6582 #define SYNC_MARKS      10
6583 #define SYNC_MARK_STEP  (3*HZ)
6584 void md_do_sync(mddev_t *mddev)
6585 {
6586         mddev_t *mddev2;
6587         unsigned int currspeed = 0,
6588                  window;
6589         sector_t max_sectors,j, io_sectors;
6590         unsigned long mark[SYNC_MARKS];
6591         sector_t mark_cnt[SYNC_MARKS];
6592         int last_mark,m;
6593         struct list_head *tmp;
6594         sector_t last_check;
6595         int skipped = 0;
6596         mdk_rdev_t *rdev;
6597         char *desc;
6598
6599         /* just in case the thread restarts... */
6600         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
6601                 return;
6602         if (mddev->ro) /* never try to sync a read-only array */
6603                 return;
6604
6605         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6606                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
6607                         desc = "data-check";
6608                 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6609                         desc = "requested-resync";
6610                 else
6611                         desc = "resync";
6612         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6613                 desc = "reshape";
6614         else
6615                 desc = "recovery";
6616
6617         /* we overload curr_resync somewhat here.
6618          * 0 == not engaged in resync at all
6619          * 2 == checking that there is no conflict with another sync
6620          * 1 == like 2, but have yielded to allow conflicting resync to
6621          *              commence
6622          * other == active in resync - this many blocks
6623          *
6624          * Before starting a resync we must have set curr_resync to
6625          * 2, and then checked that every "conflicting" array has curr_resync
6626          * less than ours.  When we find one that is the same or higher
6627          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
6628          * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
6629          * This will mean we have to start checking from the beginning again.
6630          *
6631          */
6632
6633         do {
6634                 mddev->curr_resync = 2;
6635
6636         try_again:
6637                 if (kthread_should_stop())
6638                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6639
6640                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6641                         goto skip;
6642                 for_each_mddev(mddev2, tmp) {
6643                         if (mddev2 == mddev)
6644                                 continue;
6645                         if (!mddev->parallel_resync
6646                         &&  mddev2->curr_resync
6647                         &&  match_mddev_units(mddev, mddev2)) {
6648                                 DEFINE_WAIT(wq);
6649                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
6650                                         /* arbitrarily yield */
6651                                         mddev->curr_resync = 1;
6652                                         wake_up(&resync_wait);
6653                                 }
6654                                 if (mddev > mddev2 && mddev->curr_resync == 1)
6655                                         /* no need to wait here, we can wait the next
6656                                          * time 'round when curr_resync == 2
6657                                          */
6658                                         continue;
6659                                 /* We need to wait 'interruptible' so as not to
6660                                  * contribute to the load average, and not to
6661                                  * be caught by 'softlockup'
6662                                  */
6663                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
6664                                 if (!kthread_should_stop() &&
6665                                     mddev2->curr_resync >= mddev->curr_resync) {
6666                                         printk(KERN_INFO "md: delaying %s of %s"
6667                                                " until %s has finished (they"
6668                                                " share one or more physical units)\n",
6669                                                desc, mdname(mddev), mdname(mddev2));
6670                                         mddev_put(mddev2);
6671                                         if (signal_pending(current))
6672                                                 flush_signals(current);
6673                                         schedule();
6674                                         finish_wait(&resync_wait, &wq);
6675                                         goto try_again;
6676                                 }
6677                                 finish_wait(&resync_wait, &wq);
6678                         }
6679                 }
6680         } while (mddev->curr_resync < 2);
6681
6682         j = 0;
6683         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6684                 /* resync follows the size requested by the personality,
6685                  * which defaults to physical size, but can be virtual size
6686                  */
6687                 max_sectors = mddev->resync_max_sectors;
6688                 mddev->resync_mismatches = 0;
6689                 /* we don't use the checkpoint if there's a bitmap */
6690                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6691                         j = mddev->resync_min;
6692                 else if (!mddev->bitmap)
6693                         j = mddev->recovery_cp;
6694
6695         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6696                 max_sectors = mddev->dev_sectors;
6697         else {
6698                 /* recovery follows the physical size of devices */
6699                 max_sectors = mddev->dev_sectors;
6700                 j = MaxSector;
6701                 rcu_read_lock();
6702                 list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
6703                         if (rdev->raid_disk >= 0 &&
6704                             !test_bit(Faulty, &rdev->flags) &&
6705                             !test_bit(In_sync, &rdev->flags) &&
6706                             rdev->recovery_offset < j)
6707                                 j = rdev->recovery_offset;
6708                 rcu_read_unlock();
6709         }
6710
6711         printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
6712         printk(KERN_INFO "md: minimum _guaranteed_  speed:"
6713                 " %d KB/sec/disk.\n", speed_min(mddev));
6714         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
6715                "(but not more than %d KB/sec) for %s.\n",
6716                speed_max(mddev), desc);
6717
6718         is_mddev_idle(mddev, 1); /* this initializes IO event counters */
6719
6720         io_sectors = 0;
6721         for (m = 0; m < SYNC_MARKS; m++) {
6722                 mark[m] = jiffies;
6723                 mark_cnt[m] = io_sectors;
6724         }
6725         last_mark = 0;
6726         mddev->resync_mark = mark[last_mark];
6727         mddev->resync_mark_cnt = mark_cnt[last_mark];
6728
6729         /*
6730          * Tune reconstruction:
6731          */
6732         window = 32*(PAGE_SIZE/512);
6733         printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
6734                 window/2,(unsigned long long) max_sectors/2);
6735
6736         atomic_set(&mddev->recovery_active, 0);
6737         last_check = 0;
6738
6739         if (j>2) {
6740                 printk(KERN_INFO 
6741                        "md: resuming %s of %s from checkpoint.\n",
6742                        desc, mdname(mddev));
6743                 mddev->curr_resync = j;
6744         }
6745         mddev->curr_resync_completed = mddev->curr_resync;
6746
6747         while (j < max_sectors) {
6748                 sector_t sectors;
6749
6750                 skipped = 0;
6751
6752                 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
6753                     ((mddev->curr_resync > mddev->curr_resync_completed &&
6754                       (mddev->curr_resync - mddev->curr_resync_completed)
6755                       > (max_sectors >> 4)) ||
6756                      (j - mddev->curr_resync_completed)*2
6757                      >= mddev->resync_max - mddev->curr_resync_completed
6758                             )) {
6759                         /* time to update curr_resync_completed */
6760                         blk_unplug(mddev->queue);
6761                         wait_event(mddev->recovery_wait,
6762                                    atomic_read(&mddev->recovery_active) == 0);
6763                         mddev->curr_resync_completed =
6764                                 mddev->curr_resync;
6765                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6766                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6767                 }
6768
6769                 while (j >= mddev->resync_max && !kthread_should_stop()) {
6770                         /* As this condition is controlled by user-space,
6771                          * we can block indefinitely, so use '_interruptible'
6772                          * to avoid triggering warnings.
6773                          */
6774                         flush_signals(current); /* just in case */
6775                         wait_event_interruptible(mddev->recovery_wait,
6776                                                  mddev->resync_max > j
6777                                                  || kthread_should_stop());
6778                 }
6779
6780                 if (kthread_should_stop())
6781                         goto interrupted;
6782
6783                 sectors = mddev->pers->sync_request(mddev, j, &skipped,
6784                                                   currspeed < speed_min(mddev));
6785                 if (sectors == 0) {
6786                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6787                         goto out;
6788                 }
6789
6790                 if (!skipped) { /* actual IO requested */
6791                         io_sectors += sectors;
6792                         atomic_add(sectors, &mddev->recovery_active);
6793                 }
6794
6795                 j += sectors;
6796                 if (j>1) mddev->curr_resync = j;
6797                 mddev->curr_mark_cnt = io_sectors;
6798                 if (last_check == 0)
6799                         /* this is the earliest that the rebuild will be
6800                          * visible in /proc/mdstat
6801                          */
6802                         md_new_event(mddev);
6803
6804                 if (last_check + window > io_sectors || j == max_sectors)
6805                         continue;
6806
6807                 last_check = io_sectors;
6808
6809                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6810                         break;
6811
6812         repeat:
6813                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
6814                         /* step marks */
6815                         int next = (last_mark+1) % SYNC_MARKS;
6816
6817                         mddev->resync_mark = mark[next];
6818                         mddev->resync_mark_cnt = mark_cnt[next];
6819                         mark[next] = jiffies;
6820                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
6821                         last_mark = next;
6822                 }
6823
6824
6825                 if (kthread_should_stop())
6826                         goto interrupted;
6827
6828
6829                 /*
6830                  * this loop exits only if we are slower than
6831                  * the 'hard' speed limit, or the system was IO-idle for
6832                  * a jiffy.
6833                  * the system might be non-idle CPU-wise, but we only care
6834                  * about not overloading the IO subsystem. (things like an
6835                  * e2fsck being done on the RAID array should execute fast)
6836                  */
6837                 blk_unplug(mddev->queue);
6838                 cond_resched();
6839
6840                 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
6841                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
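                     /*
                      * Illustrative numbers: 40960 sectors written since
                      * the last resync mark over 10 seconds gives
                      * currspeed = 40960/2 / (10+1) + 1 = 1862 KB/sec,
                      * compared below against speed_min/speed_max.
                      */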
6842
6843                 if (currspeed > speed_min(mddev)) {
6844                         if ((currspeed > speed_max(mddev)) ||
6845                                         !is_mddev_idle(mddev, 0)) {
6846                                 msleep(500);
6847                                 goto repeat;
6848                         }
6849                 }
6850         }
6851         printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
6852         /*
6853          * this also signals 'finished resyncing' to md_stop
6854          */
6855  out:
6856         blk_unplug(mddev->queue);
6857
6858         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
6859
6860         /* tell personality that we are finished */
6861         mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
6862
6863         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
6864             mddev->curr_resync > 2) {
6865                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6866                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
6867                                 if (mddev->curr_resync >= mddev->recovery_cp) {
6868                                         printk(KERN_INFO
6869                                                "md: checkpointing %s of %s.\n",
6870                                                desc, mdname(mddev));
6871                                         mddev->recovery_cp = mddev->curr_resync;
6872                                 }
6873                         } else
6874                                 mddev->recovery_cp = MaxSector;
6875                 } else {
6876                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6877                                 mddev->curr_resync = MaxSector;
6878                         rcu_read_lock();
6879                         list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
6880                                 if (rdev->raid_disk >= 0 &&
6881                                     mddev->delta_disks >= 0 &&
6882                                     !test_bit(Faulty, &rdev->flags) &&
6883                                     !test_bit(In_sync, &rdev->flags) &&
6884                                     rdev->recovery_offset < mddev->curr_resync)
6885                                         rdev->recovery_offset = mddev->curr_resync;
6886                         rcu_read_unlock();
6887                 }
6888         }
6889         set_bit(MD_CHANGE_DEVS, &mddev->flags);
6890
6891  skip:
6892         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
6893                 /* We completed so min/max setting can be forgotten if used. */
6894                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6895                         mddev->resync_min = 0;
6896                 mddev->resync_max = MaxSector;
6897         } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
6898                 mddev->resync_min = mddev->curr_resync_completed;
6899         mddev->curr_resync = 0;
6900         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6901                 mddev->curr_resync_completed = 0;
6902         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6903         wake_up(&resync_wait);
6904         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
6905         md_wakeup_thread(mddev->thread);
6906         return;
6907
6908  interrupted:
6909         /*
6910          * got a signal, exit.
6911          */
6912         printk(KERN_INFO
6913                "md: md_do_sync() got signal ... exiting\n");
6914         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6915         goto out;
6916
6917 }
6918 EXPORT_SYMBOL_GPL(md_do_sync);
6919
6920
6921 static int remove_and_add_spares(mddev_t *mddev)
6922 {
6923         mdk_rdev_t *rdev;
6924         int spares = 0;
6925
6926         mddev->curr_resync_completed = 0;
6927
6928         list_for_each_entry(rdev, &mddev->disks, same_set)
6929                 if (rdev->raid_disk >= 0 &&
6930                     !test_bit(Blocked, &rdev->flags) &&
6931                     (test_bit(Faulty, &rdev->flags) ||
6932                      ! test_bit(In_sync, &rdev->flags)) &&
6933                     atomic_read(&rdev->nr_pending)==0) {
6934                         if (mddev->pers->hot_remove_disk(
6935                                     mddev, rdev->raid_disk)==0) {
6936                                 char nm[20];
6937                                 sprintf(nm,"rd%d", rdev->raid_disk);
6938                                 sysfs_remove_link(&mddev->kobj, nm);
6939                                 rdev->raid_disk = -1;
6940                         }
6941                 }
6942
6943         if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
6944                 list_for_each_entry(rdev, &mddev->disks, same_set) {
6945                         if (rdev->raid_disk >= 0 &&
6946                             !test_bit(In_sync, &rdev->flags) &&
6947                             !test_bit(Blocked, &rdev->flags))
6948                                 spares++;
6949                         if (rdev->raid_disk < 0
6950                             && !test_bit(Faulty, &rdev->flags)) {
6951                                 rdev->recovery_offset = 0;
6952                                 if (mddev->pers->
6953                                     hot_add_disk(mddev, rdev) == 0) {
6954                                         char nm[20];
6955                                         sprintf(nm, "rd%d", rdev->raid_disk);
6956                                         if (sysfs_create_link(&mddev->kobj,
6957                                                               &rdev->kobj, nm))
6958                                                 printk(KERN_WARNING
6959                                                        "md: cannot register "
6960                                                        "%s for %s\n",
6961                                                        nm, mdname(mddev));
6962                                         spares++;
6963                                         md_new_event(mddev);
6964                                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
6965                                 } else
6966                                         break;
6967                         }
6968                 }
6969         }
6970         return spares;
6971 }
6972 /*
6973  * This routine is regularly called by all per-raid-array threads to
6974  * deal with generic issues like resync and super-block update.
6975  * Raid personalities that don't have a thread (linear/raid0) do not
6976  * need this as they never do any recovery or update the superblock.
6977  *
6978  * It does not do any resync itself, but rather "forks" off other threads
6979  * to do that as needed.
6980  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
6981  * "->recovery" and create a thread at ->sync_thread.
6982  * When the thread finishes it sets MD_RECOVERY_DONE
6983  * and wakes up this thread, which will reap the sync thread and finish up.
6984  * This thread also removes any faulty devices (with nr_pending == 0).
6985  *
6986  * The overall approach is:
6987  *  1/ if the superblock needs updating, update it.
6988  *  2/ If a recovery thread is running, don't do anything else.
6989  *  3/ If recovery has finished, clean up, possibly marking spares active.
6990  *  4/ If there are any faulty devices, remove them.
6991  *  5/ If array is degraded, try to add spare devices
6992  *  6/ If array has spares or is not in-sync, start a resync thread.
6993  */
6994 void md_check_recovery(mddev_t *mddev)
6995 {
6996         mdk_rdev_t *rdev;
6997
6998
6999         if (mddev->bitmap)
7000                 bitmap_daemon_work(mddev);
7001
7002         if (mddev->ro)
7003                 return;
7004
7005         if (signal_pending(current)) {
7006                 if (mddev->pers->sync_request && !mddev->external) {
7007                         printk(KERN_INFO "md: %s in immediate safe mode\n",
7008                                mdname(mddev));
7009                         mddev->safemode = 2;
7010                 }
7011                 flush_signals(current);
7012         }
7013
7014         if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
7015                 return;
7016         if ( ! (
7017                 (mddev->flags && !mddev->external) ||
7018                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
7019                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
7020                 (mddev->external == 0 && mddev->safemode == 1) ||
7021                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
7022                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
7023                 ))
7024                 return;
7025
7026         if (mddev_trylock(mddev)) {
7027                 int spares = 0;
7028
7029                 if (mddev->ro) {
7030                         /* Only thing we do on a ro array is remove
7031                          * failed devices.
7032                          */
7033                         remove_and_add_spares(mddev);
7034                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7035                         goto unlock;
7036                 }
7037
7038                 if (!mddev->external) {
7039                         int did_change = 0;
7040                         spin_lock_irq(&mddev->write_lock);
7041                         if (mddev->safemode &&
7042                             !atomic_read(&mddev->writes_pending) &&
7043                             !mddev->in_sync &&
7044                             mddev->recovery_cp == MaxSector) {
7045                                 mddev->in_sync = 1;
7046                                 did_change = 1;
7047                                 if (mddev->persistent)
7048                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7049                         }
7050                         if (mddev->safemode == 1)
7051                                 mddev->safemode = 0;
7052                         spin_unlock_irq(&mddev->write_lock);
7053                         if (did_change)
7054                                 sysfs_notify_dirent(mddev->sysfs_state);
7055                 }
7056
7057                 if (mddev->flags)
7058                         md_update_sb(mddev, 0);
7059
7060                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
7061                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
7062                         /* resync/recovery still happening */
7063                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7064                         goto unlock;
7065                 }
7066                 if (mddev->sync_thread) {
7067                         /* resync has finished, collect result */
7068                         md_unregister_thread(mddev->sync_thread);
7069                         mddev->sync_thread = NULL;
7070                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7071                             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7072                                 /* success...*/
7073                                 /* activate any spares */
7074                                 if (mddev->pers->spare_active(mddev))
7075                                         sysfs_notify(&mddev->kobj, NULL,
7076                                                      "degraded");
7077                         }
7078                         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7079                             mddev->pers->finish_reshape)
7080                                 mddev->pers->finish_reshape(mddev);
7081                         md_update_sb(mddev, 1);
7082
7083                         /* If the array is no longer degraded, then any saved_raid_disk
7084                          * information must be scrapped
7085                          */
7086                         if (!mddev->degraded)
7087                                 list_for_each_entry(rdev, &mddev->disks, same_set)
7088                                         rdev->saved_raid_disk = -1;
7089
7090                         mddev->recovery = 0;
7091                         /* flag recovery needed just to double check */
7092                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7093                         sysfs_notify_dirent(mddev->sysfs_action);
7094                         md_new_event(mddev);
7095                         goto unlock;
7096                 }
7097                 /* Set RUNNING before clearing NEEDED to avoid any
7098                  * transient "idle" in "sync_action" seen over sysfs.
7099                  */
7100                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7101                 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7102                 /* Clear some bits that don't mean anything, but
7103                  * might be left set
7104                  */
7105                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
7106                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
7107
7108                 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
7109                         goto unlock;
7110                 /* No recovery is running.
7111                  * Remove any failed drives, then
7112                  * add spares if possible.
7113                  * Spares are also removed and re-added, to allow
7114                  * the personality to fail the re-add.
7115                  */
7116
7117                 if (mddev->reshape_position != MaxSector) {
7118                         if (mddev->pers->check_reshape == NULL ||
7119                             mddev->pers->check_reshape(mddev) != 0)
7120                                 /* Cannot proceed */
7121                                 goto unlock;
7122                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7123                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7124                 } else if ((spares = remove_and_add_spares(mddev))) {
7125                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7126                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7127                         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
7128                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7129                 } else if (mddev->recovery_cp < MaxSector) {
7130                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7131                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7132                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
7133                         /* nothing to be done ... */
7134                         goto unlock;
7135
7136                 if (mddev->pers->sync_request) {
7137                 if (spares && mddev->bitmap && !mddev->bitmap->file) {
7138                                 /* We are adding a device or devices to an array
7139                                  * which has the bitmap stored on all devices.
7140                                  * So make sure all bitmap pages get written
7141                                  */
7142                                 bitmap_write_all(mddev->bitmap);
7143                         }
7144                         mddev->sync_thread = md_register_thread(md_do_sync,
7145                                                                 mddev,
7146                                                                 "resync");
7147                         if (!mddev->sync_thread) {
7148                                 printk(KERN_ERR "%s: could not start resync"
7149                                        " thread...\n",
7150                                        mdname(mddev));
7151                                 /* leave the spares where they are; it shouldn't hurt */
7152                                 mddev->recovery = 0;
7153                         } else
7154                                 md_wakeup_thread(mddev->sync_thread);
7155                         sysfs_notify_dirent(mddev->sysfs_action);
7156                         md_new_event(mddev);
7157                 }
7158         unlock:
7159                 if (!mddev->sync_thread) {
7160                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7161                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
7162                                                &mddev->recovery))
7163                                 if (mddev->sysfs_action)
7164                                         sysfs_notify_dirent(mddev->sysfs_action);
7165                 }
7166                 mddev_unlock(mddev);
7167         }
7168 }
7169
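     /*
      * A minimal usage sketch, modelled on how the raid personalities
      * call this: the caller must already hold a reference on the rdev,
      * because the wait path drops it via rdev_dec_pending():
      *
      *	if (unlikely(test_bit(Blocked, &rdev->flags))) {
      *		atomic_inc(&rdev->nr_pending);
      *		md_wait_for_blocked_rdev(rdev, mddev);
      *	}
      */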
7170 void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
7171 {
7172         sysfs_notify_dirent(rdev->sysfs_state);
7173         wait_event_timeout(rdev->blocked_wait,
7174                            !test_bit(Blocked, &rdev->flags),
7175                            msecs_to_jiffies(5000));
7176         rdev_dec_pending(rdev, mddev);
7177 }
7178 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
7179
7180 static int md_notify_reboot(struct notifier_block *this,
7181                             unsigned long code, void *x)
7182 {
7183         struct list_head *tmp;
7184         mddev_t *mddev;
7185
7186         if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
7187
7188                 printk(KERN_INFO "md: stopping all md devices.\n");
7189
7190                 for_each_mddev(mddev, tmp)
7191                         if (mddev_trylock(mddev)) {
7192                                 /* Force a switch to readonly even if the
7193                                  * array appears to still be in use.  Hence
7194                                  * the '100'.
7195                                  */
7196                                 md_set_readonly(mddev, 100);
7197                                 mddev_unlock(mddev);
7198                         }
7199                 /*
7200                  * Certain more exotic SCSI devices are known to be
7201                  * volatile with respect to overly early system reboots.
7202                  * While the right place to handle this is the individual
7203                  * driver, we do want the RAID driver to be safe ...
7204                  */
7205                 mdelay(1000*1);
7206         }
7207         return NOTIFY_DONE;
7208 }
7209
7210 static struct notifier_block md_notifier = {
7211         .notifier_call  = md_notify_reboot,
7212         .next           = NULL,
7213         .priority       = INT_MAX, /* before any real devices */
7214 };
7215
7216 static void md_geninit(void)
7217 {
7218         dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
7219
7220         proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
7221 }
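     /*
      * /proc/mdstat is the traditional monitoring interface.  Typical
      * output (illustrative) for a healthy two-disk raid1:
      *
      *	Personalities : [raid1]
      *	md0 : active raid1 sdb1[1] sda1[0]
      *	      1048512 blocks [2/2] [UU]
      *
      * The file also supports poll()/select(); md_new_event() uses this
      * to signal array state changes to monitors such as mdadm.
      */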
7222
7223 static int __init md_init(void)
7224 {
7225         if (register_blkdev(MD_MAJOR, "md"))
7226                 return -1;
7227         if ((mdp_major = register_blkdev(0, "mdp")) <= 0) {
7228                 unregister_blkdev(MD_MAJOR, "md");
7229                 return -1;
7230         }
7231         blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
7232                             md_probe, NULL, NULL);
7233         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
7234                             md_probe, NULL, NULL);
7235
7236         register_reboot_notifier(&md_notifier);
7237         raid_table_header = register_sysctl_table(raid_root_table);
7238
7239         md_geninit();
7240         return 0;
7241 }
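     /*
      * Note that md_init() registers block regions rather than concrete
      * disks: the gendisk for an array is only allocated when a matching
      * device node is first opened (e.g. /dev/md0, block device 9:0), at
      * which point the block layer calls md_probe() to create the mddev
      * on demand.
      */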
7242
7243
7244 #ifndef MODULE
7245
7246 /*
7247  * Searches all registered partitions for autorun RAID arrays
7248  * at boot time.
7249  */
7250
7251 static LIST_HEAD(all_detected_devices);
7252 struct detected_devices_node {
7253         struct list_head list;
7254         dev_t dev;
7255 };
7256
7257 void md_autodetect_dev(dev_t dev)
7258 {
7259         struct detected_devices_node *node_detected_dev;
7260
7261         node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
7262         if (node_detected_dev) {
7263                 node_detected_dev->dev = dev;
7264                 list_add_tail(&node_detected_dev->list, &all_detected_devices);
7265         } else {
7266                 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
7267                         ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
7268         }
7269 }
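     /*
      * The expected caller is the partition-scanning code: partitions
      * flagged for RAID autodetection (type 0xfd on msdos disk labels)
      * are queued here during boot, roughly (sketch of the fs/partitions
      * side):
      *
      *	if (state->parts[p].flags & ADDPART_FLAG_RAID)
      *		md_autodetect_dev(part_to_dev(part)->devt);
      *
      * autostart_arrays() below then drains the list once the scan is
      * complete.
      */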
7270
7271
7272 static void autostart_arrays(int part)
7273 {
7274         mdk_rdev_t *rdev;
7275         struct detected_devices_node *node_detected_dev;
7276         dev_t dev;
7277         int i_scanned, i_passed;
7278
7279         i_scanned = 0;
7280         i_passed = 0;
7281
7282         printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
7283
7284         while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
7285                 i_scanned++;
7286                 node_detected_dev = list_entry(all_detected_devices.next,
7287                                         struct detected_devices_node, list);
7288                 list_del(&node_detected_dev->list);
7289                 dev = node_detected_dev->dev;
7290                 kfree(node_detected_dev);
7291                 rdev = md_import_device(dev, 0, 90);
7292                 if (IS_ERR(rdev))
7293                         continue;
7294
7295                 if (test_bit(Faulty, &rdev->flags)) {
7296                         MD_BUG();
7297                         continue;
7298                 }
7299                 set_bit(AutoDetected, &rdev->flags);
7300                 list_add(&rdev->same_set, &pending_raid_disks);
7301                 i_passed++;
7302         }
7303
7304         printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
7305                i_scanned, i_passed);
7306
7307         autorun_devices(part);
7308 }
7309
7310 #endif /* !MODULE */
7311
7312 static __exit void md_exit(void)
7313 {
7314         mddev_t *mddev;
7315         struct list_head *tmp;
7316
7317         blk_unregister_region(MKDEV(MD_MAJOR, 0), 1U << MINORBITS);
7318         blk_unregister_region(MKDEV(mdp_major, 0), 1U << MINORBITS);
7319
7320         unregister_blkdev(MD_MAJOR, "md");
7321         unregister_blkdev(mdp_major, "mdp");
7322         unregister_reboot_notifier(&md_notifier);
7323         unregister_sysctl_table(raid_table_header);
7324         remove_proc_entry("mdstat", NULL);
7325         for_each_mddev(mddev, tmp) {
7326                 export_array(mddev);
7327                 mddev->hold_active = 0;
7328         }
7329 }
7330
7331 subsys_initcall(md_init);
7332 module_exit(md_exit);
7333
7334 static int get_ro(char *buffer, struct kernel_param *kp)
7335 {
7336         return sprintf(buffer, "%d", start_readonly);
7337 }
7338 static int set_ro(const char *val, struct kernel_param *kp)
7339 {
7340         char *e;
7341         int num = simple_strtoul(val, &e, 10);
7342         if (*val && (*e == '\0' || *e == '\n')) {
7343                 start_readonly = num;
7344                 return 0;
7345         }
7346         return -EINVAL;
7347 }
7348
7349 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
7350 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
7351
7352 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
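     /*
      * These parameters can be given at load time and, subject to the
      * mode bits above, changed at runtime under
      * /sys/module/md_mod/parameters/, e.g. (illustrative):
      *
      *	modprobe md-mod start_ro=1
      *	echo 1 > /sys/module/md_mod/parameters/start_dirty_degraded
      *
      * Writing an array name to the write-only "new_array" parameter
      * asks add_named_array() to pre-create that array.
      */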
7353
7354 EXPORT_SYMBOL(register_md_personality);
7355 EXPORT_SYMBOL(unregister_md_personality);
7356 EXPORT_SYMBOL(md_error);
7357 EXPORT_SYMBOL(md_done_sync);
7358 EXPORT_SYMBOL(md_write_start);
7359 EXPORT_SYMBOL(md_write_end);
7360 EXPORT_SYMBOL(md_register_thread);
7361 EXPORT_SYMBOL(md_unregister_thread);
7362 EXPORT_SYMBOL(md_wakeup_thread);
7363 EXPORT_SYMBOL(md_check_recovery);
7364 MODULE_LICENSE("GPL");
7365 MODULE_DESCRIPTION("MD RAID framework");
7366 MODULE_ALIAS("md");
7367 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);