drivers/md/md.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3    md.c : Multiple Devices driver for Linux
4      Copyright (C) 1998, 1999, 2000 Ingo Molnar
5
6      completely rewritten, based on the MD driver code from Marc Zyngier
7
8    Changes:
9
10    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
11    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
12    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
13    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
14    - kmod support by: Cyrus Durgin
15    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
16    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
17
18    - lots of fixes and improvements to the RAID1/RAID5 and generic
19      RAID code (such as request based resynchronization):
20
21      Neil Brown <neilb@cse.unsw.edu.au>.
22
23    - persistent bitmap code
24      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
25
26
27    Errors, Warnings, etc.
28    Please use:
29      pr_crit() for error conditions that risk data loss
30      pr_err() for error conditions that are unexpected, like an IO error
31          or internal inconsistency
32    pr_warn() for error conditions that could have been predicted, like
33          adding a device to an array when it has incompatible metadata
34    pr_info() for interesting, very rare events, like an array starting
35          or stopping, or resync starting or stopping
36      pr_debug() for everything else.
37
38 */
39
40 #include <linux/sched/mm.h>
41 #include <linux/sched/signal.h>
42 #include <linux/kthread.h>
43 #include <linux/blkdev.h>
44 #include <linux/badblocks.h>
45 #include <linux/sysctl.h>
46 #include <linux/seq_file.h>
47 #include <linux/fs.h>
48 #include <linux/poll.h>
49 #include <linux/ctype.h>
50 #include <linux/string.h>
51 #include <linux/hdreg.h>
52 #include <linux/proc_fs.h>
53 #include <linux/random.h>
54 #include <linux/module.h>
55 #include <linux/reboot.h>
56 #include <linux/file.h>
57 #include <linux/compat.h>
58 #include <linux/delay.h>
59 #include <linux/raid/md_p.h>
60 #include <linux/raid/md_u.h>
61 #include <linux/slab.h>
62 #include <linux/percpu-refcount.h>
63
64 #include <trace/events/block.h>
65 #include "md.h"
66 #include "md-bitmap.h"
67 #include "md-cluster.h"
68
69 #ifndef MODULE
70 static void autostart_arrays(int part);
71 #endif
72
73 /* pers_list is a list of registered personalities protected
74  * by pers_lock.
75  * pers_lock also protects accesses to mddev->thread
76  * when the mutex cannot be held.
77  */
78 static LIST_HEAD(pers_list);
79 static DEFINE_SPINLOCK(pers_lock);
80
81 static struct kobj_type md_ktype;
82
83 struct md_cluster_operations *md_cluster_ops;
84 EXPORT_SYMBOL(md_cluster_ops);
85 static struct module *md_cluster_mod;
86
87 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
88 static struct workqueue_struct *md_wq;
89 static struct workqueue_struct *md_misc_wq;
90
91 static int remove_and_add_spares(struct mddev *mddev,
92                                  struct md_rdev *this);
93 static void mddev_detach(struct mddev *mddev);
94
95 /*
96  * Default number of read corrections we'll attempt on an rdev
97  * before ejecting it from the array. We divide the read error
98  * count by 2 for every hour elapsed between read errors.
99  */
100 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
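/*
 * Worked example of the decay rule above: an rdev that has accumulated 16
 * corrected read errors is treated as having 8 after one error-free hour
 * and 4 after two; it is only ejected once the decayed count exceeds the
 * limit above (20 by default).
 */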
101 /*
102  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
103  * is 1000 KB/sec, so the extra system load does not show up that much.
104  * Increase it if you want to have more _guaranteed_ speed. Note that
105  * the RAID driver will use the maximum available bandwidth if the IO
106  * subsystem is idle. There is also an 'absolute maximum' reconstruction
107  * speed limit - in case reconstruction slows down your system despite
108  * idle IO detection.
109  *
110  * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
111  * or /sys/block/mdX/md/sync_speed_{min,max}
112  */
113
114 static int sysctl_speed_limit_min = 1000;
115 static int sysctl_speed_limit_max = 200000;
116 static inline int speed_min(struct mddev *mddev)
117 {
118         return mddev->sync_speed_min ?
119                 mddev->sync_speed_min : sysctl_speed_limit_min;
120 }
121
122 static inline int speed_max(struct mddev *mddev)
123 {
124         return mddev->sync_speed_max ?
125                 mddev->sync_speed_max : sysctl_speed_limit_max;
126 }
127
128 static void rdev_uninit_serial(struct md_rdev *rdev)
129 {
130         if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
131                 return;
132
133         kvfree(rdev->serial);
134         rdev->serial = NULL;
135 }
136
137 static void rdevs_uninit_serial(struct mddev *mddev)
138 {
139         struct md_rdev *rdev;
140
141         rdev_for_each(rdev, mddev)
142                 rdev_uninit_serial(rdev);
143 }
144
145 static int rdev_init_serial(struct md_rdev *rdev)
146 {
147         /* serial_nums equals BARRIER_BUCKETS_NR */
148         int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
149         struct serial_in_rdev *serial = NULL;
150
151         if (test_bit(CollisionCheck, &rdev->flags))
152                 return 0;
153
154         serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
155                           GFP_KERNEL);
156         if (!serial)
157                 return -ENOMEM;
158
159         for (i = 0; i < serial_nums; i++) {
160                 struct serial_in_rdev *serial_tmp = &serial[i];
161
162                 spin_lock_init(&serial_tmp->serial_lock);
163                 serial_tmp->serial_rb = RB_ROOT_CACHED;
164                 init_waitqueue_head(&serial_tmp->serial_io_wait);
165         }
166
167         rdev->serial = serial;
168         set_bit(CollisionCheck, &rdev->flags);
169
170         return 0;
171 }
172
173 static int rdevs_init_serial(struct mddev *mddev)
174 {
175         struct md_rdev *rdev;
176         int ret = 0;
177
178         rdev_for_each(rdev, mddev) {
179                 ret = rdev_init_serial(rdev);
180                 if (ret)
181                         break;
182         }
183
184         /* Free all resources if the pool does not exist */
185         if (ret && !mddev->serial_info_pool)
186                 rdevs_uninit_serial(mddev);
187
188         return ret;
189 }
190
191 /*
192  * rdev needs to enable serialization if it meets both conditions:
193  * 1. it is a multi-queue device flagged with writemostly.
194  * 2. write-behind mode is enabled.
195  */
196 static int rdev_need_serial(struct md_rdev *rdev)
197 {
198         return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
199                 rdev->bdev->bd_queue->nr_hw_queues != 1 &&
200                 test_bit(WriteMostly, &rdev->flags));
201 }
202
203 /*
204  * Init resource for rdev(s), then create serial_info_pool if:
205  * 1. rdev is the first device that returns true from rdev_need_serial().
206  * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
207  */
208 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
209                               bool is_suspend)
210 {
211         int ret = 0;
212
213         if (rdev && !rdev_need_serial(rdev) &&
214             !test_bit(CollisionCheck, &rdev->flags))
215                 return;
216
217         if (!is_suspend)
218                 mddev_suspend(mddev);
219
220         if (!rdev)
221                 ret = rdevs_init_serial(mddev);
222         else
223                 ret = rdev_init_serial(rdev);
224         if (ret)
225                 goto abort;
226
227         if (mddev->serial_info_pool == NULL) {
228                 unsigned int noio_flag;
229
230                 noio_flag = memalloc_noio_save();
231                 mddev->serial_info_pool =
232                         mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
233                                                 sizeof(struct serial_info));
234                 memalloc_noio_restore(noio_flag);
235                 if (!mddev->serial_info_pool) {
236                         rdevs_uninit_serial(mddev);
237                         pr_err("can't alloc memory pool for serialization\n");
238                 }
239         }
240
241 abort:
242         if (!is_suspend)
243                 mddev_resume(mddev);
244 }
245
246 /*
247  * Free resource from rdev(s), and destroy serial_info_pool under conditions:
248  * 1. rdev is the last device flagged with CollisionCheck.
249  * 2. the bitmap is destroyed while the policy is not enabled.
250  * 3. when disabling the policy, the pool is destroyed only when no rdev needs it.
251  */
252 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
253                                bool is_suspend)
254 {
255         if (rdev && !test_bit(CollisionCheck, &rdev->flags))
256                 return;
257
258         if (mddev->serial_info_pool) {
259                 struct md_rdev *temp;
260                 int num = 0; /* used to track if other rdevs need the pool */
261
262                 if (!is_suspend)
263                         mddev_suspend(mddev);
264                 rdev_for_each(temp, mddev) {
265                         if (!rdev) {
266                                 if (!mddev->serialize_policy ||
267                                     !rdev_need_serial(temp))
268                                         rdev_uninit_serial(temp);
269                                 else
270                                         num++;
271                         } else if (temp != rdev &&
272                                    test_bit(CollisionCheck, &temp->flags))
273                                 num++;
274                 }
275
276                 if (rdev)
277                         rdev_uninit_serial(rdev);
278
279                 if (num)
280                         pr_info("The mempool could be used by other devices\n");
281                 else {
282                         mempool_destroy(mddev->serial_info_pool);
283                         mddev->serial_info_pool = NULL;
284                 }
285                 if (!is_suspend)
286                         mddev_resume(mddev);
287         }
288 }
289
290 static struct ctl_table_header *raid_table_header;
291
292 static struct ctl_table raid_table[] = {
293         {
294                 .procname       = "speed_limit_min",
295                 .data           = &sysctl_speed_limit_min,
296                 .maxlen         = sizeof(int),
297                 .mode           = S_IRUGO|S_IWUSR,
298                 .proc_handler   = proc_dointvec,
299         },
300         {
301                 .procname       = "speed_limit_max",
302                 .data           = &sysctl_speed_limit_max,
303                 .maxlen         = sizeof(int),
304                 .mode           = S_IRUGO|S_IWUSR,
305                 .proc_handler   = proc_dointvec,
306         },
307         { }
308 };
309
310 static struct ctl_table raid_dir_table[] = {
311         {
312                 .procname       = "raid",
313                 .maxlen         = 0,
314                 .mode           = S_IRUGO|S_IXUGO,
315                 .child          = raid_table,
316         },
317         { }
318 };
319
320 static struct ctl_table raid_root_table[] = {
321         {
322                 .procname       = "dev",
323                 .maxlen         = 0,
324                 .mode           = 0555,
325                 .child          = raid_dir_table,
326         },
327         {  }
328 };
329
330 static const struct block_device_operations md_fops;
331
332 static int start_readonly;
333
334 /*
335  * The original mechanism for creating an md device is to create
336  * a device node in /dev and to open it.  This causes races with device-close.
337  * The preferred method is to write to the "new_array" module parameter.
338  * This can avoid races.
339  * Setting create_on_open to false disables the original mechanism
340  * so all the races disappear.
341  */
342 static bool create_on_open = true;
343
344 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
345                             struct mddev *mddev)
346 {
347         if (!mddev || !bioset_initialized(&mddev->bio_set))
348                 return bio_alloc(gfp_mask, nr_iovecs);
349
350         return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
351 }
352 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
353
354 static struct bio *md_bio_alloc_sync(struct mddev *mddev)
355 {
356         if (!mddev || !bioset_initialized(&mddev->sync_set))
357                 return bio_alloc(GFP_NOIO, 1);
358
359         return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
360 }
361
362 /*
363  * We have a system wide 'event count' that is incremented
364  * on any 'interesting' event, and readers of /proc/mdstat
365  * can use 'poll' or 'select' to find out when the event
366  * count increases.
367  *
368  * Events are:
369  *  start array, stop array, error, add device, remove device,
370  *  start build, activate spare
371  */
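/*
 * Illustrative user-space sketch (not part of this driver), following the
 * usual mdadm-style pattern: read /proc/mdstat once, then poll() for an
 * exceptional condition to be woken when the event count changes, and
 * re-read the file from offset 0 after each wakeup.
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	char buf[4096];
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *	read(fd, buf, sizeof(buf));		// consume the current state
 *	while (poll(&pfd, 1, -1) > 0) {		// sleeps until an md event
 *		lseek(fd, 0, SEEK_SET);
 *		read(fd, buf, sizeof(buf));	// fresh snapshot after the event
 *	}
 */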
372 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
373 static atomic_t md_event_count;
374 void md_new_event(struct mddev *mddev)
375 {
376         atomic_inc(&md_event_count);
377         wake_up(&md_event_waiters);
378 }
379 EXPORT_SYMBOL_GPL(md_new_event);
380
381 /*
382  * Lets us iterate over all existing md arrays.
383  * all_mddevs_lock protects this list.
384  */
385 static LIST_HEAD(all_mddevs);
386 static DEFINE_SPINLOCK(all_mddevs_lock);
387
388 /*
389  * iterates through all used mddevs in the system.
390  * We take care to grab the all_mddevs_lock whenever navigating
391  * the list, and to always hold a refcount when unlocked.
392  * Any code which breaks out of this loop will own a reference to the
393  * current mddev and must mddev_put() it; see the usage sketch below.
394  */
395 #define for_each_mddev(_mddev,_tmp)                                     \
396                                                                         \
397         for (({ spin_lock(&all_mddevs_lock);                            \
398                 _tmp = all_mddevs.next;                                 \
399                 _mddev = NULL;});                                       \
400              ({ if (_tmp != &all_mddevs)                                \
401                         mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
402                 spin_unlock(&all_mddevs_lock);                          \
403                 if (_mddev) mddev_put(_mddev);                          \
404                 _mddev = list_entry(_tmp, struct mddev, all_mddevs);    \
405                 _tmp != &all_mddevs;});                                 \
406              ({ spin_lock(&all_mddevs_lock);                            \
407                 _tmp = _tmp->next;})                                    \
408                 )
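/*
 * Illustrative use of the macro above, mirroring callers elsewhere in this
 * file: the current mddev stays pinned (via mddev_get()) while the loop body
 * runs and is released on the next iteration, so the body may sleep.
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		pr_info("%s\n", mdname(mddev));
 */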
409
410 /* Rather than calling directly into the personality make_request function,
411  * IO requests come here first so that we can check if the device is
412  * being suspended pending a reconfiguration.
413  * We hold a refcount over the call to ->make_request.  By the time that
414  * call has finished, the bio has been linked into some internal structure
415  * and so is visible to ->quiesce(), so we don't need the refcount any more.
416  */
417 static bool is_suspended(struct mddev *mddev, struct bio *bio)
418 {
419         if (mddev->suspended)
420                 return true;
421         if (bio_data_dir(bio) != WRITE)
422                 return false;
423         if (mddev->suspend_lo >= mddev->suspend_hi)
424                 return false;
425         if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
426                 return false;
427         if (bio_end_sector(bio) < mddev->suspend_lo)
428                 return false;
429         return true;
430 }
431
432 void md_handle_request(struct mddev *mddev, struct bio *bio)
433 {
434 check_suspended:
435         rcu_read_lock();
436         if (is_suspended(mddev, bio)) {
437                 DEFINE_WAIT(__wait);
438                 for (;;) {
439                         prepare_to_wait(&mddev->sb_wait, &__wait,
440                                         TASK_UNINTERRUPTIBLE);
441                         if (!is_suspended(mddev, bio))
442                                 break;
443                         rcu_read_unlock();
444                         schedule();
445                         rcu_read_lock();
446                 }
447                 finish_wait(&mddev->sb_wait, &__wait);
448         }
449         atomic_inc(&mddev->active_io);
450         rcu_read_unlock();
451
452         if (!mddev->pers->make_request(mddev, bio)) {
453                 atomic_dec(&mddev->active_io);
454                 wake_up(&mddev->sb_wait);
455                 goto check_suspended;
456         }
457
458         if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
459                 wake_up(&mddev->sb_wait);
460 }
461 EXPORT_SYMBOL(md_handle_request);
462
463 static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
464 {
465         const int rw = bio_data_dir(bio);
466         const int sgrp = op_stat_group(bio_op(bio));
467         struct mddev *mddev = q->queuedata;
468         unsigned int sectors;
469
470         if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
471                 bio_io_error(bio);
472                 return BLK_QC_T_NONE;
473         }
474
475         blk_queue_split(q, &bio);
476
477         if (mddev == NULL || mddev->pers == NULL) {
478                 bio_io_error(bio);
479                 return BLK_QC_T_NONE;
480         }
481         if (mddev->ro == 1 && unlikely(rw == WRITE)) {
482                 if (bio_sectors(bio) != 0)
483                         bio->bi_status = BLK_STS_IOERR;
484                 bio_endio(bio);
485                 return BLK_QC_T_NONE;
486         }
487
488         /*
489          * save the sectors now since our bio can
490          * go away inside make_request
491          */
492         sectors = bio_sectors(bio);
494         /* bio could be mergeable after passing to the underlying layer */
494         bio->bi_opf &= ~REQ_NOMERGE;
495
496         md_handle_request(mddev, bio);
497
498         part_stat_lock();
499         part_stat_inc(&mddev->gendisk->part0, ios[sgrp]);
500         part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors);
501         part_stat_unlock();
502
503         return BLK_QC_T_NONE;
504 }
505
506 /* mddev_suspend makes sure no new requests are submitted
507  * to the device, and that any requests that have been submitted
508  * are completely handled.
509  * Once mddev_detach() is called and completes, the module will be
510  * completely unused.
511  */
512 void mddev_suspend(struct mddev *mddev)
513 {
514         WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
515         lockdep_assert_held(&mddev->reconfig_mutex);
516         if (mddev->suspended++)
517                 return;
518         synchronize_rcu();
519         wake_up(&mddev->sb_wait);
520         set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
521         smp_mb__after_atomic();
522         wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
523         mddev->pers->quiesce(mddev, 1);
524         clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
525         wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
526
527         del_timer_sync(&mddev->safemode_timer);
528 }
529 EXPORT_SYMBOL_GPL(mddev_suspend);
530
531 void mddev_resume(struct mddev *mddev)
532 {
533         lockdep_assert_held(&mddev->reconfig_mutex);
534         if (--mddev->suspended)
535                 return;
536         wake_up(&mddev->sb_wait);
537         mddev->pers->quiesce(mddev, 0);
538
539         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
540         md_wakeup_thread(mddev->thread);
541         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
542 }
543 EXPORT_SYMBOL_GPL(mddev_resume);
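/*
 * Sketch of the usual pairing (the caller must hold reconfig_mutex, as
 * asserted in both helpers above):
 *
 *	mddev_suspend(mddev);
 *	// ... reconfigure the array while no I/O is in flight ...
 *	mddev_resume(mddev);
 */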
544
545 int mddev_congested(struct mddev *mddev, int bits)
546 {
547         struct md_personality *pers = mddev->pers;
548         int ret = 0;
549
550         rcu_read_lock();
551         if (mddev->suspended)
552                 ret = 1;
553         else if (pers && pers->congested)
554                 ret = pers->congested(mddev, bits);
555         rcu_read_unlock();
556         return ret;
557 }
558 EXPORT_SYMBOL_GPL(mddev_congested);
559 static int md_congested(void *data, int bits)
560 {
561         struct mddev *mddev = data;
562         return mddev_congested(mddev, bits);
563 }
564
565 /*
566  * Generic flush handling for md
567  */
568
569 static void md_end_flush(struct bio *bio)
570 {
571         struct md_rdev *rdev = bio->bi_private;
572         struct mddev *mddev = rdev->mddev;
573
574         rdev_dec_pending(rdev, mddev);
575
576         if (atomic_dec_and_test(&mddev->flush_pending)) {
577                 /* The pre-request flush has finished */
578                 queue_work(md_wq, &mddev->flush_work);
579         }
580         bio_put(bio);
581 }
582
583 static void md_submit_flush_data(struct work_struct *ws);
584
585 static void submit_flushes(struct work_struct *ws)
586 {
587         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
588         struct md_rdev *rdev;
589
590         mddev->start_flush = ktime_get_boottime();
591         INIT_WORK(&mddev->flush_work, md_submit_flush_data);
592         atomic_set(&mddev->flush_pending, 1);
593         rcu_read_lock();
594         rdev_for_each_rcu(rdev, mddev)
595                 if (rdev->raid_disk >= 0 &&
596                     !test_bit(Faulty, &rdev->flags)) {
597                         /* Take two references, one is dropped
598                          * when request finishes, one after
599                          * we reacquire rcu_read_lock
600                          */
601                         struct bio *bi;
602                         atomic_inc(&rdev->nr_pending);
603                         atomic_inc(&rdev->nr_pending);
604                         rcu_read_unlock();
605                         bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
606                         bi->bi_end_io = md_end_flush;
607                         bi->bi_private = rdev;
608                         bio_set_dev(bi, rdev->bdev);
609                         bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
610                         atomic_inc(&mddev->flush_pending);
611                         submit_bio(bi);
612                         rcu_read_lock();
613                         rdev_dec_pending(rdev, mddev);
614                 }
615         rcu_read_unlock();
616         if (atomic_dec_and_test(&mddev->flush_pending))
617                 queue_work(md_wq, &mddev->flush_work);
618 }
619
620 static void md_submit_flush_data(struct work_struct *ws)
621 {
622         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
623         struct bio *bio = mddev->flush_bio;
624
625         /*
626          * flush_bio must be reset before calling into md_handle_request to avoid a
627          * deadlock: other bios that already passed the md_handle_request suspend
628          * check could be waiting for this one, while the md_handle_request call
629          * below could in turn be waiting for those bios because of the same check.
630          */
631         mddev->last_flush = mddev->start_flush;
632         mddev->flush_bio = NULL;
633         wake_up(&mddev->sb_wait);
634
635         if (bio->bi_iter.bi_size == 0) {
636                 /* an empty barrier - all done */
637                 bio_endio(bio);
638         } else {
639                 bio->bi_opf &= ~REQ_PREFLUSH;
640                 md_handle_request(mddev, bio);
641         }
642 }
643
644 /*
645  * Manages consolidation of flushes and submitting any flushes needed for
646  * a bio with REQ_PREFLUSH.  Returns true if the bio is finished or is
647  * being finished in another context.  Returns false if the flushing is
648  * complete but still needs the I/O portion of the bio to be processed.
649  */
650 bool md_flush_request(struct mddev *mddev, struct bio *bio)
651 {
652         ktime_t start = ktime_get_boottime();
653         spin_lock_irq(&mddev->lock);
654         wait_event_lock_irq(mddev->sb_wait,
655                             !mddev->flush_bio ||
656                             ktime_after(mddev->last_flush, start),
657                             mddev->lock);
658         if (!ktime_after(mddev->last_flush, start)) {
659                 WARN_ON(mddev->flush_bio);
660                 mddev->flush_bio = bio;
661                 bio = NULL;
662         }
663         spin_unlock_irq(&mddev->lock);
664
665         if (!bio) {
666                 INIT_WORK(&mddev->flush_work, submit_flushes);
667                 queue_work(md_wq, &mddev->flush_work);
668         } else {
669                 /* flush was performed for some other bio while we waited. */
670                 if (bio->bi_iter.bi_size == 0)
671                         /* an empty barrier - all done */
672                         bio_endio(bio);
673                 else {
674                         bio->bi_opf &= ~REQ_PREFLUSH;
675                         return false;
676                 }
677         }
678         return true;
679 }
680 EXPORT_SYMBOL(md_flush_request);
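/*
 * Sketch of the typical caller pattern in a personality's ->make_request
 * (raid1, for instance, uses the return value this way): if the call
 * returns true the bio has already been completed or queued, otherwise the
 * flush is done and the data portion of the bio still needs handling.
 *
 *	if (unlikely(bio->bi_opf & REQ_PREFLUSH) &&
 *	    md_flush_request(mddev, bio))
 *		return true;
 *	// fall through and handle the data part of the bio
 */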
681
682 static inline struct mddev *mddev_get(struct mddev *mddev)
683 {
684         atomic_inc(&mddev->active);
685         return mddev;
686 }
687
688 static void mddev_delayed_delete(struct work_struct *ws);
689
690 static void mddev_put(struct mddev *mddev)
691 {
692         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
693                 return;
694         if (!mddev->raid_disks && list_empty(&mddev->disks) &&
695             mddev->ctime == 0 && !mddev->hold_active) {
696                 /* Array is not configured at all, and not held active,
697                  * so destroy it */
698                 list_del_init(&mddev->all_mddevs);
699
700                 /*
701                  * Call queue_work inside the spinlock so that
702                  * flush_workqueue() after mddev_find will succeed in waiting
703                  * for the work to be done.
704                  */
705                 INIT_WORK(&mddev->del_work, mddev_delayed_delete);
706                 queue_work(md_misc_wq, &mddev->del_work);
707         }
708         spin_unlock(&all_mddevs_lock);
709 }
710
711 static void md_safemode_timeout(struct timer_list *t);
712
713 void mddev_init(struct mddev *mddev)
714 {
715         kobject_init(&mddev->kobj, &md_ktype);
716         mutex_init(&mddev->open_mutex);
717         mutex_init(&mddev->reconfig_mutex);
718         mutex_init(&mddev->bitmap_info.mutex);
719         INIT_LIST_HEAD(&mddev->disks);
720         INIT_LIST_HEAD(&mddev->all_mddevs);
721         timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
722         atomic_set(&mddev->active, 1);
723         atomic_set(&mddev->openers, 0);
724         atomic_set(&mddev->active_io, 0);
725         spin_lock_init(&mddev->lock);
726         atomic_set(&mddev->flush_pending, 0);
727         init_waitqueue_head(&mddev->sb_wait);
728         init_waitqueue_head(&mddev->recovery_wait);
729         mddev->reshape_position = MaxSector;
730         mddev->reshape_backwards = 0;
731         mddev->last_sync_action = "none";
732         mddev->resync_min = 0;
733         mddev->resync_max = MaxSector;
734         mddev->level = LEVEL_NONE;
735 }
736 EXPORT_SYMBOL_GPL(mddev_init);
737
738 static struct mddev *mddev_find(dev_t unit)
739 {
740         struct mddev *mddev, *new = NULL;
741
742         if (unit && MAJOR(unit) != MD_MAJOR)
743                 unit &= ~((1<<MdpMinorShift)-1);
744
745  retry:
746         spin_lock(&all_mddevs_lock);
747
748         if (unit) {
749                 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
750                         if (mddev->unit == unit) {
751                                 mddev_get(mddev);
752                                 spin_unlock(&all_mddevs_lock);
753                                 kfree(new);
754                                 return mddev;
755                         }
756
757                 if (new) {
758                         list_add(&new->all_mddevs, &all_mddevs);
759                         spin_unlock(&all_mddevs_lock);
760                         new->hold_active = UNTIL_IOCTL;
761                         return new;
762                 }
763         } else if (new) {
764                 /* find an unused unit number */
765                 static int next_minor = 512;
766                 int start = next_minor;
767                 int is_free = 0;
768                 int dev = 0;
769                 while (!is_free) {
770                         dev = MKDEV(MD_MAJOR, next_minor);
771                         next_minor++;
772                         if (next_minor > MINORMASK)
773                                 next_minor = 0;
774                         if (next_minor == start) {
775                                 /* Oh dear, all in use. */
776                                 spin_unlock(&all_mddevs_lock);
777                                 kfree(new);
778                                 return NULL;
779                         }
780
781                         is_free = 1;
782                         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
783                                 if (mddev->unit == dev) {
784                                         is_free = 0;
785                                         break;
786                                 }
787                 }
788                 new->unit = dev;
789                 new->md_minor = MINOR(dev);
790                 new->hold_active = UNTIL_STOP;
791                 list_add(&new->all_mddevs, &all_mddevs);
792                 spin_unlock(&all_mddevs_lock);
793                 return new;
794         }
795         spin_unlock(&all_mddevs_lock);
796
797         new = kzalloc(sizeof(*new), GFP_KERNEL);
798         if (!new)
799                 return NULL;
800
801         new->unit = unit;
802         if (MAJOR(unit) == MD_MAJOR)
803                 new->md_minor = MINOR(unit);
804         else
805                 new->md_minor = MINOR(unit) >> MdpMinorShift;
806
807         mddev_init(new);
808
809         goto retry;
810 }
811
812 static struct attribute_group md_redundancy_group;
813
814 void mddev_unlock(struct mddev *mddev)
815 {
816         if (mddev->to_remove) {
817                 /* These cannot be removed under reconfig_mutex as
818                  * an access to the files will try to take reconfig_mutex
819                  * while holding the file unremovable, which leads to
820                  * a deadlock.
821  * So we set sysfs_active while the remove is happening,
822  * and anything else which might set ->to_remove or may
823                  * otherwise change the sysfs namespace will fail with
824                  * -EBUSY if sysfs_active is still set.
825                  * We set sysfs_active under reconfig_mutex and elsewhere
826                  * test it under the same mutex to ensure its correct value
827                  * is seen.
828                  */
829                 struct attribute_group *to_remove = mddev->to_remove;
830                 mddev->to_remove = NULL;
831                 mddev->sysfs_active = 1;
832                 mutex_unlock(&mddev->reconfig_mutex);
833
834                 if (mddev->kobj.sd) {
835                         if (to_remove != &md_redundancy_group)
836                                 sysfs_remove_group(&mddev->kobj, to_remove);
837                         if (mddev->pers == NULL ||
838                             mddev->pers->sync_request == NULL) {
839                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
840                                 if (mddev->sysfs_action)
841                                         sysfs_put(mddev->sysfs_action);
842                                 mddev->sysfs_action = NULL;
843                         }
844                 }
845                 mddev->sysfs_active = 0;
846         } else
847                 mutex_unlock(&mddev->reconfig_mutex);
848
849         /* As we've dropped the mutex we need a spinlock to
850          * make sure the thread doesn't disappear
851          */
852         spin_lock(&pers_lock);
853         md_wakeup_thread(mddev->thread);
854         wake_up(&mddev->sb_wait);
855         spin_unlock(&pers_lock);
856 }
857 EXPORT_SYMBOL_GPL(mddev_unlock);
858
859 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
860 {
861         struct md_rdev *rdev;
862
863         rdev_for_each_rcu(rdev, mddev)
864                 if (rdev->desc_nr == nr)
865                         return rdev;
866
867         return NULL;
868 }
869 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
870
871 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
872 {
873         struct md_rdev *rdev;
874
875         rdev_for_each(rdev, mddev)
876                 if (rdev->bdev->bd_dev == dev)
877                         return rdev;
878
879         return NULL;
880 }
881
882 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
883 {
884         struct md_rdev *rdev;
885
886         rdev_for_each_rcu(rdev, mddev)
887                 if (rdev->bdev->bd_dev == dev)
888                         return rdev;
889
890         return NULL;
891 }
892 EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
893
894 static struct md_personality *find_pers(int level, char *clevel)
895 {
896         struct md_personality *pers;
897         list_for_each_entry(pers, &pers_list, list) {
898                 if (level != LEVEL_NONE && pers->level == level)
899                         return pers;
900                 if (strcmp(pers->name, clevel)==0)
901                         return pers;
902         }
903         return NULL;
904 }
905
906 /* return the offset of the super block in 512-byte sectors */
907 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
908 {
909         sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
910         return MD_NEW_SIZE_SECTORS(num_sectors);
911 }
912
913 static int alloc_disk_sb(struct md_rdev *rdev)
914 {
915         rdev->sb_page = alloc_page(GFP_KERNEL);
916         if (!rdev->sb_page)
917                 return -ENOMEM;
918         return 0;
919 }
920
921 void md_rdev_clear(struct md_rdev *rdev)
922 {
923         if (rdev->sb_page) {
924                 put_page(rdev->sb_page);
925                 rdev->sb_loaded = 0;
926                 rdev->sb_page = NULL;
927                 rdev->sb_start = 0;
928                 rdev->sectors = 0;
929         }
930         if (rdev->bb_page) {
931                 put_page(rdev->bb_page);
932                 rdev->bb_page = NULL;
933         }
934         badblocks_exit(&rdev->badblocks);
935 }
936 EXPORT_SYMBOL_GPL(md_rdev_clear);
937
938 static void super_written(struct bio *bio)
939 {
940         struct md_rdev *rdev = bio->bi_private;
941         struct mddev *mddev = rdev->mddev;
942
943         if (bio->bi_status) {
944                 pr_err("md: super_written gets error=%d\n", bio->bi_status);
945                 md_error(mddev, rdev);
946                 if (!test_bit(Faulty, &rdev->flags)
947                     && (bio->bi_opf & MD_FAILFAST)) {
948                         set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
949                         set_bit(LastDev, &rdev->flags);
950                 }
951         } else
952                 clear_bit(LastDev, &rdev->flags);
953
954         if (atomic_dec_and_test(&mddev->pending_writes))
955                 wake_up(&mddev->sb_wait);
956         rdev_dec_pending(rdev, mddev);
957         bio_put(bio);
958 }
959
960 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
961                    sector_t sector, int size, struct page *page)
962 {
963         /* write first size bytes of page to sector of rdev
964          * Increment mddev->pending_writes before returning
965          * and decrement it on completion, waking up sb_wait
966          * if zero is reached.
967          * If an error occurred, call md_error
968          */
969         struct bio *bio;
970         int ff = 0;
971
972         if (!page)
973                 return;
974
975         if (test_bit(Faulty, &rdev->flags))
976                 return;
977
978         bio = md_bio_alloc_sync(mddev);
979
980         atomic_inc(&rdev->nr_pending);
981
982         bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
983         bio->bi_iter.bi_sector = sector;
984         bio_add_page(bio, page, size, 0);
985         bio->bi_private = rdev;
986         bio->bi_end_io = super_written;
987
988         if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
989             test_bit(FailFast, &rdev->flags) &&
990             !test_bit(LastDev, &rdev->flags))
991                 ff = MD_FAILFAST;
992         bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;
993
994         atomic_inc(&mddev->pending_writes);
995         submit_bio(bio);
996 }
997
998 int md_super_wait(struct mddev *mddev)
999 {
1000         /* wait for all superblock writes that were scheduled to complete */
1001         wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
1002         if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
1003                 return -EAGAIN;
1004         return 0;
1005 }
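/*
 * Sketch of how these two are typically combined when writing superblocks
 * (md_update_sb, further down in this file, follows this pattern): queue one
 * asynchronous write per device, then wait for all of them; a negative
 * return from md_super_wait() means a failfast write needs to be retried.
 *
 *	rdev_for_each(rdev, mddev)
 *		md_super_write(mddev, rdev, rdev->sb_start, rdev->sb_size,
 *			       rdev->sb_page);
 *	if (md_super_wait(mddev) < 0)
 *		goto rewrite;	// retry without MD_FAILFAST
 */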
1006
1007 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
1008                  struct page *page, int op, int op_flags, bool metadata_op)
1009 {
1010         struct bio *bio = md_bio_alloc_sync(rdev->mddev);
1011         int ret;
1012
1013         if (metadata_op && rdev->meta_bdev)
1014                 bio_set_dev(bio, rdev->meta_bdev);
1015         else
1016                 bio_set_dev(bio, rdev->bdev);
1017         bio_set_op_attrs(bio, op, op_flags);
1018         if (metadata_op)
1019                 bio->bi_iter.bi_sector = sector + rdev->sb_start;
1020         else if (rdev->mddev->reshape_position != MaxSector &&
1021                  (rdev->mddev->reshape_backwards ==
1022                   (sector >= rdev->mddev->reshape_position)))
1023                 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
1024         else
1025                 bio->bi_iter.bi_sector = sector + rdev->data_offset;
1026         bio_add_page(bio, page, size, 0);
1027
1028         submit_bio_wait(bio);
1029
1030         ret = !bio->bi_status;
1031         bio_put(bio);
1032         return ret;
1033 }
1034 EXPORT_SYMBOL_GPL(sync_page_io);
1035
1036 static int read_disk_sb(struct md_rdev *rdev, int size)
1037 {
1038         char b[BDEVNAME_SIZE];
1039
1040         if (rdev->sb_loaded)
1041                 return 0;
1042
1043         if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
1044                 goto fail;
1045         rdev->sb_loaded = 1;
1046         return 0;
1047
1048 fail:
1049         pr_err("md: disabled device %s, could not read superblock.\n",
1050                bdevname(rdev->bdev,b));
1051         return -EINVAL;
1052 }
1053
1054 static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
1055 {
1056         return  sb1->set_uuid0 == sb2->set_uuid0 &&
1057                 sb1->set_uuid1 == sb2->set_uuid1 &&
1058                 sb1->set_uuid2 == sb2->set_uuid2 &&
1059                 sb1->set_uuid3 == sb2->set_uuid3;
1060 }
1061
1062 static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
1063 {
1064         int ret;
1065         mdp_super_t *tmp1, *tmp2;
1066
1067         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
1068         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
1069
1070         if (!tmp1 || !tmp2) {
1071                 ret = 0;
1072                 goto abort;
1073         }
1074
1075         *tmp1 = *sb1;
1076         *tmp2 = *sb2;
1077
1078         /*
1079          * nr_disks is not constant
1080          */
1081         tmp1->nr_disks = 0;
1082         tmp2->nr_disks = 0;
1083
1084         ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
1085 abort:
1086         kfree(tmp1);
1087         kfree(tmp2);
1088         return ret;
1089 }
1090
1091 static u32 md_csum_fold(u32 csum)
1092 {
1093         csum = (csum & 0xffff) + (csum >> 16);
1094         return (csum & 0xffff) + (csum >> 16);
1095 }
1096
1097 static unsigned int calc_sb_csum(mdp_super_t *sb)
1098 {
1099         u64 newcsum = 0;
1100         u32 *sb32 = (u32*)sb;
1101         int i;
1102         unsigned int disk_csum, csum;
1103
1104         disk_csum = sb->sb_csum;
1105         sb->sb_csum = 0;
1106
1107         for (i = 0; i < MD_SB_BYTES/4 ; i++)
1108                 newcsum += sb32[i];
1109         csum = (newcsum & 0xffffffff) + (newcsum>>32);
1110
1111 #ifdef CONFIG_ALPHA
1112         /* This used to use csum_partial, which was wrong for several
1113          * reasons including that different results are returned on
1114          * different architectures.  It isn't critical that we get exactly
1115          * the same return value as before (we always csum_fold before
1116          * testing, and that removes any differences).  However as we
1117          * know that csum_partial always returned a 16bit value on
1118          * alphas, do a fold to maximise conformity to previous behaviour.
1119          */
1120         sb->sb_csum = md_csum_fold(disk_csum);
1121 #else
1122         sb->sb_csum = disk_csum;
1123 #endif
1124         return csum;
1125 }
1126
1127 /*
1128  * Handle superblock details.
1129  * We want to be able to handle multiple superblock formats
1130  * so we have a common interface to them all, and an array of
1131  * different handlers.
1132  * We rely on user-space to write the initial superblock, and support
1133  * reading and updating of superblocks.
1134  * Interface methods are:
1135  *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
1136  *      loads and validates a superblock on dev.
1137  *      if refdev != NULL, compare superblocks on both devices
1138  *    Return:
1139  *      0 - dev has a superblock that is compatible with refdev
1140  *      1 - dev has a superblock that is compatible and newer than refdev
1141  *          so dev should be used as the refdev in future
1142  *     -EINVAL superblock incompatible or invalid
1143  *     -othererror e.g. -EIO
1144  *
1145  *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
1146  *      Verify that dev is acceptable into mddev.
1147  *       The first time, mddev->raid_disks will be 0, and data from
1148  *       dev should be merged in.  Subsequent calls check that dev
1149  *       is new enough.  Return 0 or -EINVAL
1150  *
1151  *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
1152  *     Update the superblock for rdev with data in mddev
1153  *     This does not write to disc.
1154  *
1155  */
1156
1157 struct super_type  {
1158         char                *name;
1159         struct module       *owner;
1160         int                 (*load_super)(struct md_rdev *rdev,
1161                                           struct md_rdev *refdev,
1162                                           int minor_version);
1163         int                 (*validate_super)(struct mddev *mddev,
1164                                               struct md_rdev *rdev);
1165         void                (*sync_super)(struct mddev *mddev,
1166                                           struct md_rdev *rdev);
1167         unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
1168                                                 sector_t num_sectors);
1169         int                 (*allow_new_offset)(struct md_rdev *rdev,
1170                                                 unsigned long long new_offset);
1171 };
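/*
 * Sketch of how the 0.90.0 handlers defined below end up wired into this
 * interface (the full table in this file also wires up the size-change and
 * new-offset hooks, which are defined further down):
 *
 *	static struct super_type super_types[] = {
 *		{
 *			.name		= "0.90.0",
 *			.owner		= THIS_MODULE,
 *			.load_super	= super_90_load,
 *			.validate_super	= super_90_validate,
 *			.sync_super	= super_90_sync,
 *		},
 *		...
 *	};
 */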
1172
1173 /*
1174  * Check that the given mddev has no bitmap.
1175  *
1176  * This function is called from the run method of all personalities that do not
1177  * support bitmaps. It prints an error message and returns non-zero if mddev
1178  * has a bitmap. Otherwise, it returns 0.
1179  *
1180  */
1181 int md_check_no_bitmap(struct mddev *mddev)
1182 {
1183         if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
1184                 return 0;
1185         pr_warn("%s: bitmaps are not supported for %s\n",
1186                 mdname(mddev), mddev->pers->name);
1187         return 1;
1188 }
1189 EXPORT_SYMBOL(md_check_no_bitmap);
1190
1191 /*
1192  * load_super for 0.90.0
1193  */
1194 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1195 {
1196         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1197         mdp_super_t *sb;
1198         int ret;
1199         bool spare_disk = true;
1200
1201         /*
1202          * Calculate the position of the superblock (in 512-byte sectors);
1203          * it's at the end of the disk.
1204          *
1205          * It also happens to be a multiple of 4Kb.
1206          */
1207         rdev->sb_start = calc_dev_sboffset(rdev);
1208
1209         ret = read_disk_sb(rdev, MD_SB_BYTES);
1210         if (ret)
1211                 return ret;
1212
1213         ret = -EINVAL;
1214
1215         bdevname(rdev->bdev, b);
1216         sb = page_address(rdev->sb_page);
1217
1218         if (sb->md_magic != MD_SB_MAGIC) {
1219                 pr_warn("md: invalid raid superblock magic on %s\n", b);
1220                 goto abort;
1221         }
1222
1223         if (sb->major_version != 0 ||
1224             sb->minor_version < 90 ||
1225             sb->minor_version > 91) {
1226                 pr_warn("Bad version number %d.%d on %s\n",
1227                         sb->major_version, sb->minor_version, b);
1228                 goto abort;
1229         }
1230
1231         if (sb->raid_disks <= 0)
1232                 goto abort;
1233
1234         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
1235                 pr_warn("md: invalid superblock checksum on %s\n", b);
1236                 goto abort;
1237         }
1238
1239         rdev->preferred_minor = sb->md_minor;
1240         rdev->data_offset = 0;
1241         rdev->new_data_offset = 0;
1242         rdev->sb_size = MD_SB_BYTES;
1243         rdev->badblocks.shift = -1;
1244
1245         if (sb->level == LEVEL_MULTIPATH)
1246                 rdev->desc_nr = -1;
1247         else
1248                 rdev->desc_nr = sb->this_disk.number;
1249
1250         /* not spare disk, or LEVEL_MULTIPATH */
1251         if (sb->level == LEVEL_MULTIPATH ||
1252                 (rdev->desc_nr >= 0 &&
1253                  rdev->desc_nr < MD_SB_DISKS &&
1254                  sb->disks[rdev->desc_nr].state &
1255                  ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
1256                 spare_disk = false;
1257
1258         if (!refdev) {
1259                 if (!spare_disk)
1260                         ret = 1;
1261                 else
1262                         ret = 0;
1263         } else {
1264                 __u64 ev1, ev2;
1265                 mdp_super_t *refsb = page_address(refdev->sb_page);
1266                 if (!md_uuid_equal(refsb, sb)) {
1267                         pr_warn("md: %s has different UUID to %s\n",
1268                                 b, bdevname(refdev->bdev,b2));
1269                         goto abort;
1270                 }
1271                 if (!md_sb_equal(refsb, sb)) {
1272                         pr_warn("md: %s has same UUID but different superblock to %s\n",
1273                                 b, bdevname(refdev->bdev, b2));
1274                         goto abort;
1275                 }
1276                 ev1 = md_event(sb);
1277                 ev2 = md_event(refsb);
1278
1279                 if (!spare_disk && ev1 > ev2)
1280                         ret = 1;
1281                 else
1282                         ret = 0;
1283         }
1284         rdev->sectors = rdev->sb_start;
1285         /* Limit to 4TB as metadata cannot record more than that.
1286          * (not needed for Linear and RAID0 as metadata doesn't
1287          * record this size)
1288          */
1289         if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
1290                 rdev->sectors = (sector_t)(2ULL << 32) - 2;
1291
1292         if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1293                 /* "this cannot possibly happen" ... */
1294                 ret = -EINVAL;
1295
1296  abort:
1297         return ret;
1298 }
1299
1300 /*
1301  * validate_super for 0.90.0
1302  */
1303 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1304 {
1305         mdp_disk_t *desc;
1306         mdp_super_t *sb = page_address(rdev->sb_page);
1307         __u64 ev1 = md_event(sb);
1308
1309         rdev->raid_disk = -1;
1310         clear_bit(Faulty, &rdev->flags);
1311         clear_bit(In_sync, &rdev->flags);
1312         clear_bit(Bitmap_sync, &rdev->flags);
1313         clear_bit(WriteMostly, &rdev->flags);
1314
1315         if (mddev->raid_disks == 0) {
1316                 mddev->major_version = 0;
1317                 mddev->minor_version = sb->minor_version;
1318                 mddev->patch_version = sb->patch_version;
1319                 mddev->external = 0;
1320                 mddev->chunk_sectors = sb->chunk_size >> 9;
1321                 mddev->ctime = sb->ctime;
1322                 mddev->utime = sb->utime;
1323                 mddev->level = sb->level;
1324                 mddev->clevel[0] = 0;
1325                 mddev->layout = sb->layout;
1326                 mddev->raid_disks = sb->raid_disks;
1327                 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1328                 mddev->events = ev1;
1329                 mddev->bitmap_info.offset = 0;
1330                 mddev->bitmap_info.space = 0;
1331                 /* bitmap can use 60 K after the 4K superblocks */
1332                 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1333                 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1334                 mddev->reshape_backwards = 0;
1335
1336                 if (mddev->minor_version >= 91) {
1337                         mddev->reshape_position = sb->reshape_position;
1338                         mddev->delta_disks = sb->delta_disks;
1339                         mddev->new_level = sb->new_level;
1340                         mddev->new_layout = sb->new_layout;
1341                         mddev->new_chunk_sectors = sb->new_chunk >> 9;
1342                         if (mddev->delta_disks < 0)
1343                                 mddev->reshape_backwards = 1;
1344                 } else {
1345                         mddev->reshape_position = MaxSector;
1346                         mddev->delta_disks = 0;
1347                         mddev->new_level = mddev->level;
1348                         mddev->new_layout = mddev->layout;
1349                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1350                 }
1351                 if (mddev->level == 0)
1352                         mddev->layout = -1;
1353
1354                 if (sb->state & (1<<MD_SB_CLEAN))
1355                         mddev->recovery_cp = MaxSector;
1356                 else {
1357                         if (sb->events_hi == sb->cp_events_hi &&
1358                                 sb->events_lo == sb->cp_events_lo) {
1359                                 mddev->recovery_cp = sb->recovery_cp;
1360                         } else
1361                                 mddev->recovery_cp = 0;
1362                 }
1363
1364                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1365                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1366                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1367                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1368
1369                 mddev->max_disks = MD_SB_DISKS;
1370
1371                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1372                     mddev->bitmap_info.file == NULL) {
1373                         mddev->bitmap_info.offset =
1374                                 mddev->bitmap_info.default_offset;
1375                         mddev->bitmap_info.space =
1376                                 mddev->bitmap_info.default_space;
1377                 }
1378
1379         } else if (mddev->pers == NULL) {
1380                 /* Insist on good event counter while assembling, except
1381                  * for spares (which don't need an event count) */
1382                 ++ev1;
1383                 if (sb->disks[rdev->desc_nr].state & (
1384                             (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1385                         if (ev1 < mddev->events)
1386                                 return -EINVAL;
1387         } else if (mddev->bitmap) {
1388                 /* if adding to array with a bitmap, then we can accept an
1389                  * older device ... but not too old.
1390                  */
1391                 if (ev1 < mddev->bitmap->events_cleared)
1392                         return 0;
1393                 if (ev1 < mddev->events)
1394                         set_bit(Bitmap_sync, &rdev->flags);
1395         } else {
1396                 if (ev1 < mddev->events)
1397                         /* just a hot-add of a new device, leave raid_disk at -1 */
1398                         return 0;
1399         }
1400
1401         if (mddev->level != LEVEL_MULTIPATH) {
1402                 desc = sb->disks + rdev->desc_nr;
1403
1404                 if (desc->state & (1<<MD_DISK_FAULTY))
1405                         set_bit(Faulty, &rdev->flags);
1406                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1407                             desc->raid_disk < mddev->raid_disks */) {
1408                         set_bit(In_sync, &rdev->flags);
1409                         rdev->raid_disk = desc->raid_disk;
1410                         rdev->saved_raid_disk = desc->raid_disk;
1411                 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1412                         /* active but not in sync implies recovery up to
1413                          * reshape position.  We don't know exactly where
1414                          * that is, so set to zero for now */
1415                         if (mddev->minor_version >= 91) {
1416                                 rdev->recovery_offset = 0;
1417                                 rdev->raid_disk = desc->raid_disk;
1418                         }
1419                 }
1420                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1421                         set_bit(WriteMostly, &rdev->flags);
1422                 if (desc->state & (1<<MD_DISK_FAILFAST))
1423                         set_bit(FailFast, &rdev->flags);
1424         } else /* MULTIPATH are always insync */
1425                 set_bit(In_sync, &rdev->flags);
1426         return 0;
1427 }
1428
1429 /*
1430  * sync_super for 0.90.0
1431  */
1432 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1433 {
1434         mdp_super_t *sb;
1435         struct md_rdev *rdev2;
1436         int next_spare = mddev->raid_disks;
1437
1438         /* make rdev->sb match mddev data..
1439          *
1440          * 1/ zero out disks
1441          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1442          * 3/ any empty disks < next_spare become removed
1443          *
1444          * disks[0] gets initialised to REMOVED because
1445          * we cannot be sure from other fields if it has
1446          * been initialised or not.
1447          */
1448         int i;
1449         int active=0, working=0,failed=0,spare=0,nr_disks=0;
1450
1451         rdev->sb_size = MD_SB_BYTES;
1452
1453         sb = page_address(rdev->sb_page);
1454
1455         memset(sb, 0, sizeof(*sb));
1456
1457         sb->md_magic = MD_SB_MAGIC;
1458         sb->major_version = mddev->major_version;
1459         sb->patch_version = mddev->patch_version;
1460         sb->gvalid_words  = 0; /* ignored */
1461         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1462         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1463         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1464         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1465
1466         sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
1467         sb->level = mddev->level;
1468         sb->size = mddev->dev_sectors / 2;
1469         sb->raid_disks = mddev->raid_disks;
1470         sb->md_minor = mddev->md_minor;
1471         sb->not_persistent = 0;
1472         sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
1473         sb->state = 0;
1474         sb->events_hi = (mddev->events>>32);
1475         sb->events_lo = (u32)mddev->events;
1476
1477         if (mddev->reshape_position == MaxSector)
1478                 sb->minor_version = 90;
1479         else {
1480                 sb->minor_version = 91;
1481                 sb->reshape_position = mddev->reshape_position;
1482                 sb->new_level = mddev->new_level;
1483                 sb->delta_disks = mddev->delta_disks;
1484                 sb->new_layout = mddev->new_layout;
1485                 sb->new_chunk = mddev->new_chunk_sectors << 9;
1486         }
1487         mddev->minor_version = sb->minor_version;
1488         if (mddev->in_sync)
1489         {
1490                 sb->recovery_cp = mddev->recovery_cp;
1491                 sb->cp_events_hi = (mddev->events>>32);
1492                 sb->cp_events_lo = (u32)mddev->events;
1493                 if (mddev->recovery_cp == MaxSector)
1494                         sb->state = (1<< MD_SB_CLEAN);
1495         } else
1496                 sb->recovery_cp = 0;
1497
1498         sb->layout = mddev->layout;
1499         sb->chunk_size = mddev->chunk_sectors << 9;
1500
1501         if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1502                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1503
1504         sb->disks[0].state = (1<<MD_DISK_REMOVED);
1505         rdev_for_each(rdev2, mddev) {
1506                 mdp_disk_t *d;
1507                 int desc_nr;
1508                 int is_active = test_bit(In_sync, &rdev2->flags);
1509
1510                 if (rdev2->raid_disk >= 0 &&
1511                     sb->minor_version >= 91)
1512                         /* we have nowhere to store the recovery_offset,
1513                          * but if it is not below the reshape_position,
1514                          * we can piggy-back on that.
1515                          */
1516                         is_active = 1;
1517                 if (rdev2->raid_disk < 0 ||
1518                     test_bit(Faulty, &rdev2->flags))
1519                         is_active = 0;
1520                 if (is_active)
1521                         desc_nr = rdev2->raid_disk;
1522                 else
1523                         desc_nr = next_spare++;
1524                 rdev2->desc_nr = desc_nr;
1525                 d = &sb->disks[rdev2->desc_nr];
1526                 nr_disks++;
1527                 d->number = rdev2->desc_nr;
1528                 d->major = MAJOR(rdev2->bdev->bd_dev);
1529                 d->minor = MINOR(rdev2->bdev->bd_dev);
1530                 if (is_active)
1531                         d->raid_disk = rdev2->raid_disk;
1532                 else
1533                         d->raid_disk = rdev2->desc_nr; /* compatibility */
1534                 if (test_bit(Faulty, &rdev2->flags))
1535                         d->state = (1<<MD_DISK_FAULTY);
1536                 else if (is_active) {
1537                         d->state = (1<<MD_DISK_ACTIVE);
1538                         if (test_bit(In_sync, &rdev2->flags))
1539                                 d->state |= (1<<MD_DISK_SYNC);
1540                         active++;
1541                         working++;
1542                 } else {
1543                         d->state = 0;
1544                         spare++;
1545                         working++;
1546                 }
1547                 if (test_bit(WriteMostly, &rdev2->flags))
1548                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
1549                 if (test_bit(FailFast, &rdev2->flags))
1550                         d->state |= (1<<MD_DISK_FAILFAST);
1551         }
1552         /* now set the "removed" and "faulty" bits on any missing devices */
1553         for (i=0 ; i < mddev->raid_disks ; i++) {
1554                 mdp_disk_t *d = &sb->disks[i];
1555                 if (d->state == 0 && d->number == 0) {
1556                         d->number = i;
1557                         d->raid_disk = i;
1558                         d->state = (1<<MD_DISK_REMOVED);
1559                         d->state |= (1<<MD_DISK_FAULTY);
1560                         failed++;
1561                 }
1562         }
1563         sb->nr_disks = nr_disks;
1564         sb->active_disks = active;
1565         sb->working_disks = working;
1566         sb->failed_disks = failed;
1567         sb->spare_disks = spare;
1568
1569         sb->this_disk = sb->disks[rdev->desc_nr];
1570         sb->sb_csum = calc_sb_csum(sb);
1571 }
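/*
 * Illustrative walk-through of the desc_nr assignment in super_90_sync()
 * above (a sketch, not part of the driver): with raid_disks == 3, in-sync
 * members on slots 0 and 2, and one spare, the members keep
 * desc_nr == raid_disk (0 and 2) while the spare falls through to
 * desc_nr == next_spare == 3.  Slot 1 is never written, so the final
 * clean-up loop marks it REMOVED|FAULTY, giving nr_disks == 3,
 * active_disks == 2, working_disks == 3, spare_disks == 1 and
 * failed_disks == 1.
 */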
1572
1573 /*
1574  * rdev_size_change for 0.90.0
1575  */
1576 static unsigned long long
1577 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1578 {
1579         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1580                 return 0; /* component must fit device */
1581         if (rdev->mddev->bitmap_info.offset)
1582                 return 0; /* can't move bitmap */
1583         rdev->sb_start = calc_dev_sboffset(rdev);
1584         if (!num_sectors || num_sectors > rdev->sb_start)
1585                 num_sectors = rdev->sb_start;
1586         /* Limit to 4TB as metadata cannot record more than that.
1587          * 4TB == 2^32 KB, or 2*2^32 sectors.
1588          */
1589         if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
1590                 num_sectors = (sector_t)(2ULL << 32) - 2;
1591         do {
1592                 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1593                        rdev->sb_page);
1594         } while (md_super_wait(rdev->mddev) < 0);
1595         return num_sectors;
1596 }
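/*
 * Worked example for the 4TB clamp above (illustrative only): the 0.90
 * superblock stores sizes as 32-bit KB counts, so the largest representable
 * component is 2^32 KB = 4 TiB = 2^33 512-byte sectors, i.e. (2ULL << 32)
 * sectors.  Clamping to (2ULL << 32) - 2 sectors leaves 2^32 - 1 KB, which
 * still fits in the on-disk u32 after the sectors-to-KB "/ 2" conversion.
 */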
1597
1598 static int
1599 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1600 {
1601         /* non-zero offset changes not possible with v0.90 */
1602         return new_offset == 0;
1603 }
1604
1605 /*
1606  * version 1 superblock
1607  */
1608
1609 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1610 {
1611         __le32 disk_csum;
1612         u32 csum;
1613         unsigned long long newcsum;
1614         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1615         __le32 *isuper = (__le32*)sb;
1616
1617         disk_csum = sb->sb_csum;
1618         sb->sb_csum = 0;
1619         newcsum = 0;
1620         for (; size >= 4; size -= 4)
1621                 newcsum += le32_to_cpu(*isuper++);
1622
1623         if (size == 2)
1624                 newcsum += le16_to_cpu(*(__le16*) isuper);
1625
1626         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1627         sb->sb_csum = disk_csum;
1628         return cpu_to_le32(csum);
1629 }
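/*
 * Illustrative note on the fold in calc_sb_1_csum() above (a sketch, not
 * part of the driver): the 64-bit running sum is reduced to 32 bits in a
 * single step, e.g. for newcsum == 0x2fffffffeULL:
 *
 *	csum = (newcsum & 0xffffffff) + (newcsum >> 32)
 *	     = 0xfffffffe + 0x2 = 0x100000000, truncated to 0 in the u32.
 *
 * Any carry out of that one addition is simply dropped by the u32
 * assignment; the on-disk checksum has always been computed this way, so a
 * re-implementation must match it bit for bit.
 */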
1630
1631 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1632 {
1633         struct mdp_superblock_1 *sb;
1634         int ret;
1635         sector_t sb_start;
1636         sector_t sectors;
1637         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1638         int bmask;
1639         bool spare_disk = true;
1640
1641         /*
1642          * Calculate the position of the superblock in 512-byte sectors.
1643          * It is always aligned to a 4K boundary and,
1644          * depending on minor_version, it can be:
1645          * 0: At least 8K, but less than 12K, from end of device
1646          * 1: At start of device
1647          * 2: 4K from start of device.
1648          */
1649         switch(minor_version) {
1650         case 0:
1651                 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1652                 sb_start -= 8*2;
1653                 sb_start &= ~(sector_t)(4*2-1);
1654                 break;
1655         case 1:
1656                 sb_start = 0;
1657                 break;
1658         case 2:
1659                 sb_start = 8;
1660                 break;
1661         default:
1662                 return -EINVAL;
1663         }
1664         rdev->sb_start = sb_start;
1665
1666         /* superblock is rarely larger than 1K, but it can be larger,
1667          * and it is safe to read 4k, so we do that
1668          */
1669         ret = read_disk_sb(rdev, 4096);
1670         if (ret) return ret;
1671
1672         sb = page_address(rdev->sb_page);
1673
1674         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1675             sb->major_version != cpu_to_le32(1) ||
1676             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1677             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1678             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1679                 return -EINVAL;
1680
1681         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1682                 pr_warn("md: invalid superblock checksum on %s\n",
1683                         bdevname(rdev->bdev,b));
1684                 return -EINVAL;
1685         }
1686         if (le64_to_cpu(sb->data_size) < 10) {
1687                 pr_warn("md: data_size too small on %s\n",
1688                         bdevname(rdev->bdev,b));
1689                 return -EINVAL;
1690         }
1691         if (sb->pad0 ||
1692             sb->pad3[0] ||
1693             memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1694                 /* Some padding is non-zero, might be a new feature */
1695                 return -EINVAL;
1696
1697         rdev->preferred_minor = 0xffff;
1698         rdev->data_offset = le64_to_cpu(sb->data_offset);
1699         rdev->new_data_offset = rdev->data_offset;
1700         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1701             (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1702                 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1703         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1704
1705         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1706         bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1707         if (rdev->sb_size & bmask)
1708                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1709
1710         if (minor_version
1711             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1712                 return -EINVAL;
1713         if (minor_version
1714             && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1715                 return -EINVAL;
1716
1717         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1718                 rdev->desc_nr = -1;
1719         else
1720                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1721
1722         if (!rdev->bb_page) {
1723                 rdev->bb_page = alloc_page(GFP_KERNEL);
1724                 if (!rdev->bb_page)
1725                         return -ENOMEM;
1726         }
1727         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1728             rdev->badblocks.count == 0) {
1729                 /* need to load the bad block list.
1730                  * Currently we limit it to one page.
1731                  */
1732                 s32 offset;
1733                 sector_t bb_sector;
1734                 __le64 *bbp;
1735                 int i;
1736                 int sectors = le16_to_cpu(sb->bblog_size);
1737                 if (sectors > (PAGE_SIZE / 512))
1738                         return -EINVAL;
1739                 offset = le32_to_cpu(sb->bblog_offset);
1740                 if (offset == 0)
1741                         return -EINVAL;
1742                 bb_sector = (long long)offset;
1743                 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1744                                   rdev->bb_page, REQ_OP_READ, 0, true))
1745                         return -EIO;
1746                 bbp = (__le64 *)page_address(rdev->bb_page);
1747                 rdev->badblocks.shift = sb->bblog_shift;
1748                 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1749                         u64 bb = le64_to_cpu(*bbp);
1750                         int count = bb & (0x3ff);
1751                         u64 sector = bb >> 10;
1752                         sector <<= sb->bblog_shift;
1753                         count <<= sb->bblog_shift;
1754                         if (bb + 1 == 0)
1755                                 break;
1756                         if (badblocks_set(&rdev->badblocks, sector, count, 1))
1757                                 return -EINVAL;
1758                 }
1759         } else if (sb->bblog_offset != 0)
1760                 rdev->badblocks.shift = 0;
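        /*
         * Illustrative decode of one on-disk bad-block entry (a sketch, not
         * part of the driver): each __le64 packs a 54-bit start and a 10-bit
         * length, both in units of 2^bblog_shift sectors.  For example
         * bb == 0x2403 gives count = bb & 0x3ff = 3 and sector = bb >> 10 = 9;
         * with bblog_shift == 3 that becomes 24 sectors starting at sector 72.
         * An all-ones entry (bb + 1 == 0) terminates the list.
         */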
1761
1762         if ((le32_to_cpu(sb->feature_map) &
1763             (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
1764                 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1765                 rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1766                 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1767         }
1768
1769         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
1770             sb->level != 0)
1771                 return -EINVAL;
1772
1773         /* not spare disk, or LEVEL_MULTIPATH */
1774         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
1775                 (rdev->desc_nr >= 0 &&
1776                 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1777                 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1778                  le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
1779                 spare_disk = false;
1780
1781         if (!refdev) {
1782                 if (!spare_disk)
1783                         ret = 1;
1784                 else
1785                         ret = 0;
1786         } else {
1787                 __u64 ev1, ev2;
1788                 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1789
1790                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1791                     sb->level != refsb->level ||
1792                     sb->layout != refsb->layout ||
1793                     sb->chunksize != refsb->chunksize) {
1794                         pr_warn("md: %s has strangely different superblock to %s\n",
1795                                 bdevname(rdev->bdev,b),
1796                                 bdevname(refdev->bdev,b2));
1797                         return -EINVAL;
1798                 }
1799                 ev1 = le64_to_cpu(sb->events);
1800                 ev2 = le64_to_cpu(refsb->events);
1801
1802                 if (!spare_disk && ev1 > ev2)
1803                         ret = 1;
1804                 else
1805                         ret = 0;
1806         }
1807         if (minor_version) {
1808                 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1809                 sectors -= rdev->data_offset;
1810         } else
1811                 sectors = rdev->sb_start;
1812         if (sectors < le64_to_cpu(sb->data_size))
1813                 return -EINVAL;
1814         rdev->sectors = le64_to_cpu(sb->data_size);
1815         return ret;
1816 }
1817
1818 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1819 {
1820         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1821         __u64 ev1 = le64_to_cpu(sb->events);
1822
1823         rdev->raid_disk = -1;
1824         clear_bit(Faulty, &rdev->flags);
1825         clear_bit(In_sync, &rdev->flags);
1826         clear_bit(Bitmap_sync, &rdev->flags);
1827         clear_bit(WriteMostly, &rdev->flags);
1828
1829         if (mddev->raid_disks == 0) {
1830                 mddev->major_version = 1;
1831                 mddev->patch_version = 0;
1832                 mddev->external = 0;
1833                 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1834                 mddev->ctime = le64_to_cpu(sb->ctime);
1835                 mddev->utime = le64_to_cpu(sb->utime);
1836                 mddev->level = le32_to_cpu(sb->level);
1837                 mddev->clevel[0] = 0;
1838                 mddev->layout = le32_to_cpu(sb->layout);
1839                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1840                 mddev->dev_sectors = le64_to_cpu(sb->size);
1841                 mddev->events = ev1;
1842                 mddev->bitmap_info.offset = 0;
1843                 mddev->bitmap_info.space = 0;
1844                 /* Default location for bitmap is 1K after superblock
1845                  * using 3K - total of 4K
1846                  */
1847                 mddev->bitmap_info.default_offset = 1024 >> 9;
1848                 mddev->bitmap_info.default_space = (4096-1024) >> 9;
1849                 mddev->reshape_backwards = 0;
1850
1851                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1852                 memcpy(mddev->uuid, sb->set_uuid, 16);
1853
1854                 mddev->max_disks =  (4096-256)/2;
1855
1856                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1857                     mddev->bitmap_info.file == NULL) {
1858                         mddev->bitmap_info.offset =
1859                                 (__s32)le32_to_cpu(sb->bitmap_offset);
1860                         /* Metadata doesn't record how much space is available.
1861                          * For 1.0, we assume we can use up to the superblock
1862                          * if before, else to 4K beyond superblock.
1863                          * For others, assume no change is possible.
1864                          */
1865                         if (mddev->minor_version > 0)
1866                                 mddev->bitmap_info.space = 0;
1867                         else if (mddev->bitmap_info.offset > 0)
1868                                 mddev->bitmap_info.space =
1869                                         8 - mddev->bitmap_info.offset;
1870                         else
1871                                 mddev->bitmap_info.space =
1872                                         -mddev->bitmap_info.offset;
1873                 }
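                /*
                 * Illustrative values for the space calculation above (a
                 * sketch, not part of the driver): with 1.0 metadata
                 * (superblock at the end of the device, minor_version == 0)
                 * and bitmap_offset == +2 sectors, space = 8 - 2 = 6 sectors
                 * (3K), i.e. up to 4K beyond the superblock in total; with
                 * bitmap_offset == -16 (bitmap 8K before the superblock),
                 * space = 16 sectors, everything up to the superblock.
                 */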
1874
1875                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1876                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1877                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1878                         mddev->new_level = le32_to_cpu(sb->new_level);
1879                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1880                         mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1881                         if (mddev->delta_disks < 0 ||
1882                             (mddev->delta_disks == 0 &&
1883                              (le32_to_cpu(sb->feature_map)
1884                               & MD_FEATURE_RESHAPE_BACKWARDS)))
1885                                 mddev->reshape_backwards = 1;
1886                 } else {
1887                         mddev->reshape_position = MaxSector;
1888                         mddev->delta_disks = 0;
1889                         mddev->new_level = mddev->level;
1890                         mddev->new_layout = mddev->layout;
1891                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1892                 }
1893
1894                 if (mddev->level == 0 &&
1895                     !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
1896                         mddev->layout = -1;
1897
1898                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
1899                         set_bit(MD_HAS_JOURNAL, &mddev->flags);
1900
1901                 if (le32_to_cpu(sb->feature_map) &
1902                     (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
1903                         if (le32_to_cpu(sb->feature_map) &
1904                             (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
1905                                 return -EINVAL;
1906                         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
1907                             (le32_to_cpu(sb->feature_map) &
1908                                             MD_FEATURE_MULTIPLE_PPLS))
1909                                 return -EINVAL;
1910                         set_bit(MD_HAS_PPL, &mddev->flags);
1911                 }
1912         } else if (mddev->pers == NULL) {
1913                 /* Insist on a good event counter while assembling, except for
1914                  * spares (which don't need an event count) */
1915                 ++ev1;
1916                 if (rdev->desc_nr >= 0 &&
1917                     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1918                     (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1919                      le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1920                         if (ev1 < mddev->events)
1921                                 return -EINVAL;
1922         } else if (mddev->bitmap) {
1923                 /* If adding to array with a bitmap, then we can accept an
1924                  * older device, but not too old.
1925                  */
1926                 if (ev1 < mddev->bitmap->events_cleared)
1927                         return 0;
1928                 if (ev1 < mddev->events)
1929                         set_bit(Bitmap_sync, &rdev->flags);
1930         } else {
1931                 if (ev1 < mddev->events)
1932                         /* just a hot-add of a new device, leave raid_disk at -1 */
1933                         return 0;
1934         }
1935         if (mddev->level != LEVEL_MULTIPATH) {
1936                 int role;
1937                 if (rdev->desc_nr < 0 ||
1938                     rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1939                         role = MD_DISK_ROLE_SPARE;
1940                         rdev->desc_nr = -1;
1941                 } else
1942                         role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1943                 switch(role) {
1944                 case MD_DISK_ROLE_SPARE: /* spare */
1945                         break;
1946                 case MD_DISK_ROLE_FAULTY: /* faulty */
1947                         set_bit(Faulty, &rdev->flags);
1948                         break;
1949                 case MD_DISK_ROLE_JOURNAL: /* journal device */
1950                         if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1951                                 /* journal device without journal feature */
1952                                 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
1953                                 return -EINVAL;
1954                         }
1955                         set_bit(Journal, &rdev->flags);
1956                         rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1957                         rdev->raid_disk = 0;
1958                         break;
1959                 default:
1960                         rdev->saved_raid_disk = role;
1961                         if ((le32_to_cpu(sb->feature_map) &
1962                              MD_FEATURE_RECOVERY_OFFSET)) {
1963                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1964                                 if (!(le32_to_cpu(sb->feature_map) &
1965                                       MD_FEATURE_RECOVERY_BITMAP))
1966                                         rdev->saved_raid_disk = -1;
1967                         } else {
1968                                 /*
1969                                  * If the array is FROZEN, then the device can't
1970                                  * be in_sync with rest of array.
1971                                  */
1972                                 if (!test_bit(MD_RECOVERY_FROZEN,
1973                                               &mddev->recovery))
1974                                         set_bit(In_sync, &rdev->flags);
1975                         }
1976                         rdev->raid_disk = role;
1977                         break;
1978                 }
1979                 if (sb->devflags & WriteMostly1)
1980                         set_bit(WriteMostly, &rdev->flags);
1981                 if (sb->devflags & FailFast1)
1982                         set_bit(FailFast, &rdev->flags);
1983                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1984                         set_bit(Replacement, &rdev->flags);
1985         } else /* MULTIPATH are always insync */
1986                 set_bit(In_sync, &rdev->flags);
1987
1988         return 0;
1989 }
1990
1991 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1992 {
1993         struct mdp_superblock_1 *sb;
1994         struct md_rdev *rdev2;
1995         int max_dev, i;
1996         /* make rdev->sb match mddev and rdev data. */
1997
1998         sb = page_address(rdev->sb_page);
1999
2000         sb->feature_map = 0;
2001         sb->pad0 = 0;
2002         sb->recovery_offset = cpu_to_le64(0);
2003         memset(sb->pad3, 0, sizeof(sb->pad3));
2004
2005         sb->utime = cpu_to_le64((__u64)mddev->utime);
2006         sb->events = cpu_to_le64(mddev->events);
2007         if (mddev->in_sync)
2008                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
2009         else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
2010                 sb->resync_offset = cpu_to_le64(MaxSector);
2011         else
2012                 sb->resync_offset = cpu_to_le64(0);
2013
2014         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
2015
2016         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
2017         sb->size = cpu_to_le64(mddev->dev_sectors);
2018         sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
2019         sb->level = cpu_to_le32(mddev->level);
2020         sb->layout = cpu_to_le32(mddev->layout);
2021         if (test_bit(FailFast, &rdev->flags))
2022                 sb->devflags |= FailFast1;
2023         else
2024                 sb->devflags &= ~FailFast1;
2025
2026         if (test_bit(WriteMostly, &rdev->flags))
2027                 sb->devflags |= WriteMostly1;
2028         else
2029                 sb->devflags &= ~WriteMostly1;
2030         sb->data_offset = cpu_to_le64(rdev->data_offset);
2031         sb->data_size = cpu_to_le64(rdev->sectors);
2032
2033         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
2034                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
2035                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
2036         }
2037
2038         if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
2039             !test_bit(In_sync, &rdev->flags)) {
2040                 sb->feature_map |=
2041                         cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
2042                 sb->recovery_offset =
2043                         cpu_to_le64(rdev->recovery_offset);
2044                 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
2045                         sb->feature_map |=
2046                                 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
2047         }
2048         /* Note: recovery_offset and journal_tail share space  */
2049         if (test_bit(Journal, &rdev->flags))
2050                 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
2051         if (test_bit(Replacement, &rdev->flags))
2052                 sb->feature_map |=
2053                         cpu_to_le32(MD_FEATURE_REPLACEMENT);
2054
2055         if (mddev->reshape_position != MaxSector) {
2056                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
2057                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2058                 sb->new_layout = cpu_to_le32(mddev->new_layout);
2059                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2060                 sb->new_level = cpu_to_le32(mddev->new_level);
2061                 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
2062                 if (mddev->delta_disks == 0 &&
2063                     mddev->reshape_backwards)
2064                         sb->feature_map
2065                                 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
2066                 if (rdev->new_data_offset != rdev->data_offset) {
2067                         sb->feature_map
2068                                 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
2069                         sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2070                                                              - rdev->data_offset));
2071                 }
2072         }
2073
2074         if (mddev_is_clustered(mddev))
2075                 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
2076
2077         if (rdev->badblocks.count == 0)
2078                 /* Nothing to do for bad blocks */ ;
2079         else if (sb->bblog_offset == 0)
2080                 /* Cannot record bad blocks on this device */
2081                 md_error(mddev, rdev);
2082         else {
2083                 struct badblocks *bb = &rdev->badblocks;
2084                 __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
2085                 u64 *p = bb->page;
2086                 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
2087                 if (bb->changed) {
2088                         unsigned seq;
2089
2090 retry:
2091                         seq = read_seqbegin(&bb->lock);
2092
2093                         memset(bbp, 0xff, PAGE_SIZE);
2094
2095                         for (i = 0 ; i < bb->count ; i++) {
2096                                 u64 internal_bb = p[i];
2097                                 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
2098                                                 | BB_LEN(internal_bb));
2099                                 bbp[i] = cpu_to_le64(store_bb);
2100                         }
2101                         bb->changed = 0;
2102                         if (read_seqretry(&bb->lock, seq))
2103                                 goto retry;
2104
2105                         bb->sector = (rdev->sb_start +
2106                                       (int)le32_to_cpu(sb->bblog_offset));
2107                         bb->size = le16_to_cpu(sb->bblog_size);
2108                 }
2109         }
2110
2111         max_dev = 0;
2112         rdev_for_each(rdev2, mddev)
2113                 if (rdev2->desc_nr+1 > max_dev)
2114                         max_dev = rdev2->desc_nr+1;
2115
2116         if (max_dev > le32_to_cpu(sb->max_dev)) {
2117                 int bmask;
2118                 sb->max_dev = cpu_to_le32(max_dev);
2119                 rdev->sb_size = max_dev * 2 + 256;
2120                 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2121                 if (rdev->sb_size & bmask)
2122                         rdev->sb_size = (rdev->sb_size | bmask) + 1;
2123         } else
2124                 max_dev = le32_to_cpu(sb->max_dev);
2125
2126         for (i=0; i<max_dev;i++)
2127                 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2128
2129         if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
2130                 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
2131
2132         if (test_bit(MD_HAS_PPL, &mddev->flags)) {
2133                 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
2134                         sb->feature_map |=
2135                             cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
2136                 else
2137                         sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
2138                 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2139                 sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2140         }
2141
2142         rdev_for_each(rdev2, mddev) {
2143                 i = rdev2->desc_nr;
2144                 if (test_bit(Faulty, &rdev2->flags))
2145                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
2146                 else if (test_bit(In_sync, &rdev2->flags))
2147                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2148                 else if (test_bit(Journal, &rdev2->flags))
2149                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
2150                 else if (rdev2->raid_disk >= 0)
2151                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2152                 else
2153                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2154         }
2155
2156         sb->sb_csum = calc_sb_1_csum(sb);
2157 }
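/*
 * Illustrative dev_roles[] encoding for the loop at the end of super_1_sync()
 * above (a sketch, not part of the driver): each 16-bit slot holds either the
 * member's raid_disk number or one of the reserved values MD_DISK_ROLE_SPARE
 * (0xffff), MD_DISK_ROLE_FAULTY (0xfffe) or MD_DISK_ROLE_JOURNAL (0xfffd).
 * So an array whose desc_nr 0-2 are in-sync members and desc_nr 3 is faulty
 * would record { 0x0000, 0x0001, 0x0002, 0xfffe, 0xffff, ... }, the trailing
 * slots having been preset to spare.
 */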
2158
2159 static unsigned long long
2160 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
2161 {
2162         struct mdp_superblock_1 *sb;
2163         sector_t max_sectors;
2164         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
2165                 return 0; /* component must fit device */
2166         if (rdev->data_offset != rdev->new_data_offset)
2167                 return 0; /* too confusing */
2168         if (rdev->sb_start < rdev->data_offset) {
2169                 /* minor versions 1 and 2; superblock before data */
2170                 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
2171                 max_sectors -= rdev->data_offset;
2172                 if (!num_sectors || num_sectors > max_sectors)
2173                         num_sectors = max_sectors;
2174         } else if (rdev->mddev->bitmap_info.offset) {
2175                 /* minor version 0 with bitmap we can't move */
2176                 return 0;
2177         } else {
2178                 /* minor version 0; superblock after data */
2179                 sector_t sb_start;
2180                 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
2181                 sb_start &= ~(sector_t)(4*2 - 1);
2182                 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
2183                 if (!num_sectors || num_sectors > max_sectors)
2184                         num_sectors = max_sectors;
2185                 rdev->sb_start = sb_start;
2186         }
2187         sb = page_address(rdev->sb_page);
2188         sb->data_size = cpu_to_le64(num_sectors);
2189         sb->super_offset = cpu_to_le64(rdev->sb_start);
2190         sb->sb_csum = calc_sb_1_csum(sb);
2191         do {
2192                 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2193                                rdev->sb_page);
2194         } while (md_super_wait(rdev->mddev) < 0);
2195         return num_sectors;
2196
2197 }
2198
2199 static int
2200 super_1_allow_new_offset(struct md_rdev *rdev,
2201                          unsigned long long new_offset)
2202 {
2203         /* All necessary checks on new >= old have been done */
2204         struct bitmap *bitmap;
2205         if (new_offset >= rdev->data_offset)
2206                 return 1;
2207
2208         /* with 1.0 metadata, there is no metadata to tread on
2209          * so we can always move back */
2210         if (rdev->mddev->minor_version == 0)
2211                 return 1;
2212
2213         /* otherwise we must be sure not to step on
2214          * any metadata, so stay:
2215          * 36K beyond start of superblock
2216          * beyond end of badblocks
2217          * beyond write-intent bitmap
2218          */
2219         if (rdev->sb_start + (32+4)*2 > new_offset)
2220                 return 0;
2221         bitmap = rdev->mddev->bitmap;
2222         if (bitmap && !rdev->mddev->bitmap_info.file &&
2223             rdev->sb_start + rdev->mddev->bitmap_info.offset +
2224             bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
2225                 return 0;
2226         if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2227                 return 0;
2228
2229         return 1;
2230 }
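/*
 * Note on the constants in super_1_allow_new_offset() above (illustrative):
 * (32+4)*2 sectors is 72 * 512 bytes = 36K, matching the "36K beyond start
 * of superblock" rule in the comment, and file_pages * (PAGE_SIZE >> 9)
 * converts bitmap pages to 512-byte sectors (8 sectors per 4K page).
 */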
2231
2232 static struct super_type super_types[] = {
2233         [0] = {
2234                 .name   = "0.90.0",
2235                 .owner  = THIS_MODULE,
2236                 .load_super         = super_90_load,
2237                 .validate_super     = super_90_validate,
2238                 .sync_super         = super_90_sync,
2239                 .rdev_size_change   = super_90_rdev_size_change,
2240                 .allow_new_offset   = super_90_allow_new_offset,
2241         },
2242         [1] = {
2243                 .name   = "md-1",
2244                 .owner  = THIS_MODULE,
2245                 .load_super         = super_1_load,
2246                 .validate_super     = super_1_validate,
2247                 .sync_super         = super_1_sync,
2248                 .rdev_size_change   = super_1_rdev_size_change,
2249                 .allow_new_offset   = super_1_allow_new_offset,
2250         },
2251 };
2252
2253 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
2254 {
2255         if (mddev->sync_super) {
2256                 mddev->sync_super(mddev, rdev);
2257                 return;
2258         }
2259
2260         BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
2261
2262         super_types[mddev->major_version].sync_super(mddev, rdev);
2263 }
2264
2265 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
2266 {
2267         struct md_rdev *rdev, *rdev2;
2268
2269         rcu_read_lock();
2270         rdev_for_each_rcu(rdev, mddev1) {
2271                 if (test_bit(Faulty, &rdev->flags) ||
2272                     test_bit(Journal, &rdev->flags) ||
2273                     rdev->raid_disk == -1)
2274                         continue;
2275                 rdev_for_each_rcu(rdev2, mddev2) {
2276                         if (test_bit(Faulty, &rdev2->flags) ||
2277                             test_bit(Journal, &rdev2->flags) ||
2278                             rdev2->raid_disk == -1)
2279                                 continue;
2280                         if (rdev->bdev->bd_contains ==
2281                             rdev2->bdev->bd_contains) {
2282                                 rcu_read_unlock();
2283                                 return 1;
2284                         }
2285                 }
2286         }
2287         rcu_read_unlock();
2288         return 0;
2289 }
2290
2291 static LIST_HEAD(pending_raid_disks);
2292
2293 /*
2294  * Try to register data integrity profile for an mddev
2295  *
2296  * This is called when an array is started and after a disk has been kicked
2297  * from the array. It only succeeds if all working and active component devices
2298  * are integrity capable with matching profiles.
2299  */
2300 int md_integrity_register(struct mddev *mddev)
2301 {
2302         struct md_rdev *rdev, *reference = NULL;
2303
2304         if (list_empty(&mddev->disks))
2305                 return 0; /* nothing to do */
2306         if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2307                 return 0; /* shouldn't register, or already is */
2308         rdev_for_each(rdev, mddev) {
2309                 /* skip spares and non-functional disks */
2310                 if (test_bit(Faulty, &rdev->flags))
2311                         continue;
2312                 if (rdev->raid_disk < 0)
2313                         continue;
2314                 if (!reference) {
2315                         /* Use the first rdev as the reference */
2316                         reference = rdev;
2317                         continue;
2318                 }
2319                 /* does this rdev's profile match the reference profile? */
2320                 if (blk_integrity_compare(reference->bdev->bd_disk,
2321                                 rdev->bdev->bd_disk) < 0)
2322                         return -EINVAL;
2323         }
2324         if (!reference || !bdev_get_integrity(reference->bdev))
2325                 return 0;
2326         /*
2327          * All component devices are integrity capable and have matching
2328          * profiles, register the common profile for the md device.
2329          */
2330         blk_integrity_register(mddev->gendisk,
2331                                bdev_get_integrity(reference->bdev));
2332
2333         pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
2334         if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) {
2335                 pr_err("md: failed to create integrity pool for %s\n",
2336                        mdname(mddev));
2337                 return -EINVAL;
2338         }
2339         return 0;
2340 }
2341 EXPORT_SYMBOL(md_integrity_register);
2342
2343 /*
2344  * Attempt to add an rdev, but only if it is consistent with the current
2345  * integrity profile
2346  */
2347 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2348 {
2349         struct blk_integrity *bi_mddev;
2350         char name[BDEVNAME_SIZE];
2351
2352         if (!mddev->gendisk)
2353                 return 0;
2354
2355         bi_mddev = blk_get_integrity(mddev->gendisk);
2356
2357         if (!bi_mddev) /* nothing to do */
2358                 return 0;
2359
2360         if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2361                 pr_err("%s: incompatible integrity profile for %s\n",
2362                        mdname(mddev), bdevname(rdev->bdev, name));
2363                 return -ENXIO;
2364         }
2365
2366         return 0;
2367 }
2368 EXPORT_SYMBOL(md_integrity_add_rdev);
2369
2370 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2371 {
2372         char b[BDEVNAME_SIZE];
2373         struct kobject *ko;
2374         int err;
2375
2376         /* prevent duplicates */
2377         if (find_rdev(mddev, rdev->bdev->bd_dev))
2378                 return -EEXIST;
2379
2380         if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) &&
2381             mddev->pers)
2382                 return -EROFS;
2383
2384         /* make sure rdev->sectors exceeds mddev->dev_sectors */
2385         if (!test_bit(Journal, &rdev->flags) &&
2386             rdev->sectors &&
2387             (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2388                 if (mddev->pers) {
2389                         /* Cannot change size, so fail
2390                          * If mddev->level <= 0, then we don't care
2391                          * about aligning sizes (e.g. linear)
2392                          */
2393                         if (mddev->level > 0)
2394                                 return -ENOSPC;
2395                 } else
2396                         mddev->dev_sectors = rdev->sectors;
2397         }
2398
2399         /* Verify rdev->desc_nr is unique.
2400          * If it is -1, assign a free number, else
2401          * check number is not in use
2402          */
2403         rcu_read_lock();
2404         if (rdev->desc_nr < 0) {
2405                 int choice = 0;
2406                 if (mddev->pers)
2407                         choice = mddev->raid_disks;
2408                 while (md_find_rdev_nr_rcu(mddev, choice))
2409                         choice++;
2410                 rdev->desc_nr = choice;
2411         } else {
2412                 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2413                         rcu_read_unlock();
2414                         return -EBUSY;
2415                 }
2416         }
2417         rcu_read_unlock();
2418         if (!test_bit(Journal, &rdev->flags) &&
2419             mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2420                 pr_warn("md: %s: array is limited to %d devices\n",
2421                         mdname(mddev), mddev->max_disks);
2422                 return -EBUSY;
2423         }
2424         bdevname(rdev->bdev,b);
2425         strreplace(b, '/', '!');
2426
2427         rdev->mddev = mddev;
2428         pr_debug("md: bind<%s>\n", b);
2429
2430         if (mddev->raid_disks)
2431                 mddev_create_serial_pool(mddev, rdev, false);
2432
2433         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2434                 goto fail;
2435
2436         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2437         if (sysfs_create_link(&rdev->kobj, ko, "block"))
2438                 /* failure here is OK */;
2439         rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2440
2441         list_add_rcu(&rdev->same_set, &mddev->disks);
2442         bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2443
2444         /* May as well allow recovery to be retried once */
2445         mddev->recovery_disabled++;
2446
2447         return 0;
2448
2449  fail:
2450         pr_warn("md: failed to register dev-%s for %s\n",
2451                 b, mdname(mddev));
2452         return err;
2453 }
2454
2455 static void md_delayed_delete(struct work_struct *ws)
2456 {
2457         struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2458         kobject_del(&rdev->kobj);
2459         kobject_put(&rdev->kobj);
2460 }
2461
2462 static void unbind_rdev_from_array(struct md_rdev *rdev)
2463 {
2464         char b[BDEVNAME_SIZE];
2465
2466         bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2467         list_del_rcu(&rdev->same_set);
2468         pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
2469         mddev_destroy_serial_pool(rdev->mddev, rdev, false);
2470         rdev->mddev = NULL;
2471         sysfs_remove_link(&rdev->kobj, "block");
2472         sysfs_put(rdev->sysfs_state);
2473         rdev->sysfs_state = NULL;
2474         rdev->badblocks.count = 0;
2475         /* We need to delay this, otherwise we can deadlock when
2476          * writing 'remove' to "dev/state".  We also need
2477          * to delay it due to rcu usage.
2478          */
2479         synchronize_rcu();
2480         INIT_WORK(&rdev->del_work, md_delayed_delete);
2481         kobject_get(&rdev->kobj);
2482         queue_work(md_misc_wq, &rdev->del_work);
2483 }
2484
2485 /*
2486  * prevent the device from being mounted, repartitioned or
2487  * otherwise reused by a RAID array (or any other kernel
2488  * subsystem), by bd_claiming the device.
2489  */
2490 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2491 {
2492         int err = 0;
2493         struct block_device *bdev;
2494         char b[BDEVNAME_SIZE];
2495
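        /*
         * Note (an assumption about the shared case, not stated in the code):
         * exclusive claims taken with the same holder cookie may coexist, so
         * passing the address of lock_rdev itself as a common md-wide holder,
         * rather than this rdev, lets more than one claimer hold the device.
         */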
2496         bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2497                                  shared ? (struct md_rdev *)lock_rdev : rdev);
2498         if (IS_ERR(bdev)) {
2499                 pr_warn("md: could not open %s.\n", __bdevname(dev, b));
2500                 return PTR_ERR(bdev);
2501         }
2502         rdev->bdev = bdev;
2503         return err;
2504 }
2505
2506 static void unlock_rdev(struct md_rdev *rdev)
2507 {
2508         struct block_device *bdev = rdev->bdev;
2509         rdev->bdev = NULL;
2510         blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2511 }
2512
2513 void md_autodetect_dev(dev_t dev);
2514
2515 static void export_rdev(struct md_rdev *rdev)
2516 {
2517         char b[BDEVNAME_SIZE];
2518
2519         pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
2520         md_rdev_clear(rdev);
2521 #ifndef MODULE
2522         if (test_bit(AutoDetected, &rdev->flags))
2523                 md_autodetect_dev(rdev->bdev->bd_dev);
2524 #endif
2525         unlock_rdev(rdev);
2526         kobject_put(&rdev->kobj);
2527 }
2528
2529 void md_kick_rdev_from_array(struct md_rdev *rdev)
2530 {
2531         unbind_rdev_from_array(rdev);
2532         export_rdev(rdev);
2533 }
2534 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
2535
2536 static void export_array(struct mddev *mddev)
2537 {
2538         struct md_rdev *rdev;
2539
2540         while (!list_empty(&mddev->disks)) {
2541                 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2542                                         same_set);
2543                 md_kick_rdev_from_array(rdev);
2544         }
2545         mddev->raid_disks = 0;
2546         mddev->major_version = 0;
2547 }
2548
2549 static bool set_in_sync(struct mddev *mddev)
2550 {
2551         lockdep_assert_held(&mddev->lock);
2552         if (!mddev->in_sync) {
2553                 mddev->sync_checkers++;
2554                 spin_unlock(&mddev->lock);
2555                 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2556                 spin_lock(&mddev->lock);
2557                 if (!mddev->in_sync &&
2558                     percpu_ref_is_zero(&mddev->writes_pending)) {
2559                         mddev->in_sync = 1;
2560                         /*
2561                          * Ensure ->in_sync is visible before we clear
2562                          * ->sync_checkers.
2563                          */
2564                         smp_mb();
2565                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2566                         sysfs_notify_dirent_safe(mddev->sysfs_state);
2567                 }
2568                 if (--mddev->sync_checkers == 0)
2569                         percpu_ref_switch_to_percpu(&mddev->writes_pending);
2570         }
2571         if (mddev->safemode == 1)
2572                 mddev->safemode = 0;
2573         return mddev->in_sync;
2574 }
2575
2576 static void sync_sbs(struct mddev *mddev, int nospares)
2577 {
2578         /* Update each superblock (in-memory image), but
2579          * if we are allowed to, skip spares which already
2580          * have the right event counter, or have one earlier
2581          * (which would mean they aren't being marked as dirty
2582          * with the rest of the array)
2583          */
2584         struct md_rdev *rdev;
2585         rdev_for_each(rdev, mddev) {
2586                 if (rdev->sb_events == mddev->events ||
2587                     (nospares &&
2588                      rdev->raid_disk < 0 &&
2589                      rdev->sb_events+1 == mddev->events)) {
2590                         /* Don't update this superblock */
2591                         rdev->sb_loaded = 2;
2592                 } else {
2593                         sync_super(mddev, rdev);
2594                         rdev->sb_loaded = 1;
2595                 }
2596         }
2597 }
2598
2599 static bool does_sb_need_changing(struct mddev *mddev)
2600 {
2601         struct md_rdev *rdev;
2602         struct mdp_superblock_1 *sb;
2603         int role;
2604
2605         /* Find a good rdev */
2606         rdev_for_each(rdev, mddev)
2607                 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
2608                         break;
2609
2610         /* No good device found. */
2611         if (!rdev)
2612                 return false;
2613
2614         sb = page_address(rdev->sb_page);
2615         /* Check if a device has become faulty or a spare has become active */
2616         rdev_for_each(rdev, mddev) {
2617                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2618                 /* Device activated? */
2619                 if (role == 0xffff && rdev->raid_disk >=0 &&
2620                     !test_bit(Faulty, &rdev->flags))
2621                         return true;
2622                 /* Device turned faulty? */
2623                 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2624                         return true;
2625         }
2626
2627         /* Check if any mddev parameters have changed */
2628         if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2629             (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2630             (mddev->layout != le32_to_cpu(sb->layout)) ||
2631             (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2632             (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2633                 return true;
2634
2635         return false;
2636 }
2637
2638 void md_update_sb(struct mddev *mddev, int force_change)
2639 {
2640         struct md_rdev *rdev;
2641         int sync_req;
2642         int nospares = 0;
2643         int any_badblocks_changed = 0;
2644         int ret = -1;
2645
2646         if (mddev->ro) {
2647                 if (force_change)
2648                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2649                 return;
2650         }
2651
2652 repeat:
2653         if (mddev_is_clustered(mddev)) {
2654                 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2655                         force_change = 1;
2656                 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2657                         nospares = 1;
2658                 ret = md_cluster_ops->metadata_update_start(mddev);
2659                 /* Has someone else updated the sb? */
2660                 if (!does_sb_need_changing(mddev)) {
2661                         if (ret == 0)
2662                                 md_cluster_ops->metadata_update_cancel(mddev);
2663                         bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2664                                                          BIT(MD_SB_CHANGE_DEVS) |
2665                                                          BIT(MD_SB_CHANGE_CLEAN));
2666                         return;
2667                 }
2668         }
2669
2670         /*
2671          * First make sure individual recovery_offsets are correct
2672          * curr_resync_completed can only be used during recovery.
2673          * During reshape/resync it might use array-addresses rather
2674          * than device addresses.
2675          */
2676         rdev_for_each(rdev, mddev) {
2677                 if (rdev->raid_disk >= 0 &&
2678                     mddev->delta_disks >= 0 &&
2679                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
2680                     test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
2681                     !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2682                     !test_bit(Journal, &rdev->flags) &&
2683                     !test_bit(In_sync, &rdev->flags) &&
2684                     mddev->curr_resync_completed > rdev->recovery_offset)
2685                                 rdev->recovery_offset = mddev->curr_resync_completed;
2686
2687         }
2688         if (!mddev->persistent) {
2689                 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2690                 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2691                 if (!mddev->external) {
2692                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2693                         rdev_for_each(rdev, mddev) {
2694                                 if (rdev->badblocks.changed) {
2695                                         rdev->badblocks.changed = 0;
2696                                         ack_all_badblocks(&rdev->badblocks);
2697                                         md_error(mddev, rdev);
2698                                 }
2699                                 clear_bit(Blocked, &rdev->flags);
2700                                 clear_bit(BlockedBadBlocks, &rdev->flags);
2701                                 wake_up(&rdev->blocked_wait);
2702                         }
2703                 }
2704                 wake_up(&mddev->sb_wait);
2705                 return;
2706         }
2707
2708         spin_lock(&mddev->lock);
2709
2710         mddev->utime = ktime_get_real_seconds();
2711
2712         if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2713                 force_change = 1;
2714         if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2715                 /* just a clean <-> dirty transition, possibly leave spares alone,
2716                  * though if events isn't the right even/odd value, we will have
2717                  * to update the spares after all
2718                  */
2719                 nospares = 1;
2720         if (force_change)
2721                 nospares = 0;
2722         if (mddev->degraded)
2723                 /* If the array is degraded, then skipping spares is both
2724                  * dangerous and fairly pointless.
2725                  * Dangerous because a device that was removed from the array
2726                  * might have an event_count that still looks up-to-date,
2727                  * so it can be re-added without a resync.
2728                  * Pointless because if there are any spares to skip,
2729                  * then a recovery will happen and soon that array won't
2730                  * be degraded any more and the spare can go back to sleep then.
2731                  */
2732                 nospares = 0;
2733
2734         sync_req = mddev->in_sync;
2735
2736         /* If this is just a dirty<->clean transition, and the array is clean
2737          * and 'events' is odd, we can roll back to the previous clean state */
2738         if (nospares
2739             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2740             && mddev->can_decrease_events
2741             && mddev->events != 1) {
2742                 mddev->events--;
2743                 mddev->can_decrease_events = 0;
2744         } else {
2745                 /* otherwise we have to go forward and ... */
2746                 mddev->events++;
2747                 mddev->can_decrease_events = nospares;
2748         }
2749
2750         /*
2751          * This 64-bit counter should never wrap.
2752          * Either we are somewhere around the year ~1 trillion A.D., assuming
2753          * 1 reboot per second, or we have a bug...
2754          */
2755         WARN_ON(mddev->events == 0);
2756
2757         rdev_for_each(rdev, mddev) {
2758                 if (rdev->badblocks.changed)
2759                         any_badblocks_changed++;
2760                 if (test_bit(Faulty, &rdev->flags))
2761                         set_bit(FaultRecorded, &rdev->flags);
2762         }
2763
2764         sync_sbs(mddev, nospares);
2765         spin_unlock(&mddev->lock);
2766
2767         pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2768                  mdname(mddev), mddev->in_sync);
2769
2770         if (mddev->queue)
2771                 blk_add_trace_msg(mddev->queue, "md md_update_sb");
2772 rewrite:
2773         md_bitmap_update_sb(mddev->bitmap);
2774         rdev_for_each(rdev, mddev) {
2775                 char b[BDEVNAME_SIZE];
2776
2777                 if (rdev->sb_loaded != 1)
2778                         continue; /* no noise on spare devices */
2779
2780                 if (!test_bit(Faulty, &rdev->flags)) {
2781                         md_super_write(mddev, rdev,
2782                                        rdev->sb_start, rdev->sb_size,
2783                                        rdev->sb_page);
2784                         pr_debug("md: (write) %s's sb offset: %llu\n",
2785                                  bdevname(rdev->bdev, b),
2786                                  (unsigned long long)rdev->sb_start);
2787                         rdev->sb_events = mddev->events;
2788                         if (rdev->badblocks.size) {
2789                                 md_super_write(mddev, rdev,
2790                                                rdev->badblocks.sector,
2791                                                rdev->badblocks.size << 9,
2792                                                rdev->bb_page);
2793                                 rdev->badblocks.size = 0;
2794                         }
2795
2796                 } else
2797                         pr_debug("md: %s (skipping faulty)\n",
2798                                  bdevname(rdev->bdev, b));
2799
2800                 if (mddev->level == LEVEL_MULTIPATH)
2801                         /* only need to write one superblock... */
2802                         break;
2803         }
2804         if (md_super_wait(mddev) < 0)
2805                 goto rewrite;
2806         /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
2807
2808         if (mddev_is_clustered(mddev) && ret == 0)
2809                 md_cluster_ops->metadata_update_finish(mddev);
2810
2811         if (mddev->in_sync != sync_req ||
2812             !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2813                                BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
2814                 /* have to write it out again */
2815                 goto repeat;
2816         wake_up(&mddev->sb_wait);
2817         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2818                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2819
2820         rdev_for_each(rdev, mddev) {
2821                 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2822                         clear_bit(Blocked, &rdev->flags);
2823
2824                 if (any_badblocks_changed)
2825                         ack_all_badblocks(&rdev->badblocks);
2826                 clear_bit(BlockedBadBlocks, &rdev->flags);
2827                 wake_up(&rdev->blocked_wait);
2828         }
2829 }
2830 EXPORT_SYMBOL(md_update_sb);
2831
2832 static int add_bound_rdev(struct md_rdev *rdev)
2833 {
2834         struct mddev *mddev = rdev->mddev;
2835         int err = 0;
2836         bool add_journal = test_bit(Journal, &rdev->flags);
2837
2838         if (!mddev->pers->hot_remove_disk || add_journal) {
2839                 /* If there is hot_add_disk but no hot_remove_disk
2840                  * then added disks are for geometry changes,
2841                  * and should be added immediately.
2842                  */
2843                 super_types[mddev->major_version].
2844                         validate_super(mddev, rdev);
2845                 if (add_journal)
2846                         mddev_suspend(mddev);
2847                 err = mddev->pers->hot_add_disk(mddev, rdev);
2848                 if (add_journal)
2849                         mddev_resume(mddev);
2850                 if (err) {
2851                         md_kick_rdev_from_array(rdev);
2852                         return err;
2853                 }
2854         }
2855         sysfs_notify_dirent_safe(rdev->sysfs_state);
2856
2857         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2858         if (mddev->degraded)
2859                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2860         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2861         md_new_event(mddev);
2862         md_wakeup_thread(mddev->thread);
2863         return 0;
2864 }
2865
2866 /* words written to sysfs files may, or may not, be \n terminated.
2867  * We want to accept either case. For this we use cmd_match.
2868  */
2869 static int cmd_match(const char *cmd, const char *str)
2870 {
2871         /* See if cmd, written into a sysfs file, matches
2872          * str.  They must either be the same, or cmd can
2873          * have a trailing newline
2874          */
2875         while (*cmd && *str && *cmd == *str) {
2876                 cmd++;
2877                 str++;
2878         }
2879         if (*cmd == '\n')
2880                 cmd++;
2881         if (*str || *cmd)
2882                 return 0;
2883         return 1;
2884 }
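
/*
 * Illustrative only (not called by the driver): cmd_match() treats a
 * single trailing newline on 'cmd' as insignificant but otherwise
 * requires an exact match, so for example:
 *
 *	cmd_match("faulty\n", "faulty")  returns 1
 *	cmd_match("faulty",   "faulty")  returns 1
 *	cmd_match("fault",    "faulty")  returns 0   (prefix only)
 *	cmd_match("faultyX",  "faulty")  returns 0   (trailing garbage)
 */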
2885
2886 struct rdev_sysfs_entry {
2887         struct attribute attr;
2888         ssize_t (*show)(struct md_rdev *, char *);
2889         ssize_t (*store)(struct md_rdev *, const char *, size_t);
2890 };
2891
2892 static ssize_t
2893 state_show(struct md_rdev *rdev, char *page)
2894 {
2895         char *sep = ",";
2896         size_t len = 0;
2897         unsigned long flags = READ_ONCE(rdev->flags);
2898
2899         if (test_bit(Faulty, &flags) ||
2900             (!test_bit(ExternalBbl, &flags) &&
2901             rdev->badblocks.unacked_exist))
2902                 len += sprintf(page+len, "faulty%s", sep);
2903         if (test_bit(In_sync, &flags))
2904                 len += sprintf(page+len, "in_sync%s", sep);
2905         if (test_bit(Journal, &flags))
2906                 len += sprintf(page+len, "journal%s", sep);
2907         if (test_bit(WriteMostly, &flags))
2908                 len += sprintf(page+len, "write_mostly%s", sep);
2909         if (test_bit(Blocked, &flags) ||
2910             (rdev->badblocks.unacked_exist
2911              && !test_bit(Faulty, &flags)))
2912                 len += sprintf(page+len, "blocked%s", sep);
2913         if (!test_bit(Faulty, &flags) &&
2914             !test_bit(Journal, &flags) &&
2915             !test_bit(In_sync, &flags))
2916                 len += sprintf(page+len, "spare%s", sep);
2917         if (test_bit(WriteErrorSeen, &flags))
2918                 len += sprintf(page+len, "write_error%s", sep);
2919         if (test_bit(WantReplacement, &flags))
2920                 len += sprintf(page+len, "want_replacement%s", sep);
2921         if (test_bit(Replacement, &flags))
2922                 len += sprintf(page+len, "replacement%s", sep);
2923         if (test_bit(ExternalBbl, &flags))
2924                 len += sprintf(page+len, "external_bbl%s", sep);
2925         if (test_bit(FailFast, &flags))
2926                 len += sprintf(page+len, "failfast%s", sep);
2927
2928         if (len)
2929                 len -= strlen(sep);
2930
2931         return len+sprintf(page+len, "\n");
2932 }
2933
2934 static ssize_t
2935 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2936 {
2937         /* can write
2938          *  faulty  - simulates an error
2939          *  remove  - disconnects the device
2940          *  writemostly - sets write_mostly
2941          *  -writemostly - clears write_mostly
2942          *  blocked - sets the Blocked flag
2943          *  -blocked - clears the Blocked flag and possibly simulates an error
2944          *  insync - sets In_sync provided the device isn't active
2945          *  -insync - clears In_sync for a device with a slot assigned,
2946          *            so that it gets rebuilt based on the bitmap
2947          *  write_error - sets WriteErrorSeen
2948          *  -write_error - clears WriteErrorSeen
2949          *  {,-}failfast - set/clear FailFast
2950          */
2951         int err = -EINVAL;
2952         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2953                 md_error(rdev->mddev, rdev);
2954                 if (test_bit(Faulty, &rdev->flags))
2955                         err = 0;
2956                 else
2957                         err = -EBUSY;
2958         } else if (cmd_match(buf, "remove")) {
2959                 if (rdev->mddev->pers) {
2960                         clear_bit(Blocked, &rdev->flags);
2961                         remove_and_add_spares(rdev->mddev, rdev);
2962                 }
2963                 if (rdev->raid_disk >= 0)
2964                         err = -EBUSY;
2965                 else {
2966                         struct mddev *mddev = rdev->mddev;
2967                         err = 0;
2968                         if (mddev_is_clustered(mddev))
2969                                 err = md_cluster_ops->remove_disk(mddev, rdev);
2970
2971                         if (err == 0) {
2972                                 md_kick_rdev_from_array(rdev);
2973                                 if (mddev->pers) {
2974                                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2975                                         md_wakeup_thread(mddev->thread);
2976                                 }
2977                                 md_new_event(mddev);
2978                         }
2979                 }
2980         } else if (cmd_match(buf, "writemostly")) {
2981                 set_bit(WriteMostly, &rdev->flags);
2982                 mddev_create_serial_pool(rdev->mddev, rdev, false);
2983                 err = 0;
2984         } else if (cmd_match(buf, "-writemostly")) {
2985                 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
2986                 clear_bit(WriteMostly, &rdev->flags);
2987                 err = 0;
2988         } else if (cmd_match(buf, "blocked")) {
2989                 set_bit(Blocked, &rdev->flags);
2990                 err = 0;
2991         } else if (cmd_match(buf, "-blocked")) {
2992                 if (!test_bit(Faulty, &rdev->flags) &&
2993                     !test_bit(ExternalBbl, &rdev->flags) &&
2994                     rdev->badblocks.unacked_exist) {
2995                         /* metadata handler doesn't understand badblocks,
2996                          * so we need to fail the device
2997                          */
2998                         md_error(rdev->mddev, rdev);
2999                 }
3000                 clear_bit(Blocked, &rdev->flags);
3001                 clear_bit(BlockedBadBlocks, &rdev->flags);
3002                 wake_up(&rdev->blocked_wait);
3003                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3004                 md_wakeup_thread(rdev->mddev->thread);
3005
3006                 err = 0;
3007         } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
3008                 set_bit(In_sync, &rdev->flags);
3009                 err = 0;
3010         } else if (cmd_match(buf, "failfast")) {
3011                 set_bit(FailFast, &rdev->flags);
3012                 err = 0;
3013         } else if (cmd_match(buf, "-failfast")) {
3014                 clear_bit(FailFast, &rdev->flags);
3015                 err = 0;
3016         } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
3017                    !test_bit(Journal, &rdev->flags)) {
3018                 if (rdev->mddev->pers == NULL) {
3019                         clear_bit(In_sync, &rdev->flags);
3020                         rdev->saved_raid_disk = rdev->raid_disk;
3021                         rdev->raid_disk = -1;
3022                         err = 0;
3023                 }
3024         } else if (cmd_match(buf, "write_error")) {
3025                 set_bit(WriteErrorSeen, &rdev->flags);
3026                 err = 0;
3027         } else if (cmd_match(buf, "-write_error")) {
3028                 clear_bit(WriteErrorSeen, &rdev->flags);
3029                 err = 0;
3030         } else if (cmd_match(buf, "want_replacement")) {
3031                 /* Any non-spare device that is not a replacement can
3032                  * become want_replacement at any time, but we then need to
3033                  * check if recovery is needed.
3034                  */
3035                 if (rdev->raid_disk >= 0 &&
3036                     !test_bit(Journal, &rdev->flags) &&
3037                     !test_bit(Replacement, &rdev->flags))
3038                         set_bit(WantReplacement, &rdev->flags);
3039                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3040                 md_wakeup_thread(rdev->mddev->thread);
3041                 err = 0;
3042         } else if (cmd_match(buf, "-want_replacement")) {
3043                 /* Clearing 'want_replacement' is always allowed.
3044                  * Once replacement starts it is too late though.
3045                  */
3046                 err = 0;
3047                 clear_bit(WantReplacement, &rdev->flags);
3048         } else if (cmd_match(buf, "replacement")) {
3049                 /* Can only set a device as a replacement when array has not
3050                  * yet been started.  Once running, replacement is automatic
3051                  * from spares, or by assigning 'slot'.
3052                  */
3053                 if (rdev->mddev->pers)
3054                         err = -EBUSY;
3055                 else {
3056                         set_bit(Replacement, &rdev->flags);
3057                         err = 0;
3058                 }
3059         } else if (cmd_match(buf, "-replacement")) {
3060                 /* Similarly, can only clear Replacement before start */
3061                 if (rdev->mddev->pers)
3062                         err = -EBUSY;
3063                 else {
3064                         clear_bit(Replacement, &rdev->flags);
3065                         err = 0;
3066                 }
3067         } else if (cmd_match(buf, "re-add")) {
3068                 if (!rdev->mddev->pers)
3069                         err = -EINVAL;
3070                 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3071                                 rdev->saved_raid_disk >= 0) {
3072                         /* clear_bit is performed _after_ all the devices
3073                          * have their local Faulty bit cleared. If any writes
3074                          * happen in the meantime in the local node, they
3075                          * will land in the local bitmap, which will be synced
3076                          * by this node eventually
3077                          */
3078                         if (!mddev_is_clustered(rdev->mddev) ||
3079                             (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3080                                 clear_bit(Faulty, &rdev->flags);
3081                                 err = add_bound_rdev(rdev);
3082                         }
3083                 } else
3084                         err = -EBUSY;
3085         } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3086                 set_bit(ExternalBbl, &rdev->flags);
3087                 rdev->badblocks.shift = 0;
3088                 err = 0;
3089         } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3090                 clear_bit(ExternalBbl, &rdev->flags);
3091                 err = 0;
3092         }
3093         if (!err)
3094                 sysfs_notify_dirent_safe(rdev->sysfs_state);
3095         return err ? err : len;
3096 }
3097 static struct rdev_sysfs_entry rdev_state =
3098 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
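
/*
 * Example usage from userspace (illustrative; array and device names are
 * hypothetical).  Each member has a dev-XXX directory under the array's
 * md/ directory, and the keywords accepted by state_store() above are
 * written to its 'state' file, e.g.:
 *
 *	echo faulty > /sys/block/md0/md/dev-sdb1/state
 *	echo remove > /sys/block/md0/md/dev-sdb1/state
 */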
3099
3100 static ssize_t
3101 errors_show(struct md_rdev *rdev, char *page)
3102 {
3103         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3104 }
3105
3106 static ssize_t
3107 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
3108 {
3109         unsigned int n;
3110         int rv;
3111
3112         rv = kstrtouint(buf, 10, &n);
3113         if (rv < 0)
3114                 return rv;
3115         atomic_set(&rdev->corrected_errors, n);
3116         return len;
3117 }
3118 static struct rdev_sysfs_entry rdev_errors =
3119 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
3120
3121 static ssize_t
3122 slot_show(struct md_rdev *rdev, char *page)
3123 {
3124         if (test_bit(Journal, &rdev->flags))
3125                 return sprintf(page, "journal\n");
3126         else if (rdev->raid_disk < 0)
3127                 return sprintf(page, "none\n");
3128         else
3129                 return sprintf(page, "%d\n", rdev->raid_disk);
3130 }
3131
3132 static ssize_t
3133 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
3134 {
3135         int slot;
3136         int err;
3137
3138         if (test_bit(Journal, &rdev->flags))
3139                 return -EBUSY;
3140         if (strncmp(buf, "none", 4)==0)
3141                 slot = -1;
3142         else {
3143                 err = kstrtouint(buf, 10, (unsigned int *)&slot);
3144                 if (err < 0)
3145                         return err;
3146         }
3147         if (rdev->mddev->pers && slot == -1) {
3148                 /* Setting 'slot' on an active array requires also
3149                  * updating the 'rd%d' link, and communicating
3150                  * with the personality with ->hot_*_disk.
3151                  * For now we only support removing
3152                  * failed/spare devices.  This normally happens automatically,
3153                  * but not when the metadata is externally managed.
3154                  */
3155                 if (rdev->raid_disk == -1)
3156                         return -EEXIST;
3157                 /* personality does all needed checks */
3158                 if (rdev->mddev->pers->hot_remove_disk == NULL)
3159                         return -EINVAL;
3160                 clear_bit(Blocked, &rdev->flags);
3161                 remove_and_add_spares(rdev->mddev, rdev);
3162                 if (rdev->raid_disk >= 0)
3163                         return -EBUSY;
3164                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3165                 md_wakeup_thread(rdev->mddev->thread);
3166         } else if (rdev->mddev->pers) {
3167                 /* Activating a spare .. or possibly reactivating
3168                  * if we ever get bitmaps working here.
3169                  */
3170                 int err;
3171
3172                 if (rdev->raid_disk != -1)
3173                         return -EBUSY;
3174
3175                 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3176                         return -EBUSY;
3177
3178                 if (rdev->mddev->pers->hot_add_disk == NULL)
3179                         return -EINVAL;
3180
3181                 if (slot >= rdev->mddev->raid_disks &&
3182                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3183                         return -ENOSPC;
3184
3185                 rdev->raid_disk = slot;
3186                 if (test_bit(In_sync, &rdev->flags))
3187                         rdev->saved_raid_disk = slot;
3188                 else
3189                         rdev->saved_raid_disk = -1;
3190                 clear_bit(In_sync, &rdev->flags);
3191                 clear_bit(Bitmap_sync, &rdev->flags);
3192                 err = rdev->mddev->pers->
3193                         hot_add_disk(rdev->mddev, rdev);
3194                 if (err) {
3195                         rdev->raid_disk = -1;
3196                         return err;
3197                 } else
3198                         sysfs_notify_dirent_safe(rdev->sysfs_state);
3199                 if (sysfs_link_rdev(rdev->mddev, rdev))
3200                         /* failure here is OK */;
3201                 /* don't wakeup anyone, leave that to userspace. */
3202         } else {
3203                 if (slot >= rdev->mddev->raid_disks &&
3204                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3205                         return -ENOSPC;
3206                 rdev->raid_disk = slot;
3207                 /* assume it is working */
3208                 clear_bit(Faulty, &rdev->flags);
3209                 clear_bit(WriteMostly, &rdev->flags);
3210                 set_bit(In_sync, &rdev->flags);
3211                 sysfs_notify_dirent_safe(rdev->sysfs_state);
3212         }
3213         return len;
3214 }
3215
3216 static struct rdev_sysfs_entry rdev_slot =
3217 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
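
/*
 * Illustrative use of 'slot' (device names hypothetical): writing a number
 * assigns the device to that role, while writing "none" removes a
 * failed/spare member from its slot, e.g.:
 *
 *	echo 2    > /sys/block/md0/md/dev-sdc1/slot
 *	echo none > /sys/block/md0/md/dev-sdc1/slot
 */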
3218
3219 static ssize_t
3220 offset_show(struct md_rdev *rdev, char *page)
3221 {
3222         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
3223 }
3224
3225 static ssize_t
3226 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
3227 {
3228         unsigned long long offset;
3229         if (kstrtoull(buf, 10, &offset) < 0)
3230                 return -EINVAL;
3231         if (rdev->mddev->pers && rdev->raid_disk >= 0)
3232                 return -EBUSY;
3233         if (rdev->sectors && rdev->mddev->external)
3234                 /* Must set offset before size, so overlap checks
3235                  * can be sane */
3236                 return -EBUSY;
3237         rdev->data_offset = offset;
3238         rdev->new_data_offset = offset;
3239         return len;
3240 }
3241
3242 static struct rdev_sysfs_entry rdev_offset =
3243 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
3244
3245 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3246 {
3247         return sprintf(page, "%llu\n",
3248                        (unsigned long long)rdev->new_data_offset);
3249 }
3250
3251 static ssize_t new_offset_store(struct md_rdev *rdev,
3252                                 const char *buf, size_t len)
3253 {
3254         unsigned long long new_offset;
3255         struct mddev *mddev = rdev->mddev;
3256
3257         if (kstrtoull(buf, 10, &new_offset) < 0)
3258                 return -EINVAL;
3259
3260         if (mddev->sync_thread ||
3261             test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
3262                 return -EBUSY;
3263         if (new_offset == rdev->data_offset)
3264                 /* reset is always permitted */
3265                 ;
3266         else if (new_offset > rdev->data_offset) {
3267                 /* must not push array size beyond rdev_sectors */
3268                 if (new_offset - rdev->data_offset
3269                     + mddev->dev_sectors > rdev->sectors)
3270                                 return -E2BIG;
3271         }
3272         /* Metadata worries about other space details. */
3273
3274         /* decreasing the offset is inconsistent with a backwards
3275          * reshape.
3276          */
3277         if (new_offset < rdev->data_offset &&
3278             mddev->reshape_backwards)
3279                 return -EINVAL;
3280         /* Increasing offset is inconsistent with forwards
3281          * reshape.  reshape_direction should be set to
3282          * 'backwards' first.
3283          */
3284         if (new_offset > rdev->data_offset &&
3285             !mddev->reshape_backwards)
3286                 return -EINVAL;
3287
3288         if (mddev->pers && mddev->persistent &&
3289             !super_types[mddev->major_version]
3290             .allow_new_offset(rdev, new_offset))
3291                 return -E2BIG;
3292         rdev->new_data_offset = new_offset;
3293         if (new_offset > rdev->data_offset)
3294                 mddev->reshape_backwards = 1;
3295         else if (new_offset < rdev->data_offset)
3296                 mddev->reshape_backwards = 0;
3297
3298         return len;
3299 }
3300 static struct rdev_sysfs_entry rdev_new_offset =
3301 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
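
/*
 * Sketch of how new_offset implies the reshape direction (numbers are
 * examples only): with data_offset == 2048, writing 4096 moves the data to
 * a higher start offset, so new_offset_store() sets reshape_backwards;
 * writing 1024 moves it lower and clears reshape_backwards; writing the
 * current data_offset simply resets the pending change.
 */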
3302
3303 static ssize_t
3304 rdev_size_show(struct md_rdev *rdev, char *page)
3305 {
3306         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
3307 }
3308
3309 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
3310 {
3311         /* check if two start/length pairs overlap */
3312         if (s1+l1 <= s2)
3313                 return 0;
3314         if (s2+l2 <= s1)
3315                 return 0;
3316         return 1;
3317 }
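
/*
 * Worked example for overlaps() (arbitrary numbers): the ranges
 * [1000, 1100) and [1050, 1060) intersect, so
 *
 *	overlaps(1000, 100, 1050, 10) -> 1
 *
 * while ranges that merely touch do not:
 *
 *	overlaps(1000, 100, 1100, 50) -> 0
 */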
3318
3319 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3320 {
3321         unsigned long long blocks;
3322         sector_t new;
3323
3324         if (kstrtoull(buf, 10, &blocks) < 0)
3325                 return -EINVAL;
3326
3327         if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3328                 return -EINVAL; /* sector conversion overflow */
3329
3330         new = blocks * 2;
3331         if (new != blocks * 2)
3332                 return -EINVAL; /* unsigned long long to sector_t overflow */
3333
3334         *sectors = new;
3335         return 0;
3336 }
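
/*
 * Worked example (arbitrary input): sizes here are in 1K blocks, so
 * strict_blocks_to_sectors("1048576", &s) gives s == 2097152 sectors
 * (1GiB), while any value with the top bit set is rejected with -EINVAL
 * because doubling it would overflow.
 */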
3337
3338 static ssize_t
3339 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3340 {
3341         struct mddev *my_mddev = rdev->mddev;
3342         sector_t oldsectors = rdev->sectors;
3343         sector_t sectors;
3344
3345         if (test_bit(Journal, &rdev->flags))
3346                 return -EBUSY;
3347         if (strict_blocks_to_sectors(buf, &sectors) < 0)
3348                 return -EINVAL;
3349         if (rdev->data_offset != rdev->new_data_offset)
3350                 return -EINVAL; /* too confusing */
3351         if (my_mddev->pers && rdev->raid_disk >= 0) {
3352                 if (my_mddev->persistent) {
3353                         sectors = super_types[my_mddev->major_version].
3354                                 rdev_size_change(rdev, sectors);
3355                         if (!sectors)
3356                                 return -EBUSY;
3357                 } else if (!sectors)
3358                         sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
3359                                 rdev->data_offset;
3360                 if (!my_mddev->pers->resize)
3361                         /* Cannot change size for RAID0 or Linear etc */
3362                         return -EINVAL;
3363         }
3364         if (sectors < my_mddev->dev_sectors)
3365                 return -EINVAL; /* component must fit device */
3366
3367         rdev->sectors = sectors;
3368         if (sectors > oldsectors && my_mddev->external) {
3369                 /* Need to check that all other rdevs with the same
3370                  * ->bdev do not overlap.  'rcu' is sufficient to walk
3371                  * the rdev lists safely.
3372                  * This check does not provide a hard guarantee, it
3373                  * just helps avoid dangerous mistakes.
3374                  */
3375                 struct mddev *mddev;
3376                 int overlap = 0;
3377                 struct list_head *tmp;
3378
3379                 rcu_read_lock();
3380                 for_each_mddev(mddev, tmp) {
3381                         struct md_rdev *rdev2;
3382
3383                         rdev_for_each(rdev2, mddev)
3384                                 if (rdev->bdev == rdev2->bdev &&
3385                                     rdev != rdev2 &&
3386                                     overlaps(rdev->data_offset, rdev->sectors,
3387                                              rdev2->data_offset,
3388                                              rdev2->sectors)) {
3389                                         overlap = 1;
3390                                         break;
3391                                 }
3392                         if (overlap) {
3393                                 mddev_put(mddev);
3394                                 break;
3395                         }
3396                 }
3397                 rcu_read_unlock();
3398                 if (overlap) {
3399                         /* Someone else could have slipped in a size
3400                          * change here, but doing so is just silly.
3401                          * We put oldsectors back because we *know* it is
3402                          * safe, and trust userspace not to race with
3403                          * itself
3404                          */
3405                         rdev->sectors = oldsectors;
3406                         return -EBUSY;
3407                 }
3408         }
3409         return len;
3410 }
3411
3412 static struct rdev_sysfs_entry rdev_size =
3413 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
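
/*
 * Illustrative only (paths hypothetical): the per-rdev 'size' attribute is
 * read and written in 1K blocks, e.g.:
 *
 *	cat /sys/block/md0/md/dev-sdb1/size
 *	echo 976000000 > /sys/block/md0/md/dev-sdb1/size
 *
 * with the new value validated by rdev_size_store() above: the array's
 * per-device size must still fit, and on external arrays the range is
 * checked for overlap with other rdevs sharing the same bdev.
 */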
3414
3415 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3416 {
3417         unsigned long long recovery_start = rdev->recovery_offset;
3418
3419         if (test_bit(In_sync, &rdev->flags) ||
3420             recovery_start == MaxSector)
3421                 return sprintf(page, "none\n");
3422
3423         return sprintf(page, "%llu\n", recovery_start);
3424 }
3425
3426 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3427 {
3428         unsigned long long recovery_start;
3429
3430         if (cmd_match(buf, "none"))
3431                 recovery_start = MaxSector;
3432         else if (kstrtoull(buf, 10, &recovery_start))
3433                 return -EINVAL;
3434
3435         if (rdev->mddev->pers &&
3436             rdev->raid_disk >= 0)
3437                 return -EBUSY;
3438
3439         rdev->recovery_offset = recovery_start;
3440         if (recovery_start == MaxSector)
3441                 set_bit(In_sync, &rdev->flags);
3442         else
3443                 clear_bit(In_sync, &rdev->flags);
3444         return len;
3445 }
3446
3447 static struct rdev_sysfs_entry rdev_recovery_start =
3448 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3449
3450 /* sysfs access to bad-blocks list.
3451  * We present two files.
3452  * 'bad_blocks' lists sector numbers and lengths of ranges that
3453  *    are recorded as bad.  The list is truncated to fit within
3454  *    the one-page limit of sysfs.
3455  *    Writing "sector length" to this file adds an acknowledged
3456  *    bad block to the list.
3457  * 'unacknowledged_bad_blocks' lists bad blocks that have not yet
3458  *    been acknowledged.  Writing to this file adds bad blocks
3459  *    without acknowledging them.  This is largely for testing.
3460  */
3461 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3462 {
3463         return badblocks_show(&rdev->badblocks, page, 0);
3464 }
3465 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3466 {
3467         int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3468         /* Maybe that ack was all we needed */
3469         if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3470                 wake_up(&rdev->blocked_wait);
3471         return rv;
3472 }
3473 static struct rdev_sysfs_entry rdev_bad_blocks =
3474 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3475
3476 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3477 {
3478         return badblocks_show(&rdev->badblocks, page, 1);
3479 }
3480 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3481 {
3482         return badblocks_store(&rdev->badblocks, page, len, 1);
3483 }
3484 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3485 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
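
/*
 * Example of the bad-blocks files from userspace (device names made up):
 * each range is a "sector length" pair, one per line, so
 *
 *	echo "123456 8" > /sys/block/md0/md/dev-sdb1/bad_blocks
 *	cat /sys/block/md0/md/dev-sdb1/bad_blocks
 *
 * records and then lists an 8-sector bad range starting at sector 123456.
 */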
3486
3487 static ssize_t
3488 ppl_sector_show(struct md_rdev *rdev, char *page)
3489 {
3490         return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3491 }
3492
3493 static ssize_t
3494 ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3495 {
3496         unsigned long long sector;
3497
3498         if (kstrtoull(buf, 10, &sector) < 0)
3499                 return -EINVAL;
3500         if (sector != (sector_t)sector)
3501                 return -EINVAL;
3502
3503         if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3504             rdev->raid_disk >= 0)
3505                 return -EBUSY;
3506
3507         if (rdev->mddev->persistent) {
3508                 if (rdev->mddev->major_version == 0)
3509                         return -EINVAL;
3510                 if ((sector > rdev->sb_start &&
3511                      sector - rdev->sb_start > S16_MAX) ||
3512                     (sector < rdev->sb_start &&
3513                      rdev->sb_start - sector > -S16_MIN))
3514                         return -EINVAL;
3515                 rdev->ppl.offset = sector - rdev->sb_start;
3516         } else if (!rdev->mddev->external) {
3517                 return -EBUSY;
3518         }
3519         rdev->ppl.sector = sector;
3520         return len;
3521 }
3522
3523 static struct rdev_sysfs_entry rdev_ppl_sector =
3524 __ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
3525
3526 static ssize_t
3527 ppl_size_show(struct md_rdev *rdev, char *page)
3528 {
3529         return sprintf(page, "%u\n", rdev->ppl.size);
3530 }
3531
3532 static ssize_t
3533 ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3534 {
3535         unsigned int size;
3536
3537         if (kstrtouint(buf, 10, &size) < 0)
3538                 return -EINVAL;
3539
3540         if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3541             rdev->raid_disk >= 0)
3542                 return -EBUSY;
3543
3544         if (rdev->mddev->persistent) {
3545                 if (rdev->mddev->major_version == 0)
3546                         return -EINVAL;
3547                 if (size > U16_MAX)
3548                         return -EINVAL;
3549         } else if (!rdev->mddev->external) {
3550                 return -EBUSY;
3551         }
3552         rdev->ppl.size = size;
3553         return len;
3554 }
3555
3556 static struct rdev_sysfs_entry rdev_ppl_size =
3557 __ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3558
3559 static struct attribute *rdev_default_attrs[] = {
3560         &rdev_state.attr,
3561         &rdev_errors.attr,
3562         &rdev_slot.attr,
3563         &rdev_offset.attr,
3564         &rdev_new_offset.attr,
3565         &rdev_size.attr,
3566         &rdev_recovery_start.attr,
3567         &rdev_bad_blocks.attr,
3568         &rdev_unack_bad_blocks.attr,
3569         &rdev_ppl_sector.attr,
3570         &rdev_ppl_size.attr,
3571         NULL,
3572 };
3573 static ssize_t
3574 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3575 {
3576         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3577         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3578
3579         if (!entry->show)
3580                 return -EIO;
3581         if (!rdev->mddev)
3582                 return -ENODEV;
3583         return entry->show(rdev, page);
3584 }
3585
3586 static ssize_t
3587 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3588               const char *page, size_t length)
3589 {
3590         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3591         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3592         ssize_t rv;
3593         struct mddev *mddev = rdev->mddev;
3594
3595         if (!entry->store)
3596                 return -EIO;
3597         if (!capable(CAP_SYS_ADMIN))
3598                 return -EACCES;
3599         rv = mddev ? mddev_lock(mddev) : -ENODEV;
3600         if (!rv) {
3601                 if (rdev->mddev == NULL)
3602                         rv = -ENODEV;
3603                 else
3604                         rv = entry->store(rdev, page, length);
3605                 mddev_unlock(mddev);
3606         }
3607         return rv;
3608 }
3609
3610 static void rdev_free(struct kobject *ko)
3611 {
3612         struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3613         kfree(rdev);
3614 }
3615 static const struct sysfs_ops rdev_sysfs_ops = {
3616         .show           = rdev_attr_show,
3617         .store          = rdev_attr_store,
3618 };
3619 static struct kobj_type rdev_ktype = {
3620         .release        = rdev_free,
3621         .sysfs_ops      = &rdev_sysfs_ops,
3622         .default_attrs  = rdev_default_attrs,
3623 };
3624
3625 int md_rdev_init(struct md_rdev *rdev)
3626 {
3627         rdev->desc_nr = -1;
3628         rdev->saved_raid_disk = -1;
3629         rdev->raid_disk = -1;
3630         rdev->flags = 0;
3631         rdev->data_offset = 0;
3632         rdev->new_data_offset = 0;
3633         rdev->sb_events = 0;
3634         rdev->last_read_error = 0;
3635         rdev->sb_loaded = 0;
3636         rdev->bb_page = NULL;
3637         atomic_set(&rdev->nr_pending, 0);
3638         atomic_set(&rdev->read_errors, 0);
3639         atomic_set(&rdev->corrected_errors, 0);
3640
3641         INIT_LIST_HEAD(&rdev->same_set);
3642         init_waitqueue_head(&rdev->blocked_wait);
3643
3644         /* Add space to store bad block list.
3645          * This reserves the space even on arrays where it cannot
3646          * be used - I wonder if that matters
3647          */
3648         return badblocks_init(&rdev->badblocks, 0);
3649 }
3650 EXPORT_SYMBOL_GPL(md_rdev_init);
3651 /*
3652  * Import a device. If 'super_format' >= 0, then sanity check the superblock
3653  *
3654  * mark the device faulty if:
3655  *
3656  *   - the device is nonexistent (zero size)
3657  *   - the device has no valid superblock
3658  *
3659  * a faulty rdev _never_ has rdev->sb set.
3660  */
3661 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3662 {
3663         char b[BDEVNAME_SIZE];
3664         int err;
3665         struct md_rdev *rdev;
3666         sector_t size;
3667
3668         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3669         if (!rdev)
3670                 return ERR_PTR(-ENOMEM);
3671
3672         err = md_rdev_init(rdev);
3673         if (err)
3674                 goto abort_free;
3675         err = alloc_disk_sb(rdev);
3676         if (err)
3677                 goto abort_free;
3678
3679         err = lock_rdev(rdev, newdev, super_format == -2);
3680         if (err)
3681                 goto abort_free;
3682
3683         kobject_init(&rdev->kobj, &rdev_ktype);
3684
3685         size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3686         if (!size) {
3687                 pr_warn("md: %s has zero or unknown size, marking faulty!\n",
3688                         bdevname(rdev->bdev,b));
3689                 err = -EINVAL;
3690                 goto abort_free;
3691         }
3692
3693         if (super_format >= 0) {
3694                 err = super_types[super_format].
3695                         load_super(rdev, NULL, super_minor);
3696                 if (err == -EINVAL) {
3697                         pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
3698                                 bdevname(rdev->bdev,b),
3699                                 super_format, super_minor);
3700                         goto abort_free;
3701                 }
3702                 if (err < 0) {
3703                         pr_warn("md: could not read %s's sb, not importing!\n",
3704                                 bdevname(rdev->bdev,b));
3705                         goto abort_free;
3706                 }
3707         }
3708
3709         return rdev;
3710
3711 abort_free:
3712         if (rdev->bdev)
3713                 unlock_rdev(rdev);
3714         md_rdev_clear(rdev);
3715         kfree(rdev);
3716         return ERR_PTR(err);
3717 }
3718
3719 /*
3720  * Check a full RAID array for plausibility
3721  */
3722
3723 static int analyze_sbs(struct mddev *mddev)
3724 {
3725         int i;
3726         struct md_rdev *rdev, *freshest, *tmp;
3727         char b[BDEVNAME_SIZE];
3728
3729         freshest = NULL;
3730         rdev_for_each_safe(rdev, tmp, mddev)
3731                 switch (super_types[mddev->major_version].
3732                         load_super(rdev, freshest, mddev->minor_version)) {
3733                 case 1:
3734                         freshest = rdev;
3735                         break;
3736                 case 0:
3737                         break;
3738                 default:
3739                         pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
3740                                 bdevname(rdev->bdev,b));
3741                         md_kick_rdev_from_array(rdev);
3742                 }
3743
3744         /* Cannot find a valid fresh disk */
3745         if (!freshest) {
3746                 pr_warn("md: cannot find a valid disk\n");
3747                 return -EINVAL;
3748         }
3749
3750         super_types[mddev->major_version].
3751                 validate_super(mddev, freshest);
3752
3753         i = 0;
3754         rdev_for_each_safe(rdev, tmp, mddev) {
3755                 if (mddev->max_disks &&
3756                     (rdev->desc_nr >= mddev->max_disks ||
3757                      i > mddev->max_disks)) {
3758                         pr_warn("md: %s: %s: only %d devices permitted\n",
3759                                 mdname(mddev), bdevname(rdev->bdev, b),
3760                                 mddev->max_disks);
3761                         md_kick_rdev_from_array(rdev);
3762                         continue;
3763                 }
3764                 if (rdev != freshest) {
3765                         if (super_types[mddev->major_version].
3766                             validate_super(mddev, rdev)) {
3767                                 pr_warn("md: kicking non-fresh %s from array!\n",
3768                                         bdevname(rdev->bdev,b));
3769                                 md_kick_rdev_from_array(rdev);
3770                                 continue;
3771                         }
3772                 }
3773                 if (mddev->level == LEVEL_MULTIPATH) {
3774                         rdev->desc_nr = i++;
3775                         rdev->raid_disk = rdev->desc_nr;
3776                         set_bit(In_sync, &rdev->flags);
3777                 } else if (rdev->raid_disk >=
3778                             (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3779                            !test_bit(Journal, &rdev->flags)) {
3780                         rdev->raid_disk = -1;
3781                         clear_bit(In_sync, &rdev->flags);
3782                 }
3783         }
3784
3785         return 0;
3786 }
3787
3788 /* Read a fixed-point number.
3789  * Numbers in sysfs attributes should be in "standard" units where
3790  * possible, so time should be in seconds.
3791  * However we internally use a much smaller unit such as
3792  * milliseconds or jiffies.
3793  * This function takes a decimal number with a possible fractional
3794  * component, and produces an integer which is the result of
3795  * multiplying that number by 10^'scale',
3796  * all without any floating-point arithmetic.
3797  */
3798 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3799 {
3800         unsigned long result = 0;
3801         long decimals = -1;
3802         while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3803                 if (*cp == '.')
3804                         decimals = 0;
3805                 else if (decimals < scale) {
3806                         unsigned int value;
3807                         value = *cp - '0';
3808                         result = result * 10 + value;
3809                         if (decimals >= 0)
3810                                 decimals++;
3811                 }
3812                 cp++;
3813         }
3814         if (*cp == '\n')
3815                 cp++;
3816         if (*cp)
3817                 return -EINVAL;
3818         if (decimals < 0)
3819                 decimals = 0;
3820         *res = result * int_pow(10, scale - decimals);
3821         return 0;
3822 }
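
/*
 * Worked examples (inputs arbitrary): with scale == 3 the result is in
 * thousandths of the input unit, so
 *
 *	strict_strtoul_scaled("0.2",   &res, 3)  ->  res == 200
 *	strict_strtoul_scaled("1.5\n", &res, 3)  ->  res == 1500
 *	strict_strtoul_scaled("20",    &res, 3)  ->  res == 20000
 *
 * which is how safe_delay_store() below turns seconds into milliseconds.
 */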
3823
3824 static ssize_t
3825 safe_delay_show(struct mddev *mddev, char *page)
3826 {
3827         int msec = (mddev->safemode_delay*1000)/HZ;
3828         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3829 }
3830 static ssize_t
3831 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3832 {
3833         unsigned long msec;
3834
3835         if (mddev_is_clustered(mddev)) {
3836                 pr_warn("md: Safemode is disabled for clustered mode\n");
3837                 return -EINVAL;
3838         }
3839
3840         if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
3841                 return -EINVAL;
3842         if (msec == 0)
3843                 mddev->safemode_delay = 0;
3844         else {
3845                 unsigned long old_delay = mddev->safemode_delay;
3846                 unsigned long new_delay = (msec*HZ)/1000;
3847
3848                 if (new_delay == 0)
3849                         new_delay = 1;
3850                 mddev->safemode_delay = new_delay;
3851                 if (new_delay < old_delay || old_delay == 0)
3852                         mod_timer(&mddev->safemode_timer, jiffies+1);
3853         }
3854         return len;
3855 }
3856 static struct md_sysfs_entry md_safe_delay =
3857 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
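
/*
 * Illustrative usage (array name hypothetical): safe_mode_delay takes a
 * decimal number of seconds with millisecond resolution, so
 *
 *	echo 0.200 > /sys/block/md0/md/safe_mode_delay
 *
 * requests a ~200ms delay; safe_delay_store() converts it to jiffies as
 * msec*HZ/1000 and rounds any non-zero request up to at least one jiffy.
 */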
3858
3859 static ssize_t
3860 level_show(struct mddev *mddev, char *page)
3861 {
3862         struct md_personality *p;
3863         int ret;
3864         spin_lock(&mddev->lock);
3865         p = mddev->pers;
3866         if (p)
3867                 ret = sprintf(page, "%s\n", p->name);
3868         else if (mddev->clevel[0])
3869                 ret = sprintf(page, "%s\n", mddev->clevel);
3870         else if (mddev->level != LEVEL_NONE)
3871                 ret = sprintf(page, "%d\n", mddev->level);
3872         else
3873                 ret = 0;
3874         spin_unlock(&mddev->lock);
3875         return ret;
3876 }
3877
3878 static ssize_t
3879 level_store(struct mddev *mddev, const char *buf, size_t len)
3880 {
3881         char clevel[16];
3882         ssize_t rv;
3883         size_t slen = len;
3884         struct md_personality *pers, *oldpers;
3885         long level;
3886         void *priv, *oldpriv;
3887         struct md_rdev *rdev;
3888
3889         if (slen == 0 || slen >= sizeof(clevel))
3890                 return -EINVAL;
3891
3892         rv = mddev_lock(mddev);
3893         if (rv)
3894                 return rv;
3895
3896         if (mddev->pers == NULL) {
3897                 strncpy(mddev->clevel, buf, slen);
3898                 if (mddev->clevel[slen-1] == '\n')
3899                         slen--;
3900                 mddev->clevel[slen] = 0;
3901                 mddev->level = LEVEL_NONE;
3902                 rv = len;
3903                 goto out_unlock;
3904         }
3905         rv = -EROFS;
3906         if (mddev->ro)
3907                 goto out_unlock;
3908
3909         /* request to change the personality.  Need to ensure:
3910          *  - array is not engaged in resync/recovery/reshape
3911          *  - old personality can be suspended
3912          *  - new personality will access other array.
3913          */
3914
3915         rv = -EBUSY;
3916         if (mddev->sync_thread ||
3917             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3918             mddev->reshape_position != MaxSector ||
3919             mddev->sysfs_active)
3920                 goto out_unlock;
3921
3922         rv = -EINVAL;
3923         if (!mddev->pers->quiesce) {
3924                 pr_warn("md: %s: %s does not support online personality change\n",
3925                         mdname(mddev), mddev->pers->name);
3926                 goto out_unlock;
3927         }
3928
3929         /* Now find the new personality */
3930         strncpy(clevel, buf, slen);
3931         if (clevel[slen-1] == '\n')
3932                 slen--;
3933         clevel[slen] = 0;
3934         if (kstrtol(clevel, 10, &level))
3935                 level = LEVEL_NONE;
3936
3937         if (request_module("md-%s", clevel) != 0)
3938                 request_module("md-level-%s", clevel);
3939         spin_lock(&pers_lock);
3940         pers = find_pers(level, clevel);
3941         if (!pers || !try_module_get(pers->owner)) {
3942                 spin_unlock(&pers_lock);
3943                 pr_warn("md: personality %s not loaded\n", clevel);
3944                 rv = -EINVAL;
3945                 goto out_unlock;
3946         }
3947         spin_unlock(&pers_lock);
3948
3949         if (pers == mddev->pers) {
3950                 /* Nothing to do! */
3951                 module_put(pers->owner);
3952                 rv = len;
3953                 goto out_unlock;
3954         }
3955         if (!pers->takeover) {
3956                 module_put(pers->owner);
3957                 pr_warn("md: %s: %s does not support personality takeover\n",
3958                         mdname(mddev), clevel);
3959                 rv = -EINVAL;
3960                 goto out_unlock;
3961         }
3962
3963         rdev_for_each(rdev, mddev)
3964                 rdev->new_raid_disk = rdev->raid_disk;
3965
3966         /* ->takeover must set new_* and/or delta_disks
3967          * if it succeeds, and may set them when it fails.
3968          */
3969         priv = pers->takeover(mddev);
3970         if (IS_ERR(priv)) {
3971                 mddev->new_level = mddev->level;
3972                 mddev->new_layout = mddev->layout;
3973                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3974                 mddev->raid_disks -= mddev->delta_disks;
3975                 mddev->delta_disks = 0;
3976                 mddev->reshape_backwards = 0;
3977                 module_put(pers->owner);
3978                 pr_warn("md: %s: %s would not accept array\n",
3979                         mdname(mddev), clevel);
3980                 rv = PTR_ERR(priv);
3981                 goto out_unlock;
3982         }
3983
3984         /* Looks like we have a winner */
3985         mddev_suspend(mddev);
3986         mddev_detach(mddev);
3987
3988         spin_lock(&mddev->lock);
3989         oldpers = mddev->pers;
3990         oldpriv = mddev->private;
3991         mddev->pers = pers;
3992         mddev->private = priv;
3993         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3994         mddev->level = mddev->new_level;
3995         mddev->layout = mddev->new_layout;
3996         mddev->chunk_sectors = mddev->new_chunk_sectors;
3997         mddev->delta_disks = 0;
3998         mddev->reshape_backwards = 0;
3999         mddev->degraded = 0;
4000         spin_unlock(&mddev->lock);
4001
4002         if (oldpers->sync_request == NULL &&
4003             mddev->external) {
4004                 /* We are converting from a no-redundancy array
4005                  * to a redundancy array and metadata is managed
4006                  * externally so we need to be sure that writes
4007                  * won't block due to a need to transition
4008                  *      clean->dirty
4009                  * until external management is started.
4010                  */
4011                 mddev->in_sync = 0;
4012                 mddev->safemode_delay = 0;
4013                 mddev->safemode = 0;
4014         }
4015
4016         oldpers->free(mddev, oldpriv);
4017
4018         if (oldpers->sync_request == NULL &&
4019             pers->sync_request != NULL) {
4020                 /* need to add the md_redundancy_group */
4021                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
4022                         pr_warn("md: cannot register extra attributes for %s\n",
4023                                 mdname(mddev));
4024                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
4025         }
4026         if (oldpers->sync_request != NULL &&
4027             pers->sync_request == NULL) {
4028                 /* need to remove the md_redundancy_group */
4029                 if (mddev->to_remove == NULL)
4030                         mddev->to_remove = &md_redundancy_group;
4031         }
4032
4033         module_put(oldpers->owner);
4034
4035         rdev_for_each(rdev, mddev) {
4036                 if (rdev->raid_disk < 0)
4037                         continue;
4038                 if (rdev->new_raid_disk >= mddev->raid_disks)
4039                         rdev->new_raid_disk = -1;
4040                 if (rdev->new_raid_disk == rdev->raid_disk)
4041                         continue;
4042                 sysfs_unlink_rdev(mddev, rdev);
4043         }
4044         rdev_for_each(rdev, mddev) {
4045                 if (rdev->raid_disk < 0)
4046                         continue;
4047                 if (rdev->new_raid_disk == rdev->raid_disk)
4048                         continue;
4049                 rdev->raid_disk = rdev->new_raid_disk;
4050                 if (rdev->raid_disk < 0)
4051                         clear_bit(In_sync, &rdev->flags);
4052                 else {
4053                         if (sysfs_link_rdev(mddev, rdev))
4054                                 pr_warn("md: cannot register rd%d for %s after level change\n",
4055                                         rdev->raid_disk, mdname(mddev));
4056                 }
4057         }
4058
4059         if (pers->sync_request == NULL) {
4060                 /* this is now an array without redundancy, so
4061                  * it must always be in_sync
4062                  */
4063                 mddev->in_sync = 1;
4064                 del_timer_sync(&mddev->safemode_timer);
4065         }
4066         blk_set_stacking_limits(&mddev->queue->limits);
4067         pers->run(mddev);
4068         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4069         mddev_resume(mddev);
4070         if (!mddev->thread)
4071                 md_update_sb(mddev, 1);
4072         sysfs_notify(&mddev->kobj, NULL, "level");
4073         md_new_event(mddev);
4074         rv = len;
4075 out_unlock:
4076         mddev_unlock(mddev);
4077         return rv;
4078 }
4079
4080 static struct md_sysfs_entry md_level =
4081 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
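
/*
 * Example of an online level change (illustrative only): if the target
 * personality provides ->takeover, writing the new level name is enough,
 * e.g.:
 *
 *	echo raid5 > /sys/block/md0/md/level
 *
 * level_store() then request_module()s "md-raid5" if necessary, calls the
 * new personality's ->takeover(), and swaps personalities under
 * mddev->lock as above.
 */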
4082
4083 static ssize_t
4084 layout_show(struct mddev *mddev, char *page)
4085 {
4086         /* just a number, not meaningful for all levels */
4087         if (mddev->reshape_position != MaxSector &&
4088             mddev->layout != mddev->new_layout)
4089                 return sprintf(page, "%d (%d)\n",
4090                                mddev->new_layout, mddev->layout);
4091         return sprintf(page, "%d\n", mddev->layout);
4092 }
4093
4094 static ssize_t
4095 layout_store(struct mddev *mddev, const char *buf, size_t len)
4096 {
4097         unsigned int n;
4098         int err;
4099
4100         err = kstrtouint(buf, 10, &n);
4101         if (err < 0)
4102                 return err;
4103         err = mddev_lock(mddev);
4104         if (err)
4105                 return err;
4106
4107         if (mddev->pers) {
4108                 if (mddev->pers->check_reshape == NULL)
4109                         err = -EBUSY;
4110                 else if (mddev->ro)
4111                         err = -EROFS;
4112                 else {
4113                         mddev->new_layout = n;
4114                         err = mddev->pers->check_reshape(mddev);
4115                         if (err)
4116                                 mddev->new_layout = mddev->layout;
4117                 }
4118         } else {
4119                 mddev->new_layout = n;
4120                 if (mddev->reshape_position == MaxSector)
4121                         mddev->layout = n;
4122         }
4123         mddev_unlock(mddev);
4124         return err ?: len;
4125 }
4126 static struct md_sysfs_entry md_layout =
4127 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
4128
4129 static ssize_t
4130 raid_disks_show(struct mddev *mddev, char *page)
4131 {
4132         if (mddev->raid_disks == 0)
4133                 return 0;
4134         if (mddev->reshape_position != MaxSector &&
4135             mddev->delta_disks != 0)
4136                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
4137                                mddev->raid_disks - mddev->delta_disks);
4138         return sprintf(page, "%d\n", mddev->raid_disks);
4139 }
4140
4141 static int update_raid_disks(struct mddev *mddev, int raid_disks);
4142
4143 static ssize_t
4144 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
4145 {
4146         unsigned int n;
4147         int err;
4148
4149         err = kstrtouint(buf, 10, &n);
4150         if (err < 0)
4151                 return err;
4152
4153         err = mddev_lock(mddev);
4154         if (err)
4155                 return err;
4156         if (mddev->pers)
4157                 err = update_raid_disks(mddev, n);
4158         else if (mddev->reshape_position != MaxSector) {
4159                 struct md_rdev *rdev;
4160                 int olddisks = mddev->raid_disks - mddev->delta_disks;
4161
4162                 err = -EINVAL;
4163                 rdev_for_each(rdev, mddev) {
4164                         if (olddisks < n &&
4165                             rdev->data_offset < rdev->new_data_offset)
4166                                 goto out_unlock;
4167                         if (olddisks > n &&
4168                             rdev->data_offset > rdev->new_data_offset)
4169                                 goto out_unlock;
4170                 }
4171                 err = 0;
4172                 mddev->delta_disks = n - olddisks;
4173                 mddev->raid_disks = n;
4174                 mddev->reshape_backwards = (mddev->delta_disks < 0);
4175         } else
4176                 mddev->raid_disks = n;
4177 out_unlock:
4178         mddev_unlock(mddev);
4179         return err ? err : len;
4180 }
4181 static struct md_sysfs_entry md_raid_disks =
4182 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
4183
4184 static ssize_t
4185 chunk_size_show(struct mddev *mddev, char *page)
4186 {
4187         if (mddev->reshape_position != MaxSector &&
4188             mddev->chunk_sectors != mddev->new_chunk_sectors)
4189                 return sprintf(page, "%d (%d)\n",
4190                                mddev->new_chunk_sectors << 9,
4191                                mddev->chunk_sectors << 9);
4192         return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
4193 }
4194
4195 static ssize_t
4196 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
4197 {
4198         unsigned long n;
4199         int err;
4200
4201         err = kstrtoul(buf, 10, &n);
4202         if (err < 0)
4203                 return err;
4204
4205         err = mddev_lock(mddev);
4206         if (err)
4207                 return err;
4208         if (mddev->pers) {
4209                 if (mddev->pers->check_reshape == NULL)
4210                         err = -EBUSY;
4211                 else if (mddev->ro)
4212                         err = -EROFS;
4213                 else {
4214                         mddev->new_chunk_sectors = n >> 9;
4215                         err = mddev->pers->check_reshape(mddev);
4216                         if (err)
4217                                 mddev->new_chunk_sectors = mddev->chunk_sectors;
4218                 }
4219         } else {
4220                 mddev->new_chunk_sectors = n >> 9;
4221                 if (mddev->reshape_position == MaxSector)
4222                         mddev->chunk_sectors = n >> 9;
4223         }
4224         mddev_unlock(mddev);
4225         return err ?: len;
4226 }
4227 static struct md_sysfs_entry md_chunk_size =
4228 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
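
/*
 * Units note (illustrative arithmetic, not from this file): chunk_size is
 * exposed in bytes but stored internally in 512-byte sectors, hence the
 * << 9 / >> 9 conversions above; e.g. writing 524288 sets
 * new_chunk_sectors to 1024.
 */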
4229
4230 static ssize_t
4231 resync_start_show(struct mddev *mddev, char *page)
4232 {
4233         if (mddev->recovery_cp == MaxSector)
4234                 return sprintf(page, "none\n");
4235         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
4236 }
4237
4238 static ssize_t
4239 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
4240 {
4241         unsigned long long n;
4242         int err;
4243
4244         if (cmd_match(buf, "none"))
4245                 n = MaxSector;
4246         else {
4247                 err = kstrtoull(buf, 10, &n);
4248                 if (err < 0)
4249                         return err;
4250                 if (n != (sector_t)n)
4251                         return -EINVAL;
4252         }
4253
4254         err = mddev_lock(mddev);
4255         if (err)
4256                 return err;
4257         if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
4258                 err = -EBUSY;
4259
4260         if (!err) {
4261                 mddev->recovery_cp = n;
4262                 if (mddev->pers)
4263                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
4264         }
4265         mddev_unlock(mddev);
4266         return err ?: len;
4267 }
4268 static struct md_sysfs_entry md_resync_start =
4269 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
4270                 resync_start_show, resync_start_store);
4271
4272 /*
4273  * The array state can be:
4274  *
4275  * clear
4276  *     No devices, no size, no level
4277  *     Equivalent to STOP_ARRAY ioctl
4278  * inactive
4279  *     May have some settings, but array is not active
4280  *        all IO results in error
4281  *     When written, doesn't tear down array, but just stops it
4282  * suspended (not supported yet)
4283  *     All IO requests will block. The array can be reconfigured.
4284  *     Writing this, if accepted, will block until array is quiescent
4285  * readonly
4286  *     no resync can happen.  no superblocks get written.
4287  *     write requests fail
4288  * read-auto
4289  *     like readonly, but behaves like 'clean' on a write request.
4290  *
4291  * clean - no pending writes, but otherwise active.
4292  *     When written to inactive array, starts without resync
4293  *     If a write request arrives then
4294  *       if metadata is known, mark 'dirty' and switch to 'active'.
4295  *       if not known, block and switch to write-pending
4296  *     If written to an active array that has pending writes, then fails.
4297  * active
4298  *     fully active: IO and resync can be happening.
4299  *     When written to inactive array, starts with resync
4300  *
4301  * write-pending
4302  *     clean, but writes are blocked waiting for 'active' to be written.
4303  *
4304  * active-idle
4305  *     like active, but no writes have been seen for a while (100msec).
4306  *
4307  * broken
4308  *     RAID0/LINEAR-only: same as clean, but array is missing a member.
4309  *     It's useful because mounted RAID0/LINEAR arrays aren't stopped
4310  *     when a member is gone, so this state will at least alert the
4311  *     user that something is wrong.
4312  */
4313 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
4314                    write_pending, active_idle, broken, bad_word};
4315 static char *array_states[] = {
4316         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
4317         "write-pending", "active-idle", "broken", NULL };
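
/*
 * Illustrative only -- a minimal userspace sketch, never built as part of
 * this driver, showing how the array_state values documented above are
 * typically written from user space.  The /sys/block/md0 path and the
 * "readonly" value are example assumptions, not taken from this file.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int example_set_array_readonly(void)
{
        int fd = open("/sys/block/md0/md/array_state", O_WRONLY);

        if (fd < 0)
                return -1;
        /* "readonly" is one of the writable strings listed in array_states[] */
        if (write(fd, "readonly", strlen("readonly")) < 0) {
                close(fd);
                return -1;
        }
        return close(fd);
}
#endif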
4318
4319 static int match_word(const char *word, char **list)
4320 {
4321         int n;
4322         for (n=0; list[n]; n++)
4323                 if (cmd_match(word, list[n]))
4324                         break;
4325         return n;
4326 }
4327
4328 static ssize_t
4329 array_state_show(struct mddev *mddev, char *page)
4330 {
4331         enum array_state st = inactive;
4332
4333         if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
4334                 switch(mddev->ro) {
4335                 case 1:
4336                         st = readonly;
4337                         break;
4338                 case 2:
4339                         st = read_auto;
4340                         break;
4341                 case 0:
4342                         spin_lock(&mddev->lock);
4343                         if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
4344                                 st = write_pending;
4345                         else if (mddev->in_sync)
4346                                 st = clean;
4347                         else if (mddev->safemode)
4348                                 st = active_idle;
4349                         else
4350                                 st = active;
4351                         spin_unlock(&mddev->lock);
4352                 }
4353
4354                 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
4355                         st = broken;
4356         } else {
4357                 if (list_empty(&mddev->disks) &&
4358                     mddev->raid_disks == 0 &&
4359                     mddev->dev_sectors == 0)
4360                         st = clear;
4361                 else
4362                         st = inactive;
4363         }
4364         return sprintf(page, "%s\n", array_states[st]);
4365 }
4366
4367 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4368 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
4369 static int do_md_run(struct mddev *mddev);
4370 static int restart_array(struct mddev *mddev);
4371
4372 static ssize_t
4373 array_state_store(struct mddev *mddev, const char *buf, size_t len)
4374 {
4375         int err = 0;
4376         enum array_state st = match_word(buf, array_states);
4377
4378         if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
4379                 /* don't take reconfig_mutex when toggling between
4380                  * clean and active
4381                  */
4382                 spin_lock(&mddev->lock);
4383                 if (st == active) {
4384                         restart_array(mddev);
4385                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4386                         md_wakeup_thread(mddev->thread);
4387                         wake_up(&mddev->sb_wait);
4388                 } else /* st == clean */ {
4389                         restart_array(mddev);
4390                         if (!set_in_sync(mddev))
4391                                 err = -EBUSY;
4392                 }
4393                 if (!err)
4394                         sysfs_notify_dirent_safe(mddev->sysfs_state);
4395                 spin_unlock(&mddev->lock);
4396                 return err ?: len;
4397         }
4398         err = mddev_lock(mddev);
4399         if (err)
4400                 return err;
4401         err = -EINVAL;
4402         switch(st) {
4403         case bad_word:
4404                 break;
4405         case clear:
4406                 /* stopping an active array */
4407                 err = do_md_stop(mddev, 0, NULL);
4408                 break;
4409         case inactive:
4410                 /* stopping an active array */
4411                 if (mddev->pers)
4412                         err = do_md_stop(mddev, 2, NULL);
4413                 else
4414                         err = 0; /* already inactive */
4415                 break;
4416         case suspended:
4417                 break; /* not supported yet */
4418         case readonly:
4419                 if (mddev->pers)
4420                         err = md_set_readonly(mddev, NULL);
4421                 else {
4422                         mddev->ro = 1;
4423                         set_disk_ro(mddev->gendisk, 1);
4424                         err = do_md_run(mddev);
4425                 }
4426                 break;
4427         case read_auto:
4428                 if (mddev->pers) {
4429                         if (mddev->ro == 0)
4430                                 err = md_set_readonly(mddev, NULL);
4431                         else if (mddev->ro == 1)
4432                                 err = restart_array(mddev);
4433                         if (err == 0) {
4434                                 mddev->ro = 2;
4435                                 set_disk_ro(mddev->gendisk, 0);
4436                         }
4437                 } else {
4438                         mddev->ro = 2;
4439                         err = do_md_run(mddev);
4440                 }
4441                 break;
4442         case clean:
4443                 if (mddev->pers) {
4444                         err = restart_array(mddev);
4445                         if (err)
4446                                 break;
4447                         spin_lock(&mddev->lock);
4448                         if (!set_in_sync(mddev))
4449                                 err = -EBUSY;
4450                         spin_unlock(&mddev->lock);
4451                 } else
4452                         err = -EINVAL;
4453                 break;
4454         case active:
4455                 if (mddev->pers) {
4456                         err = restart_array(mddev);
4457                         if (err)
4458                                 break;
4459                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4460                         wake_up(&mddev->sb_wait);
4461                         err = 0;
4462                 } else {
4463                         mddev->ro = 0;
4464                         set_disk_ro(mddev->gendisk, 0);
4465                         err = do_md_run(mddev);
4466                 }
4467                 break;
4468         case write_pending:
4469         case active_idle:
4470         case broken:
4471                 /* these cannot be set */
4472                 break;
4473         }
4474
4475         if (!err) {
4476                 if (mddev->hold_active == UNTIL_IOCTL)
4477                         mddev->hold_active = 0;
4478                 sysfs_notify_dirent_safe(mddev->sysfs_state);
4479         }
4480         mddev_unlock(mddev);
4481         return err ?: len;
4482 }
4483 static struct md_sysfs_entry md_array_state =
4484 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
4485
4486 static ssize_t
4487 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
4488         return sprintf(page, "%d\n",
4489                        atomic_read(&mddev->max_corr_read_errors));
4490 }
4491
4492 static ssize_t
4493 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4494 {
4495         unsigned int n;
4496         int rv;
4497
4498         rv = kstrtouint(buf, 10, &n);
4499         if (rv < 0)
4500                 return rv;
4501         atomic_set(&mddev->max_corr_read_errors, n);
4502         return len;
4503 }
4504
4505 static struct md_sysfs_entry max_corr_read_errors =
4506 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4507         max_corrected_read_errors_store);
4508
4509 static ssize_t
4510 null_show(struct mddev *mddev, char *page)
4511 {
4512         return -EINVAL;
4513 }
4514
4515 static ssize_t
4516 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4517 {
4518         /* buf must be "%d:%d", optionally newline-terminated, giving major and minor numbers */
4519         /* The new device is added to the array.
4520          * If the array has a persistent superblock, we read the
4521          * superblock to initialise info and check validity.
4522          * Otherwise, only checking done is that in bind_rdev_to_array,
4523          * which mainly checks size.
4524          */
4525         char *e;
4526         int major = simple_strtoul(buf, &e, 10);
4527         int minor;
4528         dev_t dev;
4529         struct md_rdev *rdev;
4530         int err;
4531
4532         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4533                 return -EINVAL;
4534         minor = simple_strtoul(e+1, &e, 10);
4535         if (*e && *e != '\n')
4536                 return -EINVAL;
4537         dev = MKDEV(major, minor);
4538         if (major != MAJOR(dev) ||
4539             minor != MINOR(dev))
4540                 return -EOVERFLOW;
4541
4542         flush_workqueue(md_misc_wq);
4543
4544         err = mddev_lock(mddev);
4545         if (err)
4546                 return err;
4547         if (mddev->persistent) {
4548                 rdev = md_import_device(dev, mddev->major_version,
4549                                         mddev->minor_version);
4550                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4551                         struct md_rdev *rdev0
4552                                 = list_entry(mddev->disks.next,
4553                                              struct md_rdev, same_set);
4554                         err = super_types[mddev->major_version]
4555                                 .load_super(rdev, rdev0, mddev->minor_version);
4556                         if (err < 0)
4557                                 goto out;
4558                 }
4559         } else if (mddev->external)
4560                 rdev = md_import_device(dev, -2, -1);
4561         else
4562                 rdev = md_import_device(dev, -1, -1);
4563
4564         if (IS_ERR(rdev)) {
4565                 mddev_unlock(mddev);
4566                 return PTR_ERR(rdev);
4567         }
4568         err = bind_rdev_to_array(rdev, mddev);
4569  out:
4570         if (err)
4571                 export_rdev(rdev);
4572         mddev_unlock(mddev);
4573         if (!err)
4574                 md_new_event(mddev);
4575         return err ? err : len;
4576 }
4577
4578 static struct md_sysfs_entry md_new_device =
4579 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
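
/*
 * Illustrative only -- a hypothetical userspace sketch (never compiled with
 * the driver) showing the "major:minor" text format that new_dev_store()
 * parses above.  The md0 path and the 8:16 device number are example
 * assumptions; 8:16 is typically /dev/sdb.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int example_add_component(int major, int minor)
{
        char buf[32];
        int fd, len;

        len = snprintf(buf, sizeof(buf), "%d:%d", major, minor);
        fd = open("/sys/block/md0/md/new_dev", O_WRONLY);
        if (fd < 0)
                return -1;
        if (write(fd, buf, len) != len) {
                close(fd);
                return -1;
        }
        return close(fd);
}
#endif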
4580
4581 static ssize_t
4582 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4583 {
4584         char *end;
4585         unsigned long chunk, end_chunk;
4586         int err;
4587
4588         err = mddev_lock(mddev);
4589         if (err)
4590                 return err;
4591         if (!mddev->bitmap)
4592                 goto out;
4593         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4594         while (*buf) {
4595                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4596                 if (buf == end) break;
4597                 if (*end == '-') { /* range */
4598                         buf = end + 1;
4599                         end_chunk = simple_strtoul(buf, &end, 0);
4600                         if (buf == end) break;
4601                 }
4602                 if (*end && !isspace(*end)) break;
4603                 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4604                 buf = skip_spaces(end);
4605         }
4606         md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4607 out:
4608         mddev_unlock(mddev);
4609         return len;
4610 }
4611
4612 static struct md_sysfs_entry md_bitmap =
4613 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
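
/*
 * Format example for bitmap_set_bits (illustrative, arbitrary values):
 * writing "64 128-160" marks chunk 64 plus chunks 128 through 160 dirty in
 * the in-memory bitmap, and md_bitmap_unplug() then flushes the bits to
 * disk as noted above.
 */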
4614
4615 static ssize_t
4616 size_show(struct mddev *mddev, char *page)
4617 {
4618         return sprintf(page, "%llu\n",
4619                 (unsigned long long)mddev->dev_sectors / 2);
4620 }
4621
4622 static int update_size(struct mddev *mddev, sector_t num_sectors);
4623
4624 static ssize_t
4625 size_store(struct mddev *mddev, const char *buf, size_t len)
4626 {
4627         /* If array is inactive, we can reduce the component size, but
4628          * not increase it (except from 0).
4629          * If array is active, we can try an on-line resize
4630          */
4631         sector_t sectors;
4632         int err = strict_blocks_to_sectors(buf, &sectors);
4633
4634         if (err < 0)
4635                 return err;
4636         err = mddev_lock(mddev);
4637         if (err)
4638                 return err;
4639         if (mddev->pers) {
4640                 err = update_size(mddev, sectors);
4641                 if (err == 0)
4642                         md_update_sb(mddev, 1);
4643         } else {
4644                 if (mddev->dev_sectors == 0 ||
4645                     mddev->dev_sectors > sectors)
4646                         mddev->dev_sectors = sectors;
4647                 else
4648                         err = -ENOSPC;
4649         }
4650         mddev_unlock(mddev);
4651         return err ? err : len;
4652 }
4653
4654 static struct md_sysfs_entry md_size =
4655 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
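
/*
 * Units note (illustrative arithmetic): component_size is shown and stored
 * in 1K blocks; e.g. writing 1048576 requests a 1 GiB per-device size,
 * which strict_blocks_to_sectors() turns into 2097152 sectors.
 */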
4656
4657 /* Metadata version.
4658  * This is one of
4659  *   'none' for arrays with no metadata (good luck...)
4660  *   'external' for arrays with externally managed metadata,
4661  * or N.M for internally known formats
4662  */
4663 static ssize_t
4664 metadata_show(struct mddev *mddev, char *page)
4665 {
4666         if (mddev->persistent)
4667                 return sprintf(page, "%d.%d\n",
4668                                mddev->major_version, mddev->minor_version);
4669         else if (mddev->external)
4670                 return sprintf(page, "external:%s\n", mddev->metadata_type);
4671         else
4672                 return sprintf(page, "none\n");
4673 }
4674
4675 static ssize_t
4676 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4677 {
4678         int major, minor;
4679         char *e;
4680         int err;
4681         /* Changing the details of 'external' metadata is
4682          * always permitted.  Otherwise there must be
4683          * no devices attached to the array.
4684          */
4685
4686         err = mddev_lock(mddev);
4687         if (err)
4688                 return err;
4689         err = -EBUSY;
4690         if (mddev->external && strncmp(buf, "external:", 9) == 0)
4691                 ;
4692         else if (!list_empty(&mddev->disks))
4693                 goto out_unlock;
4694
4695         err = 0;
4696         if (cmd_match(buf, "none")) {
4697                 mddev->persistent = 0;
4698                 mddev->external = 0;
4699                 mddev->major_version = 0;
4700                 mddev->minor_version = 90;
4701                 goto out_unlock;
4702         }
4703         if (strncmp(buf, "external:", 9) == 0) {
4704                 size_t namelen = len-9;
4705                 if (namelen >= sizeof(mddev->metadata_type))
4706                         namelen = sizeof(mddev->metadata_type)-1;
4707                 strncpy(mddev->metadata_type, buf+9, namelen);
4708                 mddev->metadata_type[namelen] = 0;
4709                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4710                         mddev->metadata_type[--namelen] = 0;
4711                 mddev->persistent = 0;
4712                 mddev->external = 1;
4713                 mddev->major_version = 0;
4714                 mddev->minor_version = 90;
4715                 goto out_unlock;
4716         }
4717         major = simple_strtoul(buf, &e, 10);
4718         err = -EINVAL;
4719         if (e==buf || *e != '.')
4720                 goto out_unlock;
4721         buf = e+1;
4722         minor = simple_strtoul(buf, &e, 10);
4723         if (e==buf || (*e && *e != '\n') )
4724                 goto out_unlock;
4725         err = -ENOENT;
4726         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4727                 goto out_unlock;
4728         mddev->major_version = major;
4729         mddev->minor_version = minor;
4730         mddev->persistent = 1;
4731         mddev->external = 0;
4732         err = 0;
4733 out_unlock:
4734         mddev_unlock(mddev);
4735         return err ?: len;
4736 }
4737
4738 static struct md_sysfs_entry md_metadata =
4739 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
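
/*
 * Value examples for metadata_version (illustrative): "none" for no
 * metadata, a known N.M pair such as "1.2" for in-kernel formats, or
 * "external:<type>" (e.g. "external:imsm") for externally managed metadata.
 * Per the rules above, only the external form may be changed while member
 * devices are attached.
 */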
4740
4741 static ssize_t
4742 action_show(struct mddev *mddev, char *page)
4743 {
4744         char *type = "idle";
4745         unsigned long recovery = mddev->recovery;
4746         if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4747                 type = "frozen";
4748         else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4749             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4750                 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4751                         type = "reshape";
4752                 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4753                         if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4754                                 type = "resync";
4755                         else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4756                                 type = "check";
4757                         else
4758                                 type = "repair";
4759                 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4760                         type = "recover";
4761                 else if (mddev->reshape_position != MaxSector)
4762                         type = "reshape";
4763         }
4764         return sprintf(page, "%s\n", type);
4765 }
4766
4767 static ssize_t
4768 action_store(struct mddev *mddev, const char *page, size_t len)
4769 {
4770         if (!mddev->pers || !mddev->pers->sync_request)
4771                 return -EINVAL;
4772
4774         if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4775                 if (cmd_match(page, "frozen"))
4776                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4777                 else
4778                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4779                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4780                     mddev_lock(mddev) == 0) {
4781                         flush_workqueue(md_misc_wq);
4782                         if (mddev->sync_thread) {
4783                                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4784                                 md_reap_sync_thread(mddev);
4785                         }
4786                         mddev_unlock(mddev);
4787                 }
4788         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4789                 return -EBUSY;
4790         else if (cmd_match(page, "resync"))
4791                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4792         else if (cmd_match(page, "recover")) {
4793                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4794                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4795         } else if (cmd_match(page, "reshape")) {
4796                 int err;
4797                 if (mddev->pers->start_reshape == NULL)
4798                         return -EINVAL;
4799                 err = mddev_lock(mddev);
4800                 if (!err) {
4801                         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4802                                 err =  -EBUSY;
4803                         else {
4804                                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4805                                 err = mddev->pers->start_reshape(mddev);
4806                         }
4807                         mddev_unlock(mddev);
4808                 }
4809                 if (err)
4810                         return err;
4811                 sysfs_notify(&mddev->kobj, NULL, "degraded");
4812         } else {
4813                 if (cmd_match(page, "check"))
4814                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4815                 else if (!cmd_match(page, "repair"))
4816                         return -EINVAL;
4817                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4818                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4819                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4820         }
4821         if (mddev->ro == 2) {
4822                 /* A write to sync_action is enough to justify
4823                  * canceling read-auto mode
4824                  */
4825                 mddev->ro = 0;
4826                 md_wakeup_thread(mddev->sync_thread);
4827         }
4828         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4829         md_wakeup_thread(mddev->thread);
4830         sysfs_notify_dirent_safe(mddev->sysfs_action);
4831         return len;
4832 }
4833
4834 static struct md_sysfs_entry md_scan_mode =
4835 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
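
/*
 * Illustrative only -- a hypothetical userspace sketch (not part of the
 * driver) that requests one of the actions action_store() accepts above,
 * e.g. "check" to start a scrub whose mismatches show up in mismatch_cnt.
 * The /sys/block/md0 path is an example assumption.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int example_request_sync_action(const char *action)
{
        int fd = open("/sys/block/md0/md/sync_action", O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, action, strlen(action)) < 0) {
                close(fd);
                return -1;
        }
        return close(fd);    /* e.g. example_request_sync_action("check") */
}
#endif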
4836
4837 static ssize_t
4838 last_sync_action_show(struct mddev *mddev, char *page)
4839 {
4840         return sprintf(page, "%s\n", mddev->last_sync_action);
4841 }
4842
4843 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4844
4845 static ssize_t
4846 mismatch_cnt_show(struct mddev *mddev, char *page)
4847 {
4848         return sprintf(page, "%llu\n",
4849                        (unsigned long long)
4850                        atomic64_read(&mddev->resync_mismatches));
4851 }
4852
4853 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4854
4855 static ssize_t
4856 sync_min_show(struct mddev *mddev, char *page)
4857 {
4858         return sprintf(page, "%d (%s)\n", speed_min(mddev),
4859                        mddev->sync_speed_min ? "local": "system");
4860 }
4861
4862 static ssize_t
4863 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4864 {
4865         unsigned int min;
4866         int rv;
4867
4868         if (strncmp(buf, "system", 6)==0) {
4869                 min = 0;
4870         } else {
4871                 rv = kstrtouint(buf, 10, &min);
4872                 if (rv < 0)
4873                         return rv;
4874                 if (min == 0)
4875                         return -EINVAL;
4876         }
4877         mddev->sync_speed_min = min;
4878         return len;
4879 }
4880
4881 static struct md_sysfs_entry md_sync_min =
4882 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4883
4884 static ssize_t
4885 sync_max_show(struct mddev *mddev, char *page)
4886 {
4887         return sprintf(page, "%d (%s)\n", speed_max(mddev),
4888                        mddev->sync_speed_max ? "local": "system");
4889 }
4890
4891 static ssize_t
4892 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4893 {
4894         unsigned int max;
4895         int rv;
4896
4897         if (strncmp(buf, "system", 6)==0) {
4898                 max = 0;
4899         } else {
4900                 rv = kstrtouint(buf, 10, &max);
4901                 if (rv < 0)
4902                         return rv;
4903                 if (max == 0)
4904                         return -EINVAL;
4905         }
4906         mddev->sync_speed_max = max;
4907         return len;
4908 }
4909
4910 static struct md_sysfs_entry md_sync_max =
4911 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
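
/*
 * Usage note (illustrative): writing a positive number to sync_speed_min or
 * sync_speed_max sets a per-array ("local") limit in KiB/sec, while writing
 * "system" reverts to the module-wide default reported as "(system)" above.
 */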
4912
4913 static ssize_t
4914 degraded_show(struct mddev *mddev, char *page)
4915 {
4916         return sprintf(page, "%d\n", mddev->degraded);
4917 }
4918 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
4919
4920 static ssize_t
4921 sync_force_parallel_show(struct mddev *mddev, char *page)
4922 {
4923         return sprintf(page, "%d\n", mddev->parallel_resync);
4924 }
4925
4926 static ssize_t
4927 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
4928 {
4929         long n;
4930
4931         if (kstrtol(buf, 10, &n))
4932                 return -EINVAL;
4933
4934         if (n != 0 && n != 1)
4935                 return -EINVAL;
4936
4937         mddev->parallel_resync = n;
4938
4939         if (mddev->sync_thread)
4940                 wake_up(&resync_wait);
4941
4942         return len;
4943 }
4944
4945 /* force parallel resync, even with shared block devices */
4946 static struct md_sysfs_entry md_sync_force_parallel =
4947 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
4948        sync_force_parallel_show, sync_force_parallel_store);
4949
4950 static ssize_t
4951 sync_speed_show(struct mddev *mddev, char *page)
4952 {
4953         unsigned long resync, dt, db;
4954         if (mddev->curr_resync == 0)
4955                 return sprintf(page, "none\n");
4956         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
4957         dt = (jiffies - mddev->resync_mark) / HZ;
4958         if (!dt) dt++;
4959         db = resync - mddev->resync_mark_cnt;
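        /*
         * db is a delta in 512-byte sectors and dt is in seconds, so
         * db/dt/2 is KiB/sec; e.g. 4096 sectors resynced over 2 seconds
         * reports 1024 (illustrative numbers).
         */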
4960         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
4961 }
4962
4963 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
4964
4965 static ssize_t
4966 sync_completed_show(struct mddev *mddev, char *page)
4967 {
4968         unsigned long long max_sectors, resync;
4969
4970         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4971                 return sprintf(page, "none\n");
4972
4973         if (mddev->curr_resync == 1 ||
4974             mddev->curr_resync == 2)
4975                 return sprintf(page, "delayed\n");
4976
4977         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
4978             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4979                 max_sectors = mddev->resync_max_sectors;
4980         else
4981                 max_sectors = mddev->dev_sectors;
4982
4983         resync = mddev->curr_resync_completed;
4984         return sprintf(page, "%llu / %llu\n", resync, max_sectors);
4985 }
4986
4987 static struct md_sysfs_entry md_sync_completed =
4988         __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
4989
4990 static ssize_t
4991 min_sync_show(struct mddev *mddev, char *page)
4992 {
4993         return sprintf(page, "%llu\n",
4994                        (unsigned long long)mddev->resync_min);
4995 }
4996 static ssize_t
4997 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
4998 {
4999         unsigned long long min;
5000         int err;
5001
5002         if (kstrtoull(buf, 10, &min))
5003                 return -EINVAL;
5004
5005         spin_lock(&mddev->lock);
5006         err = -EINVAL;
5007         if (min > mddev->resync_max)
5008                 goto out_unlock;
5009
5010         err = -EBUSY;
5011         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5012                 goto out_unlock;
5013
5014         /* Round down to a multiple of 4K (8 sectors) for safety */
5015         mddev->resync_min = round_down(min, 8);
5016         err = 0;
5017
5018 out_unlock:
5019         spin_unlock(&mddev->lock);
5020         return err ?: len;
5021 }
5022
5023 static struct md_sysfs_entry md_min_sync =
5024 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
5025
5026 static ssize_t
5027 max_sync_show(struct mddev *mddev, char *page)
5028 {
5029         if (mddev->resync_max == MaxSector)
5030                 return sprintf(page, "max\n");
5031         else
5032                 return sprintf(page, "%llu\n",
5033                                (unsigned long long)mddev->resync_max);
5034 }
5035 static ssize_t
5036 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
5037 {
5038         int err;
5039         spin_lock(&mddev->lock);
5040         if (strncmp(buf, "max", 3) == 0)
5041                 mddev->resync_max = MaxSector;
5042         else {
5043                 unsigned long long max;
5044                 int chunk;
5045
5046                 err = -EINVAL;
5047                 if (kstrtoull(buf, 10, &max))
5048                         goto out_unlock;
5049                 if (max < mddev->resync_min)
5050                         goto out_unlock;
5051
5052                 err = -EBUSY;
5053                 if (max < mddev->resync_max &&
5054                     mddev->ro == 0 &&
5055                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5056                         goto out_unlock;
5057
5058                 /* Must be a multiple of chunk_size */
5059                 chunk = mddev->chunk_sectors;
5060                 if (chunk) {
5061                         sector_t temp = max;
5062
5063                         err = -EINVAL;
5064                         if (sector_div(temp, chunk))
5065                                 goto out_unlock;
5066                 }
5067                 mddev->resync_max = max;
5068         }
5069         wake_up(&mddev->recovery_wait);
5070         err = 0;
5071 out_unlock:
5072         spin_unlock(&mddev->lock);
5073         return err ?: len;
5074 }
5075
5076 static struct md_sysfs_entry md_max_sync =
5077 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
5078
5079 static ssize_t
5080 suspend_lo_show(struct mddev *mddev, char *page)
5081 {
5082         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
5083 }
5084
5085 static ssize_t
5086 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
5087 {
5088         unsigned long long new;
5089         int err;
5090
5091         err = kstrtoull(buf, 10, &new);
5092         if (err < 0)
5093                 return err;
5094         if (new != (sector_t)new)
5095                 return -EINVAL;
5096
5097         err = mddev_lock(mddev);
5098         if (err)
5099                 return err;
5100         err = -EINVAL;
5101         if (mddev->pers == NULL ||
5102             mddev->pers->quiesce == NULL)
5103                 goto unlock;
5104         mddev_suspend(mddev);
5105         mddev->suspend_lo = new;
5106         mddev_resume(mddev);
5107
5108         err = 0;
5109 unlock:
5110         mddev_unlock(mddev);
5111         return err ?: len;
5112 }
5113 static struct md_sysfs_entry md_suspend_lo =
5114 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
5115
5116 static ssize_t
5117 suspend_hi_show(struct mddev *mddev, char *page)
5118 {
5119         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
5120 }
5121
5122 static ssize_t
5123 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
5124 {
5125         unsigned long long new;
5126         int err;
5127
5128         err = kstrtoull(buf, 10, &new);
5129         if (err < 0)
5130                 return err;
5131         if (new != (sector_t)new)
5132                 return -EINVAL;
5133
5134         err = mddev_lock(mddev);
5135         if (err)
5136                 return err;
5137         err = -EINVAL;
5138         if (mddev->pers == NULL)
5139                 goto unlock;
5140
5141         mddev_suspend(mddev);
5142         mddev->suspend_hi = new;
5143         mddev_resume(mddev);
5144
5145         err = 0;
5146 unlock:
5147         mddev_unlock(mddev);
5148         return err ?: len;
5149 }
5150 static struct md_sysfs_entry md_suspend_hi =
5151 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
5152
5153 static ssize_t
5154 reshape_position_show(struct mddev *mddev, char *page)
5155 {
5156         if (mddev->reshape_position != MaxSector)
5157                 return sprintf(page, "%llu\n",
5158                                (unsigned long long)mddev->reshape_position);
5159         strcpy(page, "none\n");
5160         return 5;
5161 }
5162
5163 static ssize_t
5164 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
5165 {
5166         struct md_rdev *rdev;
5167         unsigned long long new;
5168         int err;
5169
5170         err = kstrtoull(buf, 10, &new);
5171         if (err < 0)
5172                 return err;
5173         if (new != (sector_t)new)
5174                 return -EINVAL;
5175         err = mddev_lock(mddev);
5176         if (err)
5177                 return err;
5178         err = -EBUSY;
5179         if (mddev->pers)
5180                 goto unlock;
5181         mddev->reshape_position = new;
5182         mddev->delta_disks = 0;
5183         mddev->reshape_backwards = 0;
5184         mddev->new_level = mddev->level;
5185         mddev->new_layout = mddev->layout;
5186         mddev->new_chunk_sectors = mddev->chunk_sectors;
5187         rdev_for_each(rdev, mddev)
5188                 rdev->new_data_offset = rdev->data_offset;
5189         err = 0;
5190 unlock:
5191         mddev_unlock(mddev);
5192         return err ?: len;
5193 }
5194
5195 static struct md_sysfs_entry md_reshape_position =
5196 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5197        reshape_position_store);
5198
5199 static ssize_t
5200 reshape_direction_show(struct mddev *mddev, char *page)
5201 {
5202         return sprintf(page, "%s\n",
5203                        mddev->reshape_backwards ? "backwards" : "forwards");
5204 }
5205
5206 static ssize_t
5207 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5208 {
5209         int backwards = 0;
5210         int err;
5211
5212         if (cmd_match(buf, "forwards"))
5213                 backwards = 0;
5214         else if (cmd_match(buf, "backwards"))
5215                 backwards = 1;
5216         else
5217                 return -EINVAL;
5218         if (mddev->reshape_backwards == backwards)
5219                 return len;
5220
5221         err = mddev_lock(mddev);
5222         if (err)
5223                 return err;
5224         /* check if we are allowed to change */
5225         if (mddev->delta_disks)
5226                 err = -EBUSY;
5227         else if (mddev->persistent &&
5228             mddev->major_version == 0)
5229                 err =  -EINVAL;
5230         else
5231                 mddev->reshape_backwards = backwards;
5232         mddev_unlock(mddev);
5233         return err ?: len;
5234 }
5235
5236 static struct md_sysfs_entry md_reshape_direction =
5237 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5238        reshape_direction_store);
5239
5240 static ssize_t
5241 array_size_show(struct mddev *mddev, char *page)
5242 {
5243         if (mddev->external_size)
5244                 return sprintf(page, "%llu\n",
5245                                (unsigned long long)mddev->array_sectors/2);
5246         else
5247                 return sprintf(page, "default\n");
5248 }
5249
5250 static ssize_t
5251 array_size_store(struct mddev *mddev, const char *buf, size_t len)
5252 {
5253         sector_t sectors;
5254         int err;
5255
5256         err = mddev_lock(mddev);
5257         if (err)
5258                 return err;
5259
5260         /* cluster raid doesn't support changing array_sectors */
5261         if (mddev_is_clustered(mddev)) {
5262                 mddev_unlock(mddev);
5263                 return -EINVAL;
5264         }
5265
5266         if (strncmp(buf, "default", 7) == 0) {
5267                 if (mddev->pers)
5268                         sectors = mddev->pers->size(mddev, 0, 0);
5269                 else
5270                         sectors = mddev->array_sectors;
5271
5272                 mddev->external_size = 0;
5273         } else {
5274                 if (strict_blocks_to_sectors(buf, &sectors) < 0)
5275                         err = -EINVAL;
5276                 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
5277                         err = -E2BIG;
5278                 else
5279                         mddev->external_size = 1;
5280         }
5281
5282         if (!err) {
5283                 mddev->array_sectors = sectors;
5284                 if (mddev->pers) {
5285                         set_capacity(mddev->gendisk, mddev->array_sectors);
5286                         revalidate_disk(mddev->gendisk);
5287                 }
5288         }
5289         mddev_unlock(mddev);
5290         return err ?: len;
5291 }
5292
5293 static struct md_sysfs_entry md_array_size =
5294 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
5295        array_size_store);
5296
5297 static ssize_t
5298 consistency_policy_show(struct mddev *mddev, char *page)
5299 {
5300         int ret;
5301
5302         if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5303                 ret = sprintf(page, "journal\n");
5304         } else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5305                 ret = sprintf(page, "ppl\n");
5306         } else if (mddev->bitmap) {
5307                 ret = sprintf(page, "bitmap\n");
5308         } else if (mddev->pers) {
5309                 if (mddev->pers->sync_request)
5310                         ret = sprintf(page, "resync\n");
5311                 else
5312                         ret = sprintf(page, "none\n");
5313         } else {
5314                 ret = sprintf(page, "unknown\n");
5315         }
5316
5317         return ret;
5318 }
5319
5320 static ssize_t
5321 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5322 {
5323         int err = 0;
5324
5325         if (mddev->pers) {
5326                 if (mddev->pers->change_consistency_policy)
5327                         err = mddev->pers->change_consistency_policy(mddev, buf);
5328                 else
5329                         err = -EBUSY;
5330         } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5331                 set_bit(MD_HAS_PPL, &mddev->flags);
5332         } else {
5333                 err = -EINVAL;
5334         }
5335
5336         return err ? err : len;
5337 }
5338
5339 static struct md_sysfs_entry md_consistency_policy =
5340 __ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5341        consistency_policy_store);
5342
5343 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
5344 {
5345         return sprintf(page, "%d\n", mddev->fail_last_dev);
5346 }
5347
5348 /*
5349  * Setting fail_last_dev to true allows the last device to be forcibly
5350  * removed from RAID1/RAID10.
5351  */
5352 static ssize_t
5353 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
5354 {
5355         int ret;
5356         bool value;
5357
5358         ret = kstrtobool(buf, &value);
5359         if (ret)
5360                 return ret;
5361
5362         if (value != mddev->fail_last_dev)
5363                 mddev->fail_last_dev = value;
5364
5365         return len;
5366 }
5367 static struct md_sysfs_entry md_fail_last_dev =
5368 __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
5369        fail_last_dev_store);
5370
5371 static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
5372 {
5373         if (mddev->pers == NULL || (mddev->pers->level != 1))
5374                 return sprintf(page, "n/a\n");
5375         else
5376                 return sprintf(page, "%d\n", mddev->serialize_policy);
5377 }
5378
5379 /*
5380  * Setting serialize_policy to true enforces that write IO is not
5381  * reordered for raid1.
5382  */
5383 static ssize_t
5384 serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
5385 {
5386         int err;
5387         bool value;
5388
5389         err = kstrtobool(buf, &value);
5390         if (err)
5391                 return err;
5392
5393         if (value == mddev->serialize_policy)
5394                 return len;
5395
5396         err = mddev_lock(mddev);
5397         if (err)
5398                 return err;
5399         if (mddev->pers == NULL || (mddev->pers->level != 1)) {
5400                 pr_err("md: serialize_policy is only effective for raid1\n");
5401                 err = -EINVAL;
5402                 goto unlock;
5403         }
5404
5405         mddev_suspend(mddev);
5406         if (value)
5407                 mddev_create_serial_pool(mddev, NULL, true);
5408         else
5409                 mddev_destroy_serial_pool(mddev, NULL, true);
5410         mddev->serialize_policy = value;
5411         mddev_resume(mddev);
5412 unlock:
5413         mddev_unlock(mddev);
5414         return err ?: len;
5415 }
5416
5417 static struct md_sysfs_entry md_serialize_policy =
5418 __ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
5419        serialize_policy_store);
5420
5421
5422 static struct attribute *md_default_attrs[] = {
5423         &md_level.attr,
5424         &md_layout.attr,
5425         &md_raid_disks.attr,
5426         &md_chunk_size.attr,
5427         &md_size.attr,
5428         &md_resync_start.attr,
5429         &md_metadata.attr,
5430         &md_new_device.attr,
5431         &md_safe_delay.attr,
5432         &md_array_state.attr,
5433         &md_reshape_position.attr,
5434         &md_reshape_direction.attr,
5435         &md_array_size.attr,
5436         &max_corr_read_errors.attr,
5437         &md_consistency_policy.attr,
5438         &md_fail_last_dev.attr,
5439         &md_serialize_policy.attr,
5440         NULL,
5441 };
5442
5443 static struct attribute *md_redundancy_attrs[] = {
5444         &md_scan_mode.attr,
5445         &md_last_scan_mode.attr,
5446         &md_mismatches.attr,
5447         &md_sync_min.attr,
5448         &md_sync_max.attr,
5449         &md_sync_speed.attr,
5450         &md_sync_force_parallel.attr,
5451         &md_sync_completed.attr,
5452         &md_min_sync.attr,
5453         &md_max_sync.attr,
5454         &md_suspend_lo.attr,
5455         &md_suspend_hi.attr,
5456         &md_bitmap.attr,
5457         &md_degraded.attr,
5458         NULL,
5459 };
5460 static struct attribute_group md_redundancy_group = {
5461         .name = NULL,
5462         .attrs = md_redundancy_attrs,
5463 };
5464
5465 static ssize_t
5466 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
5467 {
5468         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5469         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5470         ssize_t rv;
5471
5472         if (!entry->show)
5473                 return -EIO;
5474         spin_lock(&all_mddevs_lock);
5475         if (list_empty(&mddev->all_mddevs)) {
5476                 spin_unlock(&all_mddevs_lock);
5477                 return -EBUSY;
5478         }
5479         mddev_get(mddev);
5480         spin_unlock(&all_mddevs_lock);
5481
5482         rv = entry->show(mddev, page);
5483         mddev_put(mddev);
5484         return rv;
5485 }
5486
5487 static ssize_t
5488 md_attr_store(struct kobject *kobj, struct attribute *attr,
5489               const char *page, size_t length)
5490 {
5491         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5492         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5493         ssize_t rv;
5494
5495         if (!entry->store)
5496                 return -EIO;
5497         if (!capable(CAP_SYS_ADMIN))
5498                 return -EACCES;
5499         spin_lock(&all_mddevs_lock);
5500         if (list_empty(&mddev->all_mddevs)) {
5501                 spin_unlock(&all_mddevs_lock);
5502                 return -EBUSY;
5503         }
5504         mddev_get(mddev);
5505         spin_unlock(&all_mddevs_lock);
5506         rv = entry->store(mddev, page, length);
5507         mddev_put(mddev);
5508         return rv;
5509 }
5510
5511 static void md_free(struct kobject *ko)
5512 {
5513         struct mddev *mddev = container_of(ko, struct mddev, kobj);
5514
5515         if (mddev->sysfs_state)
5516                 sysfs_put(mddev->sysfs_state);
5517
5518         if (mddev->gendisk)
5519                 del_gendisk(mddev->gendisk);
5520         if (mddev->queue)
5521                 blk_cleanup_queue(mddev->queue);
5522         if (mddev->gendisk)
5523                 put_disk(mddev->gendisk);
5524         percpu_ref_exit(&mddev->writes_pending);
5525
5526         bioset_exit(&mddev->bio_set);
5527         bioset_exit(&mddev->sync_set);
5528         kfree(mddev);
5529 }
5530
5531 static const struct sysfs_ops md_sysfs_ops = {
5532         .show   = md_attr_show,
5533         .store  = md_attr_store,
5534 };
5535 static struct kobj_type md_ktype = {
5536         .release        = md_free,
5537         .sysfs_ops      = &md_sysfs_ops,
5538         .default_attrs  = md_default_attrs,
5539 };
5540
5541 int mdp_major = 0;
5542
5543 static void mddev_delayed_delete(struct work_struct *ws)
5544 {
5545         struct mddev *mddev = container_of(ws, struct mddev, del_work);
5546
5547         sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
5548         kobject_del(&mddev->kobj);
5549         kobject_put(&mddev->kobj);
5550 }
5551
5552 static void no_op(struct percpu_ref *r) {}
5553
5554 int mddev_init_writes_pending(struct mddev *mddev)
5555 {
5556         if (mddev->writes_pending.percpu_count_ptr)
5557                 return 0;
5558         if (percpu_ref_init(&mddev->writes_pending, no_op,
5559                             PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
5560                 return -ENOMEM;
5561         /* We want to start with the refcount at zero */
5562         percpu_ref_put(&mddev->writes_pending);
5563         return 0;
5564 }
5565 EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
5566
5567 static int md_alloc(dev_t dev, char *name)
5568 {
5569         /*
5570          * If dev is zero, name is the name of a device to allocate with
5571          * an arbitrary minor number.  It will be "md_???"
5572          * If dev is non-zero it must be a device number with a MAJOR of
5573          * MD_MAJOR or mdp_major.  In this case, if "name" is NULL, then
5574          * the device is being created by opening a node in /dev.
5575          * If "name" is not NULL, the device is being created by
5576          * writing to /sys/module/md_mod/parameters/new_array.
5577          */
5578         static DEFINE_MUTEX(disks_mutex);
5579         struct mddev *mddev = mddev_find(dev);
5580         struct gendisk *disk;
5581         int partitioned;
5582         int shift;
5583         int unit;
5584         int error;
5585
5586         if (!mddev)
5587                 return -ENODEV;
5588
5589         partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5590         shift = partitioned ? MdpMinorShift : 0;
5591         unit = MINOR(mddev->unit) >> shift;
5592
5593         /* wait for any previous instance of this device to be
5594          * completely removed (mddev_delayed_delete).
5595          */
5596         flush_workqueue(md_misc_wq);
5597
5598         mutex_lock(&disks_mutex);
5599         error = -EEXIST;
5600         if (mddev->gendisk)
5601                 goto abort;
5602
5603         if (name && !dev) {
5604                 /* Need to ensure that 'name' is not a duplicate.
5605                  */
5606                 struct mddev *mddev2;
5607                 spin_lock(&all_mddevs_lock);
5608
5609                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5610                         if (mddev2->gendisk &&
5611                             strcmp(mddev2->gendisk->disk_name, name) == 0) {
5612                                 spin_unlock(&all_mddevs_lock);
5613                                 goto abort;
5614                         }
5615                 spin_unlock(&all_mddevs_lock);
5616         }
5617         if (name && dev)
5618                 /*
5619                  * Creating /dev/mdNNN via "new_array", so adjust hold_active.
5620                  */
5621                 mddev->hold_active = UNTIL_STOP;
5622
5623         error = -ENOMEM;
5624         mddev->queue = blk_alloc_queue(GFP_KERNEL);
5625         if (!mddev->queue)
5626                 goto abort;
5627         mddev->queue->queuedata = mddev;
5628
5629         blk_queue_make_request(mddev->queue, md_make_request);
5630         blk_set_stacking_limits(&mddev->queue->limits);
5631
5632         disk = alloc_disk(1 << shift);
5633         if (!disk) {
5634                 blk_cleanup_queue(mddev->queue);
5635                 mddev->queue = NULL;
5636                 goto abort;
5637         }
5638         disk->major = MAJOR(mddev->unit);
5639         disk->first_minor = unit << shift;
5640         if (name)
5641                 strcpy(disk->disk_name, name);
5642         else if (partitioned)
5643                 sprintf(disk->disk_name, "md_d%d", unit);
5644         else
5645                 sprintf(disk->disk_name, "md%d", unit);
5646         disk->fops = &md_fops;
5647         disk->private_data = mddev;
5648         disk->queue = mddev->queue;
5649         blk_queue_write_cache(mddev->queue, true, true);
5650         /* Allow extended partitions.  This makes the
5651          * 'mdp' device redundant, but we can't really
5652          * remove it now.
5653          */
5654         disk->flags |= GENHD_FL_EXT_DEVT;
5655         mddev->gendisk = disk;
5656         /* As soon as we call add_disk(), another thread could get
5657          * through to md_open, so make sure it doesn't get too far
5658          */
5659         mutex_lock(&mddev->open_mutex);
5660         add_disk(disk);
5661
5662         error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
5663         if (error) {
5664                 /* This isn't possible, but as kobject_add() is marked
5665                  * __must_check, we must do something with the result.
5666                  */
5667                 pr_debug("md: cannot register %s/md - name in use\n",
5668                          disk->disk_name);
5669                 error = 0;
5670         }
5671         if (mddev->kobj.sd &&
5672             sysfs_create_group(&mddev->kobj, &md_bitmap_group))
5673                 pr_debug("pointless warning\n");
5674         mutex_unlock(&mddev->open_mutex);
5675  abort:
5676         mutex_unlock(&disks_mutex);
5677         if (!error && mddev->kobj.sd) {
5678                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
5679                 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5680         }
5681         mddev_put(mddev);
5682         return error;
5683 }
5684
5685 static struct kobject *md_probe(dev_t dev, int *part, void *data)
5686 {
5687         if (create_on_open)
5688                 md_alloc(dev, NULL);
5689         return NULL;
5690 }
5691
5692 static int add_named_array(const char *val, const struct kernel_param *kp)
5693 {
5694         /*
5695          * val must be "md_*" or "mdNNN".
5696          * For "md_*" we allocate an array with a large free minor number, and
5697          * set the name to val.  val must not already be an active name.
5698          * For "mdNNN" we allocate an array with the minor number NNN
5699          * which must not already be in use.
5700          */
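             /*
              * For example (hypothetical names, written from user space):
              *   echo md_home > /sys/module/md_mod/parameters/new_array
              *   echo md127 > /sys/module/md_mod/parameters/new_array
              */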
5701         int len = strlen(val);
5702         char buf[DISK_NAME_LEN];
5703         unsigned long devnum;
5704
5705         while (len && val[len-1] == '\n')
5706                 len--;
5707         if (len >= DISK_NAME_LEN)
5708                 return -E2BIG;
5709         strlcpy(buf, val, len+1);
5710         if (strncmp(buf, "md_", 3) == 0)
5711                 return md_alloc(0, buf);
5712         if (strncmp(buf, "md", 2) == 0 &&
5713             isdigit(buf[2]) &&
5714             kstrtoul(buf+2, 10, &devnum) == 0 &&
5715             devnum <= MINORMASK)
5716                 return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);
5717
5718         return -EINVAL;
5719 }
5720
5721 static void md_safemode_timeout(struct timer_list *t)
5722 {
5723         struct mddev *mddev = from_timer(mddev, t, safemode_timer);
5724
5725         mddev->safemode = 1;
5726         if (mddev->external)
5727                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5728
5729         md_wakeup_thread(mddev->thread);
5730 }
5731
5732 static int start_dirty_degraded;
5733
5734 int md_run(struct mddev *mddev)
5735 {
5736         int err;
5737         struct md_rdev *rdev;
5738         struct md_personality *pers;
5739
5740         if (list_empty(&mddev->disks))
5741                 /* cannot run an array with no devices. */
5742                 return -EINVAL;
5743
5744         if (mddev->pers)
5745                 return -EBUSY;
5746         /* Cannot run until previous stop completes properly */
5747         if (mddev->sysfs_active)
5748                 return -EBUSY;
5749
5750         /*
5751          * Analyze all RAID superblock(s)
5752          */
5753         if (!mddev->raid_disks) {
5754                 if (!mddev->persistent)
5755                         return -EINVAL;
5756                 err = analyze_sbs(mddev);
5757                 if (err)
5758                         return -EINVAL;
5759         }
5760
5761         if (mddev->level != LEVEL_NONE)
5762                 request_module("md-level-%d", mddev->level);
5763         else if (mddev->clevel[0])
5764                 request_module("md-%s", mddev->clevel);
5765
5766         /*
5767          * Drop all container device buffers; from now on
5768          * the only valid external interface is through the md
5769          * device.
5770          */
5771         mddev->has_superblocks = false;
5772         rdev_for_each(rdev, mddev) {
5773                 if (test_bit(Faulty, &rdev->flags))
5774                         continue;
5775                 sync_blockdev(rdev->bdev);
5776                 invalidate_bdev(rdev->bdev);
5777                 if (mddev->ro != 1 &&
5778                     (bdev_read_only(rdev->bdev) ||
5779                      bdev_read_only(rdev->meta_bdev))) {
5780                         mddev->ro = 1;
5781                         if (mddev->gendisk)
5782                                 set_disk_ro(mddev->gendisk, 1);
5783                 }
5784
5785                 if (rdev->sb_page)
5786                         mddev->has_superblocks = true;
5787
5788                 /* Perform some consistency tests on the device.
5789                  * We don't want the data to overlap the metadata;
5790                  * internal bitmap issues have been handled elsewhere.
5791                  */
5792                 if (rdev->meta_bdev) {
5793                         /* Nothing to check */;
5794                 } else if (rdev->data_offset < rdev->sb_start) {
5795                         if (mddev->dev_sectors &&
5796                             rdev->data_offset + mddev->dev_sectors
5797                             > rdev->sb_start) {
5798                                 pr_warn("md: %s: data overlaps metadata\n",
5799                                         mdname(mddev));
5800                                 return -EINVAL;
5801                         }
5802                 } else {
5803                         if (rdev->sb_start + rdev->sb_size/512
5804                             > rdev->data_offset) {
5805                                 pr_warn("md: %s: metadata overlaps data\n",
5806                                         mdname(mddev));
5807                                 return -EINVAL;
5808                         }
5809                 }
5810                 sysfs_notify_dirent_safe(rdev->sysfs_state);
5811         }
5812
5813         if (!bioset_initialized(&mddev->bio_set)) {
5814                 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5815                 if (err)
5816                         return err;
5817         }
5818         if (!bioset_initialized(&mddev->sync_set)) {
5819                 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5820                 if (err)
5821                         return err;
5822         }
5823
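             /*
              * Look up the personality under pers_lock and take a module
              * reference so it cannot be unloaded while this array uses it.
              */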
5824         spin_lock(&pers_lock);
5825         pers = find_pers(mddev->level, mddev->clevel);
5826         if (!pers || !try_module_get(pers->owner)) {
5827                 spin_unlock(&pers_lock);
5828                 if (mddev->level != LEVEL_NONE)
5829                         pr_warn("md: personality for level %d is not loaded!\n",
5830                                 mddev->level);
5831                 else
5832                         pr_warn("md: personality for level %s is not loaded!\n",
5833                                 mddev->clevel);
5834                 err = -EINVAL;
5835                 goto abort;
5836         }
5837         spin_unlock(&pers_lock);
5838         if (mddev->level != pers->level) {
5839                 mddev->level = pers->level;
5840                 mddev->new_level = pers->level;
5841         }
5842         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5843
5844         if (mddev->reshape_position != MaxSector &&
5845             pers->start_reshape == NULL) {
5846                 /* This personality cannot handle reshaping... */
5847                 module_put(pers->owner);
5848                 err = -EINVAL;
5849                 goto abort;
5850         }
5851
5852         if (pers->sync_request) {
5853                 /* Warn if this is a potentially silly
5854                  * configuration.
5855                  */
5856                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5857                 struct md_rdev *rdev2;
5858                 int warned = 0;
5859
5860                 rdev_for_each(rdev, mddev)
5861                         rdev_for_each(rdev2, mddev) {
5862                                 if (rdev < rdev2 &&
5863                                     rdev->bdev->bd_contains ==
5864                                     rdev2->bdev->bd_contains) {
5865                                         pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5866                                                 mdname(mddev),
5867                                                 bdevname(rdev->bdev,b),
5868                                                 bdevname(rdev2->bdev,b2));
5869                                         warned = 1;
5870                                 }
5871                         }
5872
5873                 if (warned)
5874                         pr_warn("True protection against single-disk failure might be compromised.\n");
5875         }
5876
5877         mddev->recovery = 0;
5878         /* may be overridden by the personality */
5879         mddev->resync_max_sectors = mddev->dev_sectors;
5880
5881         mddev->ok_start_degraded = start_dirty_degraded;
5882
5883         if (start_readonly && mddev->ro == 0)
5884                 mddev->ro = 2; /* read-only, but switch on first write */
5885
5886         err = pers->run(mddev);
5887         if (err)
5888                 pr_warn("md: pers->run() failed ...\n");
5889         else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
5890                 WARN_ONCE(!mddev->external_size,
5891                           "%s: default size too small, but 'external_size' not in effect?\n",
5892                           __func__);
5893                 pr_warn("md: invalid array_size %llu > default size %llu\n",
5894                         (unsigned long long)mddev->array_sectors / 2,
5895                         (unsigned long long)pers->size(mddev, 0, 0) / 2);
5896                 err = -EINVAL;
5897         }
5898         if (err == 0 && pers->sync_request &&
5899             (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5900                 struct bitmap *bitmap;
5901
5902                 bitmap = md_bitmap_create(mddev, -1);
5903                 if (IS_ERR(bitmap)) {
5904                         err = PTR_ERR(bitmap);
5905                         pr_warn("%s: failed to create bitmap (%d)\n",
5906                                 mdname(mddev), err);
5907                 } else
5908                         mddev->bitmap = bitmap;
5909
5910         }
5911         if (err)
5912                 goto bitmap_abort;
5913
5914         if (mddev->bitmap_info.max_write_behind > 0) {
5915                 bool create_pool = false;
5916
5917                 rdev_for_each(rdev, mddev) {
5918                         if (test_bit(WriteMostly, &rdev->flags) &&
5919                             rdev_init_serial(rdev))
5920                                 create_pool = true;
5921                 }
5922                 if (create_pool && mddev->serial_info_pool == NULL) {
5923                         mddev->serial_info_pool =
5924                                 mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
5925                                                     sizeof(struct serial_info));
5926                         if (!mddev->serial_info_pool) {
5927                                 err = -ENOMEM;
5928                                 goto bitmap_abort;
5929                         }
5930                 }
5931         }
5932
5933         if (mddev->queue) {
5934                 bool nonrot = true;
5935
5936                 rdev_for_each(rdev, mddev) {
5937                         if (rdev->raid_disk >= 0 &&
5938                             !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
5939                                 nonrot = false;
5940                                 break;
5941                         }
5942                 }
5943                 if (mddev->degraded)
5944                         nonrot = false;
5945                 if (nonrot)
5946                         blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
5947                 else
5948                         blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
5949                 mddev->queue->backing_dev_info->congested_data = mddev;
5950                 mddev->queue->backing_dev_info->congested_fn = md_congested;
5951         }
5952         if (pers->sync_request) {
5953                 if (mddev->kobj.sd &&
5954                     sysfs_create_group(&mddev->kobj, &md_redundancy_group))
5955                         pr_warn("md: cannot register extra attributes for %s\n",
5956                                 mdname(mddev));
5957                 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
5958         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
5959                 mddev->ro = 0;
5960
5961         atomic_set(&mddev->max_corr_read_errors,
5962                    MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
5963         mddev->safemode = 0;
5964         if (mddev_is_clustered(mddev))
5965                 mddev->safemode_delay = 0;
5966         else
5967                 mddev->safemode_delay = (200 * HZ)/1000 + 1; /* 200 msec delay */
5968         mddev->in_sync = 1;
5969         smp_wmb();
5970         spin_lock(&mddev->lock);
5971         mddev->pers = pers;
5972         spin_unlock(&mddev->lock);
5973         rdev_for_each(rdev, mddev)
5974                 if (rdev->raid_disk >= 0)
5975                         sysfs_link_rdev(mddev, rdev); /* failure here is OK */
5976
5977         if (mddev->degraded && !mddev->ro)
5978                 /* This ensures that recovering status is reported immediately
5979                  * via sysfs - until a lack of spares is confirmed.
5980                  */
5981                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5982         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5983
5984         if (mddev->sb_flags)
5985                 md_update_sb(mddev, 0);
5986
5987         md_new_event(mddev);
5988         return 0;
5989
5990 bitmap_abort:
5991         mddev_detach(mddev);
5992         if (mddev->private)
5993                 pers->free(mddev, mddev->private);
5994         mddev->private = NULL;
5995         module_put(pers->owner);
5996         md_bitmap_destroy(mddev);
5997 abort:
5998         bioset_exit(&mddev->bio_set);
5999         bioset_exit(&mddev->sync_set);
6000         return err;
6001 }
6002 EXPORT_SYMBOL_GPL(md_run);
6003
6004 static int do_md_run(struct mddev *mddev)
6005 {
6006         int err;
6007
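             /*
              * While MD_NOT_READY is set, array_state does not report the
              * array as ready, so user space cannot act on a half-configured
              * array; the flag is cleared below once setup has finished.
              */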
6008         set_bit(MD_NOT_READY, &mddev->flags);
6009         err = md_run(mddev);
6010         if (err)
6011                 goto out;
6012         err = md_bitmap_load(mddev);
6013         if (err) {
6014                 md_bitmap_destroy(mddev);
6015                 goto out;
6016         }
6017
6018         if (mddev_is_clustered(mddev))
6019                 md_allow_write(mddev);
6020
6021         /* run start up tasks that require md_thread */
6022         md_start(mddev);
6023
6024         md_wakeup_thread(mddev->thread);
6025         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
6026
6027         set_capacity(mddev->gendisk, mddev->array_sectors);
6028         revalidate_disk(mddev->gendisk);
6029         clear_bit(MD_NOT_READY, &mddev->flags);
6030         mddev->changed = 1;
6031         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
6032         sysfs_notify_dirent_safe(mddev->sysfs_state);
6033         sysfs_notify_dirent_safe(mddev->sysfs_action);
6034         sysfs_notify(&mddev->kobj, NULL, "degraded");
6035 out:
6036         clear_bit(MD_NOT_READY, &mddev->flags);
6037         return err;
6038 }
6039
6040 int md_start(struct mddev *mddev)
6041 {
6042         int ret = 0;
6043
6044         if (mddev->pers->start) {
6045                 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6046                 md_wakeup_thread(mddev->thread);
6047                 ret = mddev->pers->start(mddev);
6048                 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6049                 md_wakeup_thread(mddev->sync_thread);
6050         }
6051         return ret;
6052 }
6053 EXPORT_SYMBOL_GPL(md_start);
6054
6055 static int restart_array(struct mddev *mddev)
6056 {
6057         struct gendisk *disk = mddev->gendisk;
6058         struct md_rdev *rdev;
6059         bool has_journal = false;
6060         bool has_readonly = false;
6061
6062         /* Complain if it has no devices */
6063         if (list_empty(&mddev->disks))
6064                 return -ENXIO;
6065         if (!mddev->pers)
6066                 return -EINVAL;
6067         if (!mddev->ro)
6068                 return -EBUSY;
6069
6070         rcu_read_lock();
6071         rdev_for_each_rcu(rdev, mddev) {
6072                 if (test_bit(Journal, &rdev->flags) &&
6073                     !test_bit(Faulty, &rdev->flags))
6074                         has_journal = true;
6075                 if (bdev_read_only(rdev->bdev))
6076                         has_readonly = true;
6077         }
6078         rcu_read_unlock();
6079         if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
6080                 /* Don't restart rw with journal missing/faulty */
6081                 return -EINVAL;
6082         if (has_readonly)
6083                 return -EROFS;
6084
6085         mddev->safemode = 0;
6086         mddev->ro = 0;
6087         set_disk_ro(disk, 0);
6088         pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
6089         /* Kick recovery or resync if necessary */
6090         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6091         md_wakeup_thread(mddev->thread);
6092         md_wakeup_thread(mddev->sync_thread);
6093         sysfs_notify_dirent_safe(mddev->sysfs_state);
6094         return 0;
6095 }
6096
6097 static void md_clean(struct mddev *mddev)
6098 {
6099         mddev->array_sectors = 0;
6100         mddev->external_size = 0;
6101         mddev->dev_sectors = 0;
6102         mddev->raid_disks = 0;
6103         mddev->recovery_cp = 0;
6104         mddev->resync_min = 0;
6105         mddev->resync_max = MaxSector;
6106         mddev->reshape_position = MaxSector;
6107         mddev->external = 0;
6108         mddev->persistent = 0;
6109         mddev->level = LEVEL_NONE;
6110         mddev->clevel[0] = 0;
6111         mddev->flags = 0;
6112         mddev->sb_flags = 0;
6113         mddev->ro = 0;
6114         mddev->metadata_type[0] = 0;
6115         mddev->chunk_sectors = 0;
6116         mddev->ctime = mddev->utime = 0;
6117         mddev->layout = 0;
6118         mddev->max_disks = 0;
6119         mddev->events = 0;
6120         mddev->can_decrease_events = 0;
6121         mddev->delta_disks = 0;
6122         mddev->reshape_backwards = 0;
6123         mddev->new_level = LEVEL_NONE;
6124         mddev->new_layout = 0;
6125         mddev->new_chunk_sectors = 0;
6126         mddev->curr_resync = 0;
6127         atomic64_set(&mddev->resync_mismatches, 0);
6128         mddev->suspend_lo = mddev->suspend_hi = 0;
6129         mddev->sync_speed_min = mddev->sync_speed_max = 0;
6130         mddev->recovery = 0;
6131         mddev->in_sync = 0;
6132         mddev->changed = 0;
6133         mddev->degraded = 0;
6134         mddev->safemode = 0;
6135         mddev->private = NULL;
6136         mddev->cluster_info = NULL;
6137         mddev->bitmap_info.offset = 0;
6138         mddev->bitmap_info.default_offset = 0;
6139         mddev->bitmap_info.default_space = 0;
6140         mddev->bitmap_info.chunksize = 0;
6141         mddev->bitmap_info.daemon_sleep = 0;
6142         mddev->bitmap_info.max_write_behind = 0;
6143         mddev->bitmap_info.nodes = 0;
6144 }
6145
6146 static void __md_stop_writes(struct mddev *mddev)
6147 {
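             /*
              * Freeze recovery so nothing new can start, then reap any sync
              * thread that is already running before quiescing writes.
              */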
6148         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6149         flush_workqueue(md_misc_wq);
6150         if (mddev->sync_thread) {
6151                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6152                 md_reap_sync_thread(mddev);
6153         }
6154
6155         del_timer_sync(&mddev->safemode_timer);
6156
6157         if (mddev->pers && mddev->pers->quiesce) {
6158                 mddev->pers->quiesce(mddev, 1);
6159                 mddev->pers->quiesce(mddev, 0);
6160         }
6161         md_bitmap_flush(mddev);
6162
6163         if (mddev->ro == 0 &&
6164             ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
6165              mddev->sb_flags)) {
6166                 /* mark array as shutdown cleanly */
6167                 if (!mddev_is_clustered(mddev))
6168                         mddev->in_sync = 1;
6169                 md_update_sb(mddev, 1);
6170         }
6171         /* disable policy to guarantee rdevs free resources for serialization */
6172         mddev->serialize_policy = 0;
6173         mddev_destroy_serial_pool(mddev, NULL, true);
6174 }
6175
6176 void md_stop_writes(struct mddev *mddev)
6177 {
6178         mddev_lock_nointr(mddev);
6179         __md_stop_writes(mddev);
6180         mddev_unlock(mddev);
6181 }
6182 EXPORT_SYMBOL_GPL(md_stop_writes);
6183
6184 static void mddev_detach(struct mddev *mddev)
6185 {
6186         md_bitmap_wait_behind_writes(mddev);
6187         if (mddev->pers && mddev->pers->quiesce) {
6188                 mddev->pers->quiesce(mddev, 1);
6189                 mddev->pers->quiesce(mddev, 0);
6190         }
6191         md_unregister_thread(&mddev->thread);
6192         if (mddev->queue)
6193                 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
6194 }
6195
6196 static void __md_stop(struct mddev *mddev)
6197 {
6198         struct md_personality *pers = mddev->pers;
6199         md_bitmap_destroy(mddev);
6200         mddev_detach(mddev);
6201         /* Ensure ->event_work is done */
6202         flush_workqueue(md_misc_wq);
6203         spin_lock(&mddev->lock);
6204         mddev->pers = NULL;
6205         spin_unlock(&mddev->lock);
6206         pers->free(mddev, mddev->private);
6207         mddev->private = NULL;
6208         if (pers->sync_request && mddev->to_remove == NULL)
6209                 mddev->to_remove = &md_redundancy_group;
6210         module_put(pers->owner);
6211         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6212 }
6213
6214 void md_stop(struct mddev *mddev)
6215 {
6216         /* Stop the array and free any attached data structures.
6217          * This is called from dm-raid.
6218          */
6219         __md_stop(mddev);
6220         bioset_exit(&mddev->bio_set);
6221         bioset_exit(&mddev->sync_set);
6222 }
6223
6224 EXPORT_SYMBOL_GPL(md_stop);
6225
6226 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
6227 {
6228         int err = 0;
6229         int did_freeze = 0;
6230
6231         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6232                 did_freeze = 1;
6233                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6234                 md_wakeup_thread(mddev->thread);
6235         }
6236         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
6237                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6238         if (mddev->sync_thread)
6239                 /* Thread might be blocked waiting for metadata update
6240                  * which will now never happen */
6241                 wake_up_process(mddev->sync_thread->tsk);
6242
6243         if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
6244                 return -EBUSY;
6245         mddev_unlock(mddev);
6246         wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
6247                                           &mddev->recovery));
6248         wait_event(mddev->sb_wait,
6249                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
6250         mddev_lock_nointr(mddev);
6251
6252         mutex_lock(&mddev->open_mutex);
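             /*
              * The ioctl itself may hold one reference through 'bdev', so a
              * single opener is expected when bdev is non-NULL; any more
              * means the array is still in use by someone else.
              */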
6253         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
6254             mddev->sync_thread ||
6255             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
6256                 pr_warn("md: %s still in use.\n", mdname(mddev));
6257                 if (did_freeze) {
6258                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6259                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6260                         md_wakeup_thread(mddev->thread);
6261                 }
6262                 err = -EBUSY;
6263                 goto out;
6264         }
6265         if (mddev->pers) {
6266                 __md_stop_writes(mddev);
6267
6268                 err  = -ENXIO;
6269                 if (mddev->ro==1)
6270                         goto out;
6271                 mddev->ro = 1;
6272                 set_disk_ro(mddev->gendisk, 1);
6273                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6274                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6275                 md_wakeup_thread(mddev->thread);
6276                 sysfs_notify_dirent_safe(mddev->sysfs_state);
6277                 err = 0;
6278         }
6279 out:
6280         mutex_unlock(&mddev->open_mutex);
6281         return err;
6282 }
6283
6284 /* mode:
6285  *   0 - completely stop and dis-assemble array
6286  *   2 - stop but do not disassemble array
6287  */
6288 static int do_md_stop(struct mddev *mddev, int mode,
6289                       struct block_device *bdev)
6290 {
6291         struct gendisk *disk = mddev->gendisk;
6292         struct md_rdev *rdev;
6293         int did_freeze = 0;
6294
6295         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6296                 did_freeze = 1;
6297                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6298                 md_wakeup_thread(mddev->thread);
6299         }
6300         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
6301                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6302         if (mddev->sync_thread)
6303                 /* Thread might be blocked waiting for metadata update
6304                  * which will now never happen */
6305                 wake_up_process(mddev->sync_thread->tsk);
6306
6307         mddev_unlock(mddev);
6308         wait_event(resync_wait, (mddev->sync_thread == NULL &&
6309                                  !test_bit(MD_RECOVERY_RUNNING,
6310                                            &mddev->recovery)));
6311         mddev_lock_nointr(mddev);
6312
6313         mutex_lock(&mddev->open_mutex);
6314         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
6315             mddev->sysfs_active ||
6316             mddev->sync_thread ||
6317             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
6318                 pr_warn("md: %s still in use.\n", mdname(mddev));
6319                 mutex_unlock(&mddev->open_mutex);
6320                 if (did_freeze) {
6321                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6322                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6323                         md_wakeup_thread(mddev->thread);
6324                 }
6325                 return -EBUSY;
6326         }
6327         if (mddev->pers) {
6328                 if (mddev->ro)
6329                         set_disk_ro(disk, 0);
6330
6331                 __md_stop_writes(mddev);
6332                 __md_stop(mddev);
6333                 mddev->queue->backing_dev_info->congested_fn = NULL;
6334
6335                 /* tell userspace to handle 'inactive' */
6336                 sysfs_notify_dirent_safe(mddev->sysfs_state);
6337
6338                 rdev_for_each(rdev, mddev)
6339                         if (rdev->raid_disk >= 0)
6340                                 sysfs_unlink_rdev(mddev, rdev);
6341
6342                 set_capacity(disk, 0);
6343                 mutex_unlock(&mddev->open_mutex);
6344                 mddev->changed = 1;
6345                 revalidate_disk(disk);
6346
6347                 if (mddev->ro)
6348                         mddev->ro = 0;
6349         } else
6350                 mutex_unlock(&mddev->open_mutex);
6351         /*
6352          * Free resources if final stop
6353          */
6354         if (mode == 0) {
6355                 pr_info("md: %s stopped.\n", mdname(mddev));
6356
6357                 if (mddev->bitmap_info.file) {
6358                         struct file *f = mddev->bitmap_info.file;
6359                         spin_lock(&mddev->lock);
6360                         mddev->bitmap_info.file = NULL;
6361                         spin_unlock(&mddev->lock);
6362                         fput(f);
6363                 }
6364                 mddev->bitmap_info.offset = 0;
6365
6366                 export_array(mddev);
6367
6368                 md_clean(mddev);
6369                 if (mddev->hold_active == UNTIL_STOP)
6370                         mddev->hold_active = 0;
6371         }
6372         md_new_event(mddev);
6373         sysfs_notify_dirent_safe(mddev->sysfs_state);
6374         return 0;
6375 }
6376
6377 #ifndef MODULE
6378 static void autorun_array(struct mddev *mddev)
6379 {
6380         struct md_rdev *rdev;
6381         int err;
6382
6383         if (list_empty(&mddev->disks))
6384                 return;
6385
6386         pr_info("md: running: ");
6387
6388         rdev_for_each(rdev, mddev) {
6389                 char b[BDEVNAME_SIZE];
6390                 pr_cont("<%s>", bdevname(rdev->bdev,b));
6391         }
6392         pr_cont("\n");
6393
6394         err = do_md_run(mddev);
6395         if (err) {
6396                 pr_warn("md: do_md_run() returned %d\n", err);
6397                 do_md_stop(mddev, 0, NULL);
6398         }
6399 }
6400
6401 /*
6402  * let's try to run arrays based on all disks that have arrived
6403  * until now. (those are in pending_raid_disks)
6404  *
6405  * the method: pick the first pending disk, collect all disks with
6406  * the same UUID, remove all from the pending list and put them into
6407  * the 'same_array' list. Then order this list based on superblock
6408  * update time (freshest comes first), kick out 'old' disks and
6409  * compare superblocks. If everything's fine then run it.
6410  *
6411  * If "unit" is allocated, then bump its reference count
6412  */
6413 static void autorun_devices(int part)
6414 {
6415         struct md_rdev *rdev0, *rdev, *tmp;
6416         struct mddev *mddev;
6417         char b[BDEVNAME_SIZE];
6418
6419         pr_info("md: autorun ...\n");
6420         while (!list_empty(&pending_raid_disks)) {
6421                 int unit;
6422                 dev_t dev;
6423                 LIST_HEAD(candidates);
6424                 rdev0 = list_entry(pending_raid_disks.next,
6425                                          struct md_rdev, same_set);
6426
6427                 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
6428                 INIT_LIST_HEAD(&candidates);
6429                 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
6430                         if (super_90_load(rdev, rdev0, 0) >= 0) {
6431                                 pr_debug("md:  adding %s ...\n",
6432                                          bdevname(rdev->bdev,b));
6433                                 list_move(&rdev->same_set, &candidates);
6434                         }
6435                 /*
6436                  * now we have a set of devices, with all of them having
6437                  * mostly sane superblocks. It's time to allocate the
6438                  * mddev.
6439                  */
6440                 if (part) {
6441                         dev = MKDEV(mdp_major,
6442                                     rdev0->preferred_minor << MdpMinorShift);
6443                         unit = MINOR(dev) >> MdpMinorShift;
6444                 } else {
6445                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
6446                         unit = MINOR(dev);
6447                 }
6448                 if (rdev0->preferred_minor != unit) {
6449                         pr_warn("md: unit number in %s is bad: %d\n",
6450                                 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
6451                         break;
6452                 }
6453
6454                 md_probe(dev, NULL, NULL);
6455                 mddev = mddev_find(dev);
6456                 if (!mddev || !mddev->gendisk) {
6457                         if (mddev)
6458                                 mddev_put(mddev);
6459                         break;
6460                 }
6461                 if (mddev_lock(mddev))
6462                         pr_warn("md: %s locked, cannot run\n", mdname(mddev));
6463                 else if (mddev->raid_disks || mddev->major_version
6464                          || !list_empty(&mddev->disks)) {
6465                         pr_warn("md: %s already running, cannot run %s\n",
6466                                 mdname(mddev), bdevname(rdev0->bdev,b));
6467                         mddev_unlock(mddev);
6468                 } else {
6469                         pr_debug("md: created %s\n", mdname(mddev));
6470                         mddev->persistent = 1;
6471                         rdev_for_each_list(rdev, tmp, &candidates) {
6472                                 list_del_init(&rdev->same_set);
6473                                 if (bind_rdev_to_array(rdev, mddev))
6474                                         export_rdev(rdev);
6475                         }
6476                         autorun_array(mddev);
6477                         mddev_unlock(mddev);
6478                 }
6479                 /* On success, candidates will be empty; on error
6480                  * it won't be...
6481                  */
6482                 rdev_for_each_list(rdev, tmp, &candidates) {
6483                         list_del_init(&rdev->same_set);
6484                         export_rdev(rdev);
6485                 }
6486                 mddev_put(mddev);
6487         }
6488         pr_info("md: ... autorun DONE.\n");
6489 }
6490 #endif /* !MODULE */
6491
6492 static int get_version(void __user *arg)
6493 {
6494         mdu_version_t ver;
6495
6496         ver.major = MD_MAJOR_VERSION;
6497         ver.minor = MD_MINOR_VERSION;
6498         ver.patchlevel = MD_PATCHLEVEL_VERSION;
6499
6500         if (copy_to_user(arg, &ver, sizeof(ver)))
6501                 return -EFAULT;
6502
6503         return 0;
6504 }
6505
6506 static int get_array_info(struct mddev *mddev, void __user *arg)
6507 {
6508         mdu_array_info_t info;
6509         int nr,working,insync,failed,spare;
6510         struct md_rdev *rdev;
6511
6512         nr = working = insync = failed = spare = 0;
6513         rcu_read_lock();
6514         rdev_for_each_rcu(rdev, mddev) {
6515                 nr++;
6516                 if (test_bit(Faulty, &rdev->flags))
6517                         failed++;
6518                 else {
6519                         working++;
6520                         if (test_bit(In_sync, &rdev->flags))
6521                                 insync++;
6522                         else if (test_bit(Journal, &rdev->flags))
6523                                 /* TODO: add journal count to md_u.h */
6524                                 ;
6525                         else
6526                                 spare++;
6527                 }
6528         }
6529         rcu_read_unlock();
6530
6531         info.major_version = mddev->major_version;
6532         info.minor_version = mddev->minor_version;
6533         info.patch_version = MD_PATCHLEVEL_VERSION;
6534         info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
6535         info.level         = mddev->level;
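             /* ->dev_sectors counts 512-byte sectors; info.size is in KiB */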
6536         info.size          = mddev->dev_sectors / 2;
6537         if (info.size != mddev->dev_sectors / 2) /* overflow */
6538                 info.size = -1;
6539         info.nr_disks      = nr;
6540         info.raid_disks    = mddev->raid_disks;
6541         info.md_minor      = mddev->md_minor;
6542         info.not_persistent= !mddev->persistent;
6543
6544         info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
6545         info.state         = 0;
6546         if (mddev->in_sync)
6547                 info.state = (1<<MD_SB_CLEAN);
6548         if (mddev->bitmap && mddev->bitmap_info.offset)
6549                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
6550         if (mddev_is_clustered(mddev))
6551                 info.state |= (1<<MD_SB_CLUSTERED);
6552         info.active_disks  = insync;
6553         info.working_disks = working;
6554         info.failed_disks  = failed;
6555         info.spare_disks   = spare;
6556
6557         info.layout        = mddev->layout;
6558         info.chunk_size    = mddev->chunk_sectors << 9;
6559
6560         if (copy_to_user(arg, &info, sizeof(info)))
6561                 return -EFAULT;
6562
6563         return 0;
6564 }
6565
6566 static int get_bitmap_file(struct mddev *mddev, void __user * arg)
6567 {
6568         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
6569         char *ptr;
6570         int err;
6571
6572         file = kzalloc(sizeof(*file), GFP_NOIO);
6573         if (!file)
6574                 return -ENOMEM;
6575
6576         err = 0;
6577         spin_lock(&mddev->lock);
6578         /* bitmap enabled */
6579         if (mddev->bitmap_info.file) {
6580                 ptr = file_path(mddev->bitmap_info.file, file->pathname,
6581                                 sizeof(file->pathname));
6582                 if (IS_ERR(ptr))
6583                         err = PTR_ERR(ptr);
6584                 else
6585                         memmove(file->pathname, ptr,
6586                                 sizeof(file->pathname)-(ptr-file->pathname));
6587         }
6588         spin_unlock(&mddev->lock);
6589
6590         if (err == 0 &&
6591             copy_to_user(arg, file, sizeof(*file)))
6592                 err = -EFAULT;
6593
6594         kfree(file);
6595         return err;
6596 }
6597
6598 static int get_disk_info(struct mddev *mddev, void __user * arg)
6599 {
6600         mdu_disk_info_t info;
6601         struct md_rdev *rdev;
6602
6603         if (copy_from_user(&info, arg, sizeof(info)))
6604                 return -EFAULT;
6605
6606         rcu_read_lock();
6607         rdev = md_find_rdev_nr_rcu(mddev, info.number);
6608         if (rdev) {
6609                 info.major = MAJOR(rdev->bdev->bd_dev);
6610                 info.minor = MINOR(rdev->bdev->bd_dev);
6611                 info.raid_disk = rdev->raid_disk;
6612                 info.state = 0;
6613                 if (test_bit(Faulty, &rdev->flags))
6614                         info.state |= (1<<MD_DISK_FAULTY);
6615                 else if (test_bit(In_sync, &rdev->flags)) {
6616                         info.state |= (1<<MD_DISK_ACTIVE);
6617                         info.state |= (1<<MD_DISK_SYNC);
6618                 }
6619                 if (test_bit(Journal, &rdev->flags))
6620                         info.state |= (1<<MD_DISK_JOURNAL);
6621                 if (test_bit(WriteMostly, &rdev->flags))
6622                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
6623                 if (test_bit(FailFast, &rdev->flags))
6624                         info.state |= (1<<MD_DISK_FAILFAST);
6625         } else {
6626                 info.major = info.minor = 0;
6627                 info.raid_disk = -1;
6628                 info.state = (1<<MD_DISK_REMOVED);
6629         }
6630         rcu_read_unlock();
6631
6632         if (copy_to_user(arg, &info, sizeof(info)))
6633                 return -EFAULT;
6634
6635         return 0;
6636 }
6637
6638 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
6639 {
6640         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
6641         struct md_rdev *rdev;
6642         dev_t dev = MKDEV(info->major,info->minor);
6643
6644         if (mddev_is_clustered(mddev) &&
6645                 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
6646                 pr_warn("%s: Cannot add to clustered mddev.\n",
6647                         mdname(mddev));
6648                 return -EINVAL;
6649         }
6650
6651         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6652                 return -EOVERFLOW;
6653
6654         if (!mddev->raid_disks) {
6655                 int err;
6656                 /* expecting a device which has a superblock */
6657                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6658                 if (IS_ERR(rdev)) {
6659                         pr_warn("md: md_import_device returned %ld\n",
6660                                 PTR_ERR(rdev));
6661                         return PTR_ERR(rdev);
6662                 }
6663                 if (!list_empty(&mddev->disks)) {
6664                         struct md_rdev *rdev0
6665                                 = list_entry(mddev->disks.next,
6666                                              struct md_rdev, same_set);
6667                         err = super_types[mddev->major_version]
6668                                 .load_super(rdev, rdev0, mddev->minor_version);
6669                         if (err < 0) {
6670                                 pr_warn("md: %s has different UUID to %s\n",
6671                                         bdevname(rdev->bdev,b),
6672                                         bdevname(rdev0->bdev,b2));
6673                                 export_rdev(rdev);
6674                                 return -EINVAL;
6675                         }
6676                 }
6677                 err = bind_rdev_to_array(rdev, mddev);
6678                 if (err)
6679                         export_rdev(rdev);
6680                 return err;
6681         }
6682
6683         /*
6684          * add_new_disk can be used once the array is assembled
6685          * to add "hot spares".  They must already have a superblock
6686          * written
6687          */
6688         if (mddev->pers) {
6689                 int err;
6690                 if (!mddev->pers->hot_add_disk) {
6691                         pr_warn("%s: personality does not support diskops!\n",
6692                                 mdname(mddev));
6693                         return -EINVAL;
6694                 }
6695                 if (mddev->persistent)
6696                         rdev = md_import_device(dev, mddev->major_version,
6697                                                 mddev->minor_version);
6698                 else
6699                         rdev = md_import_device(dev, -1, -1);
6700                 if (IS_ERR(rdev)) {
6701                         pr_warn("md: md_import_device returned %ld\n",
6702                                 PTR_ERR(rdev));
6703                         return PTR_ERR(rdev);
6704                 }
6705                 /* set saved_raid_disk if appropriate */
6706                 if (!mddev->persistent) {
6707                         if (info->state & (1<<MD_DISK_SYNC)  &&
6708                             info->raid_disk < mddev->raid_disks) {
6709                                 rdev->raid_disk = info->raid_disk;
6710                                 set_bit(In_sync, &rdev->flags);
6711                                 clear_bit(Bitmap_sync, &rdev->flags);
6712                         } else
6713                                 rdev->raid_disk = -1;
6714                         rdev->saved_raid_disk = rdev->raid_disk;
6715                 } else
6716                         super_types[mddev->major_version].
6717                                 validate_super(mddev, rdev);
6718                 if ((info->state & (1<<MD_DISK_SYNC)) &&
6719                      rdev->raid_disk != info->raid_disk) {
6720                         /* This was a hot-add request, but the events don't
6721                          * match, so reject it.
6722                          */
6723                         export_rdev(rdev);
6724                         return -EINVAL;
6725                 }
6726
6727                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
6728                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6729                         set_bit(WriteMostly, &rdev->flags);
6730                 else
6731                         clear_bit(WriteMostly, &rdev->flags);
6732                 if (info->state & (1<<MD_DISK_FAILFAST))
6733                         set_bit(FailFast, &rdev->flags);
6734                 else
6735                         clear_bit(FailFast, &rdev->flags);
6736
6737                 if (info->state & (1<<MD_DISK_JOURNAL)) {
6738                         struct md_rdev *rdev2;
6739                         bool has_journal = false;
6740
6741                         /* make sure no existing journal disk */
6742                         rdev_for_each(rdev2, mddev) {
6743                                 if (test_bit(Journal, &rdev2->flags)) {
6744                                         has_journal = true;
6745                                         break;
6746                                 }
6747                         }
6748                         if (has_journal || mddev->bitmap) {
6749                                 export_rdev(rdev);
6750                                 return -EBUSY;
6751                         }
6752                         set_bit(Journal, &rdev->flags);
6753                 }
6754                 /*
6755                  * check whether the device shows up in other nodes
6756                  */
6757                 if (mddev_is_clustered(mddev)) {
6758                         if (info->state & (1 << MD_DISK_CANDIDATE))
6759                                 set_bit(Candidate, &rdev->flags);
6760                         else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
6761                                 /* --add initiated by this node */
6762                                 err = md_cluster_ops->add_new_disk(mddev, rdev);
6763                                 if (err) {
6764                                         export_rdev(rdev);
6765                                         return err;
6766                                 }
6767                         }
6768                 }
6769
6770                 rdev->raid_disk = -1;
6771                 err = bind_rdev_to_array(rdev, mddev);
6772
6773                 if (err)
6774                         export_rdev(rdev);
6775
6776                 if (mddev_is_clustered(mddev)) {
6777                         if (info->state & (1 << MD_DISK_CANDIDATE)) {
6778                                 if (!err) {
6779                                         err = md_cluster_ops->new_disk_ack(mddev,
6780                                                 err == 0);
6781                                         if (err)
6782                                                 md_kick_rdev_from_array(rdev);
6783                                 }
6784                         } else {
6785                                 if (err)
6786                                         md_cluster_ops->add_new_disk_cancel(mddev);
6787                                 else
6788                                         err = add_bound_rdev(rdev);
6789                         }
6790
6791                 } else if (!err)
6792                         err = add_bound_rdev(rdev);
6793
6794                 return err;
6795         }
6796
6797         /* otherwise, add_new_disk is only allowed
6798          * for major_version==0 superblocks
6799          */
6800         if (mddev->major_version != 0) {
6801                 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
6802                 return -EINVAL;
6803         }
6804
6805         if (!(info->state & (1<<MD_DISK_FAULTY))) {
6806                 int err;
6807                 rdev = md_import_device(dev, -1, 0);
6808                 if (IS_ERR(rdev)) {
6809                         pr_warn("md: error, md_import_device() returned %ld\n",
6810                                 PTR_ERR(rdev));
6811                         return PTR_ERR(rdev);
6812                 }
6813                 rdev->desc_nr = info->number;
6814                 if (info->raid_disk < mddev->raid_disks)
6815                         rdev->raid_disk = info->raid_disk;
6816                 else
6817                         rdev->raid_disk = -1;
6818
6819                 if (rdev->raid_disk < mddev->raid_disks)
6820                         if (info->state & (1<<MD_DISK_SYNC))
6821                                 set_bit(In_sync, &rdev->flags);
6822
6823                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6824                         set_bit(WriteMostly, &rdev->flags);
6825                 if (info->state & (1<<MD_DISK_FAILFAST))
6826                         set_bit(FailFast, &rdev->flags);
6827
6828                 if (!mddev->persistent) {
6829                         pr_debug("md: nonpersistent superblock ...\n");
6830                         rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6831                 } else
6832                         rdev->sb_start = calc_dev_sboffset(rdev);
6833                 rdev->sectors = rdev->sb_start;
6834
6835                 err = bind_rdev_to_array(rdev, mddev);
6836                 if (err) {
6837                         export_rdev(rdev);
6838                         return err;
6839                 }
6840         }
6841
6842         return 0;
6843 }
6844
6845 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6846 {
6847         char b[BDEVNAME_SIZE];
6848         struct md_rdev *rdev;
6849
6850         if (!mddev->pers)
6851                 return -ENODEV;
6852
6853         rdev = find_rdev(mddev, dev);
6854         if (!rdev)
6855                 return -ENXIO;
6856
6857         if (rdev->raid_disk < 0)
6858                 goto kick_rdev;
6859
6860         clear_bit(Blocked, &rdev->flags);
6861         remove_and_add_spares(mddev, rdev);
6862
6863         if (rdev->raid_disk >= 0)
6864                 goto busy;
6865
6866 kick_rdev:
6867         if (mddev_is_clustered(mddev))
6868                 md_cluster_ops->remove_disk(mddev, rdev);
6869
6870         md_kick_rdev_from_array(rdev);
6871         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
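             /*
              * Personalities without a management thread (e.g. raid0, linear)
              * must write out the updated superblocks synchronously here.
              */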
6872         if (mddev->thread)
6873                 md_wakeup_thread(mddev->thread);
6874         else
6875                 md_update_sb(mddev, 1);
6876         md_new_event(mddev);
6877
6878         return 0;
6879 busy:
6880         pr_debug("md: cannot remove active disk %s from %s ...\n",
6881                  bdevname(rdev->bdev,b), mdname(mddev));
6882         return -EBUSY;
6883 }
6884
6885 static int hot_add_disk(struct mddev *mddev, dev_t dev)
6886 {
6887         char b[BDEVNAME_SIZE];
6888         int err;
6889         struct md_rdev *rdev;
6890
6891         if (!mddev->pers)
6892                 return -ENODEV;
6893
6894         if (mddev->major_version != 0) {
6895                 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
6896                         mdname(mddev));
6897                 return -EINVAL;
6898         }
6899         if (!mddev->pers->hot_add_disk) {
6900                 pr_warn("%s: personality does not support diskops!\n",
6901                         mdname(mddev));
6902                 return -EINVAL;
6903         }
6904
6905         rdev = md_import_device(dev, -1, 0);
6906         if (IS_ERR(rdev)) {
6907                 pr_warn("md: error, md_import_device() returned %ld\n",
6908                         PTR_ERR(rdev));
6909                 return -EINVAL;
6910         }
6911
6912         if (mddev->persistent)
6913                 rdev->sb_start = calc_dev_sboffset(rdev);
6914         else
6915                 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6916
6917         rdev->sectors = rdev->sb_start;
6918
6919         if (test_bit(Faulty, &rdev->flags)) {
6920                 pr_warn("md: cannot hot-add faulty %s disk to %s!\n",
6921                         bdevname(rdev->bdev,b), mdname(mddev));
6922                 err = -EINVAL;
6923                 goto abort_export;
6924         }
6925
6926         clear_bit(In_sync, &rdev->flags);
6927         rdev->desc_nr = -1;
6928         rdev->saved_raid_disk = -1;
6929         err = bind_rdev_to_array(rdev, mddev);
6930         if (err)
6931                 goto abort_export;
6932
6933         /*
6934          * The rest had better be atomic, because disk failures can be
6935          * noticed in interrupt context ...
6936          */
6937
6938         rdev->raid_disk = -1;
6939
6940         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6941         if (!mddev->thread)
6942                 md_update_sb(mddev, 1);
6943         /*
6944          * Kick recovery, maybe this spare has to be added to the
6945          * array immediately.
6946          */
6947         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6948         md_wakeup_thread(mddev->thread);
6949         md_new_event(mddev);
6950         return 0;
6951
6952 abort_export:
6953         export_rdev(rdev);
6954         return err;
6955 }
6956
6957 static int set_bitmap_file(struct mddev *mddev, int fd)
6958 {
6959         int err = 0;
6960
6961         if (mddev->pers) {
6962                 if (!mddev->pers->quiesce || !mddev->thread)
6963                         return -EBUSY;
6964                 if (mddev->recovery || mddev->sync_thread)
6965                         return -EBUSY;
6966                 /* we should be able to change the bitmap.. */
6967         }
6968
6969         if (fd >= 0) {
6970                 struct inode *inode;
6971                 struct file *f;
6972
6973                 if (mddev->bitmap || mddev->bitmap_info.file)
6974                         return -EEXIST; /* cannot add when bitmap is present */
6975                 f = fget(fd);
6976
6977                 if (f == NULL) {
6978                         pr_warn("%s: error: failed to get bitmap file\n",
6979                                 mdname(mddev));
6980                         return -EBADF;
6981                 }
6982
6983                 inode = f->f_mapping->host;
6984                 if (!S_ISREG(inode->i_mode)) {
6985                         pr_warn("%s: error: bitmap file must be a regular file\n",
6986                                 mdname(mddev));
6987                         err = -EBADF;
6988                 } else if (!(f->f_mode & FMODE_WRITE)) {
6989                         pr_warn("%s: error: bitmap file must be open for write\n",
6990                                 mdname(mddev));
6991                         err = -EBADF;
6992                 } else if (atomic_read(&inode->i_writecount) != 1) {
6993                         pr_warn("%s: error: bitmap file is already in use\n",
6994                                 mdname(mddev));
6995                         err = -EBUSY;
6996                 }
6997                 if (err) {
6998                         fput(f);
6999                         return err;
7000                 }
7001                 mddev->bitmap_info.file = f;
7002                 mddev->bitmap_info.offset = 0; /* file overrides offset */
7003         } else if (mddev->bitmap == NULL)
7004                 return -ENOENT; /* cannot remove what isn't there */
7005         err = 0;
7006         if (mddev->pers) {
7007                 if (fd >= 0) {
7008                         struct bitmap *bitmap;
7009
7010                         bitmap = md_bitmap_create(mddev, -1);
7011                         mddev_suspend(mddev);
7012                         if (!IS_ERR(bitmap)) {
7013                                 mddev->bitmap = bitmap;
7014                                 err = md_bitmap_load(mddev);
7015                         } else
7016                                 err = PTR_ERR(bitmap);
7017                         if (err) {
7018                                 md_bitmap_destroy(mddev);
7019                                 fd = -1;
7020                         }
7021                         mddev_resume(mddev);
7022                 } else if (fd < 0) {
7023                         mddev_suspend(mddev);
7024                         md_bitmap_destroy(mddev);
7025                         mddev_resume(mddev);
7026                 }
7027         }
7028         if (fd < 0) {
7029                 struct file *f = mddev->bitmap_info.file;
7030                 if (f) {
7031                         spin_lock(&mddev->lock);
7032                         mddev->bitmap_info.file = NULL;
7033                         spin_unlock(&mddev->lock);
7034                         fput(f);
7035                 }
7036         }
7037
7038         return err;
7039 }
7040
7041 /*
7042  * set_array_info is used two different ways
7043  * The original usage is when creating a new array.
7044  * In this usage, raid_disks is > 0 and it together with
7045  *  level, size, not_persistent, layout, chunksize determine the
7046  *  shape of the array.
7047  *  This will always create an array with a type-0.90.0 superblock.
7048  * The newer usage is when assembling an array.
7049  *  In this case raid_disks will be 0, and the major_version field is
7050  *  used to determine which style super-blocks are to be found on the devices.
7051  *  The minor and patch _version numbers are also kept in case the
7052  *  super_block handler wishes to interpret them.
7053  */
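/*
 * A minimal, hypothetical user-space sketch of the two usages described
 * above (it assumes an already-open /dev/mdX descriptor and the
 * mdu_array_info_t layout from linux/raid/md_u.h; it is not part of
 * this driver):
 *
 *	mdu_array_info_t info = { 0 };
 *
 *	info.major_version = 1;			// assemble: choose superblock style
 *	ioctl(md_fd, SET_ARRAY_INFO, &info);	// raid_disks == 0
 *
 *	// or create a fresh array with a 0.90.0 superblock:
 *	info.raid_disks = 2;
 *	info.level = 1;
 *	info.size = 1048576;			// per-device size in KiB (1 GiB)
 *	info.chunk_size = 64 * 1024;		// in bytes
 *	ioctl(md_fd, SET_ARRAY_INFO, &info);
 */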
7054 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
7055 {
7056
7057         if (info->raid_disks == 0) {
7058                 /* just setting version number for superblock loading */
7059                 if (info->major_version < 0 ||
7060                     info->major_version >= ARRAY_SIZE(super_types) ||
7061                     super_types[info->major_version].name == NULL) {
7062                         /* maybe try to auto-load a module? */
7063                         pr_warn("md: superblock version %d not known\n",
7064                                 info->major_version);
7065                         return -EINVAL;
7066                 }
7067                 mddev->major_version = info->major_version;
7068                 mddev->minor_version = info->minor_version;
7069                 mddev->patch_version = info->patch_version;
7070                 mddev->persistent = !info->not_persistent;
7071                 /* ensure mddev_put doesn't delete this now that there
7072                  * is some minimal configuration.
7073                  */
7074                 mddev->ctime         = ktime_get_real_seconds();
7075                 return 0;
7076         }
7077         mddev->major_version = MD_MAJOR_VERSION;
7078         mddev->minor_version = MD_MINOR_VERSION;
7079         mddev->patch_version = MD_PATCHLEVEL_VERSION;
7080         mddev->ctime         = ktime_get_real_seconds();
7081
7082         mddev->level         = info->level;
7083         mddev->clevel[0]     = 0;
7084         mddev->dev_sectors   = 2 * (sector_t)info->size;
7085         mddev->raid_disks    = info->raid_disks;
7086         /* don't set md_minor, it is determined by which /dev/md* was
7087          * opened
7088          */
7089         if (info->state & (1<<MD_SB_CLEAN))
7090                 mddev->recovery_cp = MaxSector;
7091         else
7092                 mddev->recovery_cp = 0;
7093         mddev->persistent    = ! info->not_persistent;
7094         mddev->external      = 0;
7095
7096         mddev->layout        = info->layout;
7097         if (mddev->level == 0)
7098                 /* Cannot trust RAID0 layout info here */
7099                 mddev->layout = -1;
7100         mddev->chunk_sectors = info->chunk_size >> 9;
7101
7102         if (mddev->persistent) {
7103                 mddev->max_disks = MD_SB_DISKS;
7104                 mddev->flags = 0;
7105                 mddev->sb_flags = 0;
7106         }
7107         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7108
7109         mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
7110         mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
7111         mddev->bitmap_info.offset = 0;
7112
7113         mddev->reshape_position = MaxSector;
7114
7115         /*
7116          * Generate a 128 bit UUID
7117          */
7118         get_random_bytes(mddev->uuid, 16);
7119
7120         mddev->new_level = mddev->level;
7121         mddev->new_chunk_sectors = mddev->chunk_sectors;
7122         mddev->new_layout = mddev->layout;
7123         mddev->delta_disks = 0;
7124         mddev->reshape_backwards = 0;
7125
7126         return 0;
7127 }
7128
7129 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
7130 {
7131         lockdep_assert_held(&mddev->reconfig_mutex);
7132
7133         if (mddev->external_size)
7134                 return;
7135
7136         mddev->array_sectors = array_sectors;
7137 }
7138 EXPORT_SYMBOL(md_set_array_sectors);
7139
7140 static int update_size(struct mddev *mddev, sector_t num_sectors)
7141 {
7142         struct md_rdev *rdev;
7143         int rv;
7144         int fit = (num_sectors == 0);
7145         sector_t old_dev_sectors = mddev->dev_sectors;
7146
7147         if (mddev->pers->resize == NULL)
7148                 return -EINVAL;
7149         /* The "num_sectors" is the number of sectors of each device that
7150          * is used.  This can only make sense for arrays with redundancy.
7151          * linear and raid0 always use whatever space is available. We can only
7152          * consider changing this number if no resync or reconstruction is
7153          * happening, and if the new size is acceptable. It must fit before the
7154          * sb_start or, if that is <data_offset, it must fit before the size
7155          * of each device.  If num_sectors is zero, we find the largest size
7156          * that fits.
7157          */
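        /*
         * Worked example of the rule above: with members offering 1000,
         * 1200 and 1500 GiB of usable space, num_sectors == 0 resolves to
         * the smallest of them (1000 GiB), while explicitly asking for
         * 1200 GiB fails with -ENOSPC because it does not fit on the
         * first member.
         */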
7158         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7159             mddev->sync_thread)
7160                 return -EBUSY;
7161         if (mddev->ro)
7162                 return -EROFS;
7163
7164         rdev_for_each(rdev, mddev) {
7165                 sector_t avail = rdev->sectors;
7166
7167                 if (fit && (num_sectors == 0 || num_sectors > avail))
7168                         num_sectors = avail;
7169                 if (avail < num_sectors)
7170                         return -ENOSPC;
7171         }
7172         rv = mddev->pers->resize(mddev, num_sectors);
7173         if (!rv) {
7174                 if (mddev_is_clustered(mddev))
7175                         md_cluster_ops->update_size(mddev, old_dev_sectors);
7176                 else if (mddev->queue) {
7177                         set_capacity(mddev->gendisk, mddev->array_sectors);
7178                         revalidate_disk(mddev->gendisk);
7179                 }
7180         }
7181         return rv;
7182 }
7183
7184 static int update_raid_disks(struct mddev *mddev, int raid_disks)
7185 {
7186         int rv;
7187         struct md_rdev *rdev;
7188         /* change the number of raid disks */
7189         if (mddev->pers->check_reshape == NULL)
7190                 return -EINVAL;
7191         if (mddev->ro)
7192                 return -EROFS;
7193         if (raid_disks <= 0 ||
7194             (mddev->max_disks && raid_disks >= mddev->max_disks))
7195                 return -EINVAL;
7196         if (mddev->sync_thread ||
7197             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7198             mddev->reshape_position != MaxSector)
7199                 return -EBUSY;
7200
7201         rdev_for_each(rdev, mddev) {
7202                 if (mddev->raid_disks < raid_disks &&
7203                     rdev->data_offset < rdev->new_data_offset)
7204                         return -EINVAL;
7205                 if (mddev->raid_disks > raid_disks &&
7206                     rdev->data_offset > rdev->new_data_offset)
7207                         return -EINVAL;
7208         }
7209
7210         mddev->delta_disks = raid_disks - mddev->raid_disks;
7211         if (mddev->delta_disks < 0)
7212                 mddev->reshape_backwards = 1;
7213         else if (mddev->delta_disks > 0)
7214                 mddev->reshape_backwards = 0;
7215
7216         rv = mddev->pers->check_reshape(mddev);
7217         if (rv < 0) {
7218                 mddev->delta_disks = 0;
7219                 mddev->reshape_backwards = 0;
7220         }
7221         return rv;
7222 }
7223
7224 /*
7225  * update_array_info is used to change the configuration of an
7226  * on-line array.
7227  * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
7228  * fields in the info are checked against the array.
7229  * Any differences that cannot be handled will cause an error.
7230  * Normally, only one change can be managed at a time.
7231  */
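/*
 * A hypothetical caller-side sketch of the "one change at a time" rule
 * (not part of this file): to add an internal bitmap, user space would
 * re-submit the current geometry and flip only the bitmap bit:
 *
 *	ioctl(md_fd, GET_ARRAY_INFO, &info);
 *	info.state |= (1 << MD_SB_BITMAP_PRESENT);
 *	ioctl(md_fd, SET_ARRAY_INFO, &info);
 *
 * Also changing, say, raid_disks in the same call would be rejected with
 * -EINVAL by the cnt check below.
 */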
7232 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
7233 {
7234         int rv = 0;
7235         int cnt = 0;
7236         int state = 0;
7237
7238         /* calculate expected state, ignoring low bits */
7239         if (mddev->bitmap && mddev->bitmap_info.offset)
7240                 state |= (1 << MD_SB_BITMAP_PRESENT);
7241
7242         if (mddev->major_version != info->major_version ||
7243             mddev->minor_version != info->minor_version ||
7244 /*          mddev->patch_version != info->patch_version || */
7245             mddev->ctime         != info->ctime         ||
7246             mddev->level         != info->level         ||
7247 /*          mddev->layout        != info->layout        || */
7248             mddev->persistent    != !info->not_persistent ||
7249             mddev->chunk_sectors != info->chunk_size >> 9 ||
7250             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
7251             ((state^info->state) & 0xfffffe00)
7252                 )
7253                 return -EINVAL;
7254         /* Check there is only one change */
7255         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7256                 cnt++;
7257         if (mddev->raid_disks != info->raid_disks)
7258                 cnt++;
7259         if (mddev->layout != info->layout)
7260                 cnt++;
7261         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
7262                 cnt++;
7263         if (cnt == 0)
7264                 return 0;
7265         if (cnt > 1)
7266                 return -EINVAL;
7267
7268         if (mddev->layout != info->layout) {
7269                 /* Change layout
7270                  * we don't need to do anything at the md level, the
7271                  * personality will take care of it all.
7272                  */
7273                 if (mddev->pers->check_reshape == NULL)
7274                         return -EINVAL;
7275                 else {
7276                         mddev->new_layout = info->layout;
7277                         rv = mddev->pers->check_reshape(mddev);
7278                         if (rv)
7279                                 mddev->new_layout = mddev->layout;
7280                         return rv;
7281                 }
7282         }
7283         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7284                 rv = update_size(mddev, (sector_t)info->size * 2);
7285
7286         if (mddev->raid_disks    != info->raid_disks)
7287                 rv = update_raid_disks(mddev, info->raid_disks);
7288
7289         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
7290                 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
7291                         rv = -EINVAL;
7292                         goto err;
7293                 }
7294                 if (mddev->recovery || mddev->sync_thread) {
7295                         rv = -EBUSY;
7296                         goto err;
7297                 }
7298                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
7299                         struct bitmap *bitmap;
7300                         /* add the bitmap */
7301                         if (mddev->bitmap) {
7302                                 rv = -EEXIST;
7303                                 goto err;
7304                         }
7305                         if (mddev->bitmap_info.default_offset == 0) {
7306                                 rv = -EINVAL;
7307                                 goto err;
7308                         }
7309                         mddev->bitmap_info.offset =
7310                                 mddev->bitmap_info.default_offset;
7311                         mddev->bitmap_info.space =
7312                                 mddev->bitmap_info.default_space;
7313                         bitmap = md_bitmap_create(mddev, -1);
7314                         mddev_suspend(mddev);
7315                         if (!IS_ERR(bitmap)) {
7316                                 mddev->bitmap = bitmap;
7317                                 rv = md_bitmap_load(mddev);
7318                         } else
7319                                 rv = PTR_ERR(bitmap);
7320                         if (rv)
7321                                 md_bitmap_destroy(mddev);
7322                         mddev_resume(mddev);
7323                 } else {
7324                         /* remove the bitmap */
7325                         if (!mddev->bitmap) {
7326                                 rv = -ENOENT;
7327                                 goto err;
7328                         }
7329                         if (mddev->bitmap->storage.file) {
7330                                 rv = -EINVAL;
7331                                 goto err;
7332                         }
7333                         if (mddev->bitmap_info.nodes) {
7334                                 /* hold PW on all the bitmap locks */
7335                                 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
7336                                         pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
7337                                         rv = -EPERM;
7338                                         md_cluster_ops->unlock_all_bitmaps(mddev);
7339                                         goto err;
7340                                 }
7341
7342                                 mddev->bitmap_info.nodes = 0;
7343                                 md_cluster_ops->leave(mddev);
7344                         }
7345                         mddev_suspend(mddev);
7346                         md_bitmap_destroy(mddev);
7347                         mddev_resume(mddev);
7348                         mddev->bitmap_info.offset = 0;
7349                 }
7350         }
7351         md_update_sb(mddev, 1);
7352         return rv;
7353 err:
7354         return rv;
7355 }
7356
7357 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
7358 {
7359         struct md_rdev *rdev;
7360         int err = 0;
7361
7362         if (mddev->pers == NULL)
7363                 return -ENODEV;
7364
7365         rcu_read_lock();
7366         rdev = md_find_rdev_rcu(mddev, dev);
7367         if (!rdev)
7368                 err =  -ENODEV;
7369         else {
7370                 md_error(mddev, rdev);
7371                 if (!test_bit(Faulty, &rdev->flags))
7372                         err = -EBUSY;
7373         }
7374         rcu_read_unlock();
7375         return err;
7376 }
7377
7378 /*
7379  * We have a problem here : there is no easy way to give a CHS
7380  * virtual geometry. We currently pretend that we have a 2 heads
7381  * 4 sectors (with a BIG number of cylinders...). This drives
7382  * dosfs just mad... ;-)
7383  */
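/*
 * Worked example: a 200 MiB array is 409600 sectors, so it reports
 * 2 heads * 4 sectors * 51200 cylinders.  Larger arrays overflow the
 * 16-bit cylinders field, hence the wry comment above.
 */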
7384 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
7385 {
7386         struct mddev *mddev = bdev->bd_disk->private_data;
7387
7388         geo->heads = 2;
7389         geo->sectors = 4;
7390         geo->cylinders = mddev->array_sectors / 8;
7391         return 0;
7392 }
7393
7394 static inline bool md_ioctl_valid(unsigned int cmd)
7395 {
7396         switch (cmd) {
7397         case ADD_NEW_DISK:
7398         case BLKROSET:
7399         case GET_ARRAY_INFO:
7400         case GET_BITMAP_FILE:
7401         case GET_DISK_INFO:
7402         case HOT_ADD_DISK:
7403         case HOT_REMOVE_DISK:
7404         case RAID_AUTORUN:
7405         case RAID_VERSION:
7406         case RESTART_ARRAY_RW:
7407         case RUN_ARRAY:
7408         case SET_ARRAY_INFO:
7409         case SET_BITMAP_FILE:
7410         case SET_DISK_FAULTY:
7411         case STOP_ARRAY:
7412         case STOP_ARRAY_RO:
7413         case CLUSTERED_DISK_NACK:
7414                 return true;
7415         default:
7416                 return false;
7417         }
7418 }
7419
7420 static int md_ioctl(struct block_device *bdev, fmode_t mode,
7421                         unsigned int cmd, unsigned long arg)
7422 {
7423         int err = 0;
7424         void __user *argp = (void __user *)arg;
7425         struct mddev *mddev = NULL;
7426         int ro;
7427         bool did_set_md_closing = false;
7428
7429         if (!md_ioctl_valid(cmd))
7430                 return -ENOTTY;
7431
7432         switch (cmd) {
7433         case RAID_VERSION:
7434         case GET_ARRAY_INFO:
7435         case GET_DISK_INFO:
7436                 break;
7437         default:
7438                 if (!capable(CAP_SYS_ADMIN))
7439                         return -EACCES;
7440         }
7441
7442         /*
7443          * Commands dealing with the RAID driver but not any
7444          * particular array:
7445          */
7446         switch (cmd) {
7447         case RAID_VERSION:
7448                 err = get_version(argp);
7449                 goto out;
7450
7451 #ifndef MODULE
7452         case RAID_AUTORUN:
7453                 err = 0;
7454                 autostart_arrays(arg);
7455                 goto out;
7456 #endif
7457         default:;
7458         }
7459
7460         /*
7461          * Commands creating/starting a new array:
7462          */
7463
7464         mddev = bdev->bd_disk->private_data;
7465
7466         if (!mddev) {
7467                 BUG();
7468                 goto out;
7469         }
7470
7471         /* Some actions do not require the mutex */
7472         switch (cmd) {
7473         case GET_ARRAY_INFO:
7474                 if (!mddev->raid_disks && !mddev->external)
7475                         err = -ENODEV;
7476                 else
7477                         err = get_array_info(mddev, argp);
7478                 goto out;
7479
7480         case GET_DISK_INFO:
7481                 if (!mddev->raid_disks && !mddev->external)
7482                         err = -ENODEV;
7483                 else
7484                         err = get_disk_info(mddev, argp);
7485                 goto out;
7486
7487         case SET_DISK_FAULTY:
7488                 err = set_disk_faulty(mddev, new_decode_dev(arg));
7489                 goto out;
7490
7491         case GET_BITMAP_FILE:
7492                 err = get_bitmap_file(mddev, argp);
7493                 goto out;
7494
7495         }
7496
7497         if (cmd == ADD_NEW_DISK)
7498                 /* need to ensure md_delayed_delete() has completed */
7499                 flush_workqueue(md_misc_wq);
7500
7501         if (cmd == HOT_REMOVE_DISK)
7502                 /* need to ensure recovery thread has run */
7503                 wait_event_interruptible_timeout(mddev->sb_wait,
7504                                                  !test_bit(MD_RECOVERY_NEEDED,
7505                                                            &mddev->recovery),
7506                                                  msecs_to_jiffies(5000));
7507         if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
7508                 /* Need to flush page cache, and ensure no-one else opens
7509                  * and writes
7510                  */
7511                 mutex_lock(&mddev->open_mutex);
7512                 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
7513                         mutex_unlock(&mddev->open_mutex);
7514                         err = -EBUSY;
7515                         goto out;
7516                 }
7517                 WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags));
7518                 set_bit(MD_CLOSING, &mddev->flags);
7519                 did_set_md_closing = true;
7520                 mutex_unlock(&mddev->open_mutex);
7521                 sync_blockdev(bdev);
7522         }
7523         err = mddev_lock(mddev);
7524         if (err) {
7525                 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
7526                          err, cmd);
7527                 goto out;
7528         }
7529
7530         if (cmd == SET_ARRAY_INFO) {
7531                 mdu_array_info_t info;
7532                 if (!arg)
7533                         memset(&info, 0, sizeof(info));
7534                 else if (copy_from_user(&info, argp, sizeof(info))) {
7535                         err = -EFAULT;
7536                         goto unlock;
7537                 }
7538                 if (mddev->pers) {
7539                         err = update_array_info(mddev, &info);
7540                         if (err) {
7541                                 pr_warn("md: couldn't update array info. %d\n", err);
7542                                 goto unlock;
7543                         }
7544                         goto unlock;
7545                 }
7546                 if (!list_empty(&mddev->disks)) {
7547                         pr_warn("md: array %s already has disks!\n", mdname(mddev));
7548                         err = -EBUSY;
7549                         goto unlock;
7550                 }
7551                 if (mddev->raid_disks) {
7552                         pr_warn("md: array %s already initialised!\n", mdname(mddev));
7553                         err = -EBUSY;
7554                         goto unlock;
7555                 }
7556                 err = set_array_info(mddev, &info);
7557                 if (err) {
7558                         pr_warn("md: couldn't set array info. %d\n", err);
7559                         goto unlock;
7560                 }
7561                 goto unlock;
7562         }
7563
7564         /*
7565          * Commands querying/configuring an existing array:
7566          */
7567         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
7568          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
7569         if ((!mddev->raid_disks && !mddev->external)
7570             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
7571             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
7572             && cmd != GET_BITMAP_FILE) {
7573                 err = -ENODEV;
7574                 goto unlock;
7575         }
7576
7577         /*
7578          * Commands even a read-only array can execute:
7579          */
7580         switch (cmd) {
7581         case RESTART_ARRAY_RW:
7582                 err = restart_array(mddev);
7583                 goto unlock;
7584
7585         case STOP_ARRAY:
7586                 err = do_md_stop(mddev, 0, bdev);
7587                 goto unlock;
7588
7589         case STOP_ARRAY_RO:
7590                 err = md_set_readonly(mddev, bdev);
7591                 goto unlock;
7592
7593         case HOT_REMOVE_DISK:
7594                 err = hot_remove_disk(mddev, new_decode_dev(arg));
7595                 goto unlock;
7596
7597         case ADD_NEW_DISK:
7598                 /* We can support ADD_NEW_DISK on read-only arrays
7599                  * only if we are re-adding a preexisting device.
7600                  * So require mddev->pers and MD_DISK_SYNC.
7601                  */
7602                 if (mddev->pers) {
7603                         mdu_disk_info_t info;
7604                         if (copy_from_user(&info, argp, sizeof(info)))
7605                                 err = -EFAULT;
7606                         else if (!(info.state & (1<<MD_DISK_SYNC)))
7607                                 /* Need to clear read-only for this */
7608                                 break;
7609                         else
7610                                 err = add_new_disk(mddev, &info);
7611                         goto unlock;
7612                 }
7613                 break;
7614
7615         case BLKROSET:
7616                 if (get_user(ro, (int __user *)(arg))) {
7617                         err = -EFAULT;
7618                         goto unlock;
7619                 }
7620                 err = -EINVAL;
7621
7622                 /* if the bdev is going readonly the value of mddev->ro
7623                  * does not matter, no writes are coming
7624                  */
7625                 if (ro)
7626                         goto unlock;
7627
7628                 /* are we already prepared for writes? */
7629                 if (mddev->ro != 1)
7630                         goto unlock;
7631
7632                 /* transitioning to readauto need only happen for
7633                  * arrays that call md_write_start
7634                  */
7635                 if (mddev->pers) {
7636                         err = restart_array(mddev);
7637                         if (err == 0) {
7638                                 mddev->ro = 2;
7639                                 set_disk_ro(mddev->gendisk, 0);
7640                         }
7641                 }
7642                 goto unlock;
7643         }
7644
7645         /*
7646          * The remaining ioctls are changing the state of the
7647          * superblock, so we do not allow them on read-only arrays.
7648          */
7649         if (mddev->ro && mddev->pers) {
7650                 if (mddev->ro == 2) {
7651                         mddev->ro = 0;
7652                         sysfs_notify_dirent_safe(mddev->sysfs_state);
7653                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7654                         /* mddev_unlock will wake thread */
7655                         /* If a device failed while we were read-only, we
7656                          * need to make sure the metadata is updated now.
7657                          */
7658                         if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
7659                                 mddev_unlock(mddev);
7660                                 wait_event(mddev->sb_wait,
7661                                            !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7662                                            !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
7663                                 mddev_lock_nointr(mddev);
7664                         }
7665                 } else {
7666                         err = -EROFS;
7667                         goto unlock;
7668                 }
7669         }
7670
7671         switch (cmd) {
7672         case ADD_NEW_DISK:
7673         {
7674                 mdu_disk_info_t info;
7675                 if (copy_from_user(&info, argp, sizeof(info)))
7676                         err = -EFAULT;
7677                 else
7678                         err = add_new_disk(mddev, &info);
7679                 goto unlock;
7680         }
7681
7682         case CLUSTERED_DISK_NACK:
7683                 if (mddev_is_clustered(mddev))
7684                         md_cluster_ops->new_disk_ack(mddev, false);
7685                 else
7686                         err = -EINVAL;
7687                 goto unlock;
7688
7689         case HOT_ADD_DISK:
7690                 err = hot_add_disk(mddev, new_decode_dev(arg));
7691                 goto unlock;
7692
7693         case RUN_ARRAY:
7694                 err = do_md_run(mddev);
7695                 goto unlock;
7696
7697         case SET_BITMAP_FILE:
7698                 err = set_bitmap_file(mddev, (int)arg);
7699                 goto unlock;
7700
7701         default:
7702                 err = -EINVAL;
7703                 goto unlock;
7704         }
7705
7706 unlock:
7707         if (mddev->hold_active == UNTIL_IOCTL &&
7708             err != -EINVAL)
7709                 mddev->hold_active = 0;
7710         mddev_unlock(mddev);
7711 out:
7712         if (did_set_md_closing)
7713                 clear_bit(MD_CLOSING, &mddev->flags);
7714         return err;
7715 }
7716 #ifdef CONFIG_COMPAT
7717 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7718                     unsigned int cmd, unsigned long arg)
7719 {
7720         switch (cmd) {
7721         case HOT_REMOVE_DISK:
7722         case HOT_ADD_DISK:
7723         case SET_DISK_FAULTY:
7724         case SET_BITMAP_FILE:
7725                 /* These take in integer arg, do not convert */
7726                 break;
7727         default:
7728                 arg = (unsigned long)compat_ptr(arg);
7729                 break;
7730         }
7731
7732         return md_ioctl(bdev, mode, cmd, arg);
7733 }
7734 #endif /* CONFIG_COMPAT */
7735
7736 static int md_open(struct block_device *bdev, fmode_t mode)
7737 {
7738         /*
7739          * Succeed if we can lock the mddev, which confirms that
7740          * it isn't being stopped right now.
7741          */
7742         struct mddev *mddev = mddev_find(bdev->bd_dev);
7743         int err;
7744
7745         if (!mddev)
7746                 return -ENODEV;
7747
7748         if (mddev->gendisk != bdev->bd_disk) {
7749                 /* we are racing with mddev_put which is discarding this
7750                  * bd_disk.
7751                  */
7752                 mddev_put(mddev);
7753                 /* Wait until bdev->bd_disk is definitely gone */
7754                 flush_workqueue(md_misc_wq);
7755                 /* Then retry the open from the top */
7756                 return -ERESTARTSYS;
7757         }
7758         BUG_ON(mddev != bdev->bd_disk->private_data);
7759
7760         if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
7761                 goto out;
7762
7763         if (test_bit(MD_CLOSING, &mddev->flags)) {
7764                 mutex_unlock(&mddev->open_mutex);
7765                 err = -ENODEV;
7766                 goto out;
7767         }
7768
7769         err = 0;
7770         atomic_inc(&mddev->openers);
7771         mutex_unlock(&mddev->open_mutex);
7772
7773         check_disk_change(bdev);
7774  out:
7775         if (err)
7776                 mddev_put(mddev);
7777         return err;
7778 }
7779
7780 static void md_release(struct gendisk *disk, fmode_t mode)
7781 {
7782         struct mddev *mddev = disk->private_data;
7783
7784         BUG_ON(!mddev);
7785         atomic_dec(&mddev->openers);
7786         mddev_put(mddev);
7787 }
7788
7789 static int md_media_changed(struct gendisk *disk)
7790 {
7791         struct mddev *mddev = disk->private_data;
7792
7793         return mddev->changed;
7794 }
7795
7796 static int md_revalidate(struct gendisk *disk)
7797 {
7798         struct mddev *mddev = disk->private_data;
7799
7800         mddev->changed = 0;
7801         return 0;
7802 }
7803 static const struct block_device_operations md_fops =
7804 {
7805         .owner          = THIS_MODULE,
7806         .open           = md_open,
7807         .release        = md_release,
7808         .ioctl          = md_ioctl,
7809 #ifdef CONFIG_COMPAT
7810         .compat_ioctl   = md_compat_ioctl,
7811 #endif
7812         .getgeo         = md_getgeo,
7813         .media_changed  = md_media_changed,
7814         .revalidate_disk= md_revalidate,
7815 };
7816
7817 static int md_thread(void *arg)
7818 {
7819         struct md_thread *thread = arg;
7820
7821         /*
7822          * md_thread is a 'system-thread'; its priority should be very
7823          * high. We avoid resource deadlocks individually in each
7824          * raid personality. (RAID5 does preallocation) We also use RR and
7825          * the very same RT priority as kswapd, thus we will never get
7826          * into a priority inversion deadlock.
7827          *
7828          * we definitely have to have equal or higher priority than
7829          * bdflush, otherwise bdflush will deadlock if there are too
7830          * many dirty RAID5 blocks.
7831          */
7832
7833         allow_signal(SIGKILL);
7834         while (!kthread_should_stop()) {
7835
7836                 /* We need to wait INTERRUPTIBLE so that
7837                  * we don't add to the load-average.
7838                  * That means we need to be sure no signals are
7839                  * pending
7840                  */
7841                 if (signal_pending(current))
7842                         flush_signals(current);
7843
7844                 wait_event_interruptible_timeout
7845                         (thread->wqueue,
7846                          test_bit(THREAD_WAKEUP, &thread->flags)
7847                          || kthread_should_stop() || kthread_should_park(),
7848                          thread->timeout);
7849
7850                 clear_bit(THREAD_WAKEUP, &thread->flags);
7851                 if (kthread_should_park())
7852                         kthread_parkme();
7853                 if (!kthread_should_stop())
7854                         thread->run(thread);
7855         }
7856
7857         return 0;
7858 }
7859
7860 void md_wakeup_thread(struct md_thread *thread)
7861 {
7862         if (thread) {
7863                 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
7864                 set_bit(THREAD_WAKEUP, &thread->flags);
7865                 wake_up(&thread->wqueue);
7866         }
7867 }
7868 EXPORT_SYMBOL(md_wakeup_thread);
7869
7870 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7871                 struct mddev *mddev, const char *name)
7872 {
7873         struct md_thread *thread;
7874
7875         thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
7876         if (!thread)
7877                 return NULL;
7878
7879         init_waitqueue_head(&thread->wqueue);
7880
7881         thread->run = run;
7882         thread->mddev = mddev;
7883         thread->timeout = MAX_SCHEDULE_TIMEOUT;
7884         thread->tsk = kthread_run(md_thread, thread,
7885                                   "%s_%s",
7886                                   mdname(thread->mddev),
7887                                   name);
7888         if (IS_ERR(thread->tsk)) {
7889                 kfree(thread);
7890                 return NULL;
7891         }
7892         return thread;
7893 }
7894 EXPORT_SYMBOL(md_register_thread);
7895
7896 void md_unregister_thread(struct md_thread **threadp)
7897 {
7898         struct md_thread *thread = *threadp;
7899         if (!thread)
7900                 return;
7901         pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
7902         /* Locking ensures that mddev_unlock does not wake_up a
7903          * non-existent thread
7904          */
7905         spin_lock(&pers_lock);
7906         *threadp = NULL;
7907         spin_unlock(&pers_lock);
7908
7909         kthread_stop(thread->tsk);
7910         kfree(thread);
7911 }
7912 EXPORT_SYMBOL(md_unregister_thread);
7913
7914 void md_error(struct mddev *mddev, struct md_rdev *rdev)
7915 {
7916         if (!rdev || test_bit(Faulty, &rdev->flags))
7917                 return;
7918
7919         if (!mddev->pers || !mddev->pers->error_handler)
7920                 return;
7921         mddev->pers->error_handler(mddev,rdev);
7922         if (mddev->degraded)
7923                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7924         sysfs_notify_dirent_safe(rdev->sysfs_state);
7925         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7926         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7927         md_wakeup_thread(mddev->thread);
7928         if (mddev->event_work.func)
7929                 queue_work(md_misc_wq, &mddev->event_work);
7930         md_new_event(mddev);
7931 }
7932 EXPORT_SYMBOL(md_error);
7933
7934 /* seq_file implementation for /proc/mdstat */
7935
7936 static void status_unused(struct seq_file *seq)
7937 {
7938         int i = 0;
7939         struct md_rdev *rdev;
7940
7941         seq_printf(seq, "unused devices: ");
7942
7943         list_for_each_entry(rdev, &pending_raid_disks, same_set) {
7944                 char b[BDEVNAME_SIZE];
7945                 i++;
7946                 seq_printf(seq, "%s ",
7947                               bdevname(rdev->bdev,b));
7948         }
7949         if (!i)
7950                 seq_printf(seq, "<none>");
7951
7952         seq_printf(seq, "\n");
7953 }
7954
7955 static int status_resync(struct seq_file *seq, struct mddev *mddev)
7956 {
7957         sector_t max_sectors, resync, res;
7958         unsigned long dt, db = 0;
7959         sector_t rt, curr_mark_cnt, resync_mark_cnt;
7960         int scale, recovery_active;
7961         unsigned int per_milli;
7962
7963         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7964             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7965                 max_sectors = mddev->resync_max_sectors;
7966         else
7967                 max_sectors = mddev->dev_sectors;
7968
7969         resync = mddev->curr_resync;
7970         if (resync <= 3) {
7971                 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7972                         /* Still cleaning up */
7973                         resync = max_sectors;
7974         } else if (resync > max_sectors)
7975                 resync = max_sectors;
7976         else
7977                 resync -= atomic_read(&mddev->recovery_active);
7978
7979         if (resync == 0) {
7980                 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
7981                         struct md_rdev *rdev;
7982
7983                         rdev_for_each(rdev, mddev)
7984                                 if (rdev->raid_disk >= 0 &&
7985                                     !test_bit(Faulty, &rdev->flags) &&
7986                                     rdev->recovery_offset != MaxSector &&
7987                                     rdev->recovery_offset) {
7988                                         seq_printf(seq, "\trecover=REMOTE");
7989                                         return 1;
7990                                 }
7991                         if (mddev->reshape_position != MaxSector)
7992                                 seq_printf(seq, "\treshape=REMOTE");
7993                         else
7994                                 seq_printf(seq, "\tresync=REMOTE");
7995                         return 1;
7996                 }
7997                 if (mddev->recovery_cp < MaxSector) {
7998                         seq_printf(seq, "\tresync=PENDING");
7999                         return 1;
8000                 }
8001                 return 0;
8002         }
8003         if (resync < 3) {
8004                 seq_printf(seq, "\tresync=DELAYED");
8005                 return 1;
8006         }
8007
8008         WARN_ON(max_sectors == 0);
8009         /* Pick 'scale' such that (resync>>scale)*1000 will fit
8010          * in a sector_t, and (max_sectors>>scale) will fit in a
8011          * u32, as those are the requirements for sector_div.
8012          * Thus 'scale' must be at least 10
8013          */
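        /*
         * Worked example: for a 4 TiB resync target max_sectors is 2^33,
         * so max_sectors/2 = 2^32 already fits below 1ULL << (10 + 32)
         * and scale stays at 10.  Halfway through (resync = 2^32) this
         * gives res = (2^32 >> 10) * 1000 = 4194304000 divided by
         * (2^33 >> 10) + 1 = 8388609, i.e. per_milli = 499 -> "49.9%".
         */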
8014         scale = 10;
8015         if (sizeof(sector_t) > sizeof(unsigned long)) {
8016                 while ( max_sectors/2 > (1ULL<<(scale+32)))
8017                         scale++;
8018         }
8019         res = (resync>>scale)*1000;
8020         sector_div(res, (u32)((max_sectors>>scale)+1));
8021
8022         per_milli = res;
8023         {
8024                 int i, x = per_milli/50, y = 20-x;
8025                 seq_printf(seq, "[");
8026                 for (i = 0; i < x; i++)
8027                         seq_printf(seq, "=");
8028                 seq_printf(seq, ">");
8029                 for (i = 0; i < y; i++)
8030                         seq_printf(seq, ".");
8031                 seq_printf(seq, "] ");
8032         }
8033         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
8034                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
8035                     "reshape" :
8036                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
8037                      "check" :
8038                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
8039                       "resync" : "recovery"))),
8040                    per_milli/10, per_milli % 10,
8041                    (unsigned long long) resync/2,
8042                    (unsigned long long) max_sectors/2);
8043
8044         /*
8045          * dt: time from mark until now
8046          * db: blocks written from mark until now
8047          * rt: remaining time
8048          *
8049          * rt is a sector_t, which is always 64bit now. We are keeping
8050          * the original algorithm, but it is not really necessary.
8051          *
8052          * Original algorithm:
8053          *   So we divide before multiply in case it is 32bit and close
8054          *   to the limit.
8055          *   We scale the divisor (db) by 32 to avoid losing precision
8056          *   near the end of resync when the number of remaining sectors
8057          *   is close to 'db'.
8058          *   We then divide rt by 32 after multiplying by db to compensate.
8059          *   The '+1' avoids division by zero if db is very small.
8060          */
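        /*
         * Worked example of the estimate below: if 10 seconds have passed
         * since the mark (dt = 10), 204800 sectors completed since then
         * (db = 204800) and 2048000 sectors remain, then
         * rt = 2048000 / (204800/32 + 1) = 319, then rt *= dt -> 3190 and
         * rt >>= 5 -> 99 seconds, printed as "finish=1.6min" together
         * with "speed=10240K/sec" (db/2/dt).
         */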
8061         dt = ((jiffies - mddev->resync_mark) / HZ);
8062         if (!dt) dt++;
8063
8064         curr_mark_cnt = mddev->curr_mark_cnt;
8065         recovery_active = atomic_read(&mddev->recovery_active);
8066         resync_mark_cnt = mddev->resync_mark_cnt;
8067
8068         if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
8069                 db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
8070
8071         rt = max_sectors - resync;    /* number of remaining sectors */
8072         rt = div64_u64(rt, db/32+1);
8073         rt *= dt;
8074         rt >>= 5;
8075
8076         seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
8077                    ((unsigned long)rt % 60)/6);
8078
8079         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
8080         return 1;
8081 }
8082
8083 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
8084 {
8085         struct list_head *tmp;
8086         loff_t l = *pos;
8087         struct mddev *mddev;
8088
8089         if (l >= 0x10000)
8090                 return NULL;
8091         if (!l--)
8092                 /* header */
8093                 return (void*)1;
8094
8095         spin_lock(&all_mddevs_lock);
8096         list_for_each(tmp,&all_mddevs)
8097                 if (!l--) {
8098                         mddev = list_entry(tmp, struct mddev, all_mddevs);
8099                         mddev_get(mddev);
8100                         spin_unlock(&all_mddevs_lock);
8101                         return mddev;
8102                 }
8103         spin_unlock(&all_mddevs_lock);
8104         if (!l--)
8105                 return (void*)2;/* tail */
8106         return NULL;
8107 }
8108
8109 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
8110 {
8111         struct list_head *tmp;
8112         struct mddev *next_mddev, *mddev = v;
8113
8114         ++*pos;
8115         if (v == (void*)2)
8116                 return NULL;
8117
8118         spin_lock(&all_mddevs_lock);
8119         if (v == (void*)1)
8120                 tmp = all_mddevs.next;
8121         else
8122                 tmp = mddev->all_mddevs.next;
8123         if (tmp != &all_mddevs)
8124                 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
8125         else {
8126                 next_mddev = (void*)2;
8127                 *pos = 0x10000;
8128         }
8129         spin_unlock(&all_mddevs_lock);
8130
8131         if (v != (void*)1)
8132                 mddev_put(mddev);
8133         return next_mddev;
8134
8135 }
8136
8137 static void md_seq_stop(struct seq_file *seq, void *v)
8138 {
8139         struct mddev *mddev = v;
8140
8141         if (mddev && v != (void*)1 && v != (void*)2)
8142                 mddev_put(mddev);
8143 }
8144
8145 static int md_seq_show(struct seq_file *seq, void *v)
8146 {
8147         struct mddev *mddev = v;
8148         sector_t sectors;
8149         struct md_rdev *rdev;
8150
8151         if (v == (void*)1) {
8152                 struct md_personality *pers;
8153                 seq_printf(seq, "Personalities : ");
8154                 spin_lock(&pers_lock);
8155                 list_for_each_entry(pers, &pers_list, list)
8156                         seq_printf(seq, "[%s] ", pers->name);
8157
8158                 spin_unlock(&pers_lock);
8159                 seq_printf(seq, "\n");
8160                 seq->poll_event = atomic_read(&md_event_count);
8161                 return 0;
8162         }
8163         if (v == (void*)2) {
8164                 status_unused(seq);
8165                 return 0;
8166         }
8167
8168         spin_lock(&mddev->lock);
8169         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
8170                 seq_printf(seq, "%s : %sactive", mdname(mddev),
8171                                                 mddev->pers ? "" : "in");
8172                 if (mddev->pers) {
8173                         if (mddev->ro==1)
8174                                 seq_printf(seq, " (read-only)");
8175                         if (mddev->ro==2)
8176                                 seq_printf(seq, " (auto-read-only)");
8177                         seq_printf(seq, " %s", mddev->pers->name);
8178                 }
8179
8180                 sectors = 0;
8181                 rcu_read_lock();
8182                 rdev_for_each_rcu(rdev, mddev) {
8183                         char b[BDEVNAME_SIZE];
8184                         seq_printf(seq, " %s[%d]",
8185                                 bdevname(rdev->bdev,b), rdev->desc_nr);
8186                         if (test_bit(WriteMostly, &rdev->flags))
8187                                 seq_printf(seq, "(W)");
8188                         if (test_bit(Journal, &rdev->flags))
8189                                 seq_printf(seq, "(J)");
8190                         if (test_bit(Faulty, &rdev->flags)) {
8191                                 seq_printf(seq, "(F)");
8192                                 continue;
8193                         }
8194                         if (rdev->raid_disk < 0)
8195                                 seq_printf(seq, "(S)"); /* spare */
8196                         if (test_bit(Replacement, &rdev->flags))
8197                                 seq_printf(seq, "(R)");
8198                         sectors += rdev->sectors;
8199                 }
8200                 rcu_read_unlock();
8201
8202                 if (!list_empty(&mddev->disks)) {
8203                         if (mddev->pers)
8204                                 seq_printf(seq, "\n      %llu blocks",
8205                                            (unsigned long long)
8206                                            mddev->array_sectors / 2);
8207                         else
8208                                 seq_printf(seq, "\n      %llu blocks",
8209                                            (unsigned long long)sectors / 2);
8210                 }
8211                 if (mddev->persistent) {
8212                         if (mddev->major_version != 0 ||
8213                             mddev->minor_version != 90) {
8214                                 seq_printf(seq," super %d.%d",
8215                                            mddev->major_version,
8216                                            mddev->minor_version);
8217                         }
8218                 } else if (mddev->external)
8219                         seq_printf(seq, " super external:%s",
8220                                    mddev->metadata_type);
8221                 else
8222                         seq_printf(seq, " super non-persistent");
8223
8224                 if (mddev->pers) {
8225                         mddev->pers->status(seq, mddev);
8226                         seq_printf(seq, "\n      ");
8227                         if (mddev->pers->sync_request) {
8228                                 if (status_resync(seq, mddev))
8229                                         seq_printf(seq, "\n      ");
8230                         }
8231                 } else
8232                         seq_printf(seq, "\n       ");
8233
8234                 md_bitmap_status(seq, mddev->bitmap);
8235
8236                 seq_printf(seq, "\n");
8237         }
8238         spin_unlock(&mddev->lock);
8239
8240         return 0;
8241 }
8242
8243 static const struct seq_operations md_seq_ops = {
8244         .start  = md_seq_start,
8245         .next   = md_seq_next,
8246         .stop   = md_seq_stop,
8247         .show   = md_seq_show,
8248 };
8249
8250 static int md_seq_open(struct inode *inode, struct file *file)
8251 {
8252         struct seq_file *seq;
8253         int error;
8254
8255         error = seq_open(file, &md_seq_ops);
8256         if (error)
8257                 return error;
8258
8259         seq = file->private_data;
8260         seq->poll_event = atomic_read(&md_event_count);
8261         return error;
8262 }
8263
8264 static int md_unloading;
8265 static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
8266 {
8267         struct seq_file *seq = filp->private_data;
8268         __poll_t mask;
8269
8270         if (md_unloading)
8271                 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
8272         poll_wait(filp, &md_event_waiters, wait);
8273
8274         /* always allow read */
8275         mask = EPOLLIN | EPOLLRDNORM;
8276
8277         if (seq->poll_event != atomic_read(&md_event_count))
8278                 mask |= EPOLLERR | EPOLLPRI;
8279         return mask;
8280 }
8281
8282 static const struct file_operations md_seq_fops = {
8283         .owner          = THIS_MODULE,
8284         .open           = md_seq_open,
8285         .read           = seq_read,
8286         .llseek         = seq_lseek,
8287         .release        = seq_release,
8288         .poll           = mdstat_poll,
8289 };
8290
8291 int register_md_personality(struct md_personality *p)
8292 {
8293         pr_debug("md: %s personality registered for level %d\n",
8294                  p->name, p->level);
8295         spin_lock(&pers_lock);
8296         list_add_tail(&p->list, &pers_list);
8297         spin_unlock(&pers_lock);
8298         return 0;
8299 }
8300 EXPORT_SYMBOL(register_md_personality);
8301
8302 int unregister_md_personality(struct md_personality *p)
8303 {
8304         pr_debug("md: %s personality unregistered\n", p->name);
8305         spin_lock(&pers_lock);
8306         list_del_init(&p->list);
8307         spin_unlock(&pers_lock);
8308         return 0;
8309 }
8310 EXPORT_SYMBOL(unregister_md_personality);
8311
8312 int register_md_cluster_operations(struct md_cluster_operations *ops,
8313                                    struct module *module)
8314 {
8315         int ret = 0;
8316         spin_lock(&pers_lock);
8317         if (md_cluster_ops != NULL)
8318                 ret = -EALREADY;
8319         else {
8320                 md_cluster_ops = ops;
8321                 md_cluster_mod = module;
8322         }
8323         spin_unlock(&pers_lock);
8324         return ret;
8325 }
8326 EXPORT_SYMBOL(register_md_cluster_operations);
8327
8328 int unregister_md_cluster_operations(void)
8329 {
8330         spin_lock(&pers_lock);
8331         md_cluster_ops = NULL;
8332         spin_unlock(&pers_lock);
8333         return 0;
8334 }
8335 EXPORT_SYMBOL(unregister_md_cluster_operations);
8336
8337 int md_setup_cluster(struct mddev *mddev, int nodes)
8338 {
8339         if (!md_cluster_ops)
8340                 request_module("md-cluster");
8341         spin_lock(&pers_lock);
8342         /* ensure module won't be unloaded */
8343         if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
8344                 pr_warn("can't find md-cluster module or get its reference.\n");
8345                 spin_unlock(&pers_lock);
8346                 return -ENOENT;
8347         }
8348         spin_unlock(&pers_lock);
8349
8350         return md_cluster_ops->join(mddev, nodes);
8351 }
8352
8353 void md_cluster_stop(struct mddev *mddev)
8354 {
8355         if (!md_cluster_ops)
8356                 return;
8357         md_cluster_ops->leave(mddev);
8358         module_put(md_cluster_mod);
8359 }
8360
8361 static int is_mddev_idle(struct mddev *mddev, int init)
8362 {
8363         struct md_rdev *rdev;
8364         int idle;
8365         int curr_events;
8366
8367         idle = 1;
8368         rcu_read_lock();
8369         rdev_for_each_rcu(rdev, mddev) {
8370                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
8371                 curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
8372                               atomic_read(&disk->sync_io);
8373                 /* sync IO will cause sync_io to increase before the disk_stats
8374                  * as sync_io is counted when a request starts, and
8375                  * disk_stats is counted when it completes.
8376                  * So resync activity will cause curr_events to be smaller than
8377                  * when there was no such activity.
8378                  * non-sync IO will cause disk_stats to increase without
8379                  * increasing sync_io so curr_events will (eventually)
8380                  * be larger than it was before.  Once it becomes
8381                  * substantially larger, the test below will cause
8382                  * the array to appear non-idle, and resync will slow
8383                  * down.
8384                  * If there is a lot of outstanding resync activity when
8385                  * we set last_event to curr_events, then all that activity
8386                  * completing might cause the array to appear non-idle
8387                  * and resync will be slowed down even though there might
8388                  * not have been non-resync activity.  This will only
8389                  * happen once though.  'last_events' will soon reflect
8390                  * the state where there is little or no outstanding
8391                  * resync requests, and further resync activity will
8392                  * always make curr_events less than last_events.
8393                  *
8394                  */
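                /*
                 * Numeric illustration of the test below: an rdev that saw
                 * only resync I/O since the last check has curr_events at
                 * or below last_events, so it does not disturb the "idle"
                 * state; 128 sectors of ordinary filesystem I/O push
                 * curr_events 128 above last_events, which exceeds the
                 * 64-sector slack and makes the array look busy so the
                 * resync throttles back.
                 */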
8395                 if (init || curr_events - rdev->last_events > 64) {
8396                         rdev->last_events = curr_events;
8397                         idle = 0;
8398                 }
8399         }
8400         rcu_read_unlock();
8401         return idle;
8402 }
8403
8404 void md_done_sync(struct mddev *mddev, int blocks, int ok)
8405 {
8406         /* another "blocks" (512-byte) blocks have been synced */
8407         atomic_sub(blocks, &mddev->recovery_active);
8408         wake_up(&mddev->recovery_wait);
8409         if (!ok) {
8410                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8411                 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
8412                 md_wakeup_thread(mddev->thread);
8413                 // stop recovery, signal do_sync ....
8414         }
8415 }
8416 EXPORT_SYMBOL(md_done_sync);
8417
8418 /* md_write_start(mddev, bi)
8419  * If we need to update some array metadata (e.g. 'active' flag
8420  * in superblock) before writing, schedule a superblock update
8421  * and wait for it to complete.
8422  * A return value of 'false' means that the write wasn't recorded
8423  * and cannot proceed as the array is being suspended.
8424  */
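/*
 * A minimal sketch of how a personality is expected to pair these calls
 * (illustrative only; the real raid1/raid5 request paths carry much more
 * surrounding state):
 *
 *	if (!md_write_start(mddev, bio))
 *		return false;	// md core retries once the array is resumed
 *	// ... queue the write to the member devices ...
 *	// and in the completion path, once per md_write_start()/md_write_inc():
 *	md_write_end(mddev);
 */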
8425 bool md_write_start(struct mddev *mddev, struct bio *bi)
8426 {
8427         int did_change = 0;
8428
8429         if (bio_data_dir(bi) != WRITE)
8430                 return true;
8431
8432         BUG_ON(mddev->ro == 1);
8433         if (mddev->ro == 2) {
8434                 /* need to switch to read/write */
8435                 mddev->ro = 0;
8436                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8437                 md_wakeup_thread(mddev->thread);
8438                 md_wakeup_thread(mddev->sync_thread);
8439                 did_change = 1;
8440         }
8441         rcu_read_lock();
8442         percpu_ref_get(&mddev->writes_pending);
8443         smp_mb(); /* Match smp_mb in set_in_sync() */
8444         if (mddev->safemode == 1)
8445                 mddev->safemode = 0;
8446         /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
8447         if (mddev->in_sync || mddev->sync_checkers) {
8448                 spin_lock(&mddev->lock);
8449                 if (mddev->in_sync) {
8450                         mddev->in_sync = 0;
8451                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8452                         set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8453                         md_wakeup_thread(mddev->thread);
8454                         did_change = 1;
8455                 }
8456                 spin_unlock(&mddev->lock);
8457         }
8458         rcu_read_unlock();
8459         if (did_change)
8460                 sysfs_notify_dirent_safe(mddev->sysfs_state);
8461         if (!mddev->has_superblocks)
8462                 return true;
8463         wait_event(mddev->sb_wait,
8464                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
8465                    mddev->suspended);
8466         if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
8467                 percpu_ref_put(&mddev->writes_pending);
8468                 return false;
8469         }
8470         return true;
8471 }
8472 EXPORT_SYMBOL(md_write_start);
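
/*
 * Illustrative sketch (not from this driver): a hypothetical personality's
 * write path typically brackets array writes with md_write_start() and
 * md_write_end().  Names such as example_make_request() are made up; only
 * the md_write_start()/md_write_end() pairing is taken from the code above.
 *
 *	static bool example_make_request(struct mddev *mddev, struct bio *bio)
 *	{
 *		if (!md_write_start(mddev, bio))
 *			return false;	- array suspended, write not recorded
 *		... submit the write to the member devices ...
 *		md_write_end(mddev);	- from the write-completion path
 *		return true;
 *	}
 */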
8473
8474 /* md_write_inc can only be called when md_write_start() has
8475  * already been called at least once for the current request.
8476  * It increments the counter and is useful when a single request
8477  * is split into several parts.  Each part causes an increment and
8478  * so needs a matching md_write_end().
8479  * Unlike md_write_start(), it is safe to call md_write_inc() inside
8480  * a spinlocked region.
8481  */
8482 void md_write_inc(struct mddev *mddev, struct bio *bi)
8483 {
8484         if (bio_data_dir(bi) != WRITE)
8485                 return;
8486         WARN_ON_ONCE(mddev->in_sync || mddev->ro);
8487         percpu_ref_get(&mddev->writes_pending);
8488 }
8489 EXPORT_SYMBOL(md_write_inc);
8490
8491 void md_write_end(struct mddev *mddev)
8492 {
8493         percpu_ref_put(&mddev->writes_pending);
8494
8495         if (mddev->safemode == 2)
8496                 md_wakeup_thread(mddev->thread);
8497         else if (mddev->safemode_delay)
8498                 /* The roundup() ensures this only performs locking once
8499                  * every ->safemode_delay jiffies
8500                  */
8501                 mod_timer(&mddev->safemode_timer,
8502                           roundup(jiffies, mddev->safemode_delay) +
8503                           mddev->safemode_delay);
8504 }
8505
8506 EXPORT_SYMBOL(md_write_end);
8507
8508 /* md_allow_write(mddev)
8509  * Calling this ensures that the array is marked 'active' so that writes
8510  * may proceed without blocking.  It is important to call this before
8511  * attempting a GFP_KERNEL allocation while holding the mddev lock.
8512  * Must be called with mddev_lock held.
8513  */
8514 void md_allow_write(struct mddev *mddev)
8515 {
8516         if (!mddev->pers)
8517                 return;
8518         if (mddev->ro)
8519                 return;
8520         if (!mddev->pers->sync_request)
8521                 return;
8522
8523         spin_lock(&mddev->lock);
8524         if (mddev->in_sync) {
8525                 mddev->in_sync = 0;
8526                 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8527                 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8528                 if (mddev->safemode_delay &&
8529                     mddev->safemode == 0)
8530                         mddev->safemode = 1;
8531                 spin_unlock(&mddev->lock);
8532                 md_update_sb(mddev, 0);
8533                 sysfs_notify_dirent_safe(mddev->sysfs_state);
8534                 /* wait for the dirty state to be recorded in the metadata */
8535                 wait_event(mddev->sb_wait,
8536                            !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
8537         } else
8538                 spin_unlock(&mddev->lock);
8539 }
8540 EXPORT_SYMBOL_GPL(md_allow_write);
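
/*
 * Illustrative sketch (not from this driver): md_allow_write() is meant to be
 * called before a GFP_KERNEL allocation made while holding the mddev lock, as
 * the comment above describes.  The variable names below are made up.
 *
 *	- called with mddev_lock(mddev) held -
 *	md_allow_write(mddev);
 *	new_bufs = kcalloc(cnt, sizeof(*new_bufs), GFP_KERNEL);
 *	if (!new_bufs)
 *		return -ENOMEM;
 */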
8541
8542 #define SYNC_MARKS      10
8543 #define SYNC_MARK_STEP  (3*HZ)
8544 #define UPDATE_FREQUENCY (5*60*HZ)
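/*
 * Speed accounting (see the mark[]/mark_cnt[] handling below): a ring of
 * SYNC_MARKS samples is advanced every SYNC_MARK_STEP jiffies, so the current
 * speed is averaged over roughly the last 30 seconds, and the checkpoint in
 * curr_resync_completed is refreshed no less often than every UPDATE_FREQUENCY
 * jiffies while resync makes progress.
 */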
8545 void md_do_sync(struct md_thread *thread)
8546 {
8547         struct mddev *mddev = thread->mddev;
8548         struct mddev *mddev2;
8549         unsigned int currspeed = 0, window;
8550         sector_t max_sectors, j, io_sectors, recovery_done;
8551         unsigned long mark[SYNC_MARKS];
8552         unsigned long update_time;
8553         sector_t mark_cnt[SYNC_MARKS];
8554         int last_mark, m;
8555         struct list_head *tmp;
8556         sector_t last_check;
8557         int skipped = 0;
8558         struct md_rdev *rdev;
8559         char *desc, *action = NULL;
8560         struct blk_plug plug;
8561         int ret;
8562
8563         /* just in case the thread restarts... */
8564         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8565             test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
8566                 return;
8567         if (mddev->ro) { /* never try to sync a read-only array */
8568                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8569                 return;
8570         }
8571
8572         if (mddev_is_clustered(mddev)) {
8573                 ret = md_cluster_ops->resync_start(mddev);
8574                 if (ret)
8575                         goto skip;
8576
8577                 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
8578                 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8579                         test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
8580                         test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
8581                      && ((unsigned long long)mddev->curr_resync_completed
8582                          < (unsigned long long)mddev->resync_max_sectors))
8583                         goto skip;
8584         }
8585
8586         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8587                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
8588                         desc = "data-check";
8589                         action = "check";
8590                 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
8591                         desc = "requested-resync";
8592                         action = "repair";
8593                 } else
8594                         desc = "resync";
8595         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8596                 desc = "reshape";
8597         else
8598                 desc = "recovery";
8599
8600         mddev->last_sync_action = action ?: desc;
8601
8602         /* we overload curr_resync somewhat here.
8603          * 0 == not engaged in resync at all
8604          * 2 == checking that there is no conflict with another sync
8605          * 1 == like 2, but have yielded to allow conflicting resync to
8606          *              commence
8607          * other == active in resync - this many blocks
8608          *
8609          * Before starting a resync we must have set curr_resync to
8610          * 2, and then checked that every "conflicting" array has curr_resync
8611          * less than ours.  When we find one that is the same or higher
8612          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
8613          * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
8614          * This will mean we have to start checking from the beginning again.
8615          *
8616          */
8617
8618         do {
8619                 int mddev2_minor = -1;
8620                 mddev->curr_resync = 2;
8621
8622         try_again:
8623                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8624                         goto skip;
8625                 for_each_mddev(mddev2, tmp) {
8626                         if (mddev2 == mddev)
8627                                 continue;
8628                         if (!mddev->parallel_resync
8629                         &&  mddev2->curr_resync
8630                         &&  match_mddev_units(mddev, mddev2)) {
8631                                 DEFINE_WAIT(wq);
8632                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
8633                                         /* arbitrarily yield */
8634                                         mddev->curr_resync = 1;
8635                                         wake_up(&resync_wait);
8636                                 }
8637                                 if (mddev > mddev2 && mddev->curr_resync == 1)
8638                                         /* no need to wait here, we can wait the next
8639                                          * time 'round when curr_resync == 2
8640                                          */
8641                                         continue;
8642                                 /* We need to wait 'interruptible' so as not to
8643                                  * contribute to the load average, and not to
8644                                  * be caught by 'softlockup'
8645                                  */
8646                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
8647                                 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8648                                     mddev2->curr_resync >= mddev->curr_resync) {
8649                                         if (mddev2_minor != mddev2->md_minor) {
8650                                                 mddev2_minor = mddev2->md_minor;
8651                                                 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
8652                                                         desc, mdname(mddev),
8653                                                         mdname(mddev2));
8654                                         }
8655                                         mddev_put(mddev2);
8656                                         if (signal_pending(current))
8657                                                 flush_signals(current);
8658                                         schedule();
8659                                         finish_wait(&resync_wait, &wq);
8660                                         goto try_again;
8661                                 }
8662                                 finish_wait(&resync_wait, &wq);
8663                         }
8664                 }
8665         } while (mddev->curr_resync < 2);
8666
8667         j = 0;
8668         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8669                 /* resync follows the size requested by the personality,
8670                  * which defaults to physical size, but can be virtual size
8671                  */
8672                 max_sectors = mddev->resync_max_sectors;
8673                 atomic64_set(&mddev->resync_mismatches, 0);
8674                 /* we don't use the checkpoint if there's a bitmap */
8675                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8676                         j = mddev->resync_min;
8677                 else if (!mddev->bitmap)
8678                         j = mddev->recovery_cp;
8679
8680         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
8681                 max_sectors = mddev->resync_max_sectors;
8682                 /*
8683                  * If the original node aborts reshaping then we continue the
8684                  * reshaping, so set j again to avoid restarting the
8685                  * reshape from the very beginning
8686                  */
8687                 if (mddev_is_clustered(mddev) &&
8688                     mddev->reshape_position != MaxSector)
8689                         j = mddev->reshape_position;
8690         } else {
8691                 /* recovery follows the physical size of devices */
8692                 max_sectors = mddev->dev_sectors;
8693                 j = MaxSector;
8694                 rcu_read_lock();
8695                 rdev_for_each_rcu(rdev, mddev)
8696                         if (rdev->raid_disk >= 0 &&
8697                             !test_bit(Journal, &rdev->flags) &&
8698                             !test_bit(Faulty, &rdev->flags) &&
8699                             !test_bit(In_sync, &rdev->flags) &&
8700                             rdev->recovery_offset < j)
8701                                 j = rdev->recovery_offset;
8702                 rcu_read_unlock();
8703
8704                 /* If there is a bitmap, we need to make sure all
8705                  * writes that started before we added a spare
8706                  * complete before we start doing a recovery.
8707                  * Otherwise the write might complete and (via
8708                  * bitmap_endwrite) set a bit in the bitmap after the
8709                  * recovery has checked that bit and skipped that
8710                  * region.
8711                  */
8712                 if (mddev->bitmap) {
8713                         mddev->pers->quiesce(mddev, 1);
8714                         mddev->pers->quiesce(mddev, 0);
8715                 }
8716         }
8717
8718         pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
8719         pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
8720         pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
8721                  speed_max(mddev), desc);
8722
8723         is_mddev_idle(mddev, 1); /* this initializes IO event counters */
8724
8725         io_sectors = 0;
8726         for (m = 0; m < SYNC_MARKS; m++) {
8727                 mark[m] = jiffies;
8728                 mark_cnt[m] = io_sectors;
8729         }
8730         last_mark = 0;
8731         mddev->resync_mark = mark[last_mark];
8732         mddev->resync_mark_cnt = mark_cnt[last_mark];
8733
8734         /*
8735          * Tune reconstruction: 'window' is in 512-byte sectors (32 pages' worth).
8736          */
8737         window = 32 * (PAGE_SIZE / 512);
8738         pr_debug("md: using %dk window, over a total of %lluk.\n",
8739                  window/2, (unsigned long long)max_sectors/2);
8740
8741         atomic_set(&mddev->recovery_active, 0);
8742         last_check = 0;
8743
8744         if (j > 2) {
8745                 pr_debug("md: resuming %s of %s from checkpoint.\n",
8746                          desc, mdname(mddev));
8747                 mddev->curr_resync = j;
8748         } else
8749                 mddev->curr_resync = 3; /* no longer delayed */
8750         mddev->curr_resync_completed = j;
8751         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8752         md_new_event(mddev);
8753         update_time = jiffies;
8754
8755         blk_start_plug(&plug);
8756         while (j < max_sectors) {
8757                 sector_t sectors;
8758
8759                 skipped = 0;
8760
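		/*
		 * Refresh curr_resync_completed (the checkpoint exposed via
		 * sysfs) when not reshaping and we have either advanced by
		 * more than 1/16th of the total, UPDATE_FREQUENCY jiffies have
		 * passed, or we are at or near resync_max.
		 */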
8761                 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8762                     ((mddev->curr_resync > mddev->curr_resync_completed &&
8763                       (mddev->curr_resync - mddev->curr_resync_completed)
8764                       > (max_sectors >> 4)) ||
8765                      time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
8766                      (j - mddev->curr_resync_completed)*2
8767                      >= mddev->resync_max - mddev->curr_resync_completed ||
8768                      mddev->curr_resync_completed > mddev->resync_max
8769                             )) {
8770                         /* time to update curr_resync_completed */
8771                         wait_event(mddev->recovery_wait,
8772                                    atomic_read(&mddev->recovery_active) == 0);
8773                         mddev->curr_resync_completed = j;
8774                         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
8775                             j > mddev->recovery_cp)
8776                                 mddev->recovery_cp = j;
8777                         update_time = jiffies;
8778                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8779                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8780                 }
8781
8782                 while (j >= mddev->resync_max &&
8783                        !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8784                         /* As this condition is controlled by user-space,
8785                          * we can block indefinitely, so use '_interruptible'
8786                          * to avoid triggering warnings.
8787                          */
8788                         flush_signals(current); /* just in case */
8789                         wait_event_interruptible(mddev->recovery_wait,
8790                                                  mddev->resync_max > j
8791                                                  || test_bit(MD_RECOVERY_INTR,
8792                                                              &mddev->recovery));
8793                 }
8794
8795                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8796                         break;
8797
8798                 sectors = mddev->pers->sync_request(mddev, j, &skipped);
8799                 if (sectors == 0) {
8800                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8801                         break;
8802                 }
8803
8804                 if (!skipped) { /* actual IO requested */
8805                         io_sectors += sectors;
8806                         atomic_add(sectors, &mddev->recovery_active);
8807                 }
8808
8809                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8810                         break;
8811
8812                 j += sectors;
8813                 if (j > max_sectors)
8814                         /* when skipping, extra large numbers can be returned. */
8815                         j = max_sectors;
8816                 if (j > 2)
8817                         mddev->curr_resync = j;
8818                 mddev->curr_mark_cnt = io_sectors;
8819                 if (last_check == 0)
8820                         /* this is the earliest that rebuild will be
8821                          * visible in /proc/mdstat
8822                          */
8823                         md_new_event(mddev);
8824
8825                 if (last_check + window > io_sectors || j == max_sectors)
8826                         continue;
8827
8828                 last_check = io_sectors;
8829         repeat:
8830                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP)) {
8831                         /* step marks */
8832                         int next = (last_mark+1) % SYNC_MARKS;
8833
8834                         mddev->resync_mark = mark[next];
8835                         mddev->resync_mark_cnt = mark_cnt[next];
8836                         mark[next] = jiffies;
8837                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
8838                         last_mark = next;
8839                 }
8840
8841                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8842                         break;
8843
8844                 /*
8845                  * This loop only exits when either we are slower than
8846                  * the 'hard' speed limit, or the system was IO-idle for
8847                  * a jiffy.
8848                  * The system might be non-idle CPU-wise, but we only care
8849                  * about not overloading the IO subsystem (things like an
8850                  * e2fsck being done on the RAID array should execute quickly).
8851                  */
8852                 cond_resched();
8853
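		/*
		 * currspeed is in KB/sec: sectors completed since the oldest
		 * mark, halved to convert 512-byte sectors to KB, divided by
		 * the elapsed seconds (the two "+1"s avoid a division by zero
		 * and a reported speed of zero).
		 */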
8854                 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
8855                 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
8856                         / ((jiffies - mddev->resync_mark)/HZ + 1) + 1;
8857
8858                 if (currspeed > speed_min(mddev)) {
8859                         if (currspeed > speed_max(mddev)) {
8860                                 msleep(500);
8861                                 goto repeat;
8862                         }
8863                         if (!is_mddev_idle(mddev, 0)) {
8864                                 /*
8865                                  * Give other IO more of a chance.
8866                                  * The faster the devices, the less we wait.
8867                                  */
8868                                 wait_event(mddev->recovery_wait,
8869                                            !atomic_read(&mddev->recovery_active));
8870                         }
8871                 }
8872         }
8873         pr_info("md: %s: %s %s.\n", mdname(mddev), desc,
8874                 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
8875                 ? "interrupted" : "done");
8876         /*
8877          * this also signals 'finished resyncing' to md_stop
8878          */
8879         blk_finish_plug(&plug);
8880         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
8881
8882         if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8883             !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8884             mddev->curr_resync > 3) {
8885                 mddev->curr_resync_completed = mddev->curr_resync;
8886                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8887         }
8888         mddev->pers->sync_request(mddev, max_sectors, &skipped);
8889
8890         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
8891             mddev->curr_resync > 3) {
8892                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8893                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8894                                 if (mddev->curr_resync >= mddev->recovery_cp) {
8895                                         pr_debug("md: checkpointing %s of %s.\n",
8896                                                  desc, mdname(mddev));
8897                                         if (test_bit(MD_RECOVERY_ERROR,
8898                                                 &mddev->recovery))
8899                                                 mddev->recovery_cp =
8900                                                         mddev->curr_resync_completed;
8901                                         else
8902                                                 mddev->recovery_cp =
8903                                                         mddev->curr_resync;
8904                                 }
8905                         } else
8906                                 mddev->recovery_cp = MaxSector;
8907                 } else {
8908                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8909                                 mddev->curr_resync = MaxSector;
8910                         if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8911                             test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
8912                                 rcu_read_lock();
8913                                 rdev_for_each_rcu(rdev, mddev)
8914                                         if (rdev->raid_disk >= 0 &&
8915                                             mddev->delta_disks >= 0 &&
8916                                             !test_bit(Journal, &rdev->flags) &&
8917                                             !test_bit(Faulty, &rdev->flags) &&
8918                                             !test_bit(In_sync, &rdev->flags) &&
8919                                             rdev->recovery_offset < mddev->curr_resync)
8920                                                 rdev->recovery_offset = mddev->curr_resync;
8921                                 rcu_read_unlock();
8922                         }
8923                 }
8924         }
8925  skip:
8926         /* set CHANGE_PENDING here since another update may still be needed,
8927          * so other nodes are informed. It should be harmless for normal
8928          * (non-clustered) raid */
8929         set_mask_bits(&mddev->sb_flags, 0,
8930                       BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
8931
8932         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8933                         !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8934                         mddev->delta_disks > 0 &&
8935                         mddev->pers->finish_reshape &&
8936                         mddev->pers->size &&
8937                         mddev->queue) {
8938                 mddev_lock_nointr(mddev);
8939                 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
8940                 mddev_unlock(mddev);
8941                 if (!mddev_is_clustered(mddev)) {
8942                         set_capacity(mddev->gendisk, mddev->array_sectors);
8943                         revalidate_disk(mddev->gendisk);
8944                 }
8945         }
8946
8947         spin_lock(&mddev->lock);
8948         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8949                 /* We completed so min/max setting can be forgotten if used. */
8950                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8951                         mddev->resync_min = 0;
8952                 mddev->resync_max = MaxSector;
8953         } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8954                 mddev->resync_min = mddev->curr_resync_completed;
8955         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
8956         mddev->curr_resync = 0;
8957         spin_unlock(&mddev->lock);
8958
8959         wake_up(&resync_wait);
8960         md_wakeup_thread(mddev->thread);
8961         return;
8962 }
8963 EXPORT_SYMBOL_GPL(md_do_sync);
8964
8965 static int remove_and_add_spares(struct mddev *mddev,
8966                                  struct md_rdev *this)
8967 {
8968         struct md_rdev *rdev;
8969         int spares = 0;
8970         int removed = 0;
8971         bool remove_some = false;
8972
8973         if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
8974                 /* Mustn't remove devices when resync thread is running */
8975                 return 0;
8976
8977         rdev_for_each(rdev, mddev) {
8978                 if ((this == NULL || rdev == this) &&
8979                     rdev->raid_disk >= 0 &&
8980                     !test_bit(Blocked, &rdev->flags) &&
8981                     test_bit(Faulty, &rdev->flags) &&
8982                     atomic_read(&rdev->nr_pending) == 0) {
8983                         /* Faulty non-Blocked devices with nr_pending == 0
8984                          * never get nr_pending incremented,
8985                          * never get Faulty cleared, and never get Blocked set.
8986                          * So we can synchronize_rcu now rather than once per device
8987                          */
8988                         remove_some = true;
8989                         set_bit(RemoveSynchronized, &rdev->flags);
8990                 }
8991         }
8992
8993         if (remove_some)
8994                 synchronize_rcu();
8995         rdev_for_each(rdev, mddev) {
8996                 if ((this == NULL || rdev == this) &&
8997                     rdev->raid_disk >= 0 &&
8998                     !test_bit(Blocked, &rdev->flags) &&
8999                     ((test_bit(RemoveSynchronized, &rdev->flags) ||
9000                      (!test_bit(In_sync, &rdev->flags) &&
9001                       !test_bit(Journal, &rdev->flags))) &&
9002                     atomic_read(&rdev->nr_pending) == 0)) {
9003                         if (mddev->pers->hot_remove_disk(
9004                                     mddev, rdev) == 0) {
9005                                 sysfs_unlink_rdev(mddev, rdev);
9006                                 rdev->saved_raid_disk = rdev->raid_disk;
9007                                 rdev->raid_disk = -1;
9008                                 removed++;
9009                         }
9010                 }
9011                 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
9012                         clear_bit(RemoveSynchronized, &rdev->flags);
9013         }
9014
9015         if (removed && mddev->kobj.sd)
9016                 sysfs_notify(&mddev->kobj, NULL, "degraded");
9017
9018         if (this && removed)
9019                 goto no_add;
9020
9021         rdev_for_each(rdev, mddev) {
9022                 if (this && this != rdev)
9023                         continue;
9024                 if (test_bit(Candidate, &rdev->flags))
9025                         continue;
9026                 if (rdev->raid_disk >= 0 &&
9027                     !test_bit(In_sync, &rdev->flags) &&
9028                     !test_bit(Journal, &rdev->flags) &&
9029                     !test_bit(Faulty, &rdev->flags))
9030                         spares++;
9031                 if (rdev->raid_disk >= 0)
9032                         continue;
9033                 if (test_bit(Faulty, &rdev->flags))
9034                         continue;
9035                 if (!test_bit(Journal, &rdev->flags)) {
9036                         if (mddev->ro &&
9037                             !(rdev->saved_raid_disk >= 0 &&
9038                                !test_bit(Bitmap_sync, &rdev->flags)))
9039                                 continue;
9040
9041                         rdev->recovery_offset = 0;
9042                 }
9043                 if (mddev->pers->
9044                     hot_add_disk(mddev, rdev) == 0) {
9045                         if (sysfs_link_rdev(mddev, rdev))
9046                                 /* failure here is OK */;
9047                         if (!test_bit(Journal, &rdev->flags))
9048                                 spares++;
9049                         md_new_event(mddev);
9050                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9051                 }
9052         }
9053 no_add:
9054         if (removed)
9055                 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9056         return spares;
9057 }
9058
9059 static void md_start_sync(struct work_struct *ws)
9060 {
9061         struct mddev *mddev = container_of(ws, struct mddev, del_work);
9062
9063         mddev->sync_thread = md_register_thread(md_do_sync,
9064                                                 mddev,
9065                                                 "resync");
9066         if (!mddev->sync_thread) {
9067                 pr_warn("%s: could not start resync thread...\n",
9068                         mdname(mddev));
9069                 /* leave the spares where they are, it shouldn't hurt */
9070                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9071                 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9072                 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9073                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9074                 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9075                 wake_up(&resync_wait);
9076                 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9077                                        &mddev->recovery))
9078                         if (mddev->sysfs_action)
9079                                 sysfs_notify_dirent_safe(mddev->sysfs_action);
9080         } else
9081                 md_wakeup_thread(mddev->sync_thread);
9082         sysfs_notify_dirent_safe(mddev->sysfs_action);
9083         md_new_event(mddev);
9084 }
9085
9086 /*
9087  * This routine is regularly called by all per-raid-array threads to
9088  * deal with generic issues like resync and super-block update.
9089  * Raid personalities that don't have a thread (linear/raid0) do not
9090  * need this as they never do any recovery or update the superblock.
9091  *
9092  * It does not do any resync itself, but rather "forks" off other threads
9093  * to do that as needed.
9094  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
9095  * "->recovery" and create a thread at ->sync_thread.
9096  * When the thread finishes it sets MD_RECOVERY_DONE
9097  * and wakes up this thread, which will reap the sync thread and finish up.
9098  * This thread also removes any faulty devices (with nr_pending == 0).
9099  *
9100  * The overall approach is:
9101  *  1/ if the superblock needs updating, update it.
9102  *  2/ If a recovery thread is running, don't do anything else.
9103  *  3/ If recovery has finished, clean up, possibly marking spares active.
9104  *  4/ If there are any faulty devices, remove them.
9105  *  5/ If array is degraded, try to add spare devices
9106  *  6/ If array has spares or is not in-sync, start a resync thread.
9107  */
9108 void md_check_recovery(struct mddev *mddev)
9109 {
9110         if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
9111                 /* Write superblock - thread that called mddev_suspend()
9112                  * holds reconfig_mutex for us.
9113                  */
9114                 set_bit(MD_UPDATING_SB, &mddev->flags);
9115                 smp_mb__after_atomic();
9116                 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
9117                         md_update_sb(mddev, 0);
9118                 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
9119                 wake_up(&mddev->sb_wait);
9120         }
9121
9122         if (mddev->suspended)
9123                 return;
9124
9125         if (mddev->bitmap)
9126                 md_bitmap_daemon_work(mddev);
9127
9128         if (signal_pending(current)) {
9129                 if (mddev->pers->sync_request && !mddev->external) {
9130                         pr_debug("md: %s in immediate safe mode\n",
9131                                  mdname(mddev));
9132                         mddev->safemode = 2;
9133                 }
9134                 flush_signals(current);
9135         }
9136
9137         if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
9138                 return;
9139         if (!(
9140                 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
9141                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9142                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
9143                 (mddev->external == 0 && mddev->safemode == 1) ||
9144                 (mddev->safemode == 2
9145                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
9146                 ))
9147                 return;
9148
9149         if (mddev_trylock(mddev)) {
9150                 int spares = 0;
9151                 bool try_set_sync = mddev->safemode != 0;
9152
9153                 if (!mddev->external && mddev->safemode == 1)
9154                         mddev->safemode = 0;
9155
9156                 if (mddev->ro) {
9157                         struct md_rdev *rdev;
9158                         if (!mddev->external && mddev->in_sync)
9159                                 /* 'Blocked' flag not needed as failed devices
9160                                  * will be recorded if the array is switched to read/write.
9161                                  * Leaving it set will prevent the device
9162                                  * from being removed.
9163                                  */
9164                                 rdev_for_each(rdev, mddev)
9165                                         clear_bit(Blocked, &rdev->flags);
9166                         /* On a read-only array we can:
9167                          * - remove failed devices
9168                          * - add already-in_sync devices if the array itself
9169                          *   is in-sync.
9170                          * As we only add devices that are already in-sync,
9171                          * we can activate the spares immediately.
9172                          */
9173                         remove_and_add_spares(mddev, NULL);
9174                         /* There is no thread, but we need to call
9175                          * ->spare_active and clear saved_raid_disk
9176                          */
9177                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9178                         md_reap_sync_thread(mddev);
9179                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9180                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9181                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
9182                         goto unlock;
9183                 }
9184
9185                 if (mddev_is_clustered(mddev)) {
9186                         struct md_rdev *rdev;
9187                         /* kick the device if another node issued a
9188                          * remove disk.
9189                          */
9190                         rdev_for_each(rdev, mddev) {
9191                                 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
9192                                                 rdev->raid_disk < 0)
9193                                         md_kick_rdev_from_array(rdev);
9194                         }
9195                 }
9196
9197                 if (try_set_sync && !mddev->external && !mddev->in_sync) {
9198                         spin_lock(&mddev->lock);
9199                         set_in_sync(mddev);
9200                         spin_unlock(&mddev->lock);
9201                 }
9202
9203                 if (mddev->sb_flags)
9204                         md_update_sb(mddev, 0);
9205
9206                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
9207                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
9208                         /* resync/recovery still happening */
9209                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9210                         goto unlock;
9211                 }
9212                 if (mddev->sync_thread) {
9213                         md_reap_sync_thread(mddev);
9214                         goto unlock;
9215                 }
9216                 /* Set RUNNING before clearing NEEDED to avoid
9217                  * any transients in the value of "sync_action".
9218                  */
9219                 mddev->curr_resync_completed = 0;
9220                 spin_lock(&mddev->lock);
9221                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9222                 spin_unlock(&mddev->lock);
9223                 /* Clear some bits that don't mean anything, but
9224                  * might be left set
9225                  */
9226                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
9227                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
9228
9229                 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9230                     test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
9231                         goto not_running;
9232                 /* no recovery is running.
9233                  * remove any failed drives, then
9234                  * add spares if possible.
9235                  * Spares are also removed and re-added, to allow
9236                  * the personality to fail the re-add.
9237                  */
9238
9239                 if (mddev->reshape_position != MaxSector) {
9240                         if (mddev->pers->check_reshape == NULL ||
9241                             mddev->pers->check_reshape(mddev) != 0)
9242                                 /* Cannot proceed */
9243                                 goto not_running;
9244                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9245                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9246                 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
9247                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9248                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9249                         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9250                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9251                 } else if (mddev->recovery_cp < MaxSector) {
9252                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9253                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9254                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
9255                         /* nothing to be done ... */
9256                         goto not_running;
9257
9258                 if (mddev->pers->sync_request) {
9259                         if (spares) {
9260                                 /* We are adding a device or devices to an array
9261                                  * which has the bitmap stored on all devices.
9262                                  * So make sure all bitmap pages get written
9263                                  */
9264                                 md_bitmap_write_all(mddev->bitmap);
9265                         }
9266                         INIT_WORK(&mddev->del_work, md_start_sync);
9267                         queue_work(md_misc_wq, &mddev->del_work);
9268                         goto unlock;
9269                 }
9270         not_running:
9271                 if (!mddev->sync_thread) {
9272                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9273                         wake_up(&resync_wait);
9274                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9275                                                &mddev->recovery))
9276                                 if (mddev->sysfs_action)
9277                                         sysfs_notify_dirent_safe(mddev->sysfs_action);
9278                 }
9279         unlock:
9280                 wake_up(&mddev->sb_wait);
9281                 mddev_unlock(mddev);
9282         }
9283 }
9284 EXPORT_SYMBOL(md_check_recovery);
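
/*
 * Illustrative sketch (not from this driver): per-array personality threads
 * call md_check_recovery() from their main loop, as the comment above notes.
 * The function name example_raid_daemon() is made up.
 *
 *	static void example_raid_daemon(struct md_thread *thread)
 *	{
 *		struct mddev *mddev = thread->mddev;
 *
 *		md_check_recovery(mddev);
 *		... handle the personality's queued work ...
 *	}
 */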
9285
9286 void md_reap_sync_thread(struct mddev *mddev)
9287 {
9288         struct md_rdev *rdev;
9289         sector_t old_dev_sectors = mddev->dev_sectors;
9290         bool is_reshaped = false;
9291
9292         /* resync has finished, collect result */
9293         md_unregister_thread(&mddev->sync_thread);
9294         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9295             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
9296             mddev->degraded != mddev->raid_disks) {
9297                 /* success...*/
9298                 /* activate any spares */
9299                 if (mddev->pers->spare_active(mddev)) {
9300                         sysfs_notify(&mddev->kobj, NULL,
9301                                      "degraded");
9302                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9303                 }
9304         }
9305         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9306             mddev->pers->finish_reshape) {
9307                 mddev->pers->finish_reshape(mddev);
9308                 if (mddev_is_clustered(mddev))
9309                         is_reshaped = true;
9310         }
9311
9312         /* If array is no-longer degraded, then any saved_raid_disk
9313          * information must be scrapped.
9314          */
9315         if (!mddev->degraded)
9316                 rdev_for_each(rdev, mddev)
9317                         rdev->saved_raid_disk = -1;
9318
9319         md_update_sb(mddev, 1);
9320         /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
9321          * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
9322          * clustered raid */
9323         if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
9324                 md_cluster_ops->resync_finish(mddev);
9325         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9326         clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
9327         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9328         clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9329         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9330         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9331         /*
9332          * We call md_cluster_ops->update_size here because sync_size could
9333          * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
9334          * so it is time to update the size across the cluster.
9335          */
9336         if (mddev_is_clustered(mddev) && is_reshaped
9337                                       && !test_bit(MD_CLOSING, &mddev->flags))
9338                 md_cluster_ops->update_size(mddev, old_dev_sectors);
9339         wake_up(&resync_wait);
9340         /* flag recovery needed just to double check */
9341         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9342         sysfs_notify_dirent_safe(mddev->sysfs_action);
9343         md_new_event(mddev);
9344         if (mddev->event_work.func)
9345                 queue_work(md_misc_wq, &mddev->event_work);
9346 }
9347 EXPORT_SYMBOL(md_reap_sync_thread);
9348
9349 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
9350 {
9351         sysfs_notify_dirent_safe(rdev->sysfs_state);
9352         wait_event_timeout(rdev->blocked_wait,
9353                            !test_bit(Blocked, &rdev->flags) &&
9354                            !test_bit(BlockedBadBlocks, &rdev->flags),
9355                            msecs_to_jiffies(5000));
9356         rdev_dec_pending(rdev, mddev);
9357 }
9358 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
9359
9360 void md_finish_reshape(struct mddev *mddev)
9361 {
9362         /* called by the personality module when a reshape completes. */
9363         struct md_rdev *rdev;
9364
9365         rdev_for_each(rdev, mddev) {
9366                 if (rdev->data_offset > rdev->new_data_offset)
9367                         rdev->sectors += rdev->data_offset - rdev->new_data_offset;
9368                 else
9369                         rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
9370                 rdev->data_offset = rdev->new_data_offset;
9371         }
9372 }
9373 EXPORT_SYMBOL(md_finish_reshape);
9374
9375 /* Bad block management */
9376
9377 /* Returns 1 on success, 0 on failure */
9378 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9379                        int is_new)
9380 {
9381         struct mddev *mddev = rdev->mddev;
9382         int rv;
9383         if (is_new)
9384                 s += rdev->new_data_offset;
9385         else
9386                 s += rdev->data_offset;
9387         rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
9388         if (rv == 0) {
9389                 /* Make sure they get written out promptly */
9390                 if (test_bit(ExternalBbl, &rdev->flags))
9391                         sysfs_notify(&rdev->kobj, NULL,
9392                                      "unacknowledged_bad_blocks");
9393                 sysfs_notify_dirent_safe(rdev->sysfs_state);
9394                 set_mask_bits(&mddev->sb_flags, 0,
9395                               BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
9396                 md_wakeup_thread(rdev->mddev->thread);
9397                 return 1;
9398         } else
9399                 return 0;
9400 }
9401 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
9402
9403 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9404                          int is_new)
9405 {
9406         int rv;
9407         if (is_new)
9408                 s += rdev->new_data_offset;
9409         else
9410                 s += rdev->data_offset;
9411         rv = badblocks_clear(&rdev->badblocks, s, sectors);
9412         if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
9413                 sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
9414         return rv;
9415 }
9416 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
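
/*
 * Illustrative sketch (not from this driver): a personality that fails to
 * record a bad block typically has to fail the whole device instead.  Only
 * the return-value convention (1 on success, 0 on failure) is taken from
 * rdev_set_badblocks() above; the surrounding context is hypothetical.
 *
 *	if (!rdev_set_badblocks(rdev, sector, nr_sectors, 0))
 *		md_error(rdev->mddev, rdev);
 */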
9417
9418 static int md_notify_reboot(struct notifier_block *this,
9419                             unsigned long code, void *x)
9420 {
9421         struct list_head *tmp;
9422         struct mddev *mddev;
9423         int need_delay = 0;
9424
9425         for_each_mddev(mddev, tmp) {
9426                 if (mddev_trylock(mddev)) {
9427                         if (mddev->pers)
9428                                 __md_stop_writes(mddev);
9429                         if (mddev->persistent)
9430                                 mddev->safemode = 2;
9431                         mddev_unlock(mddev);
9432                 }
9433                 need_delay = 1;
9434         }
9435         /*
9436          * certain more exotic SCSI devices are known to be
9437          * volatile with respect to overly-early system reboots. While the
9438          * right place to handle this issue is the given
9439          * driver, we do want to have a safe RAID driver ...
9440          */
9441         if (need_delay)
9442                 mdelay(1000);
9443
9444         return NOTIFY_DONE;
9445 }
9446
9447 static struct notifier_block md_notifier = {
9448         .notifier_call  = md_notify_reboot,
9449         .next           = NULL,
9450         .priority       = INT_MAX, /* before any real devices */
9451 };
9452
9453 static void md_geninit(void)
9454 {
9455         pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
9456
9457         proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
9458 }
9459
9460 static int __init md_init(void)
9461 {
9462         int ret = -ENOMEM;
9463
9464         md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
9465         if (!md_wq)
9466                 goto err_wq;
9467
9468         md_misc_wq = alloc_workqueue("md_misc", 0, 0);
9469         if (!md_misc_wq)
9470                 goto err_misc_wq;
9471
9472         if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
9473                 goto err_md;
9474
9475         if ((ret = register_blkdev(0, "mdp")) < 0)
9476                 goto err_mdp;
9477         mdp_major = ret;
9478
9479         blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
9480                             md_probe, NULL, NULL);
9481         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
9482                             md_probe, NULL, NULL);
9483
9484         register_reboot_notifier(&md_notifier);
9485         raid_table_header = register_sysctl_table(raid_root_table);
9486
9487         md_geninit();
9488         return 0;
9489
9490 err_mdp:
9491         unregister_blkdev(MD_MAJOR, "md");
9492 err_md:
9493         destroy_workqueue(md_misc_wq);
9494 err_misc_wq:
9495         destroy_workqueue(md_wq);
9496 err_wq:
9497         return ret;
9498 }
9499
9500 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
9501 {
9502         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
9503         struct md_rdev *rdev2;
9504         int role, ret;
9505         char b[BDEVNAME_SIZE];
9506
9507         /*
9508          * If size is changed in another node then we need to
9509          * do resize as well.
9510          */
9511         if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
9512                 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
9513                 if (ret)
9514                         pr_info("md-cluster: resize failed\n");
9515                 else
9516                         md_bitmap_update_sb(mddev->bitmap);
9517         }
9518
9519         /* Check for change of roles in the active devices */
9520         rdev_for_each(rdev2, mddev) {
9521                 if (test_bit(Faulty, &rdev2->flags))
9522                         continue;
9523
9524                 /* Check if the roles changed */
9525                 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
9526
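		/*
		 * Special on-disk role values (see md_p.h): 0xffff means
		 * spare, 0xfffe means faulty, 0xfffd means journal device.
		 */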
9527                 if (test_bit(Candidate, &rdev2->flags)) {
9528                         if (role == 0xfffe) {
9529                                 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
9530                                 md_kick_rdev_from_array(rdev2);
9531                                 continue;
9532                         }
9533                         else
9534                                 clear_bit(Candidate, &rdev2->flags);
9535                 }
9536
9537                 if (role != rdev2->raid_disk) {
9538                         /*
9539                          * The device got activated, unless a reshape is happening.
9540                          */
9541                         if (rdev2->raid_disk == -1 && role != 0xffff &&
9542                             !(le32_to_cpu(sb->feature_map) &
9543                               MD_FEATURE_RESHAPE_ACTIVE)) {
9544                                 rdev2->saved_raid_disk = role;
9545                                 ret = remove_and_add_spares(mddev, rdev2);
9546                                 pr_info("Activated spare: %s\n",
9547                                         bdevname(rdev2->bdev,b));
9548                                 /* wake up mddev->thread here, so the array can
9549                                  * perform a resync with the newly activated disk */
9550                                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9551                                 md_wakeup_thread(mddev->thread);
9552                         }
9553                         /* device faulty
9554                          * We just want to do the minimum to mark the disk
9555                          * as faulty. The recovery is performed by the
9556                          * node that initiated the error.
9557                          */
9558                         if ((role == 0xfffe) || (role == 0xfffd)) {
9559                                 md_error(mddev, rdev2);
9560                                 clear_bit(Blocked, &rdev2->flags);
9561                         }
9562                 }
9563         }
9564
9565         if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
9566                 update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
9567
9568         /*
9569          * Since mddev->delta_disks has already been updated in update_raid_disks,
9570          * it is time to check whether a reshape is needed.
9571          */
9572         if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9573             (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9574                 /*
9575                  * reshape is happening in the remote node, we need to
9576                  * update reshape_position and call start_reshape.
9577                  */
9578                 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
9579                 if (mddev->pers->update_reshape_pos)
9580                         mddev->pers->update_reshape_pos(mddev);
9581                 if (mddev->pers->start_reshape)
9582                         mddev->pers->start_reshape(mddev);
9583         } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9584                    mddev->reshape_position != MaxSector &&
9585                    !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9586                 /* reshape is just done in another node. */
9587                 mddev->reshape_position = MaxSector;
9588                 if (mddev->pers->update_reshape_pos)
9589                         mddev->pers->update_reshape_pos(mddev);
9590         }
9591
9592         /* Finally set the event to be up to date */
9593         mddev->events = le64_to_cpu(sb->events);
9594 }
9595
9596 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
9597 {
9598         int err;
9599         struct page *swapout = rdev->sb_page;
9600         struct mdp_superblock_1 *sb;
9601
9602         /* Store the sb page of the rdev in the swapout temporary
9603          * variable in case anything fails below
9604          */
9605         rdev->sb_page = NULL;
9606         err = alloc_disk_sb(rdev);
9607         if (err == 0) {
9608                 ClearPageUptodate(rdev->sb_page);
9609                 rdev->sb_loaded = 0;
9610                 err = super_types[mddev->major_version].
9611                         load_super(rdev, NULL, mddev->minor_version);
9612         }
9613         if (err < 0) {
9614                 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
9615                                 __func__, __LINE__, rdev->desc_nr, err);
9616                 if (rdev->sb_page)
9617                         put_page(rdev->sb_page);
9618                 rdev->sb_page = swapout;
9619                 rdev->sb_loaded = 1;
9620                 return err;
9621         }
9622
9623         sb = page_address(rdev->sb_page);
9624         /* Only pick up recovery_offset when MD_FEATURE_RECOVERY_OFFSET
9625          * is set in the feature map
9626          */
9627
9628         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
9629                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
9630
9631         /* The other node finished recovery, call spare_active to set
9632          * device In_sync and mddev->degraded
9633          */
9634         if (rdev->recovery_offset == MaxSector &&
9635             !test_bit(In_sync, &rdev->flags) &&
9636             mddev->pers->spare_active(mddev))
9637                 sysfs_notify(&mddev->kobj, NULL, "degraded");
9638
9639         put_page(swapout);
9640         return 0;
9641 }
9642
9643 void md_reload_sb(struct mddev *mddev, int nr)
9644 {
9645         struct md_rdev *rdev;
9646         int err;
9647
9648         /* Find the rdev */
9649         rdev_for_each_rcu(rdev, mddev) {
9650                 if (rdev->desc_nr == nr)
9651                         break;
9652         }
9653
9654         if (!rdev || rdev->desc_nr != nr) {
9655                 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
9656                 return;
9657         }
9658
9659         err = read_rdev(mddev, rdev);
9660         if (err < 0)
9661                 return;
9662
9663         check_sb_changes(mddev, rdev);
9664
9665         /* Read all rdevs to update recovery_offset */
9666         rdev_for_each_rcu(rdev, mddev) {
9667                 if (!test_bit(Faulty, &rdev->flags))
9668                         read_rdev(mddev, rdev);
9669         }
9670 }
9671 EXPORT_SYMBOL(md_reload_sb);
9672
9673 #ifndef MODULE
9674
9675 /*
9676  * Searches all registered partitions for autorun RAID arrays
9677  * at boot time.
9678  */
9679
9680 static DEFINE_MUTEX(detected_devices_mutex);
9681 static LIST_HEAD(all_detected_devices);
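/* One partition queued by md_autodetect_dev(), waiting to be scanned. */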
9682 struct detected_devices_node {
9683         struct list_head list;
9684         dev_t dev;
9685 };
9686
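/*
 * md_autodetect_dev() - remember a partition for boot-time autostart.
 *
 * The partition-scanning code calls this for partitions flagged as RAID
 * autodetect; the device numbers are queued on all_detected_devices and
 * picked up later by autostart_arrays().
 */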
9687 void md_autodetect_dev(dev_t dev)
9688 {
9689         struct detected_devices_node *node_detected_dev;
9690
9691         node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
9692         if (node_detected_dev) {
9693                 node_detected_dev->dev = dev;
9694                 mutex_lock(&detected_devices_mutex);
9695                 list_add_tail(&node_detected_dev->list, &all_detected_devices);
9696                 mutex_unlock(&detected_devices_mutex);
9697         }
9698 }
9699
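/*
 * autostart_arrays() - try to assemble arrays from all queued partitions.
 *
 * Drains all_detected_devices and imports each device using the 0.90
 * superblock format (super_types[0]).  detected_devices_mutex is dropped
 * across the call to md_import_device(), which may block while opening
 * the device, and re-acquired afterwards.
 */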
9700 static void autostart_arrays(int part)
9701 {
9702         struct md_rdev *rdev;
9703         struct detected_devices_node *node_detected_dev;
9704         dev_t dev;
9705         int i_scanned, i_passed;
9706
9707         i_scanned = 0;
9708         i_passed = 0;
9709
9710         pr_info("md: Autodetecting RAID arrays.\n");
9711
9712         mutex_lock(&detected_devices_mutex);
9713         while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
9714                 i_scanned++;
9715                 node_detected_dev = list_entry(all_detected_devices.next,
9716                                         struct detected_devices_node, list);
9717                 list_del(&node_detected_dev->list);
9718                 dev = node_detected_dev->dev;
9719                 kfree(node_detected_dev);
9720                 mutex_unlock(&detected_devices_mutex);
9721                 rdev = md_import_device(dev, 0, 90);
9722                 mutex_lock(&detected_devices_mutex);
9723                 if (IS_ERR(rdev))
9724                         continue;
9725
9726                 if (test_bit(Faulty, &rdev->flags))
9727                         continue;
9728
9729                 set_bit(AutoDetected, &rdev->flags);
9730                 list_add(&rdev->same_set, &pending_raid_disks);
9731                 i_passed++;
9732         }
9733         mutex_unlock(&detected_devices_mutex);
9734
9735         pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
9736
9737         autorun_devices(part);
9738 }
9739
9740 #endif /* !MODULE */
9741
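/*
 * md_exit() - exit-time teardown.
 *
 * Unregisters the block regions, device majors, reboot notifier and
 * sysctl table, wakes any process still sleeping in poll() on
 * /proc/mdstat (md_event_waiters), exports every remaining array and
 * finally destroys the md workqueues.
 */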
9742 static __exit void md_exit(void)
9743 {
9744         struct mddev *mddev;
9745         struct list_head *tmp;
9746         int delay = 1;
9747
9748         blk_unregister_region(MKDEV(MD_MAJOR, 0), 512);
9749         blk_unregister_region(MKDEV(mdp_major, 0), 1U << MINORBITS);
9750
9751         unregister_blkdev(MD_MAJOR, "md");
9752         unregister_blkdev(mdp_major, "mdp");
9753         unregister_reboot_notifier(&md_notifier);
9754         unregister_sysctl_table(raid_table_header);
9755
9756         /* We cannot unload the module while some process is
9757          * waiting for us in select() or poll(); wake them up.
9758          */
9759         md_unloading = 1;
9760         while (waitqueue_active(&md_event_waiters)) {
9761                 /* not safe to leave yet */
9762                 wake_up(&md_event_waiters);
9763                 msleep(delay);
9764                 delay += delay;
9765         }
9766         remove_proc_entry("mdstat", NULL);
9767
9768         for_each_mddev(mddev, tmp) {
9769                 export_array(mddev);
9770                 mddev->ctime = 0;
9771                 mddev->hold_active = 0;
9772                 /*
9773                  * for_each_mddev() will call mddev_put() at the end of each
9774                  * iteration.  As the mddev is now fully clear, this will
9775                  * schedule the mddev for destruction by a workqueue, and the
9776                  * destroy_workqueue() below will wait for that to complete.
9777                  */
9778         }
9779         destroy_workqueue(md_misc_wq);
9780         destroy_workqueue(md_wq);
9781 }
9782
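/*
 * md is normally built in: subsys_initcall() runs md_init() early during
 * boot (or at module load when built modular), while md_exit() only runs
 * when md is built as a module and is being unloaded.
 */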
9783 subsys_initcall(md_init);
9784 module_exit(md_exit)
9785
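/*
 * get_ro()/set_ro() back the "start_ro" module parameter: newly assembled
 * arrays start in auto-read-only mode when start_readonly is non-zero.
 * Like the parameters declared below, it can also be changed at runtime
 * via sysfs; a typical invocation (path assumed from the module and
 * parameter names) would be:
 *
 *   echo 1 > /sys/module/md_mod/parameters/start_ro
 */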
9786 static int get_ro(char *buffer, const struct kernel_param *kp)
9787 {
9788         return sprintf(buffer, "%d", start_readonly);
9789 }
9790 static int set_ro(const char *val, const struct kernel_param *kp)
9791 {
9792         return kstrtouint(val, 10, (unsigned int *)&start_readonly);
9793 }
9794
9795 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
9796 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
9797 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
9798 module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
9799
9800 MODULE_LICENSE("GPL");
9801 MODULE_DESCRIPTION("MD RAID framework");
9802 MODULE_ALIAS("md");
9803 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);